author     Jay Civelli <jcivelli@google.com>    2017-07-26 01:57:21 +0000
committer  android-build-merger <android-build-merger@google.com>    2017-07-26 01:57:21 +0000
commit     a29e6994243913703862fdbe17c059150c9ae149 (patch)
tree       ce481b397e704599e05c63cc5856d4d63389026c
parent     11449a22cb2aa56becac551a01c8f61467f433cb (diff)
parent     0457669b858da2f5c6ed40d14c2ce8597546d90a (diff)
download   libchrome-a29e6994243913703862fdbe17c059150c9ae149.tar.gz
libchrome: Uprev the library to r456626 from Chromium am: 0601274935
am: 0457669b85

Change-Id: I14fee6d53fd60f779d55001a272e88efbca37c48
-rw-r--r--  Android.bp | 42
-rw-r--r--  SConstruct | 9
-rw-r--r--  base/BUILD.gn | 554
-rw-r--r--  base/DEPS | 1
-rw-r--r--  base/allocator/BUILD.gn | 78
-rw-r--r--  base/allocator/README.md | 4
-rw-r--r--  base/allocator/allocator.gyp | 450
-rw-r--r--  base/allocator/allocator_shim.cc | 206
-rw-r--r--  base/allocator/allocator_shim.h | 53
-rw-r--r--  base/allocator/allocator_shim_default_dispatch_to_glibc.cc | 53
-rw-r--r--  base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc | 83
-rw-r--r--  base/allocator/allocator_shim_internals.h | 2
-rw-r--r--  base/allocator/allocator_shim_override_cpp_symbols.h | 45
-rw-r--r--  base/allocator/allocator_shim_override_libc_symbols.h | 47
-rw-r--r--  base/allocator/allocator_shim_override_linker_wrapped_symbols.h | 56
-rw-r--r--  base/at_exit.cc | 10
-rw-r--r--  base/at_exit.h | 4
-rw-r--r--  base/atomic_ref_count.h | 2
-rw-r--r--  base/base.gyp | 1801
-rw-r--r--  base/base.gypi | 1106
-rw-r--r--  base/base_nacl.gyp | 158
-rw-r--r--  base/base_switches.cc | 6
-rw-r--r--  base/base_switches.h | 1
-rw-r--r--  base/base_unittests.isolate | 56
-rw-r--r--  base/bind.h | 56
-rw-r--r--  base/bind_helpers.h | 123
-rw-r--r--  base/bind_internal.h | 114
-rw-r--r--  base/bind_unittest.cc | 826
-rw-r--r--  base/bit_cast.h | 33
-rw-r--r--  base/bits.h | 59
-rw-r--r--  base/bits_unittest.cc | 20
-rw-r--r--  base/callback.h | 464
-rw-r--r--  base/callback_forward.h | 22
-rw-r--r--  base/callback_helpers.h | 11
-rw-r--r--  base/callback_helpers_unittest.cc | 18
-rw-r--r--  base/callback_internal.cc | 71
-rw-r--r--  base/callback_internal.h | 56
-rw-r--r--  base/callback_unittest.cc | 37
-rw-r--r--  base/cancelable_callback.h | 10
-rw-r--r--  base/command_line.cc | 9
-rw-r--r--  base/compiler_specific.h | 38
-rw-r--r--  base/containers/mru_cache.h | 8
-rw-r--r--  base/containers/scoped_ptr_hash_map.h | 176
-rw-r--r--  base/containers/small_map.h | 23
-rw-r--r--  base/cpu.cc | 55
-rw-r--r--  base/cpu.h | 2
-rw-r--r--  base/cpu_unittest.cc | 10
-rw-r--r--  base/critical_closure.h | 27
-rw-r--r--  base/debug/activity_tracker.cc | 1389
-rw-r--r--  base/debug/activity_tracker.h | 1102
-rw-r--r--  base/debug/activity_tracker_unittest.cc | 340
-rw-r--r--  base/debug/alias.cc | 3
-rw-r--r--  base/debug/debugger_posix.cc | 2
-rw-r--r--  base/debug/debugging_flags.h | 5
-rw-r--r--  base/debug/dump_without_crashing.cc | 35
-rw-r--r--  base/debug/dump_without_crashing.h | 31
-rw-r--r--  base/debug/leak_tracker_unittest.cc | 2
-rw-r--r--  base/debug/profiler.cc | 225
-rw-r--r--  base/debug/profiler.h | 94
-rw-r--r--  base/debug/stack_trace.cc | 252
-rw-r--r--  base/debug/stack_trace.h | 65
-rw-r--r--  base/debug/stack_trace_posix.cc | 49
-rw-r--r--  base/debug/task_annotator.cc | 44
-rw-r--r--  base/debug/task_annotator.h | 2
-rw-r--r--  base/debug/task_annotator_unittest.cc | 2
-rw-r--r--  base/debug/thread_heap_usage_tracker.h | 117
-rw-r--r--  base/feature_list.cc | 95
-rw-r--r--  base/feature_list.h | 29
-rw-r--r--  base/feature_list_unittest.cc | 69
-rw-r--r--  base/file_version_info_unittest.cc | 144
-rw-r--r--  base/files/dir_reader_posix_unittest.cc | 2
-rw-r--r--  base/files/file.cc | 8
-rw-r--r--  base/files/file.h | 71
-rw-r--r--  base/files/file_descriptor_watcher_posix.cc | 210
-rw-r--r--  base/files/file_descriptor_watcher_posix.h | 99
-rw-r--r--  base/files/file_descriptor_watcher_posix_unittest.cc | 318
-rw-r--r--  base/files/file_path.cc | 3
-rw-r--r--  base/files/file_path.h | 7
-rw-r--r--  base/files/file_path_unittest.cc | 1
-rw-r--r--  base/files/file_path_watcher.cc | 9
-rw-r--r--  base/files/file_path_watcher.h | 47
-rw-r--r--  base/files/file_path_watcher_fsevents.cc | 94
-rw-r--r--  base/files/file_path_watcher_fsevents.h | 9
-rw-r--r--  base/files/file_path_watcher_kqueue.cc | 163
-rw-r--r--  base/files/file_path_watcher_kqueue.h | 30
-rw-r--r--  base/files/file_path_watcher_linux.cc | 160
-rw-r--r--  base/files/file_path_watcher_mac.cc | 61
-rw-r--r--  base/files/file_path_watcher_unittest.cc | 140
-rw-r--r--  base/files/file_posix.cc | 9
-rw-r--r--  base/files/file_tracing.cc | 37
-rw-r--r--  base/files/file_tracing.h | 12
-rw-r--r--  base/files/file_unittest.cc | 186
-rw-r--r--  base/files/file_util.h | 19
-rw-r--r--  base/files/file_util_mac.mm | 12
-rw-r--r--  base/files/file_util_posix.cc | 76
-rw-r--r--  base/files/important_file_writer.cc | 61
-rw-r--r--  base/files/important_file_writer.h | 59
-rw-r--r--  base/files/important_file_writer_unittest.cc | 149
-rw-r--r--  base/files/memory_mapped_file_posix.cc | 9
-rw-r--r--  base/files/scoped_file.cc | 8
-rw-r--r--  base/files/scoped_temp_dir.cc | 5
-rw-r--r--  base/files/scoped_temp_dir.h | 4
-rw-r--r--  base/files/scoped_temp_dir_unittest.cc | 8
-rw-r--r--  base/id_map.h | 139
-rw-r--r--  base/id_map_unittest.cc | 70
-rw-r--r--  base/json/json_file_value_serializer.cc | 11
-rw-r--r--  base/json/json_file_value_serializer.h | 11
-rw-r--r--  base/json/json_parser.cc | 321
-rw-r--r--  base/json/json_parser.h | 55
-rw-r--r--  base/json/json_parser_unittest.cc | 91
-rw-r--r--  base/json/json_reader.h | 5
-rw-r--r--  base/json/json_reader_unittest.cc | 58
-rw-r--r--  base/json/json_string_value_serializer.cc | 13
-rw-r--r--  base/json/json_string_value_serializer.h | 13
-rw-r--r--  base/json/json_value_converter.h | 153
-rw-r--r--  base/json/json_value_converter_unittest.cc | 11
-rw-r--r--  base/json/json_value_serializer_unittest.cc | 154
-rw-r--r--  base/json/json_writer.cc | 20
-rw-r--r--  base/json/json_writer.h | 2
-rw-r--r--  base/json/json_writer_unittest.cc | 20
-rw-r--r--  base/lazy_instance.h | 60
-rw-r--r--  base/lazy_instance_unittest.cc | 23
-rw-r--r--  base/location.h | 2
-rw-r--r--  base/logging.cc | 44
-rw-r--r--  base/logging.h | 402
-rw-r--r--  base/logging_unittest.cc | 206
-rw-r--r--  base/mac/bind_objc_block.h | 22
-rw-r--r--  base/mac/bundle_locations.h | 1
-rw-r--r--  base/mac/foundation_util.h | 9
-rw-r--r--  base/mac/foundation_util.mm | 5
-rw-r--r--  base/mac/mac_util.h | 130
-rw-r--r--  base/mac/mach_port_broker.mm | 7
-rw-r--r--  base/mac/scoped_authorizationref.h | 2
-rw-r--r--  base/mac/scoped_block.h | 28
-rw-r--r--  base/mac/scoped_nsobject.h | 7
-rw-r--r--  base/mac/sdk_forward_declarations.h | 538
-rw-r--r--  base/mac/sdk_forward_declarations.mm | 40
-rw-r--r--  base/memory/aligned_memory_unittest.cc | 8
-rw-r--r--  base/memory/linked_ptr.h | 2
-rw-r--r--  base/memory/ref_counted.cc | 17
-rw-r--r--  base/memory/ref_counted.h | 66
-rw-r--r--  base/memory/ref_counted_delete_on_message_loop.h | 75
-rw-r--r--  base/memory/ref_counted_unittest.cc | 35
-rw-r--r--  base/memory/scoped_vector.h | 16
-rw-r--r--  base/memory/scoped_vector_unittest.cc | 2
-rw-r--r--  base/memory/shared_memory.h | 59
-rw-r--r--  base/memory/shared_memory_handle.h | 57
-rw-r--r--  base/memory/shared_memory_handle_mac.cc | 145
-rw-r--r--  base/memory/shared_memory_helper.cc | 98
-rw-r--r--  base/memory/shared_memory_helper.h | 33
-rw-r--r--  base/memory/shared_memory_mac.cc | 176
-rw-r--r--  base/memory/shared_memory_posix.cc | 167
-rw-r--r--  base/memory/shared_memory_tracker.cc | 93
-rw-r--r--  base/memory/shared_memory_tracker.h | 56
-rw-r--r--  base/memory/shared_memory_unittest.cc | 23
-rw-r--r--  base/memory/singleton.h | 23
-rw-r--r--  base/memory/weak_ptr.cc | 4
-rw-r--r--  base/memory/weak_ptr_unittest.cc | 15
-rw-r--r--  base/message_loop/incoming_task_queue.cc | 6
-rw-r--r--  base/message_loop/incoming_task_queue.h | 1
-rw-r--r--  base/message_loop/message_loop.cc | 219
-rw-r--r--  base/message_loop/message_loop.h | 172
-rw-r--r--  base/message_loop/message_loop_task_runner_unittest.cc | 55
-rw-r--r--  base/message_loop/message_loop_test.cc | 268
-rw-r--r--  base/message_loop/message_loop_unittest.cc | 191
-rw-r--r--  base/message_loop/message_pump.cc | 7
-rw-r--r--  base/message_loop/message_pump.h | 9
-rw-r--r--  base/message_loop/message_pump_default.cc | 39
-rw-r--r--  base/message_loop/message_pump_glib.h | 2
-rw-r--r--  base/message_loop/message_pump_glib_unittest.cc | 117
-rw-r--r--  base/message_loop/message_pump_libevent.cc | 38
-rw-r--r--  base/message_loop/message_pump_libevent.h | 19
-rw-r--r--  base/message_loop/message_pump_mac.h | 19
-rw-r--r--  base/message_loop/message_pump_mac.mm | 151
-rw-r--r--  base/metrics/field_trial.cc | 745
-rw-r--r--  base/metrics/field_trial.h | 235
-rw-r--r--  base/metrics/field_trial_param_associator.cc | 80
-rw-r--r--  base/metrics/field_trial_param_associator.h | 71
-rw-r--r--  base/metrics/field_trial_unittest.cc | 260
-rw-r--r--  base/metrics/histogram.cc | 26
-rw-r--r--  base/metrics/histogram.h | 15
-rw-r--r--  base/metrics/histogram_base.cc | 16
-rw-r--r--  base/metrics/histogram_base.h | 6
-rw-r--r--  base/metrics/histogram_macros.h | 507
-rw-r--r--  base/metrics/histogram_macros_internal.h | 157
-rw-r--r--  base/metrics/histogram_macros_local.h | 88
-rw-r--r--  base/metrics/histogram_samples.h | 21
-rw-r--r--  base/metrics/histogram_snapshot_manager.cc | 4
-rw-r--r--  base/metrics/histogram_snapshot_manager.h | 2
-rw-r--r--  base/metrics/histogram_unittest.cc | 13
-rw-r--r--  base/metrics/persistent_histogram_allocator.cc | 233
-rw-r--r--  base/metrics/persistent_histogram_allocator.h | 46
-rw-r--r--  base/metrics/persistent_histogram_allocator_unittest.cc | 147
-rw-r--r--  base/metrics/persistent_memory_allocator.cc | 288
-rw-r--r--  base/metrics/persistent_memory_allocator.h | 352
-rw-r--r--  base/metrics/persistent_memory_allocator_unittest.cc | 153
-rw-r--r--  base/metrics/persistent_sample_map.cc | 62
-rw-r--r--  base/metrics/persistent_sample_map.h | 1
-rw-r--r--  base/metrics/persistent_sample_map_unittest.cc | 37
-rw-r--r--  base/metrics/sample_map_unittest.cc | 1
-rw-r--r--  base/metrics/sample_vector_unittest.cc | 1
-rw-r--r--  base/metrics/sparse_histogram.cc | 6
-rw-r--r--  base/metrics/sparse_histogram.h | 34
-rw-r--r--  base/metrics/sparse_histogram_unittest.cc | 1
-rw-r--r--  base/metrics/statistics_recorder.cc | 187
-rw-r--r--  base/metrics/statistics_recorder.h | 35
-rw-r--r--  base/metrics/statistics_recorder_unittest.cc | 71
-rw-r--r--  base/metrics/user_metrics.cc | 6
-rw-r--r--  base/metrics/user_metrics.h | 3
-rw-r--r--  base/native_library.h | 20
-rw-r--r--  base/native_library_posix.cc | 21
-rw-r--r--  base/numerics/safe_conversions.h | 268
-rw-r--r--  base/numerics/safe_conversions_impl.h | 683
-rw-r--r--  base/numerics/safe_math.h | 585
-rw-r--r--  base/numerics/safe_math_impl.h | 861
-rw-r--r--  base/numerics/safe_numerics_unittest.cc | 692
-rw-r--r--  base/observer_list.h | 170
-rw-r--r--  base/observer_list_threadsafe.h | 143
-rw-r--r--  base/observer_list_unittest.cc | 450
-rw-r--r--  base/optional.h | 105
-rw-r--r--  base/optional_unittest.cc | 103
-rw-r--r--  base/pending_task.cc | 24
-rw-r--r--  base/pending_task.h | 11
-rw-r--r--  base/pickle.cc | 1
-rw-r--r--  base/posix/global_descriptors.cc | 16
-rw-r--r--  base/posix/global_descriptors.h | 21
-rw-r--r--  base/post_task_and_reply_with_result_internal.h | 35
-rw-r--r--  base/power_monitor/power_monitor_device_source.h | 19
-rw-r--r--  base/power_monitor/power_monitor_source.h | 9
-rw-r--r--  base/process/internal_linux.cc | 41
-rw-r--r--  base/process/internal_linux.h | 8
-rw-r--r--  base/process/kill.h | 11
-rw-r--r--  base/process/kill_posix.cc | 2
-rw-r--r--  base/process/launch.cc | 36
-rw-r--r--  base/process/launch.h | 53
-rw-r--r--  base/process/launch_mac.cc | 153
-rw-r--r--  base/process/launch_posix.cc | 21
-rw-r--r--  base/process/memory.cc | 54
-rw-r--r--  base/process/memory.h | 83
-rw-r--r--  base/process/memory_linux.cc | 212
-rw-r--r--  base/process/memory_stubs.cc | 49
-rw-r--r--  base/process/port_provider_mac.cc | 3
-rw-r--r--  base/process/process.h | 42
-rw-r--r--  base/process/process_metrics.cc | 5
-rw-r--r--  base/process/process_metrics.h | 32
-rw-r--r--  base/process/process_metrics_linux.cc | 33
-rw-r--r--  base/process/process_metrics_mac.cc | 46
-rw-r--r--  base/process/process_metrics_posix.cc | 2
-rw-r--r--  base/process/process_metrics_unittest.cc | 19
-rw-r--r--  base/process/process_posix.cc | 33
-rw-r--r--  base/profiler/scoped_profile.h | 36
-rw-r--r--  base/rand_util_posix.cc | 3
-rw-r--r--  base/run_loop.cc | 4
-rw-r--r--  base/run_loop.h | 3
-rw-r--r--  base/scoped_generic.h | 8
-rw-r--r--  base/scoped_observer.h | 2
-rw-r--r--  base/security_unittest.cc | 21
-rw-r--r--  base/sequence_checker.h | 30
-rw-r--r--  base/sequence_checker_impl.cc | 75
-rw-r--r--  base/sequence_checker_impl.h | 36
-rw-r--r--  base/sequence_checker_unittest.cc | 419
-rw-r--r--  base/sequence_token.cc | 92
-rw-r--r--  base/sequence_token.h | 115
-rw-r--r--  base/sequence_token_unittest.cc | 133
-rw-r--r--  base/sequenced_task_runner.cc | 20
-rw-r--r--  base/sequenced_task_runner.h | 37
-rw-r--r--  base/sequenced_task_runner_helpers.h | 81
-rw-r--r--  base/sha1.cc (renamed from base/sha1_portable.cc) | 9
-rw-r--r--  base/stl_util.h | 296
-rw-r--r--  base/stl_util_unittest.cc | 156
-rw-r--r--  base/strings/string_number_conversions.cc | 5
-rw-r--r--  base/strings/string_number_conversions_unittest.cc | 87
-rw-r--r--  base/strings/string_split.cc | 24
-rw-r--r--  base/strings/string_split.h | 20
-rw-r--r--  base/strings/string_split_unittest.cc | 27
-rw-r--r--  base/strings/string_util.cc | 68
-rw-r--r--  base/strings/string_util.h | 24
-rw-r--r--  base/strings/string_util_unittest.cc | 110
-rw-r--r--  base/strings/utf_string_conversion_utils.h | 3
-rw-r--r--  base/strings/utf_string_conversions.cc | 4
-rw-r--r--  base/synchronization/atomic_flag.cc | 32
-rw-r--r--  base/synchronization/atomic_flag.h | 44
-rw-r--r--  base/synchronization/atomic_flag_unittest.cc | 131
-rw-r--r--  base/synchronization/cancellation_flag.cc | 26
-rw-r--r--  base/synchronization/cancellation_flag.h | 41
-rw-r--r--  base/synchronization/cancellation_flag_unittest.cc | 65
-rw-r--r--  base/synchronization/condition_variable.h | 6
-rw-r--r--  base/synchronization/condition_variable_posix.cc | 2
-rw-r--r--  base/synchronization/lock.h | 17
-rw-r--r--  base/synchronization/lock_impl.h | 5
-rw-r--r--  base/synchronization/lock_impl_posix.cc | 55
-rw-r--r--  base/synchronization/waitable_event.h | 14
-rw-r--r--  base/synchronization/waitable_event_posix.cc | 20
-rw-r--r--  base/synchronization/waitable_event_unittest.cc | 57
-rw-r--r--  base/synchronization/waitable_event_watcher.h | 57
-rw-r--r--  base/synchronization/waitable_event_watcher_posix.cc | 135
-rw-r--r--  base/sys_byteorder.h | 16
-rw-r--r--  base/sys_info.h | 12
-rw-r--r--  base/sys_info_chromeos.cc | 12
-rw-r--r--  base/sys_info_mac.mm | 8
-rw-r--r--  base/sys_info_posix.cc | 45
-rw-r--r--  base/sys_info_unittest.cc | 10
-rw-r--r--  base/task/cancelable_task_tracker.cc | 58
-rw-r--r--  base/task/cancelable_task_tracker.h | 52
-rw-r--r--  base/task/cancelable_task_tracker_unittest.cc | 17
-rw-r--r--  base/task_runner.cc | 11
-rw-r--r--  base/task_runner.h | 6
-rw-r--r--  base/task_runner_util.h | 48
-rw-r--r--  base/task_scheduler/scheduler_lock_impl.cc | 23
-rw-r--r--  base/task_scheduler/scheduler_lock_unittest.cc | 14
-rw-r--r--  base/task_scheduler/scoped_set_task_priority_for_current_thread.cc | 41
-rw-r--r--  base/task_scheduler/scoped_set_task_priority_for_current_thread.h | 36
-rw-r--r--  base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc | 26
-rw-r--r--  base/task_scheduler/sequence.cc | 26
-rw-r--r--  base/task_scheduler/sequence.h | 38
-rw-r--r--  base/task_scheduler/sequence_unittest.cc | 93
-rw-r--r--  base/task_scheduler/task.cc | 12
-rw-r--r--  base/task_scheduler/task.h | 11
-rw-r--r--  base/task_scheduler/task_traits.cc | 56
-rw-r--r--  base/task_scheduler/task_traits.h | 84
-rw-r--r--  base/task_scheduler/test_utils.h | 19
-rw-r--r--  base/template_util.h | 67
-rw-r--r--  base/test/BUILD.gn | 104
-rw-r--r--  base/test/gtest_util.cc | 112
-rw-r--r--  base/test/gtest_util.h | 102
-rw-r--r--  base/test/mock_entropy_provider.cc | 20
-rw-r--r--  base/test/mock_entropy_provider.h | 32
-rw-r--r--  base/test/multiprocess_test.cc | 16
-rw-r--r--  base/test/multiprocess_test.h | 35
-rw-r--r--  base/test/multiprocess_test_android.cc | 476
-rw-r--r--  base/test/opaque_ref_counted.cc | 30
-rw-r--r--  base/test/opaque_ref_counted.h | 5
-rw-r--r--  base/test/scoped_feature_list.cc | 74
-rw-r--r--  base/test/scoped_feature_list.h | 59
-rw-r--r--  base/test/sequenced_worker_pool_owner.cc | 8
-rw-r--r--  base/test/sequenced_worker_pool_owner.h | 2
-rw-r--r--  base/test/test_file_util.h | 21
-rw-r--r--  base/test/test_io_thread.cc | 23
-rw-r--r--  base/test/test_io_thread.h | 16
-rw-r--r--  base/test/test_mock_time_task_runner.cc | 321
-rw-r--r--  base/test/test_mock_time_task_runner.h | 223
-rw-r--r--  base/test/test_pending_task.cc | 4
-rw-r--r--  base/test/test_pending_task.h | 9
-rw-r--r--  base/test/test_simple_task_runner.cc | 62
-rw-r--r--  base/test/test_simple_task_runner.h | 32
-rw-r--r--  base/test/test_timeouts.cc | 8
-rw-r--r--  base/test/test_timeouts.h | 3
-rw-r--r--  base/test/trace_event_analyzer.cc | 4
-rw-r--r--  base/test/trace_event_analyzer_unittest.cc | 2
-rw-r--r--  base/third_party/dynamic_annotations/dynamic_annotations.c | 269
-rw-r--r--  base/threading/non_thread_safe.h | 15
-rw-r--r--  base/threading/non_thread_safe_unittest.cc | 25
-rw-r--r--  base/threading/platform_thread.h | 30
-rw-r--r--  base/threading/platform_thread_linux.cc | 79
-rw-r--r--  base/threading/platform_thread_mac.mm | 5
-rw-r--r--  base/threading/platform_thread_posix.cc | 34
-rw-r--r--  base/threading/platform_thread_unittest.cc | 20
-rw-r--r--  base/threading/post_task_and_reply_impl.cc | 70
-rw-r--r--  base/threading/post_task_and_reply_impl.h | 27
-rw-r--r--  base/threading/sequenced_task_runner_handle.cc | 47
-rw-r--r--  base/threading/sequenced_task_runner_handle.h | 5
-rw-r--r--  base/threading/sequenced_worker_pool.cc | 571
-rw-r--r--  base/threading/sequenced_worker_pool.h | 100
-rw-r--r--  base/threading/simple_thread.cc | 59
-rw-r--r--  base/threading/simple_thread.h | 60
-rw-r--r--  base/threading/simple_thread_unittest.cc | 107
-rw-r--r--  base/threading/thread.cc | 206
-rw-r--r--  base/threading/thread.h | 141
-rw-r--r--  base/threading/thread_checker.h | 22
-rw-r--r--  base/threading/thread_checker_impl.cc | 47
-rw-r--r--  base/threading/thread_checker_impl.h | 36
-rw-r--r--  base/threading/thread_checker_unittest.cc | 268
-rw-r--r--  base/threading/thread_local.h | 93
-rw-r--r--  base/threading/thread_local_posix.cc | 43
-rw-r--r--  base/threading/thread_local_storage.cc | 271
-rw-r--r--  base/threading/thread_local_storage.h | 28
-rw-r--r--  base/threading/thread_local_storage_unittest.cc | 10
-rw-r--r--  base/threading/thread_restrictions.cc | 22
-rw-r--r--  base/threading/thread_restrictions.h | 39
-rw-r--r--  base/threading/thread_task_runner_handle.cc | 46
-rw-r--r--  base/threading/thread_task_runner_handle.h | 12
-rw-r--r--  base/threading/thread_unittest.cc | 348
-rw-r--r--  base/threading/worker_pool.cc | 20
-rw-r--r--  base/threading/worker_pool.h | 8
-rw-r--r--  base/threading/worker_pool_posix.cc | 40
-rw-r--r--  base/threading/worker_pool_posix.h | 7
-rw-r--r--  base/threading/worker_pool_posix_unittest.cc | 6
-rw-r--r--  base/time/time.cc | 47
-rw-r--r--  base/time/time.h | 93
-rw-r--r--  base/time/time_mac.cc | 76
-rw-r--r--  base/time/time_posix.cc | 99
-rw-r--r--  base/time/time_unittest.cc | 33
-rw-r--r--  base/timer/timer.cc | 44
-rw-r--r--  base/timer/timer.h | 49
-rw-r--r--  base/timer/timer_unittest.cc | 521
-rw-r--r--  base/trace_event/category_registry.cc | 156
-rw-r--r--  base/trace_event/category_registry.h | 93
-rw-r--r--  base/trace_event/common/trace_event_common.h | 163
-rw-r--r--  base/trace_event/etw_manifest/etw_manifest.gyp | 41
-rw-r--r--  base/trace_event/event_name_filter.cc | 26
-rw-r--r--  base/trace_event/event_name_filter.h | 46
-rw-r--r--  base/trace_event/event_name_filter_unittest.cc | 41
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker.cc | 58
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker.h | 27
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 97
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.cc | 8
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.h | 32
-rw-r--r--  base/trace_event/heap_profiler_event_filter.cc | 67
-rw-r--r--  base/trace_event/heap_profiler_event_filter.h | 40
-rw-r--r--  base/trace_event/heap_profiler_heap_dump_writer.cc | 3
-rw-r--r--  base/trace_event/heap_profiler_stack_frame_deduplicator.cc | 20
-rw-r--r--  base/trace_event/heap_profiler_stack_frame_deduplicator.h | 2
-rw-r--r--  base/trace_event/heap_profiler_type_name_deduplicator.cc | 28
-rw-r--r--  base/trace_event/malloc_dump_provider.cc | 178
-rw-r--r--  base/trace_event/malloc_dump_provider.h | 2
-rw-r--r--  base/trace_event/memory_allocator_dump.h | 6
-rw-r--r--  base/trace_event/memory_dump_manager.cc | 314
-rw-r--r--  base/trace_event/memory_dump_manager.h | 65
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc | 200
-rw-r--r--  base/trace_event/memory_dump_provider.h | 20
-rw-r--r--  base/trace_event/memory_dump_request_args.cc | 17
-rw-r--r--  base/trace_event/memory_dump_request_args.h | 18
-rw-r--r--  base/trace_event/memory_dump_scheduler.cc | 304
-rw-r--r--  base/trace_event/memory_dump_scheduler.h | 141
-rw-r--r--  base/trace_event/memory_dump_session_state.cc | 15
-rw-r--r--  base/trace_event/memory_dump_session_state.h | 22
-rw-r--r--  base/trace_event/memory_infra_background_whitelist.cc | 71
-rw-r--r--  base/trace_event/memory_usage_estimator.cc | 14
-rw-r--r--  base/trace_event/memory_usage_estimator.h | 549
-rw-r--r--  base/trace_event/memory_usage_estimator_unittest.cc | 244
-rw-r--r--  base/trace_event/process_memory_dump.cc | 24
-rw-r--r--  base/trace_event/process_memory_dump.h | 1
-rw-r--r--  base/trace_event/trace_buffer.cc | 13
-rw-r--r--  base/trace_event/trace_category.h | 109
-rw-r--r--  base/trace_event/trace_config.cc | 249
-rw-r--r--  base/trace_event/trace_config.h | 63
-rw-r--r--  base/trace_event/trace_config_memory_test_util.h | 181
-rw-r--r--  base/trace_event/trace_config_unittest.cc | 178
-rw-r--r--  base/trace_event/trace_event.gypi | 107
-rw-r--r--  base/trace_event/trace_event.h | 407
-rw-r--r--  base/trace_event/trace_event_argument.cc | 36
-rw-r--r--  base/trace_event/trace_event_argument_unittest.cc | 10
-rw-r--r--  base/trace_event/trace_event_filter.cc | 21
-rw-r--r--  base/trace_event/trace_event_filter.h | 51
-rw-r--r--  base/trace_event/trace_event_filter_test_utils.cc | 61
-rw-r--r--  base/trace_event/trace_event_filter_test_utils.h | 53
-rw-r--r--  base/trace_event/trace_event_impl.cc | 68
-rw-r--r--  base/trace_event/trace_event_impl.h | 5
-rw-r--r--  base/trace_event/trace_event_memory_overhead.cc | 20
-rw-r--r--  base/trace_event/trace_event_synthetic_delay.h | 3
-rw-r--r--  base/trace_event/trace_event_unittest.cc | 770
-rw-r--r--  base/trace_event/trace_log.cc | 750
-rw-r--r--  base/trace_event/trace_log.h | 140
-rw-r--r--  base/trace_event/trace_log_constants.cc | 3
-rw-r--r--  base/trace_event/trace_sampling_thread.cc | 107
-rw-r--r--  base/trace_event/trace_sampling_thread.h | 54
-rw-r--r--  base/tracked_objects.cc | 326
-rw-r--r--  base/tracked_objects.h | 286
-rw-r--r--  base/tracked_objects_unittest.cc | 262
-rw-r--r--  base/tuple.h | 106
-rw-r--r--  base/values.cc | 987
-rw-r--r--  base/values.h | 400
-rw-r--r--  base/values_unittest.cc | 576
-rw-r--r--  base/version.cc | 2
-rw-r--r--  base/version.h | 12
-rw-r--r--  base/version_unittest.cc | 27
-rw-r--r--  base/win/scoped_comptr.h | 2
-rw-r--r--  base/win/scoped_handle_test_dll.cc | 4
-rw-r--r--  base/win/scoped_hdc.h | 4
-rw-r--r--  components/timers/alarm_timer_chromeos.cc | 493
-rw-r--r--  components/timers/alarm_timer_chromeos.h | 96
-rw-r--r--  crypto/BUILD.gn | 11
-rw-r--r--  crypto/apple_keychain.h | 6
-rw-r--r--  crypto/auto_cbb.h | 35
-rw-r--r--  crypto/crypto.gyp | 236
-rw-r--r--  crypto/crypto.gypi | 88
-rw-r--r--  crypto/crypto_nacl.gyp | 44
-rw-r--r--  crypto/crypto_unittests.isolate | 42
-rw-r--r--  crypto/ec_private_key.h | 56
-rw-r--r--  crypto/ec_signature_creator_impl.h | 2
-rw-r--r--  crypto/hmac.cc | 49
-rw-r--r--  crypto/hmac.h | 5
-rw-r--r--  crypto/hmac_nss.cc | 118
-rw-r--r--  crypto/nss_crypto_module_delegate.h | 1
-rw-r--r--  crypto/nss_util.cc | 226
-rw-r--r--  crypto/nss_util.h | 1
-rw-r--r--  crypto/nss_util_internal.h | 2
-rw-r--r--  crypto/openssl_bio_string.cc | 77
-rw-r--r--  crypto/openssl_bio_string.h | 29
-rw-r--r--  crypto/openssl_bio_string_unittest.cc | 63
-rw-r--r--  crypto/openssl_util.cc | 4
-rw-r--r--  crypto/openssl_util.h | 2
-rw-r--r--  crypto/p224_spake.cc | 15
-rw-r--r--  crypto/p224_spake.h | 6
-rw-r--r--  crypto/p224_spake_unittest.cc | 2
-rw-r--r--  crypto/p224_unittest.cc | 14
-rw-r--r--  crypto/random.h | 2
-rw-r--r--  crypto/random_unittest.cc | 2
-rw-r--r--  crypto/rsa_private_key.cc | 423
-rw-r--r--  crypto/rsa_private_key.h | 179
-rw-r--r--  crypto/rsa_private_key_nss.cc | 151
-rw-r--r--  crypto/rsa_private_key_unittest.cc | 16
-rw-r--r--  crypto/scoped_openssl_types.h | 62
-rw-r--r--  crypto/scoped_test_nss_chromeos_user.cc | 2
-rw-r--r--  crypto/scoped_test_nss_db.cc | 2
-rw-r--r--  crypto/scoped_test_system_nss_key_slot.h | 2
-rw-r--r--  crypto/secure_hash.cc | 2
-rw-r--r--  crypto/sha2.cc | 2
-rw-r--r--  crypto/signature_creator.h | 9
-rw-r--r--  crypto/signature_creator_nss.cc | 119
-rw-r--r--  crypto/signature_creator_unittest.cc | 1
-rw-r--r--  crypto/signature_verifier.h | 26
-rw-r--r--  crypto/signature_verifier_nss.cc | 213
-rw-r--r--  crypto/signature_verifier_unittest.cc | 1
-rw-r--r--  crypto/symmetric_key.cc | 4
-rw-r--r--  crypto/symmetric_key.h | 38
-rw-r--r--  crypto/symmetric_key_nss.cc | 151
-rw-r--r--  crypto/third_party/nss/chromium-blapi.h | 101
-rw-r--r--  crypto/third_party/nss/chromium-blapit.h | 91
-rw-r--r--  crypto/third_party/nss/chromium-nss.h | 79
-rw-r--r--  crypto/third_party/nss/chromium-prtypes.h | 77
-rw-r--r--  crypto/third_party/nss/chromium-sha256.h | 51
-rw-r--r--  crypto/third_party/nss/rsawrapr.c | 160
-rw-r--r--  crypto/third_party/nss/sha512.cc | 1390
-rw-r--r--  crypto/wincrypt_shim.h | 6
-rw-r--r--  dbus/BUILD.gn | 2
-rw-r--r--  dbus/bus.cc | 36
-rw-r--r--  dbus/bus.h | 10
-rw-r--r--  dbus/dbus.gyp | 141
-rw-r--r--  dbus/dbus_statistics.cc | 118
-rw-r--r--  dbus/exported_object.cc | 10
-rw-r--r--  dbus/file_descriptor.cc | 68
-rw-r--r--  dbus/file_descriptor.h | 92
-rw-r--r--  dbus/message.cc | 19
-rw-r--r--  dbus/message.h | 9
-rw-r--r--  dbus/mock_bus.h | 9
-rw-r--r--  dbus/mock_object_manager.h | 1
-rw-r--r--  dbus/mock_object_proxy.h | 4
-rw-r--r--  dbus/object_manager.cc | 75
-rw-r--r--  dbus/object_manager.h | 20
-rw-r--r--  dbus/object_proxy.cc | 7
-rw-r--r--  dbus/object_proxy.h | 14
-rw-r--r--  dbus/property.cc | 132
-rw-r--r--  dbus/property.h | 23
-rw-r--r--  dbus/values_util.cc | 59
-rw-r--r--  sandbox/BUILD.gn | 8
-rw-r--r--  sandbox/linux/BUILD.gn | 40
-rw-r--r--  sandbox/linux/bpf_dsl/bpf_dsl.h | 5
-rw-r--r--  sandbox/linux/bpf_dsl/bpf_dsl_forward.h | 8
-rw-r--r--  sandbox/linux/bpf_dsl/bpf_dsl_impl.h | 1
-rw-r--r--  sandbox/linux/sandbox_linux.gypi | 434
-rw-r--r--  sandbox/linux/sandbox_linux_nacl_nonsfi.gyp | 87
-rw-r--r--  sandbox/linux/sandbox_linux_test_sources.gypi | 93
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc | 14
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc | 26
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc | 94
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc | 18
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h | 9
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc | 3
-rw-r--r--  sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc | 6
-rw-r--r--  sandbox/linux/services/credentials.cc | 61
-rw-r--r--  sandbox/linux/services/credentials.h | 3
-rw-r--r--  sandbox/linux/services/credentials_unittest.cc | 14
-rw-r--r--  sandbox/linux/services/syscall_wrappers.cc | 4
-rw-r--r--  sandbox/linux/services/syscall_wrappers.h | 4
-rw-r--r--  sandbox/linux/suid/process_util_linux.c | 1
-rw-r--r--  sandbox/linux/syscall_broker/broker_file_permission.h | 2
-rw-r--r--  sandbox/linux/syscall_broker/broker_file_permission_unittest.cc | 17
-rw-r--r--  sandbox/linux/system_headers/arm64_linux_syscalls.h | 4
-rw-r--r--  sandbox/linux/system_headers/mips64_linux_syscalls.h | 8
-rw-r--r--  sandbox/linux/system_headers/mips_linux_syscalls.h | 8
-rw-r--r--  sandbox/mac/BUILD.gn | 5
-rw-r--r--  sandbox/mac/sandbox_mac.gypi | 104
-rw-r--r--  sandbox/sandbox.gyp | 35
-rw-r--r--  sandbox/sandbox_linux_unittests.isolate | 23
-rw-r--r--  sandbox/win/BUILD.gn | 45
-rw-r--r--  sandbox/win/sandbox_win.gypi | 432
-rw-r--r--  sandbox/win/src/internal_types.h | 2
-rw-r--r--  sandbox/win/src/sandbox.vcproj | 10
-rw-r--r--  sandbox/win/src/sandbox_types.h | 7
-rw-r--r--  sandbox/win/src/security_level.h | 11
-rwxr-xr-x  sandbox/win/wow_helper/wow_helper.exe | bin 67072 -> 0 bytes
-rw-r--r--  sandbox/win/wow_helper/wow_helper.pdb | bin 699392 -> 0 bytes
-rw-r--r--  sandbox/win/wow_helper/wow_helper.vcproj | 215
585 files changed, 31255 insertions, 23180 deletions
diff --git a/Android.bp b/Android.bp
index ed7e2834aa..ecf12a6ba4 100644
--- a/Android.bp
+++ b/Android.bp
@@ -77,14 +77,19 @@ libchromeCommonSrc = [
"base/callback_internal.cc",
"base/command_line.cc",
"base/cpu.cc",
+ "base/debug/activity_tracker.cc",
"base/debug/alias.cc",
"base/debug/debugger.cc",
"base/debug/debugger_posix.cc",
+ "base/debug/dump_without_crashing.cc",
+ "base/debug/profiler.cc",
"base/debug/stack_trace.cc",
"base/debug/stack_trace_posix.cc",
"base/debug/task_annotator.cc",
"base/environment.cc",
+ "base/feature_list.cc",
"base/files/file.cc",
+ "base/files/file_descriptor_watcher_posix.cc",
"base/files/file_enumerator.cc",
"base/files/file_enumerator_posix.cc",
"base/files/file_path.cc",
@@ -115,6 +120,8 @@ libchromeCommonSrc = [
"base/memory/aligned_memory.cc",
"base/memory/ref_counted.cc",
"base/memory/ref_counted_memory.cc",
+ "base/memory/shared_memory_helper.cc",
+ "base/memory/shared_memory_tracker.cc",
"base/memory/singleton.cc",
"base/memory/weak_ptr.cc",
"base/message_loop/incoming_task_queue.cc",
@@ -125,6 +132,7 @@ libchromeCommonSrc = [
"base/message_loop/message_pump_libevent.cc",
"base/metrics/bucket_ranges.cc",
"base/metrics/field_trial.cc",
+ "base/metrics/field_trial_param_associator.cc",
"base/metrics/metrics_hashes.cc",
"base/metrics/histogram_base.cc",
"base/metrics/histogram.cc",
@@ -139,12 +147,15 @@ libchromeCommonSrc = [
"base/metrics/statistics_recorder.cc",
"base/pending_task.cc",
"base/pickle.cc",
+ "base/posix/global_descriptors.cc",
"base/posix/file_descriptor_shuffle.cc",
"base/posix/safe_strerror.cc",
"base/process/kill.cc",
"base/process/kill_posix.cc",
"base/process/launch.cc",
"base/process/launch_posix.cc",
+ "base/process/memory.cc",
+ "base/process/memory_linux.cc",
"base/process/process_handle.cc",
"base/process/process_handle_posix.cc",
"base/process/process_iterator.cc",
@@ -158,8 +169,9 @@ libchromeCommonSrc = [
"base/rand_util_posix.cc",
"base/run_loop.cc",
"base/sequence_checker_impl.cc",
+ "base/sequence_token.cc",
"base/sequenced_task_runner.cc",
- "base/sha1_portable.cc",
+ "base/sha1.cc",
"base/strings/pattern.cc",
"base/strings/safe_sprintf.cc",
"base/strings/string16.cc",
@@ -171,7 +183,7 @@ libchromeCommonSrc = [
"base/strings/string_util_constants.cc",
"base/strings/utf_string_conversions.cc",
"base/strings/utf_string_conversion_utils.cc",
- "base/synchronization/cancellation_flag.cc",
+ "base/synchronization/atomic_flag.cc",
"base/synchronization/condition_variable_posix.cc",
"base/synchronization/lock.cc",
"base/synchronization/lock_impl_posix.cc",
@@ -183,11 +195,12 @@ libchromeCommonSrc = [
"base/task/cancelable_task_tracker.cc",
"base/task_runner.cc",
"base/task_scheduler/scheduler_lock_impl.cc",
+ "base/task_scheduler/scoped_set_task_priority_for_current_thread.cc",
"base/task_scheduler/sequence.cc",
"base/task_scheduler/sequence_sort_key.cc",
"base/task_scheduler/task.cc",
"base/task_scheduler/task_traits.cc",
- "base/test/trace_event_analyzer.cc",
+ "base/third_party/dynamic_annotations/dynamic_annotations.c",
"base/third_party/icu/icu_utf.cc",
"base/third_party/nspr/prtime.cc",
"base/threading/non_thread_safe_impl.cc",
@@ -200,7 +213,6 @@ libchromeCommonSrc = [
"base/threading/thread_checker_impl.cc",
"base/threading/thread_collision_warner.cc",
"base/threading/thread_id_name_manager.cc",
- "base/threading/thread_local_posix.cc",
"base/threading/thread_local_storage.cc",
"base/threading/thread_local_storage_posix.cc",
"base/threading/thread_restrictions.cc",
@@ -215,10 +227,13 @@ libchromeCommonSrc = [
"base/time/time_posix.cc",
"base/timer/elapsed_timer.cc",
"base/timer/timer.cc",
+ "base/trace_event/category_registry.cc",
+ "base/trace_event/event_name_filter.cc",
"base/trace_event/heap_profiler_allocation_context.cc",
"base/trace_event/heap_profiler_allocation_context_tracker.cc",
"base/trace_event/heap_profiler_allocation_register.cc",
"base/trace_event/heap_profiler_allocation_register_posix.cc",
+ "base/trace_event/heap_profiler_event_filter.cc",
"base/trace_event/heap_profiler_heap_dump_writer.cc",
"base/trace_event/heap_profiler_stack_frame_deduplicator.cc",
"base/trace_event/heap_profiler_type_name_deduplicator.cc",
@@ -227,20 +242,22 @@ libchromeCommonSrc = [
"base/trace_event/memory_allocator_dump_guid.cc",
"base/trace_event/memory_dump_manager.cc",
"base/trace_event/memory_dump_request_args.cc",
+ "base/trace_event/memory_dump_scheduler.cc",
"base/trace_event/memory_dump_session_state.cc",
"base/trace_event/memory_infra_background_whitelist.cc",
+ "base/trace_event/memory_usage_estimator.cc",
"base/trace_event/process_memory_dump.cc",
"base/trace_event/process_memory_maps.cc",
"base/trace_event/process_memory_totals.cc",
"base/trace_event/trace_buffer.cc",
"base/trace_event/trace_config.cc",
"base/trace_event/trace_event_argument.cc",
+ "base/trace_event/trace_event_filter.cc",
"base/trace_event/trace_event_impl.cc",
"base/trace_event/trace_event_memory_overhead.cc",
"base/trace_event/trace_event_synthetic_delay.cc",
"base/trace_event/trace_log.cc",
"base/trace_event/trace_log_constants.cc",
- "base/trace_event/trace_sampling_thread.cc",
"base/tracked_objects.cc",
"base/tracking_info.cc",
"base/values.cc",
@@ -364,6 +381,7 @@ cc_library_static {
host_supported: true,
srcs: [
+ "base/test/gtest_util.cc",
"base/test/simple_test_clock.cc",
"base/test/simple_test_tick_clock.cc",
"base/test/test_file_util.cc",
@@ -416,12 +434,13 @@ cc_test {
"base/cancelable_callback_unittest.cc",
"base/command_line_unittest.cc",
"base/cpu_unittest.cc",
+ "base/debug/activity_tracker_unittest.cc",
"base/debug/debugger_unittest.cc",
"base/debug/leak_tracker_unittest.cc",
"base/debug/task_annotator_unittest.cc",
"base/environment_unittest.cc",
- "base/file_version_info_unittest.cc",
"base/files/dir_reader_posix_unittest.cc",
+ "base/files/file_descriptor_watcher_posix_unittest.cc",
"base/files/file_path_watcher_unittest.cc",
"base/files/file_path_unittest.cc",
"base/files/file_unittest.cc",
@@ -476,6 +495,7 @@ cc_test {
"base/scoped_generic_unittest.cc",
"base/security_unittest.cc",
"base/sequence_checker_unittest.cc",
+ "base/sequence_token_unittest.cc",
"base/sha1_unittest.cc",
"base/stl_util_unittest.cc",
"base/strings/pattern_unittest.cc",
@@ -487,7 +507,7 @@ cc_test {
"base/strings/string_util_unittest.cc",
"base/strings/sys_string_conversions_unittest.cc",
"base/strings/utf_string_conversions_unittest.cc",
- "base/synchronization/cancellation_flag_unittest.cc",
+ "base/synchronization/atomic_flag_unittest.cc",
"base/synchronization/condition_variable_unittest.cc",
"base/synchronization/lock_unittest.cc",
"base/synchronization/waitable_event_unittest.cc",
@@ -496,19 +516,22 @@ cc_test {
"base/task/cancelable_task_tracker_unittest.cc",
"base/task_runner_util_unittest.cc",
"base/task_scheduler/scheduler_lock_unittest.cc",
+ "base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
"base/task_scheduler/sequence_sort_key_unittest.cc",
"base/task_scheduler/sequence_unittest.cc",
"base/task_scheduler/task_traits.cc",
"base/template_util_unittest.cc",
+ "base/test/mock_entropy_provider.cc",
"base/test/multiprocess_test.cc",
- "base/test/multiprocess_test_android.cc",
"base/test/opaque_ref_counted.cc",
+ "base/test/scoped_feature_list.cc",
"base/test/scoped_locale.cc",
"base/test/sequenced_worker_pool_owner.cc",
"base/test/test_file_util.cc",
"base/test/test_file_util_linux.cc",
"base/test/test_file_util_posix.cc",
"base/test/test_io_thread.cc",
+ "base/test/test_mock_time_task_runner.cc",
"base/test/test_pending_task.cc",
"base/test/test_simple_task_runner.cc",
"base/test/test_switches.cc",
@@ -529,14 +552,17 @@ cc_test {
"base/time/time_unittest.cc",
"base/timer/hi_res_timer_manager_unittest.cc",
"base/timer/timer_unittest.cc",
+ "base/trace_event/event_name_filter_unittest.cc",
"base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
"base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
"base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
"base/trace_event/memory_allocator_dump_unittest.cc",
"base/trace_event/memory_dump_manager_unittest.cc",
+ "base/trace_event/memory_usage_estimator_unittest.cc",
"base/trace_event/process_memory_dump_unittest.cc",
"base/trace_event/trace_config_unittest.cc",
"base/trace_event/trace_event_argument_unittest.cc",
+ "base/trace_event/trace_event_filter_test_utils.cc",
"base/trace_event/trace_event_synthetic_delay_unittest.cc",
"base/trace_event/trace_event_unittest.cc",
"base/tracked_objects_unittest.cc",
diff --git a/SConstruct b/SConstruct
index 72e022e6cd..49cef6f434 100644
--- a/SConstruct
+++ b/SConstruct
@@ -270,7 +270,6 @@ base_libs = [
'name' : 'crypto',
'sources' : """
hmac.cc
- hmac_nss.cc
nss_key_util.cc
nss_util.cc
openssl_util.cc
@@ -283,9 +282,9 @@ base_libs = [
secure_hash.cc
secure_util.cc
sha2.cc
- signature_creator_nss.cc
- signature_verifier_nss.cc
- symmetric_key_nss.cc
+ signature_creator.cc
+ signature_verifier.cc
+ symmetric_key.cc
third_party/nss/rsawrapr.c
third_party/nss/sha512.cc
""",
@@ -309,7 +308,7 @@ base_libs = [
linux/seccomp-bpf/trap.cc
linux/seccomp-bpf-helpers/baseline_policy.cc
- linux/seccomp-bpf-helpers/sigsys_handlers.cc
+ linux/seccomp-bpf-helpers/sigsys_handlers.cc
linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
linux/seccomp-bpf-helpers/syscall_sets.cc
diff --git a/base/BUILD.gn b/base/BUILD.gn
index c14798959b..f84856de5c 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -13,18 +13,22 @@
# unpredictably for the various build types, we prefer a slightly different
# style. Instead, there are big per-platform blocks of inclusions and
# exclusions. If a given file has an inclusion or exclusion rule that applies
-# for multiple conditions, perfer to duplicate it in both lists. This makes it
+# for multiple conditions, prefer to duplicate it in both lists. This makes it
# a bit easier to see which files apply in which cases rather than having a
# huge sequence of random-looking conditionals.
import("//build/buildflag_header.gni")
import("//build/config/allocator.gni")
+import("//build/config/arm.gni")
import("//build/config/chromecast_build.gni")
+import("//build/config/clang/clang.gni")
import("//build/config/compiler/compiler.gni")
+import("//build/config/dcheck_always_on.gni")
import("//build/config/nacl/config.gni")
import("//build/config/sysroot.gni")
import("//build/config/ui.gni")
import("//build/nocompile.gni")
+import("//testing/libfuzzer/fuzzer_test.gni")
import("//testing/test.gni")
declare_args() {
@@ -32,16 +36,20 @@ declare_args() {
# See //base/build_time.cc and //build/write_build_date_header.py for more
# details and the expected format.
override_build_date = "N/A"
+
+ # Turn on memory profiling in the task profiler when the heap shim is
+ # available. Profiling can then be enabled at runtime by passing the command
+ # line flag --enable-heap-profiling=task-profiler.
+ enable_memory_task_profiler = use_experimental_allocator_shim
+
+ # Partition alloc is included by default except iOS.
+ use_partition_alloc = !is_ios
}
if (is_android) {
import("//build/config/android/rules.gni")
}
-if (is_win) {
- import("//build/config/win/visual_studio_version.gni")
-}
-
config("base_flags") {
if (is_clang) {
cflags = [
@@ -141,6 +149,11 @@ component("base") {
"allocator/allocator_check.h",
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
+ "allocator/allocator_interception_mac.h",
+ "allocator/allocator_interception_mac.mm",
+ "allocator/allocator_shim.h",
+ "allocator/malloc_zone_functions_mac.cc",
+ "allocator/malloc_zone_functions_mac.h",
"android/animation_frame_time_histogram.cc",
"android/animation_frame_time_histogram.h",
"android/apk_assets.cc",
@@ -162,23 +175,29 @@ component("base") {
"android/context_utils.cc",
"android/context_utils.h",
"android/cpu_features.cc",
+ "android/cpu_features.h",
"android/cxa_demangle_stub.cc",
+ "android/early_trace_event_binding.cc",
+ "android/early_trace_event_binding.h",
"android/event_log.cc",
"android/event_log.h",
"android/field_trial_list.cc",
"android/field_trial_list.h",
- "android/fifo_utils.cc",
- "android/fifo_utils.h",
"android/important_file_writer_android.cc",
"android/important_file_writer_android.h",
+ "android/java_exception_reporter.cc",
+ "android/java_exception_reporter.h",
"android/java_handler_thread.cc",
"android/java_handler_thread.h",
+ "android/java_message_handler_factory.h",
"android/java_runtime.cc",
"android/java_runtime.h",
"android/jni_android.cc",
"android/jni_android.h",
"android/jni_array.cc",
"android/jni_array.h",
+ "android/jni_generator/jni_generator_helper.h",
+ "android/jni_int_wrapper.h",
"android/jni_registrar.cc",
"android/jni_registrar.h",
"android/jni_string.cc",
@@ -206,16 +225,24 @@ component("base") {
"android/record_user_action.h",
"android/scoped_java_ref.cc",
"android/scoped_java_ref.h",
+ "android/statistics_recorder_android.cc",
+ "android/statistics_recorder_android.h",
"android/sys_utils.cc",
"android/sys_utils.h",
- "android/thread_utils.h",
+ "android/throw_uncaught_exception.cc",
+ "android/throw_uncaught_exception.h",
+ "android/time_utils.cc",
+ "android/time_utils.h",
"android/trace_event_binding.cc",
"android/trace_event_binding.h",
+ "android/unguessable_token_android.cc",
+ "android/unguessable_token_android.h",
"at_exit.cc",
"at_exit.h",
"atomic_ref_count.h",
"atomic_sequence_num.h",
"atomicops.h",
+ "atomicops_internals_atomicword_compat.h",
"atomicops_internals_portable.h",
"atomicops_internals_x86_msvc.h",
"auto_reset.h",
@@ -238,25 +265,34 @@ component("base") {
"build_time.cc",
"build_time.h",
"callback.h",
+ "callback_forward.h",
"callback_helpers.cc",
"callback_helpers.h",
"callback_internal.cc",
"callback_internal.h",
+ "callback_list.h",
"cancelable_callback.h",
"command_line.cc",
"command_line.h",
"compiler_specific.h",
"containers/adapters.h",
+ "containers/flat_set.h",
"containers/hash_tables.h",
"containers/linked_list.h",
"containers/mru_cache.h",
- "containers/scoped_ptr_hash_map.h",
"containers/small_map.h",
"containers/stack_container.h",
"cpu.cc",
"cpu.h",
"critical_closure.h",
"critical_closure_internal_ios.mm",
+
+ # This file depends on files from the "debug/allocator" target,
+ # but this target does not depend on "debug/allocator".
+ "debug/activity_analyzer.cc",
+ "debug/activity_analyzer.h",
+ "debug/activity_tracker.cc",
+ "debug/activity_tracker.h",
"debug/alias.cc",
"debug/alias.h",
"debug/asan_invalid_access.cc",
@@ -273,10 +309,6 @@ component("base") {
"debug/dump_without_crashing.h",
"debug/gdi_debug_util_win.cc",
"debug/gdi_debug_util_win.h",
-
- # This file depends on files from the "debug/allocator" target,
- # but this target does not depend on "debug/allocator" (see
- # allocator.gyp for details).
"debug/leak_annotations.h",
"debug/leak_tracker.h",
"debug/proc_maps_linux.cc",
@@ -290,13 +322,18 @@ component("base") {
"debug/stack_trace_win.cc",
"debug/task_annotator.cc",
"debug/task_annotator.h",
+ "debug/thread_heap_usage_tracker.cc",
+ "debug/thread_heap_usage_tracker.h",
"deferred_sequenced_task_runner.cc",
"deferred_sequenced_task_runner.h",
"environment.cc",
"environment.h",
+ "event_types.h",
"feature_list.cc",
"feature_list.h",
"file_descriptor_posix.h",
+ "file_descriptor_store.cc",
+ "file_descriptor_store.h",
"file_version_info.h",
"file_version_info_mac.h",
"file_version_info_mac.mm",
@@ -306,6 +343,9 @@ component("base") {
"files/dir_reader_linux.h",
"files/dir_reader_posix.h",
"files/file.cc",
+ "files/file.h",
+ "files/file_descriptor_watcher_posix.cc",
+ "files/file_descriptor_watcher_posix.h",
"files/file_enumerator.cc",
"files/file_enumerator.h",
"files/file_enumerator_posix.cc",
@@ -345,6 +385,10 @@ component("base") {
"files/memory_mapped_file_win.cc",
"files/scoped_file.cc",
"files/scoped_file.h",
+ "files/scoped_platform_handle.cc",
+ "files/scoped_platform_handle.h",
+ "files/scoped_platform_handle_posix.cc",
+ "files/scoped_platform_handle_win.cc",
"files/scoped_temp_dir.cc",
"files/scoped_temp_dir.h",
"format_macros.h",
@@ -354,6 +398,7 @@ component("base") {
"hash.cc",
"hash.h",
"id_map.h",
+ "ios/block_types.h",
"ios/crb_protocol_observers.h",
"ios/crb_protocol_observers.mm",
"ios/device_util.h",
@@ -404,8 +449,8 @@ component("base") {
"mac/dispatch_source_mach.h",
"mac/foundation_util.h",
"mac/foundation_util.mm",
- "mac/launch_services_util.cc",
"mac/launch_services_util.h",
+ "mac/launch_services_util.mm",
"mac/launchd.cc",
"mac/launchd.h",
"mac/mac_logging.h",
@@ -420,6 +465,8 @@ component("base") {
"mac/mach_port_util.h",
"mac/objc_property_releaser.h",
"mac/objc_property_releaser.mm",
+ "mac/objc_release_properties.h",
+ "mac/objc_release_properties.mm",
"mac/os_crash_dumps.cc",
"mac/os_crash_dumps.h",
"mac/scoped_aedesc.h",
@@ -427,6 +474,7 @@ component("base") {
"mac/scoped_block.h",
"mac/scoped_cftyperef.h",
"mac/scoped_dispatch_object.h",
+ "mac/scoped_ionotificationportref.h",
"mac/scoped_ioobject.h",
"mac/scoped_ioplugininterface.h",
"mac/scoped_launch_data.h",
@@ -458,6 +506,12 @@ component("base") {
"memory/free_deleter.h",
"memory/linked_ptr.h",
"memory/manual_constructor.h",
+ "memory/memory_coordinator_client.cc",
+ "memory/memory_coordinator_client.h",
+ "memory/memory_coordinator_client_registry.cc",
+ "memory/memory_coordinator_client_registry.h",
+ "memory/memory_coordinator_proxy.cc",
+ "memory/memory_coordinator_proxy.h",
"memory/memory_pressure_listener.cc",
"memory/memory_pressure_listener.h",
"memory/memory_pressure_monitor.cc",
@@ -472,7 +526,7 @@ component("base") {
"memory/raw_scoped_refptr_mismatch_checker.h",
"memory/ref_counted.cc",
"memory/ref_counted.h",
- "memory/ref_counted_delete_on_message_loop.h",
+ "memory/ref_counted_delete_on_sequence.h",
"memory/ref_counted_memory.cc",
"memory/ref_counted_memory.h",
"memory/scoped_policy.h",
@@ -482,6 +536,8 @@ component("base") {
"memory/shared_memory_handle.h",
"memory/shared_memory_handle_mac.cc",
"memory/shared_memory_handle_win.cc",
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"memory/shared_memory_mac.cc",
"memory/shared_memory_nacl.cc",
"memory/shared_memory_posix.cc",
@@ -512,10 +568,15 @@ component("base") {
"message_loop/message_pump_mac.mm",
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
+ "message_loop/timer_slack.h",
"metrics/bucket_ranges.cc",
"metrics/bucket_ranges.h",
"metrics/field_trial.cc",
"metrics/field_trial.h",
+ "metrics/field_trial_param_associator.cc",
+ "metrics/field_trial_param_associator.h",
+ "metrics/field_trial_params.cc",
+ "metrics/field_trial_params.h",
"metrics/histogram.cc",
"metrics/histogram.h",
"metrics/histogram_base.cc",
@@ -523,7 +584,11 @@ component("base") {
"metrics/histogram_delta_serialization.cc",
"metrics/histogram_delta_serialization.h",
"metrics/histogram_flattener.h",
+ "metrics/histogram_functions.cc",
+ "metrics/histogram_functions.h",
"metrics/histogram_macros.h",
+ "metrics/histogram_macros_internal.h",
+ "metrics/histogram_macros_local.h",
"metrics/histogram_samples.cc",
"metrics/histogram_samples.h",
"metrics/histogram_snapshot_manager.cc",
@@ -547,6 +612,7 @@ component("base") {
"metrics/user_metrics.cc",
"metrics/user_metrics.h",
"metrics/user_metrics_action.h",
+ "native_library.cc",
"native_library.h",
"native_library_ios.mm",
"native_library_mac.mm",
@@ -560,6 +626,8 @@ component("base") {
"numerics/safe_conversions_impl.h",
"numerics/safe_math.h",
"numerics/safe_math_impl.h",
+ "numerics/saturated_arithmetic.h",
+ "numerics/saturated_arithmetic_arm.h",
"observer_list.h",
"observer_list_threadsafe.h",
"optional.h",
@@ -575,23 +643,18 @@ component("base") {
"pickle.h",
"posix/eintr_wrapper.h",
"posix/file_descriptor_shuffle.cc",
+ "posix/file_descriptor_shuffle.h",
"posix/global_descriptors.cc",
"posix/global_descriptors.h",
"posix/safe_strerror.cc",
"posix/safe_strerror.h",
"posix/unix_domain_socket_linux.cc",
"posix/unix_domain_socket_linux.h",
+ "post_task_and_reply_with_result_internal.h",
"power_monitor/power_monitor.cc",
"power_monitor/power_monitor.h",
"power_monitor/power_monitor_device_source.cc",
"power_monitor/power_monitor_device_source.h",
- "power_monitor/power_monitor_device_source_android.cc",
- "power_monitor/power_monitor_device_source_android.h",
- "power_monitor/power_monitor_device_source_chromeos.cc",
- "power_monitor/power_monitor_device_source_ios.mm",
- "power_monitor/power_monitor_device_source_mac.mm",
- "power_monitor/power_monitor_device_source_posix.cc",
- "power_monitor/power_monitor_device_source_win.cc",
"power_monitor/power_monitor_source.cc",
"power_monitor/power_monitor_source.h",
"power_monitor/power_observer.h",
@@ -617,6 +680,7 @@ component("base") {
"process/port_provider_mac.h",
"process/process.h",
"process/process_handle.cc",
+ "process/process_handle.h",
#"process/process_handle_freebsd.cc", # Unused in Chromium build.
"process/process_handle_linux.cc",
@@ -639,6 +703,7 @@ component("base") {
#"process/process_iterator_openbsd.cc", # Unused in Chromium build.
"process/process_iterator_win.cc",
"process/process_linux.cc",
+ "process/process_mac.cc",
"process/process_metrics.cc",
"process/process_metrics.h",
@@ -671,6 +736,7 @@ component("base") {
"rand_util_win.cc",
"run_loop.cc",
"run_loop.h",
+ "scoped_clear_errno.h",
"scoped_generic.h",
"scoped_native_library.cc",
"scoped_native_library.h",
@@ -678,6 +744,8 @@ component("base") {
"sequence_checker.h",
"sequence_checker_impl.cc",
"sequence_checker_impl.h",
+ "sequence_token.cc",
+ "sequence_token.h",
"sequenced_task_runner.cc",
"sequenced_task_runner.h",
"sequenced_task_runner_helpers.h",
@@ -725,7 +793,8 @@ component("base") {
"sync_socket.h",
"sync_socket_posix.cc",
"sync_socket_win.cc",
- "synchronization/cancellation_flag.cc",
+ "synchronization/atomic_flag.cc",
+ "synchronization/atomic_flag.h",
"synchronization/cancellation_flag.h",
"synchronization/condition_variable.h",
"synchronization/condition_variable_posix.cc",
@@ -751,6 +820,9 @@ component("base") {
"sys_info.h",
"sys_info_android.cc",
"sys_info_chromeos.cc",
+ "sys_info_internal.h",
+ "syslog_logging.cc",
+ "syslog_logging.h",
#"sys_info_freebsd.cc", # Unused in Chromium build.
"sys_info_ios.mm",
@@ -769,20 +841,28 @@ component("base") {
"task_runner_util.h",
"task_scheduler/delayed_task_manager.cc",
"task_scheduler/delayed_task_manager.h",
+ "task_scheduler/initialization_util.cc",
+ "task_scheduler/initialization_util.h",
+ "task_scheduler/post_task.cc",
+ "task_scheduler/post_task.h",
"task_scheduler/priority_queue.cc",
"task_scheduler/priority_queue.h",
"task_scheduler/scheduler_lock.h",
"task_scheduler/scheduler_lock_impl.cc",
"task_scheduler/scheduler_lock_impl.h",
- "task_scheduler/scheduler_service_thread.cc",
- "task_scheduler/scheduler_service_thread.h",
+ "task_scheduler/scheduler_single_thread_task_runner_manager.cc",
+ "task_scheduler/scheduler_single_thread_task_runner_manager.h",
"task_scheduler/scheduler_worker.cc",
"task_scheduler/scheduler_worker.h",
"task_scheduler/scheduler_worker_pool.h",
"task_scheduler/scheduler_worker_pool_impl.cc",
"task_scheduler/scheduler_worker_pool_impl.h",
+ "task_scheduler/scheduler_worker_pool_params.cc",
+ "task_scheduler/scheduler_worker_pool_params.h",
"task_scheduler/scheduler_worker_stack.cc",
"task_scheduler/scheduler_worker_stack.h",
+ "task_scheduler/scoped_set_task_priority_for_current_thread.cc",
+ "task_scheduler/scoped_set_task_priority_for_current_thread.h",
"task_scheduler/sequence.cc",
"task_scheduler/sequence.h",
"task_scheduler/sequence_sort_key.cc",
@@ -795,9 +875,12 @@ component("base") {
"task_scheduler/task_scheduler_impl.h",
"task_scheduler/task_tracker.cc",
"task_scheduler/task_tracker.h",
+ "task_scheduler/task_tracker_posix.cc",
+ "task_scheduler/task_tracker_posix.h",
"task_scheduler/task_traits.cc",
"task_scheduler/task_traits.h",
"template_util.h",
+ "test/malloc_wrapper.h",
"third_party/dmg_fp/dmg_fp.h",
"third_party/dmg_fp/dtoa_wrapper.cc",
"third_party/dmg_fp/g_fmt.cc",
@@ -806,6 +889,7 @@ component("base") {
"third_party/nspr/prtime.cc",
"third_party/nspr/prtime.h",
"third_party/superfasthash/superfasthash.c",
+ "third_party/valgrind/memcheck.h",
"threading/non_thread_safe.h",
"threading/non_thread_safe_impl.cc",
"threading/non_thread_safe_impl.h",
@@ -835,13 +919,10 @@ component("base") {
"threading/thread_id_name_manager.cc",
"threading/thread_id_name_manager.h",
"threading/thread_local.h",
- "threading/thread_local_android.cc",
- "threading/thread_local_posix.cc",
"threading/thread_local_storage.cc",
"threading/thread_local_storage.h",
"threading/thread_local_storage_posix.cc",
"threading/thread_local_storage_win.cc",
- "threading/thread_local_win.cc",
"threading/thread_restrictions.cc",
"threading/thread_restrictions.h",
"threading/thread_task_runner_handle.cc",
@@ -875,9 +956,15 @@ component("base") {
"timer/mock_timer.h",
"timer/timer.cc",
"timer/timer.h",
+ "trace_event/auto_open_close_event.cc",
+ "trace_event/auto_open_close_event.h",
"trace_event/blame_context.cc",
"trace_event/blame_context.h",
+ "trace_event/category_registry.cc",
+ "trace_event/category_registry.h",
"trace_event/common/trace_event_common.h",
+ "trace_event/event_name_filter.cc",
+ "trace_event/event_name_filter.h",
"trace_event/heap_profiler.h",
"trace_event/heap_profiler_allocation_context.cc",
"trace_event/heap_profiler_allocation_context.h",
@@ -887,6 +974,8 @@ component("base") {
"trace_event/heap_profiler_allocation_register.h",
"trace_event/heap_profiler_allocation_register_posix.cc",
"trace_event/heap_profiler_allocation_register_win.cc",
+ "trace_event/heap_profiler_event_filter.cc",
+ "trace_event/heap_profiler_event_filter.h",
"trace_event/heap_profiler_heap_dump_writer.cc",
"trace_event/heap_profiler_heap_dump_writer.h",
"trace_event/heap_profiler_stack_frame_deduplicator.cc",
@@ -895,6 +984,8 @@ component("base") {
"trace_event/heap_profiler_type_name_deduplicator.h",
"trace_event/java_heap_dump_provider_android.cc",
"trace_event/java_heap_dump_provider_android.h",
+ "trace_event/malloc_dump_provider.cc",
+ "trace_event/malloc_dump_provider.h",
"trace_event/memory_allocator_dump.cc",
"trace_event/memory_allocator_dump.h",
"trace_event/memory_allocator_dump_guid.cc",
@@ -904,10 +995,14 @@ component("base") {
"trace_event/memory_dump_provider.h",
"trace_event/memory_dump_request_args.cc",
"trace_event/memory_dump_request_args.h",
+ "trace_event/memory_dump_scheduler.cc",
+ "trace_event/memory_dump_scheduler.h",
"trace_event/memory_dump_session_state.cc",
"trace_event/memory_dump_session_state.h",
"trace_event/memory_infra_background_whitelist.cc",
"trace_event/memory_infra_background_whitelist.h",
+ "trace_event/memory_usage_estimator.cc",
+ "trace_event/memory_usage_estimator.h",
"trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h",
"trace_event/process_memory_maps.cc",
@@ -916,6 +1011,7 @@ component("base") {
"trace_event/process_memory_totals.h",
"trace_event/trace_buffer.cc",
"trace_event/trace_buffer.h",
+ "trace_event/trace_category.h",
"trace_event/trace_config.cc",
"trace_event/trace_config.h",
"trace_event/trace_event.h",
@@ -924,6 +1020,8 @@ component("base") {
"trace_event/trace_event_argument.h",
"trace_event/trace_event_etw_export_win.cc",
"trace_event/trace_event_etw_export_win.h",
+ "trace_event/trace_event_filter.cc",
+ "trace_event/trace_event_filter.h",
"trace_event/trace_event_impl.cc",
"trace_event/trace_event_impl.h",
"trace_event/trace_event_memory_overhead.cc",
@@ -935,17 +1033,15 @@ component("base") {
"trace_event/trace_log.cc",
"trace_event/trace_log.h",
"trace_event/trace_log_constants.cc",
- "trace_event/trace_sampling_thread.cc",
- "trace_event/trace_sampling_thread.h",
"trace_event/tracing_agent.cc",
"trace_event/tracing_agent.h",
- "trace_event/winheap_dump_provider_win.cc",
- "trace_event/winheap_dump_provider_win.h",
"tracked_objects.cc",
"tracked_objects.h",
"tracking_info.cc",
"tracking_info.h",
"tuple.h",
+ "unguessable_token.cc",
+ "unguessable_token.h",
"value_conversions.cc",
"value_conversions.h",
"values.cc",
@@ -1007,6 +1103,7 @@ component("base") {
"win/wrapped_window_proc.h",
]
+ all_dependent_configs = []
defines = []
data = []
@@ -1032,14 +1129,48 @@ component("base") {
]
# Needed for <atomic> if using newer C++ library than sysroot
- if (!use_sysroot && (is_android || is_linux)) {
+ if (!use_sysroot && (is_android || (is_linux && !is_chromecast))) {
libs = [ "atomic" ]
}
if (use_experimental_allocator_shim) {
- # The allocator shim is part of the base API. This is to allow clients of
- # base should to install hooks into the allocator path.
- public_deps += [ "//base/allocator:unified_allocator_shim" ]
+ # TODO(primiano): support other platforms, currently this works only on
+ # Linux/CrOS/Android. http://crbug.com/550886 .
+ sources += [
+ "allocator/allocator_shim.cc",
+ "allocator/allocator_shim.h",
+ "allocator/allocator_shim_internals.h",
+ "allocator/allocator_shim_override_cpp_symbols.h",
+ "allocator/allocator_shim_override_libc_symbols.h",
+ ]
+ if (is_win) {
+ sources += [
+ "allocator/allocator_shim_default_dispatch_to_winheap.cc",
+ "allocator/allocator_shim_override_ucrt_symbols_win.h",
+ "allocator/winheap_stubs_win.cc",
+ "allocator/winheap_stubs_win.h",
+ ]
+ } else if (is_linux && use_allocator == "tcmalloc") {
+ sources += [
+ "allocator/allocator_shim_default_dispatch_to_tcmalloc.cc",
+ "allocator/allocator_shim_override_glibc_weak_symbols.h",
+ ]
+ deps += [ "//base/allocator:tcmalloc" ]
+ } else if (is_linux && use_allocator == "none") {
+ sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
+ } else if (is_android && use_allocator == "none") {
+ sources += [
+ "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+ "allocator/allocator_shim_override_linker_wrapped_symbols.h",
+ ]
+ all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ]
+ } else if (is_mac) {
+ sources += [
+ "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc",
+ "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h",
+ "allocator/allocator_shim_override_mac_symbols.h",
+ ]
+ }
}
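
For orientation: the sources added above build base's unified allocator shim, a singly linked chain of AllocatorDispatch structs that every heap call walks before reaching the real allocator. A minimal, hypothetical hook built only on the names visible in this diff (InsertAllocatorDispatch and the three-argument alloc_function seen in the allocator_shim.cc hunks below; the real struct carries more entry points that a complete hook must also forward) could look like:

  #include <atomic>
  #include <cstddef>

  #include "base/allocator/allocator_shim.h"

  namespace {

  std::atomic<size_t> g_alloc_count(0);

  // Counts every allocation, then delegates to the next dispatch in the chain.
  void* CountingAlloc(const base::allocator::AllocatorDispatch* self,
                      size_t size,
                      void* context) {
    g_alloc_count.fetch_add(1, std::memory_order_relaxed);
    return self->next->alloc_function(self->next, size, context);
  }

  base::allocator::AllocatorDispatch g_counting_dispatch = {};

  }  // namespace

  void InstallCountingHook() {
    // A real hook must populate every function pointer in the dispatch table;
    // only the allocation path is sketched here.
    g_counting_dispatch.alloc_function = &CountingAlloc;
    base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);
  }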
# Allow more direct string conversions on platforms with native utf8
@@ -1050,10 +1181,7 @@ component("base") {
# Android.
if (is_android) {
- sources -= [
- "debug/stack_trace_posix.cc",
- "power_monitor/power_monitor_device_source_posix.cc",
- ]
+ sources -= [ "debug/stack_trace_posix.cc" ]
# Android uses some Linux sources, put those back.
set_sources_assignment_filter([])
@@ -1061,14 +1189,14 @@ component("base") {
"debug/proc_maps_linux.cc",
"files/file_path_watcher_linux.cc",
"posix/unix_domain_socket_linux.cc",
+ "power_monitor/power_monitor_device_source_android.cc",
+ "power_monitor/power_monitor_device_source_android.h",
"process/internal_linux.cc",
"process/memory_linux.cc",
"process/process_handle_linux.cc",
"process/process_iterator_linux.cc",
"process/process_metrics_linux.cc",
"sys_info_linux.cc",
- "trace_event/malloc_dump_provider.cc",
- "trace_event/malloc_dump_provider.h",
]
set_sources_assignment_filter(sources_assignment_filter)
@@ -1085,7 +1213,7 @@ component("base") {
# Chromeos.
if (is_chromeos) {
- sources -= [ "power_monitor/power_monitor_device_source_posix.cc" ]
+ sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ]
}
# NaCl.
@@ -1119,7 +1247,10 @@ component("base") {
"memory/discardable_memory_allocator.h",
"memory/discardable_shared_memory.cc",
"memory/discardable_shared_memory.h",
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"memory/shared_memory_posix.cc",
+ "native_library.cc",
"native_library_posix.cc",
"path_service.cc",
"process/kill.cc",
@@ -1136,6 +1267,8 @@ component("base") {
"synchronization/read_write_lock_posix.cc",
"sys_info.cc",
"sys_info_posix.cc",
+ "task_scheduler/initialization_util.cc",
+ "task_scheduler/initialization_util.h",
"trace_event/trace_event_system_stats_monitor.cc",
]
@@ -1147,6 +1280,8 @@ component("base") {
configs += [ ":nacl_nonsfi_warnings" ]
} else {
sources -= [
+ "files/file_descriptor_watcher_posix.cc",
+ "files/file_descriptor_watcher_posix.h",
"files/file_util.cc",
"files/file_util.h",
"files/file_util_posix.cc",
@@ -1159,6 +1294,8 @@ component("base") {
"process/launch.h",
"process/launch_posix.cc",
"rand_util_posix.cc",
+ "task_scheduler/task_tracker_posix.cc",
+ "task_scheduler/task_tracker_posix.h",
]
}
} else {
@@ -1170,16 +1307,37 @@ component("base") {
"rand_util_nacl.cc",
"synchronization/read_write_lock_nacl.cc",
]
+
+ if (use_partition_alloc) {
+ # Add stuff that doesn't work in NaCl.
+ sources += [
+ # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
+ "allocator/partition_allocator/address_space_randomization.cc",
+ "allocator/partition_allocator/address_space_randomization.h",
+ "allocator/partition_allocator/oom.h",
+ "allocator/partition_allocator/page_allocator.cc",
+ "allocator/partition_allocator/page_allocator.h",
+ "allocator/partition_allocator/partition_alloc.cc",
+ "allocator/partition_allocator/partition_alloc.h",
+ "allocator/partition_allocator/spin_lock.cc",
+ "allocator/partition_allocator/spin_lock.h",
+ ]
+ }
}
# Windows.
if (is_win) {
sources += [
+ "power_monitor/power_monitor_device_source_win.cc",
"profiler/win32_stack_frame_unwinder.cc",
"profiler/win32_stack_frame_unwinder.h",
]
sources -= [
+ "file_descriptor_store.cc",
+ "file_descriptor_store.h",
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"message_loop/message_pump_libevent.cc",
"strings/string16.cc",
]
@@ -1187,6 +1345,7 @@ component("base") {
deps += [
"//base/trace_event/etw_manifest:chrome_events_win",
"//base/win:base_win_features",
+ "//base/win:eventlog_messages",
]
if (is_component_build) {
@@ -1201,63 +1360,60 @@ component("base") {
# These runtime files are copied to the output directory by the
# vs_toolchain script that runs as part of toolchain configuration.
- if (visual_studio_version == "2015") {
- data += [
- "$root_out_dir/msvcp140${vcrt_suffix}.dll",
- "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
- "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
-
- # Universal Windows 10 CRT files
- "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
- "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
- "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
- "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
- ]
- } else {
- data += [
- "$root_out_dir/msvcp120${vcrt_suffix}.dll",
- "$root_out_dir/msvcr120${vcrt_suffix}.dll",
- ]
- }
+ data += [
+ "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+ "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+ "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+ # Universal Windows 10 CRT files
+ "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+ "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+ "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+ "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+ ]
if (is_asan) {
- data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+ if (current_cpu == "x64") {
+ data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-x86_64.dll" ]
+ } else {
+ data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+ }
}
}
@@ -1271,7 +1427,7 @@ component("base") {
"userenv.lib",
"winmm.lib",
]
- all_dependent_configs = [ ":base_win_linker_flags" ]
+ all_dependent_configs += [ ":base_win_linker_flags" ]
} else if (!is_nacl || is_nacl_nonsfi) {
# Non-Windows.
deps += [ "//base/third_party/libevent" ]
@@ -1280,9 +1436,10 @@ component("base") {
# Desktop Mac.
if (is_mac) {
sources += [
- "trace_event/malloc_dump_provider.cc",
- "trace_event/malloc_dump_provider.h",
+ "mac/scoped_typeref.h",
+ "power_monitor/power_monitor_device_source_mac.mm",
]
+
libs = [
"ApplicationServices.framework",
"AppKit.framework",
@@ -1313,11 +1470,6 @@ component("base") {
# Linux.
if (is_linux) {
- sources += [
- "trace_event/malloc_dump_provider.cc",
- "trace_event/malloc_dump_provider.h",
- ]
-
if (is_asan || is_lsan || is_msan || is_tsan) {
# For llvm-sanitizer.
data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
@@ -1332,7 +1484,7 @@ component("base") {
defines += [ "USE_SYMBOLIZE" ]
configs += linux_configs
- all_dependent_configs = linux_configs
+ all_dependent_configs += linux_configs
# These dependencies are not required on Android, and in the case
# of xdg_mime must be excluded due to licensing restrictions.
@@ -1406,6 +1558,8 @@ component("base") {
"mac/mach_logging.h",
"mac/objc_property_releaser.h",
"mac/objc_property_releaser.mm",
+ "mac/objc_release_properties.h",
+ "mac/objc_release_properties.mm",
"mac/scoped_block.h",
"mac/scoped_mach_port.cc",
"mac/scoped_mach_port.h",
@@ -1421,6 +1575,7 @@ component("base") {
"memory/shared_memory_posix.cc",
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
+ "power_monitor/power_monitor_device_source_ios.mm",
"process/memory_stubs.cc",
"strings/sys_string_conversions_mac.mm",
"threading/platform_thread_mac.mm",
@@ -1430,6 +1585,17 @@ component("base") {
set_sources_assignment_filter(sources_assignment_filter)
}
+ if (is_posix && !is_mac && !is_ios && !is_android && !is_chromeos) {
+ sources += [ "power_monitor/power_monitor_device_source_posix.cc" ]
+ }
+
+ if (is_posix && !is_mac && !is_nacl) {
+ sources += [
+ "memory/shared_memory_tracker.cc",
+ "memory/shared_memory_tracker.h",
+ ]
+ }
+
if (!use_glib) {
sources -= [
"message_loop/message_pump_glib.cc",
@@ -1437,7 +1603,7 @@ component("base") {
]
}
- if (is_asan || is_lsan || is_msan || is_tsan) {
+ if (using_sanitizer) {
data += [ "//tools/valgrind/asan/" ]
if (is_win) {
data +=
@@ -1459,7 +1625,10 @@ component("base") {
buildflag_header("debugging_flags") {
header = "debugging_flags.h"
header_dir = "base/debug"
- flags = [ "ENABLE_PROFILING=$enable_profiling" ]
+ flags = [
+ "ENABLE_PROFILING=$enable_profiling",
+ "ENABLE_MEMORY_TASK_PROFILER=$enable_memory_task_profiler",
+ ]
}
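
buildflag_header() turns each FLAG=value pair into a generated header that is queried with the BUILDFLAG() macro rather than a bare #ifdef, so a misspelled flag name is a compile error instead of a silent false. Assuming the standard Chromium pattern (the include path follows from the header_dir above), consumption looks roughly like:

  #include "base/debug/debugging_flags.h"
  #include "build/buildflag.h"

  void MaybeEnableProfiling() {
  #if BUILDFLAG(ENABLE_PROFILING)
    // Compiled in only when gn is run with enable_profiling=true.
  #endif
  }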
# This is the subset of files from base that should not be used with a dynamic
@@ -1469,10 +1638,20 @@ static_library("base_static") {
sources = [
"base_switches.cc",
"base_switches.h",
+ "task_scheduler/switches.cc",
+ "task_scheduler/switches.h",
"win/pe_image.cc",
"win/pe_image.h",
]
+ if (is_win) {
+ # Disable sanitizer coverage in win/pe_image.cc. It is called by the sandbox
+ # before sanitizer coverage can initialize. http://crbug.com/484711
+ configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ configs +=
+ [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
+ }
+
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
@@ -1493,12 +1672,14 @@ component("i18n") {
"i18n/case_conversion.h",
"i18n/char_iterator.cc",
"i18n/char_iterator.h",
+ "i18n/character_encoding.cc",
+ "i18n/character_encoding.h",
+ "i18n/encoding_detection.cc",
+ "i18n/encoding_detection.h",
"i18n/file_util_icu.cc",
"i18n/file_util_icu.h",
"i18n/i18n_constants.cc",
"i18n/i18n_constants.h",
- "i18n/icu_encoding_detection.cc",
- "i18n/icu_encoding_detection.h",
"i18n/icu_string_conversions.cc",
"i18n/icu_string_conversions.h",
"i18n/icu_util.cc",
@@ -1525,6 +1706,7 @@ component("i18n") {
defines = [ "BASE_I18N_IMPLEMENTATION" ]
configs += [ "//build/config/compiler:wexit_time_destructors" ]
public_deps = [
+ "//third_party/ced",
"//third_party/icu",
]
deps = [
@@ -1550,6 +1732,7 @@ test("base_perftests") {
"message_loop/message_pump_perftest.cc",
# "test/run_all_unittests.cc",
+ "json/json_perftest.cc",
"threading/thread_perftest.cc",
]
deps = [
@@ -1710,6 +1893,7 @@ if (is_ios || is_mac) {
test("base_unittests") {
sources = [
+ "allocator/malloc_zone_functions_mac_unittest.cc",
"allocator/tcmalloc_unittest.cc",
"android/application_status_listener_unittest.cc",
"android/content_uri_utils_unittest.cc",
@@ -1720,12 +1904,14 @@ test("base_unittests") {
"android/path_utils_unittest.cc",
"android/scoped_java_ref_unittest.cc",
"android/sys_utils_unittest.cc",
+ "android/unguessable_token_android_unittest.cc",
"at_exit_unittest.cc",
"atomicops_unittest.cc",
"barrier_closure_unittest.cc",
"base64_unittest.cc",
"base64url_unittest.cc",
"big_endian_unittest.cc",
+ "bind_helpers_unittest.cc",
"bind_unittest.cc",
"bit_cast_unittest.cc",
"bits_unittest.cc",
@@ -1736,24 +1922,28 @@ test("base_unittests") {
"cancelable_callback_unittest.cc",
"command_line_unittest.cc",
"containers/adapters_unittest.cc",
+ "containers/flat_set_unittest.cc",
"containers/hash_tables_unittest.cc",
"containers/linked_list_unittest.cc",
"containers/mru_cache_unittest.cc",
- "containers/scoped_ptr_hash_map_unittest.cc",
"containers/small_map_unittest.cc",
"containers/stack_container_unittest.cc",
"cpu_unittest.cc",
+ "debug/activity_analyzer_unittest.cc",
+ "debug/activity_tracker_unittest.cc",
"debug/crash_logging_unittest.cc",
"debug/debugger_unittest.cc",
"debug/leak_tracker_unittest.cc",
"debug/proc_maps_linux_unittest.cc",
"debug/stack_trace_unittest.cc",
"debug/task_annotator_unittest.cc",
+ "debug/thread_heap_usage_tracker_unittest.cc",
"deferred_sequenced_task_runner_unittest.cc",
"environment_unittest.cc",
"feature_list_unittest.cc",
"file_version_info_win_unittest.cc",
"files/dir_reader_posix_unittest.cc",
+ "files/file_descriptor_watcher_posix_unittest.cc",
"files/file_locking_unittest.cc",
"files/file_path_unittest.cc",
"files/file_path_watcher_unittest.cc",
@@ -1763,6 +1953,7 @@ test("base_unittests") {
"files/file_util_unittest.cc",
"files/important_file_writer_unittest.cc",
"files/memory_mapped_file_unittest.cc",
+ "files/scoped_platform_handle_unittest.cc",
"files/scoped_temp_dir_unittest.cc",
"gmock_unittest.cc",
"guid_unittest.cc",
@@ -1770,6 +1961,7 @@ test("base_unittests") {
"i18n/break_iterator_unittest.cc",
"i18n/case_conversion_unittest.cc",
"i18n/char_iterator_unittest.cc",
+ "i18n/character_encoding_unittest.cc",
"i18n/file_util_icu_unittest.cc",
"i18n/icu_string_conversions_unittest.cc",
"i18n/message_formatter_unittest.cc",
@@ -1797,6 +1989,7 @@ test("base_unittests") {
"mac/mac_util_unittest.mm",
"mac/mach_port_broker_unittest.cc",
"mac/objc_property_releaser_unittest.mm",
+ "mac/objc_release_properties_unittest.mm",
"mac/scoped_nsobject_unittest.mm",
"mac/scoped_objc_class_swizzler_unittest.mm",
"mac/scoped_sending_event_unittest.mm",
@@ -1804,9 +1997,11 @@ test("base_unittests") {
"memory/aligned_memory_unittest.cc",
"memory/discardable_shared_memory_unittest.cc",
"memory/linked_ptr_unittest.cc",
+ "memory/memory_coordinator_client_registry_unittest.cc",
"memory/memory_pressure_listener_unittest.cc",
"memory/memory_pressure_monitor_chromeos_unittest.cc",
"memory/memory_pressure_monitor_mac_unittest.cc",
+ "memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc",
"memory/ptr_util_unittest.cc",
"memory/ref_counted_memory_unittest.cc",
@@ -1821,10 +2016,13 @@ test("base_unittests") {
"message_loop/message_loop_unittest.cc",
"message_loop/message_pump_glib_unittest.cc",
"message_loop/message_pump_io_ios_unittest.cc",
+ "message_loop/message_pump_mac_unittest.cc",
"metrics/bucket_ranges_unittest.cc",
+ "metrics/field_trial_params_unittest.cc",
"metrics/field_trial_unittest.cc",
"metrics/histogram_base_unittest.cc",
"metrics/histogram_delta_serialization_unittest.cc",
+ "metrics/histogram_functions_unittest.cc",
"metrics/histogram_macros_unittest.cc",
"metrics/histogram_snapshot_manager_unittest.cc",
"metrics/histogram_unittest.cc",
@@ -1838,10 +2036,12 @@ test("base_unittests") {
"metrics/statistics_recorder_unittest.cc",
"native_library_unittest.cc",
"numerics/safe_numerics_unittest.cc",
+ "numerics/saturated_arithmetic_unittest.cc",
"observer_list_unittest.cc",
"optional_unittest.cc",
"os_compat_android_unittest.cc",
"path_service_unittest.cc",
+ "pending_task_unittest.cc",
"pickle_unittest.cc",
"posix/file_descriptor_shuffle_unittest.cc",
"posix/unix_domain_socket_linux_unittest.cc",
@@ -1862,6 +2062,8 @@ test("base_unittests") {
"scoped_native_library_unittest.cc",
"security_unittest.cc",
"sequence_checker_unittest.cc",
+ "sequence_token_unittest.cc",
+ "sequenced_task_runner_unittest.cc",
"sha1_unittest.cc",
"stl_util_unittest.cc",
"strings/nullable_string16_unittest.cc",
@@ -1881,7 +2083,7 @@ test("base_unittests") {
"strings/utf_string_conversions_unittest.cc",
"supports_user_data_unittest.cc",
"sync_socket_unittest.cc",
- "synchronization/cancellation_flag_unittest.cc",
+ "synchronization/atomic_flag_unittest.cc",
"synchronization/condition_variable_unittest.cc",
"synchronization/lock_unittest.cc",
"synchronization/read_write_lock_unittest.cc",
@@ -1895,27 +2097,33 @@ test("base_unittests") {
"task_scheduler/delayed_task_manager_unittest.cc",
"task_scheduler/priority_queue_unittest.cc",
"task_scheduler/scheduler_lock_unittest.cc",
- "task_scheduler/scheduler_service_thread_unittest.cc",
+ "task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc",
"task_scheduler/scheduler_worker_pool_impl_unittest.cc",
"task_scheduler/scheduler_worker_stack_unittest.cc",
"task_scheduler/scheduler_worker_unittest.cc",
+ "task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
"task_scheduler/sequence_sort_key_unittest.cc",
"task_scheduler/sequence_unittest.cc",
"task_scheduler/task_scheduler_impl_unittest.cc",
+ "task_scheduler/task_tracker_posix_unittest.cc",
"task_scheduler/task_tracker_unittest.cc",
+ "task_scheduler/task_traits_unittest.cc",
+ "task_scheduler/task_unittest.cc",
"task_scheduler/test_task_factory.cc",
"task_scheduler/test_task_factory.h",
"task_scheduler/test_utils.h",
"template_util_unittest.cc",
"test/histogram_tester_unittest.cc",
- "test/icu_test_util.cc",
- "test/icu_test_util.h",
+ "test/mock_callback_unittest.cc",
+ "test/scoped_mock_time_message_loop_task_runner_unittest.cc",
+ "test/scoped_task_scheduler_unittest.cc",
"test/test_pending_task_unittest.cc",
"test/test_reg_util_win_unittest.cc",
"test/trace_event_analyzer_unittest.cc",
"test/user_action_tester_unittest.cc",
"threading/non_thread_safe_unittest.cc",
"threading/platform_thread_unittest.cc",
+ "threading/post_task_and_reply_impl_unittest.cc",
"threading/sequenced_task_runner_handle_unittest.cc",
"threading/sequenced_worker_pool_unittest.cc",
"threading/simple_thread_unittest.cc",
@@ -1924,6 +2132,7 @@ test("base_unittests") {
"threading/thread_id_name_manager_unittest.cc",
"threading/thread_local_storage_unittest.cc",
"threading/thread_local_unittest.cc",
+ "threading/thread_task_runner_handle_unittest.cc",
"threading/thread_unittest.cc",
"threading/watchdog_unittest.cc",
"threading/worker_pool_posix_unittest.cc",
@@ -1936,6 +2145,7 @@ test("base_unittests") {
"timer/timer_unittest.cc",
"tools_sanity_unittest.cc",
"trace_event/blame_context_unittest.cc",
+ "trace_event/event_name_filter_unittest.cc",
"trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
"trace_event/heap_profiler_allocation_register_unittest.cc",
"trace_event/heap_profiler_heap_dump_writer_unittest.cc",
@@ -1944,15 +2154,19 @@ test("base_unittests") {
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
+ "trace_event/trace_category_unittest.cc",
"trace_event/trace_config_unittest.cc",
"trace_event/trace_event_argument_unittest.cc",
+ "trace_event/trace_event_filter_test_utils.cc",
+ "trace_event/trace_event_filter_test_utils.h",
"trace_event/trace_event_synthetic_delay_unittest.cc",
"trace_event/trace_event_system_stats_monitor_unittest.cc",
"trace_event/trace_event_unittest.cc",
- "trace_event/winheap_dump_provider_win_unittest.cc",
"tracked_objects_unittest.cc",
"tuple_unittest.cc",
+ "unguessable_token_unittest.cc",
"values_unittest.cc",
"version_unittest.cc",
"vlog_unittest.cc",
@@ -1986,7 +2200,9 @@ test("base_unittests") {
":base",
":i18n",
":message_loop_tests",
- "//base/test:run_all_unittests",
+ "//base/allocator:features",
+ "//base/test:native_library_test_utils",
+ "//base/test:run_all_base_unittests",
"//base/test:test_support",
"//base/third_party/dynamic_annotations",
"//testing/gmock",
@@ -1994,6 +2210,10 @@ test("base_unittests") {
"//third_party/icu",
]
+ data_deps = [
+ "//base/test:test_shared_library",
+ ]
+
if (is_ios || is_mac) {
deps += [ ":base_unittests_arc" ]
}
@@ -2016,10 +2236,15 @@ test("base_unittests") {
}
if (is_android) {
+ sources -= [
+ "process/process_unittest.cc",
+ "process/process_util_unittest.cc",
+ ]
deps += [
":base_java",
":base_java_unittest_support",
"//base/android/jni_generator:jni_generator_tests",
+ "//base/test:test_support_java",
]
}
@@ -2042,6 +2267,7 @@ test("base_unittests") {
"mac/bind_objc_block_unittest.mm",
"mac/foundation_util_unittest.mm",
"mac/objc_property_releaser_unittest.mm",
+ "mac/objc_release_properties_unittest.mm",
"mac/scoped_nsobject_unittest.mm",
"strings/sys_string_conversions_mac_unittest.mm",
]
@@ -2050,6 +2276,10 @@ test("base_unittests") {
# TODO(GYP): dep on copy_test_data_ios action.
}
+ if (use_partition_alloc) {
+ sources += [ "allocator/partition_allocator/partition_alloc_unittest.cc" ]
+ }
+
if (is_mac) {
libs = [
"CoreFoundation.framework",
@@ -2064,10 +2294,6 @@ test("base_unittests") {
deps += [ "//base/test:malloc_wrapper" ]
- if (use_glib) {
- configs += [ "//build/config/linux:glib" ]
- }
-
if (!is_component_build) {
# Set rpath to find libmalloc_wrapper.so even in a non-component build.
configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
@@ -2156,6 +2382,7 @@ if (enable_nocompile_tests) {
"callback_list_unittest.nc",
"callback_unittest.nc",
"memory/weak_ptr_unittest.nc",
+ "metrics/histogram_unittest.nc",
]
deps = [
@@ -2167,7 +2394,6 @@ if (enable_nocompile_tests) {
}
if (is_android) {
- # GYP: //base.gyp:base_jni_headers
generate_jni("base_jni_headers") {
sources = [
"android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
@@ -2179,10 +2405,12 @@ if (is_android) {
"android/java/src/org/chromium/base/ContentUriUtils.java",
"android/java/src/org/chromium/base/ContextUtils.java",
"android/java/src/org/chromium/base/CpuFeatures.java",
+ "android/java/src/org/chromium/base/EarlyTraceEvent.java",
"android/java/src/org/chromium/base/EventLog.java",
"android/java/src/org/chromium/base/FieldTrialList.java",
"android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
"android/java/src/org/chromium/base/JNIUtils.java",
+ "android/java/src/org/chromium/base/JavaExceptionReporter.java",
"android/java/src/org/chromium/base/JavaHandlerThread.java",
"android/java/src/org/chromium/base/LocaleUtils.java",
"android/java/src/org/chromium/base/MemoryPressureListener.java",
@@ -2192,10 +2420,14 @@ if (is_android) {
"android/java/src/org/chromium/base/SysUtils.java",
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/ThrowUncaughtException.java",
+ "android/java/src/org/chromium/base/TimeUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
"android/java/src/org/chromium/base/metrics/RecordHistogram.java",
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+ "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
]
public_deps = [
@@ -2205,13 +2437,11 @@ if (is_android) {
jni_package = "base"
}
- # GYP: //base.gyp:android_runtime_jni_headers
generate_jar_jni("android_runtime_jni_headers") {
jni_package = "base"
classes = [ "java/lang/Runtime.class" ]
}
- # GYP: //base.gyp:base_java
android_library("base_java") {
srcjar_deps = [
":base_android_java_enums_srcjar",
@@ -2220,6 +2450,7 @@ if (is_android) {
]
deps = [
+ "//third_party/android_tools:android_support_annotations_java",
"//third_party/android_tools:android_support_multidex_java",
"//third_party/jsr-305:jsr_305_javalib",
]
@@ -2240,22 +2471,24 @@ if (is_android) {
"android/java/src/org/chromium/base/ContentUriUtils.java",
"android/java/src/org/chromium/base/ContextUtils.java",
"android/java/src/org/chromium/base/CpuFeatures.java",
+ "android/java/src/org/chromium/base/EarlyTraceEvent.java",
"android/java/src/org/chromium/base/EventLog.java",
"android/java/src/org/chromium/base/FieldTrialList.java",
"android/java/src/org/chromium/base/FileUtils.java",
"android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
"android/java/src/org/chromium/base/JNIUtils.java",
+ "android/java/src/org/chromium/base/JavaExceptionReporter.java",
"android/java/src/org/chromium/base/JavaHandlerThread.java",
"android/java/src/org/chromium/base/LocaleUtils.java",
"android/java/src/org/chromium/base/Log.java",
"android/java/src/org/chromium/base/MemoryPressureListener.java",
+ "android/java/src/org/chromium/base/NonThreadSafe.java",
"android/java/src/org/chromium/base/ObserverList.java",
"android/java/src/org/chromium/base/PackageUtils.java",
"android/java/src/org/chromium/base/PathService.java",
"android/java/src/org/chromium/base/PathUtils.java",
"android/java/src/org/chromium/base/PerfTraceEvent.java",
"android/java/src/org/chromium/base/PowerMonitor.java",
- "android/java/src/org/chromium/base/PowerStatusReceiver.java",
"android/java/src/org/chromium/base/Promise.java",
"android/java/src/org/chromium/base/ResourceExtractor.java",
"android/java/src/org/chromium/base/SecureRandomInitializer.java",
@@ -2263,7 +2496,10 @@ if (is_android) {
"android/java/src/org/chromium/base/SysUtils.java",
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/ThrowUncaughtException.java",
+ "android/java/src/org/chromium/base/TimeUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/VisibleForTesting.java",
"android/java/src/org/chromium/base/annotations/AccessedByNative.java",
"android/java/src/org/chromium/base/annotations/CalledByNative.java",
@@ -2283,8 +2519,10 @@ if (is_android) {
"android/java/src/org/chromium/base/library_loader/ModernLinker.java",
"android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
"android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+ "android/java/src/org/chromium/base/metrics/CachedMetrics.java",
"android/java/src/org/chromium/base/metrics/RecordHistogram.java",
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+ "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
"android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
]
@@ -2297,34 +2535,46 @@ if (is_android) {
]
}
- # GYP: //base.gyp:base_javatests
android_library("base_javatests") {
+ testonly = true
deps = [
":base_java",
":base_java_test_support",
+ "//third_party/android_support_test_runner:runner_java",
+ "//third_party/junit:junit",
]
java_files = [
"android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
"android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
"android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
"android/javatests/src/org/chromium/base/CommandLineTest.java",
+
+ # TODO(nona): move to JUnit once that is built for Android N.
+ "android/javatests/src/org/chromium/base/LocaleUtilsTest.java",
"android/javatests/src/org/chromium/base/ObserverListTest.java",
"android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
]
}
- # GYP: //base.gyp:base_java_test_support
android_library("base_java_test_support") {
+ testonly = true
deps = [
":base_java",
"//testing/android/reporter:reporter_java",
+ "//third_party/android_support_test_runner:exposed_instrumentation_api_publish_java",
+ "//third_party/android_support_test_runner:runner_java",
+ "//third_party/hamcrest:hamcrest_core_java",
+ "//third_party/junit",
]
java_files = [
"test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
"test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+ "test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
"test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
@@ -2336,6 +2586,7 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
"test/android/javatests/src/org/chromium/base/test/util/Manual.java",
+ "test/android/javatests/src/org/chromium/base/test/util/Matchers.java",
"test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
"test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
@@ -2357,25 +2608,11 @@ if (is_android) {
]
}
- # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
- # in the multidex shadow library. crbug.com/522043
- # GYP: //base.gyp:base_junit_test_support
- java_library("base_junit_test_support") {
- testonly = true
- java_files = [ "test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java" ]
- deps = [
- "//third_party/android_tools:android_support_multidex_java",
- "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
- "//third_party/robolectric:robolectric_java",
- ]
- srcjar_deps = [ ":base_build_config_gen" ]
- }
-
- # GYP: //base.gyp:base_junit_tests
junit_binary("base_junit_tests") {
java_files = [
"android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
"android/junit/src/org/chromium/base/LogTest.java",
+ "android/junit/src/org/chromium/base/NonThreadSafeTest.java",
"android/junit/src/org/chromium/base/PromiseTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
@@ -2385,14 +2622,10 @@ if (is_android) {
deps = [
":base_java",
":base_java_test_support",
- ":base_junit_test_support",
]
+ srcjar_deps = [ ":base_build_config_gen" ]
}
- # GYP: //base.gyp:base_java_application_state
- # GYP: //base.gyp:base_java_library_load_from_apk_status_codes
- # GYP: //base.gyp:base_java_library_process_type
- # GYP: //base.gyp:base_java_memory_pressure_level
java_cpp_enum("base_android_java_enums_srcjar") {
sources = [
"android/application_status_listener.h",
@@ -2402,7 +2635,6 @@ if (is_android) {
]
}
- # GYP: //base/base.gyp:base_build_config_gen
java_cpp_template("base_build_config_gen") {
sources = [
"android/java/templates/BuildConfig.template",
@@ -2410,12 +2642,11 @@ if (is_android) {
package_name = "org/chromium/base"
defines = []
- if (!is_java_debug) {
- defines += [ "NDEBUG" ]
+ if (is_java_debug || dcheck_always_on) {
+ defines += [ "_DCHECK_IS_ON" ]
}
}
- # GYP: //base/base.gyp:base_native_libraries_gen
java_cpp_template("base_native_libraries_gen") {
sources = [
"android/java/templates/NativeLibraries.template",
@@ -2423,12 +2654,23 @@ if (is_android) {
package_name = "org/chromium/base/library_loader"
}
- # GYP: //base.gyp:base_java_unittest_support
android_library("base_java_unittest_support") {
+ testonly = true
deps = [
":base_java",
]
- java_files =
- [ "test/android/java/src/org/chromium/base/ContentUriTestUtils.java" ]
+ java_files = [
+ "test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
+ ]
}
}
+
+fuzzer_test("base_json_correctness_fuzzer") {
+ sources = [
+ "json/correctness_fuzzer.cc",
+ ]
+ deps = [
+ ":base",
+ ]
+}
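
fuzzer_test() links the target against a fuzzing engine such as libFuzzer, which repeatedly invokes an entry point of the standard shape below. The body is only a plausible sketch of what json/correctness_fuzzer.cc might do, not the file's actual contents:

  #include <cstddef>
  #include <cstdint>

  #include "base/json/json_reader.h"
  #include "base/strings/string_piece.h"

  // Called once per fuzzer-generated input; crashes and sanitizer reports
  // are what count as findings.
  extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    base::JSONReader::Read(
        base::StringPiece(reinterpret_cast<const char*>(data), size));
    return 0;
  }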
diff --git a/base/DEPS b/base/DEPS
index c0e95a00f6..4b25f3f310 100644
--- a/base/DEPS
+++ b/base/DEPS
@@ -2,6 +2,7 @@ include_rules = [
"+jni",
"+third_party/ashmem",
"+third_party/apple_apsl",
+ "+third_party/ced",
"+third_party/lss",
"+third_party/modp_b64",
"+third_party/tcmalloc",
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 490b8e871b..8cdb06161f 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -13,8 +13,10 @@ declare_args() {
enable_debugallocation = is_debug
}
-# Allocator shim is only enabled for Release static builds.
-win_use_allocator_shim = is_win && !is_component_build && !is_debug
+# The Windows-only allocator shim is only enabled for Release static builds, and
+# is mutually exclusive with the generalized shim.
+win_use_allocator_shim = is_win && !is_component_build && !is_debug &&
+ !use_experimental_allocator_shim && !is_asan
# This "allocator" meta-target will forward to the default allocator according
# to the build settings.
@@ -93,6 +95,8 @@ if (win_use_allocator_shim) {
sources = [
"allocator_shim_win.cc",
"allocator_shim_win.h",
+ "winheap_stubs_win.cc",
+ "winheap_stubs_win.h",
]
configs += [ ":allocator_shim_define" ]
}
@@ -218,6 +222,14 @@ if (use_allocator == "tcmalloc") {
":tcmalloc_flags",
]
+ # Thumb mode is disabled due to a bug in the clang integrated assembler
+ # TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
+ configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
+ configs += [ "//build/config/compiler:compiler_arm" ]
+
+ # TODO(crbug.com/633719) Make tcmalloc work with AFDO if possible.
+ configs -= [ "//build/config/compiler:afdo" ]
+
deps = []
if (enable_profiling) {
@@ -276,52 +288,22 @@ if (use_allocator == "tcmalloc") {
buildflag_header("features") {
header = "features.h"
- flags = [ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim" ]
+ flags = [
+ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim",
+ "ENABLE_WIN_ALLOCATOR_SHIM_TESTS=($use_experimental_allocator_shim || $win_use_allocator_shim)",
+ ]
}
-if (use_experimental_allocator_shim) {
- # Used to shim malloc symbols on Android. see //base/allocator/README.md.
- config("wrap_malloc_symbols") {
- ldflags = [
- "-Wl,-wrap,calloc",
- "-Wl,-wrap,free",
- "-Wl,-wrap,malloc",
- "-Wl,-wrap,memalign",
- "-Wl,-wrap,posix_memalign",
- "-Wl,-wrap,pvalloc",
- "-Wl,-wrap,realloc",
- "-Wl,-wrap,valloc",
- ]
- }
-
- source_set("unified_allocator_shim") {
- # TODO(primiano): support other platforms, currently this works only on
- # Linux/CrOS/Android. http://crbug.com/550886 .
- configs += [ "//base:base_implementation" ] # for BASE_EXPORT
- visibility = [ "//base:base" ]
- sources = [
- "allocator_shim.cc",
- "allocator_shim.h",
- "allocator_shim_internals.h",
- "allocator_shim_override_cpp_symbols.h",
- "allocator_shim_override_libc_symbols.h",
- ]
- if (is_linux && use_allocator == "tcmalloc") {
- sources += [
- "allocator_shim_default_dispatch_to_tcmalloc.cc",
- "allocator_shim_override_glibc_weak_symbols.h",
- ]
- deps = [
- ":tcmalloc",
- ]
- } else if (is_linux && use_allocator == "none") {
- sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
- } else if (is_android && use_allocator == "none") {
- sources += [
- "allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
- "allocator_shim_override_linker_wrapped_symbols.h",
- ]
- all_dependent_configs = [ ":wrap_malloc_symbols" ]
- }
- }
+# Used to shim malloc symbols on Android. See //base/allocator/README.md.
+config("wrap_malloc_symbols") {
+ ldflags = [
+ "-Wl,-wrap,calloc",
+ "-Wl,-wrap,free",
+ "-Wl,-wrap,malloc",
+ "-Wl,-wrap,memalign",
+ "-Wl,-wrap,posix_memalign",
+ "-Wl,-wrap,pvalloc",
+ "-Wl,-wrap,realloc",
+ "-Wl,-wrap,valloc",
+ ]
}
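
With GNU ld, each -Wl,-wrap,foo flag reroutes every reference to foo in wrapped objects to __wrap_foo and exposes the original symbol as __real_foo. That is the whole interposition mechanism on Android, where the glibc weak-symbol override used on desktop Linux is unavailable. A toy interposer (the shim's real wrappers live in allocator_shim_override_linker_wrapped_symbols.h) reduces to:

  #include <cstddef>

  extern "C" {

  // Supplied by the linker under -Wl,-wrap,malloc: the libc allocator.
  void* __real_malloc(size_t size);

  // Every call site of malloc() in wrapped code lands here instead.
  void* __wrap_malloc(size_t size) {
    // The real shim routes through its AllocatorDispatch chain; this toy
    // version just forwards.
    return __real_malloc(size);
  }

  }  // extern "C"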
diff --git a/base/allocator/README.md b/base/allocator/README.md
index 164df51ae6..a211732c3f 100644
--- a/base/allocator/README.md
+++ b/base/allocator/README.md
@@ -189,8 +189,8 @@ Related links
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
-- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
+- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
-[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
+[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
deleted file mode 100644
index 674d4d645f..0000000000
--- a/base/allocator/allocator.gyp
+++ /dev/null
@@ -1,450 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'target_defaults': {
- 'variables': {
- # This code gets run a lot and debugged rarely, so it should be fast
- # by default. See http://crbug.com/388949.
- 'debug_optimize': '2',
- 'win_debug_Optimization': '0',
- # Run time checks are incompatible with any level of optimizations.
- 'win_debug_RuntimeChecks': '0',
- },
- },
- 'variables': {
- 'tcmalloc_dir': '../../third_party/tcmalloc/chromium',
- 'use_vtable_verify%': 0,
- # Provide a way to force disable debugallocation in Debug builds
- # e.g. for profiling (it's more rare to profile Debug builds,
- # but people sometimes need to do that).
- 'disable_debugallocation%': 0,
- },
- 'targets': [
- # The only targets that should depend on allocator are 'base' and
- # executables that don't depend, directly or indirectly, on base (a few).
- # All the other targets get a transitive dependency on this target via base.
- {
- 'target_name': 'allocator',
- 'variables': {
- 'conditions': [
- ['use_allocator!="none" or (OS=="win" and win_use_allocator_shim==1)', {
- 'allocator_target_type%': 'static_library',
- }, {
- 'allocator_target_type%': 'none',
- }],
- ],
- },
- 'type': '<(allocator_target_type)',
- 'toolsets': ['host', 'target'],
- 'conditions': [
- ['OS=="win" and win_use_allocator_shim==1', {
- 'msvs_settings': {
- # TODO(sgk): merge this with build/common.gypi settings
- 'VCLibrarianTool': {
- 'AdditionalOptions': ['/ignore:4006,4221'],
- },
- 'VCLinkerTool': {
- 'AdditionalOptions': ['/ignore:4006'],
- },
- },
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'allocator_shim_win.cc',
- 'allocator_shim_win.h',
- ],
- 'configurations': {
- 'Debug_Base': {
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- 'RuntimeLibrary': '0',
- },
- },
- },
- },
- }], # OS=="win"
- ['use_allocator=="tcmalloc"', {
- # Disable the heap checker in tcmalloc.
- 'defines': [
- 'NO_HEAP_CHECK',
- ],
- 'dependencies': [
- '../third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- ],
- # The order of this include_dirs matters, as tc-malloc has its own
- # base/ mini-fork. Do not factor these out of this conditions section.
- 'include_dirs': [
- '.',
- '<(tcmalloc_dir)/src/base',
- '<(tcmalloc_dir)/src',
- '../..',
- ],
- 'sources': [
- # Generated for our configuration from tcmalloc's build
- # and checked in.
- '<(tcmalloc_dir)/src/config.h',
- '<(tcmalloc_dir)/src/config_android.h',
- '<(tcmalloc_dir)/src/config_linux.h',
- '<(tcmalloc_dir)/src/config_win.h',
-
- # all tcmalloc native and forked files
- '<(tcmalloc_dir)/src/addressmap-inl.h',
- '<(tcmalloc_dir)/src/base/abort.cc',
- '<(tcmalloc_dir)/src/base/abort.h',
- '<(tcmalloc_dir)/src/base/arm_instruction_set_select.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-arm-generic.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-arm-v6plus.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-windows.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-x86.cc',
- '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
- '<(tcmalloc_dir)/src/base/atomicops.h',
- '<(tcmalloc_dir)/src/base/commandlineflags.h',
- '<(tcmalloc_dir)/src/base/cycleclock.h',
- # We don't list dynamic_annotations.c since its copy is already
- # present in the dynamic_annotations target.
- '<(tcmalloc_dir)/src/base/dynamic_annotations.h',
- '<(tcmalloc_dir)/src/base/elf_mem_image.cc',
- '<(tcmalloc_dir)/src/base/elf_mem_image.h',
- '<(tcmalloc_dir)/src/base/elfcore.h',
- '<(tcmalloc_dir)/src/base/googleinit.h',
- '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
- '<(tcmalloc_dir)/src/base/linuxthreads.cc',
- '<(tcmalloc_dir)/src/base/linuxthreads.h',
- '<(tcmalloc_dir)/src/base/logging.cc',
- '<(tcmalloc_dir)/src/base/logging.h',
- '<(tcmalloc_dir)/src/base/low_level_alloc.cc',
- '<(tcmalloc_dir)/src/base/low_level_alloc.h',
- '<(tcmalloc_dir)/src/base/simple_mutex.h',
- '<(tcmalloc_dir)/src/base/spinlock.cc',
- '<(tcmalloc_dir)/src/base/spinlock.h',
- '<(tcmalloc_dir)/src/base/spinlock_internal.cc',
- '<(tcmalloc_dir)/src/base/spinlock_internal.h',
- '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
- '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
- '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
- '<(tcmalloc_dir)/src/base/stl_allocator.h',
- '<(tcmalloc_dir)/src/base/synchronization_profiling.h',
- '<(tcmalloc_dir)/src/base/sysinfo.cc',
- '<(tcmalloc_dir)/src/base/sysinfo.h',
- '<(tcmalloc_dir)/src/base/thread_annotations.h',
- '<(tcmalloc_dir)/src/base/thread_lister.c',
- '<(tcmalloc_dir)/src/base/thread_lister.h',
- '<(tcmalloc_dir)/src/base/vdso_support.cc',
- '<(tcmalloc_dir)/src/base/vdso_support.h',
- '<(tcmalloc_dir)/src/central_freelist.cc',
- '<(tcmalloc_dir)/src/central_freelist.h',
- '<(tcmalloc_dir)/src/common.cc',
- '<(tcmalloc_dir)/src/common.h',
- '<(tcmalloc_dir)/src/debugallocation.cc',
- '<(tcmalloc_dir)/src/free_list.cc',
- '<(tcmalloc_dir)/src/free_list.h',
- '<(tcmalloc_dir)/src/getpc.h',
- '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
- '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
- '<(tcmalloc_dir)/src/gperftools/profiler.h',
- '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
- '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
- '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
- '<(tcmalloc_dir)/src/heap-checker.cc',
- '<(tcmalloc_dir)/src/heap-profile-table.cc',
- '<(tcmalloc_dir)/src/heap-profile-table.h',
- '<(tcmalloc_dir)/src/heap-profiler.cc',
- '<(tcmalloc_dir)/src/internal_logging.cc',
- '<(tcmalloc_dir)/src/internal_logging.h',
- '<(tcmalloc_dir)/src/libc_override.h',
- '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
- '<(tcmalloc_dir)/src/libc_override_glibc.h',
- '<(tcmalloc_dir)/src/libc_override_osx.h',
- '<(tcmalloc_dir)/src/libc_override_redefine.h',
- '<(tcmalloc_dir)/src/linked_list.h',
- '<(tcmalloc_dir)/src/malloc_extension.cc',
- '<(tcmalloc_dir)/src/malloc_hook-inl.h',
- '<(tcmalloc_dir)/src/malloc_hook.cc',
- '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
- '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
- '<(tcmalloc_dir)/src/maybe_threads.cc',
- '<(tcmalloc_dir)/src/maybe_threads.h',
- '<(tcmalloc_dir)/src/memfs_malloc.cc',
- '<(tcmalloc_dir)/src/memory_region_map.cc',
- '<(tcmalloc_dir)/src/memory_region_map.h',
- '<(tcmalloc_dir)/src/packed-cache-inl.h',
- '<(tcmalloc_dir)/src/page_heap.cc',
- '<(tcmalloc_dir)/src/page_heap.h',
- '<(tcmalloc_dir)/src/page_heap_allocator.h',
- '<(tcmalloc_dir)/src/pagemap.h',
- '<(tcmalloc_dir)/src/profile-handler.cc',
- '<(tcmalloc_dir)/src/profile-handler.h',
- '<(tcmalloc_dir)/src/profiledata.cc',
- '<(tcmalloc_dir)/src/profiledata.h',
- '<(tcmalloc_dir)/src/profiler.cc',
- '<(tcmalloc_dir)/src/raw_printer.cc',
- '<(tcmalloc_dir)/src/raw_printer.h',
- '<(tcmalloc_dir)/src/sampler.cc',
- '<(tcmalloc_dir)/src/sampler.h',
- '<(tcmalloc_dir)/src/span.cc',
- '<(tcmalloc_dir)/src/span.h',
- '<(tcmalloc_dir)/src/stack_trace_table.cc',
- '<(tcmalloc_dir)/src/stack_trace_table.h',
- '<(tcmalloc_dir)/src/stacktrace.cc',
- '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_config.h',
- '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
- '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
- '<(tcmalloc_dir)/src/static_vars.cc',
- '<(tcmalloc_dir)/src/static_vars.h',
- '<(tcmalloc_dir)/src/symbolize.cc',
- '<(tcmalloc_dir)/src/symbolize.h',
- '<(tcmalloc_dir)/src/system-alloc.cc',
- '<(tcmalloc_dir)/src/system-alloc.h',
- '<(tcmalloc_dir)/src/tcmalloc.cc',
- '<(tcmalloc_dir)/src/tcmalloc_guard.h',
- '<(tcmalloc_dir)/src/thread_cache.cc',
- '<(tcmalloc_dir)/src/thread_cache.h',
-
- 'debugallocation_shim.cc',
- ],
- # sources! means that these are not compiled directly.
- 'sources!': [
- # We simply don't use these, but list them above so that IDE
- # users can view the full available source for reference, etc.
- '<(tcmalloc_dir)/src/addressmap-inl.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-x86-msvc.h',
- '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
- '<(tcmalloc_dir)/src/base/atomicops.h',
- '<(tcmalloc_dir)/src/base/commandlineflags.h',
- '<(tcmalloc_dir)/src/base/cycleclock.h',
- '<(tcmalloc_dir)/src/base/elf_mem_image.h',
- '<(tcmalloc_dir)/src/base/elfcore.h',
- '<(tcmalloc_dir)/src/base/googleinit.h',
- '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
- '<(tcmalloc_dir)/src/base/simple_mutex.h',
- '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
- '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
- '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
- '<(tcmalloc_dir)/src/base/stl_allocator.h',
- '<(tcmalloc_dir)/src/base/thread_annotations.h',
- '<(tcmalloc_dir)/src/getpc.h',
- '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
- '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
- '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
- '<(tcmalloc_dir)/src/gperftools/profiler.h',
- '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
- '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
- '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
- '<(tcmalloc_dir)/src/heap-checker.cc',
- '<(tcmalloc_dir)/src/libc_override.h',
- '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
- '<(tcmalloc_dir)/src/libc_override_glibc.h',
- '<(tcmalloc_dir)/src/libc_override_osx.h',
- '<(tcmalloc_dir)/src/libc_override_redefine.h',
- '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
- '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
- '<(tcmalloc_dir)/src/memfs_malloc.cc',
- '<(tcmalloc_dir)/src/packed-cache-inl.h',
- '<(tcmalloc_dir)/src/page_heap_allocator.h',
- '<(tcmalloc_dir)/src/pagemap.h',
- '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_config.h',
- '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
- '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
- '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
- '<(tcmalloc_dir)/src/tcmalloc_guard.h',
-
- # Included by debugallocation_shim.cc.
- '<(tcmalloc_dir)/src/debugallocation.cc',
- '<(tcmalloc_dir)/src/tcmalloc.cc',
- ],
- 'variables': {
- 'clang_warning_flags': [
- # tcmalloc initializes some fields in the wrong order.
- '-Wno-reorder',
- # tcmalloc contains some unused local template specializations.
- '-Wno-unused-function',
- # tcmalloc uses COMPILE_ASSERT without static_assert but with
- # typedefs.
- '-Wno-unused-local-typedefs',
- # for magic2_ in debugallocation.cc (only built in Debug builds)
- '-Wno-unused-private-field',
- ],
- },
- 'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="solaris" or OS=="android"', {
- 'sources!': [
- '<(tcmalloc_dir)/src/system-alloc.h',
- ],
- # We enable all warnings by default, but upstream disables a few.
- # Keep "-Wno-*" flags in sync with upstream by comparing against:
- # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
- 'cflags': [
- '-Wno-sign-compare',
- '-Wno-unused-result',
- ],
- 'link_settings': {
- 'ldflags': [
- # Don't let the linker rip this symbol out; otherwise the heap & CPU
- # profilers will not initialize properly on startup.
- '-Wl,-uIsHeapProfilerRunning,-uProfilerStart',
- # Do the same for heap leak checker.
- '-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi',
- '-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl',
- '-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
- ],
- },
- # Compiling tcmalloc with -fvisibility=default is only necessary when
- # not using the allocator shim, which provides the correct visibility
- # annotations for those symbols which need to be exported (see
- # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
- # //base/allocator/allocator_shim_internals.h for the definition of
- # SHIM_ALWAYS_EXPORT).
- 'conditions': [
- ['use_experimental_allocator_shim==0', {
- 'cflags!': [
- '-fvisibility=hidden',
- ],
- }],
- ],
- }],
- ['profiling!=1', {
- 'sources!': [
- # cpuprofiler
- '<(tcmalloc_dir)/src/base/thread_lister.c',
- '<(tcmalloc_dir)/src/base/thread_lister.h',
- '<(tcmalloc_dir)/src/profile-handler.cc',
- '<(tcmalloc_dir)/src/profile-handler.h',
- '<(tcmalloc_dir)/src/profiledata.cc',
- '<(tcmalloc_dir)/src/profiledata.h',
- '<(tcmalloc_dir)/src/profiler.cc',
- ],
- }],
- ['use_experimental_allocator_shim==1', {
- 'defines': [
- 'TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC',
- ],
- }]
- ],
- 'configurations': {
- 'Debug_Base': {
- 'conditions': [
- ['disable_debugallocation==0', {
- 'defines': [
- # Use debugallocation for Debug builds to catch problems
- # early and cleanly, http://crbug.com/30715 .
- 'TCMALLOC_FOR_DEBUGALLOCATION',
- ],
- }],
- ],
- },
- },
- }], # use_allocator=="tcmalloc
- # For CrOS builds with vtable verification. According to the author of
- # crrev.com/10854031 this is used in conjuction with some other CrOS
- # build flag, to enable verification of any allocator that uses virtual
- # function calls.
- ['use_vtable_verify==1', {
- 'cflags': [
- '-fvtable-verify=preinit',
- ],
- }],
- ['order_profiling != 0', {
- 'target_conditions' : [
- ['_toolset=="target"', {
- 'cflags!': [ '-finstrument-functions' ],
- }],
- ],
- }],
- ], # conditions of 'allocator' target.
- }, # 'allocator' target.
- {
- # GN: //base/allocator:features
- # When referenced from a target that might be compiled in the host
- # toolchain, always refer to 'allocator_features#target'.
- 'target_name': 'allocator_features',
- 'includes': [ '../../build/buildflag_header.gypi' ],
- 'variables': {
- 'buildflag_header_path': 'base/allocator/features.h',
- 'buildflag_flags': [
- 'USE_EXPERIMENTAL_ALLOCATOR_SHIM=<(use_experimental_allocator_shim)',
- ],
- },
- }, # 'allocator_features' target.
- ], # targets.
- 'conditions': [
- ['use_experimental_allocator_shim==1', {
- 'targets': [
- {
- # GN: //base/allocator:unified_allocator_shim
- 'target_name': 'unified_allocator_shim',
- 'toolsets': ['host', 'target'],
- 'type': 'static_library',
- 'defines': [ 'BASE_IMPLEMENTATION' ],
- 'sources': [
- 'allocator_shim.cc',
- 'allocator_shim.h',
- 'allocator_shim_internals.h',
- 'allocator_shim_override_cpp_symbols.h',
- 'allocator_shim_override_libc_symbols.h',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'conditions': [
- ['OS=="linux" and use_allocator=="tcmalloc"', {
- 'sources': [
- 'allocator_shim_default_dispatch_to_tcmalloc.cc',
- 'allocator_shim_override_glibc_weak_symbols.h',
- ],
- }],
- ['use_allocator=="none" and (OS=="linux" or (OS=="android" and _toolset == "host" and host_os == "linux"))', {
- 'sources': [
- 'allocator_shim_default_dispatch_to_glibc.cc',
- ],
- }],
- ['OS=="android" and _toolset == "target"', {
- 'sources': [
- 'allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc',
- 'allocator_shim_override_linker_wrapped_symbols.h',
- ],
- # On Android all references to malloc & friends symbols are
- # rewritten, at link time, and routed to the shim.
- # See //base/allocator/README.md.
- 'all_dependent_settings': {
- 'ldflags': [
- '-Wl,-wrap,calloc',
- '-Wl,-wrap,free',
- '-Wl,-wrap,malloc',
- '-Wl,-wrap,memalign',
- '-Wl,-wrap,posix_memalign',
- '-Wl,-wrap,pvalloc',
- '-Wl,-wrap,realloc',
- '-Wl,-wrap,valloc',
- ],
- },
- }],
- ]
- }, # 'unified_allocator_shim' target.
- ],
- }]
- ],
-}
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index 09ed45fde3..fbdbdfc8c2 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -5,16 +5,26 @@
#include "base/allocator/allocator_shim.h"
#include <errno.h>
-#include <unistd.h>
#include <new>
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
+#if !defined(OS_WIN)
+#include <unistd.h>
+#else
+#include "base/allocator/winheap_stubs_win.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#endif
+
// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
@@ -28,30 +38,25 @@ subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
&allocator::AllocatorDispatch::default_dispatch);
bool g_call_new_handler_on_malloc_failure = false;
-subtle::Atomic32 g_new_handler_lock = 0;
-// In theory this should be just base::ThreadChecker. But we can't afford
-// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
-bool CalledOnValidThread() {
- using subtle::Atomic32;
- const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
- static Atomic32 g_tid = kInvalidTID;
- Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
- Atomic32 prev_tid =
- subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
- return prev_tid == kInvalidTID || prev_tid == cur_tid;
-}
+#if !defined(OS_WIN)
+subtle::Atomic32 g_new_handler_lock = 0;
+#endif
-inline size_t GetPageSize() {
+inline size_t GetCachedPageSize() {
static size_t pagesize = 0;
if (!pagesize)
- pagesize = sysconf(_SC_PAGESIZE);
+ pagesize = base::GetPageSize();
return pagesize;
}
// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
-bool CallNewHandler() {
+bool CallNewHandler(size_t size) {
+#if defined(OS_WIN)
+ return base::allocator::WinCallNewHandler(size);
+#else
+ ALLOW_UNUSED_PARAM(size);
// TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed
// to be thread safe and would avoid the spinlock boilerplate here. However
// it doesn't seem to be available in the Linux chroot headers yet.
@@ -69,6 +74,7 @@ bool CallNewHandler() {
// Assume the new_handler will abort if it fails. Exceptions are disabled and
// we don't support the case of a new_handler throwing std::bad_alloc.
return true;
+#endif
}
inline const allocator::AllocatorDispatch* GetChainHead() {
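For reference, the retry protocol that CallNewHandler() and the Shim* allocation loops implement can be restated standalone. A sketch assuming C++11's std::get_new_handler(), the accessor the TODO above mentions (not shim code, which must avoid this simplicity for re-entrancy reasons):

#include <cstddef>
#include <cstdlib>
#include <new>

// Keep attempting the allocation while a new_handler is installed,
// mirroring the contract of operator new.
void* AllocWithNewHandlerRetry(std::size_t size) {
  for (;;) {
    if (void* ptr = std::malloc(size))
      return ptr;
    std::new_handler handler = std::get_new_handler();  // C++11
    if (!handler)
      return nullptr;  // no handler installed: report failure to the caller
    handler();  // expected to free memory, throw std::bad_alloc, or abort
  }
}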
@@ -95,14 +101,34 @@ void SetCallNewHandlerOnMallocFailure(bool value) {
void* UncheckedAlloc(size_t size) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
- return chain_head->alloc_function(chain_head, size);
+ return chain_head->alloc_function(chain_head, size, nullptr);
}
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
- // Ensure this is always called on the same thread.
- DCHECK(CalledOnValidThread());
-
- dispatch->next = GetChainHead();
+ // Loop in case of (an unlikely) race on setting the list head.
+ size_t kMaxRetries = 7;
+ for (size_t i = 0; i < kMaxRetries; ++i) {
+ const AllocatorDispatch* chain_head = GetChainHead();
+ dispatch->next = chain_head;
+
+ // This function guarantees to be thread-safe w.r.t. concurrent
+ // insertions. It also has to guarantee that all the threads always
+ // see a consistent chain, hence the MemoryBarrier() below.
+ // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
+ // we don't really want this to be a release-store with a corresponding
+ // acquire-load during malloc().
+ subtle::MemoryBarrier();
+ subtle::AtomicWord old_value =
+ reinterpret_cast<subtle::AtomicWord>(chain_head);
+ // Set the chain head to the new dispatch atomically. If we lose the race,
+ // the comparison will fail, and the new head of chain will be returned.
+ if (subtle::NoBarrier_CompareAndSwap(
+ &g_chain_head, old_value,
+ reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
+ // Success.
+ return;
+ }
+ }
- // This function does not guarantee to be thread-safe w.r.t. concurrent
- // insertions, but still has to guarantee that all the threads always
@@ -117,7 +143,6 @@ void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
}
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
- DCHECK(CalledOnValidThread());
DCHECK_EQ(GetChainHead(), dispatch);
subtle::NoBarrier_Store(&g_chain_head,
reinterpret_cast<subtle::AtomicWord>(dispatch->next));
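The compare-and-swap loop added to InsertAllocatorDispatch() above is a standard lock-free list prepend. The same idea restated with std::atomic, as a self-contained sketch (Node and Prepend are illustrative names, independent of //base):

#include <atomic>

struct Node {
  Node* next = nullptr;
};

std::atomic<Node*> g_head{nullptr};

void Prepend(Node* node) {
  Node* head = g_head.load(std::memory_order_relaxed);
  do {
    // Publish the link before attempting to swing the head pointer.
    node->next = head;
    // On failure, compare_exchange_weak reloads |head| with the current
    // value, so the next iteration retries against the new chain head,
    // just as the shim's bounded retry loop does.
  } while (!g_head.compare_exchange_weak(head, node,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
}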
@@ -127,8 +152,10 @@ void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
} // namespace base
// The Shim* functions below are the entry-points into the shim-layer and
-// are supposed to be invoked / aliased by the allocator_shim_override_*
+// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
+// They are defined as ALWAYS_INLINE in order to remove a level of indirection
+// between the system-defined entry points and the shim implementations.
extern "C" {
// The general pattern for allocations is:
@@ -143,102 +170,155 @@ extern "C" {
// just suicide printing a message).
// - Assume it did succeed if it returns, in which case reattempt the alloc.
-void* ShimCppNew(size_t size) {
+ALWAYS_INLINE void* ShimCppNew(size_t size) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
- ptr = chain_head->alloc_function(chain_head, size);
- } while (!ptr && CallNewHandler());
+ void* context = nullptr;
+#if defined(OS_MACOSX)
+ context = malloc_default_zone();
+#endif
+ ptr = chain_head->alloc_function(chain_head, size, context);
+ } while (!ptr && CallNewHandler(size));
return ptr;
}
-void ShimCppDelete(void* address) {
+ALWAYS_INLINE void ShimCppDelete(void* address) {
+ void* context = nullptr;
+#if defined(OS_MACOSX)
+ context = malloc_default_zone();
+#endif
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
- return chain_head->free_function(chain_head, address);
+ return chain_head->free_function(chain_head, address, context);
}
-void* ShimMalloc(size_t size) {
+ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
- ptr = chain_head->alloc_function(chain_head, size);
- } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ ptr = chain_head->alloc_function(chain_head, size, context);
+ } while (!ptr && g_call_new_handler_on_malloc_failure &&
+ CallNewHandler(size));
return ptr;
}
-void* ShimCalloc(size_t n, size_t size) {
+ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
- ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
- } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
+ context);
+ } while (!ptr && g_call_new_handler_on_malloc_failure &&
+ CallNewHandler(size));
return ptr;
}
-void* ShimRealloc(void* address, size_t size) {
+ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
// realloc(size == 0) means free() and might return a nullptr. We should
// not call the std::new_handler in that case, though.
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
- ptr = chain_head->realloc_function(chain_head, address, size);
+ ptr = chain_head->realloc_function(chain_head, address, size, context);
} while (!ptr && size && g_call_new_handler_on_malloc_failure &&
- CallNewHandler());
+ CallNewHandler(size));
return ptr;
}
-void* ShimMemalign(size_t alignment, size_t size) {
+ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
- ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
- } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
+ context);
+ } while (!ptr && g_call_new_handler_on_malloc_failure &&
+ CallNewHandler(size));
return ptr;
}
-int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
// posix_memalign is supposed to check the arguments. See tc_posix_memalign()
// in tc_malloc.cc.
if (((alignment % sizeof(void*)) != 0) ||
((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
return EINVAL;
}
- void* ptr = ShimMemalign(alignment, size);
+ void* ptr = ShimMemalign(alignment, size, nullptr);
*res = ptr;
return ptr ? 0 : ENOMEM;
}
-void* ShimValloc(size_t size) {
- return ShimMemalign(GetPageSize(), size);
+ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
+ return ShimMemalign(GetCachedPageSize(), size, context);
}
-void* ShimPvalloc(size_t size) {
+ALWAYS_INLINE void* ShimPvalloc(size_t size) {
// pvalloc(0) should allocate one page, according to its man page.
if (size == 0) {
- size = GetPageSize();
+ size = GetCachedPageSize();
} else {
- size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
+ size = (size + GetCachedPageSize() - 1) & ~(GetCachedPageSize() - 1);
}
- return ShimMemalign(GetPageSize(), size);
+ // The third argument is nullptr because pvalloc is glibc only and does not
+ // exist on OSX/BSD systems.
+ return ShimMemalign(GetCachedPageSize(), size, nullptr);
}
-void ShimFree(void* address) {
+ALWAYS_INLINE void ShimFree(void* address, void* context) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
- return chain_head->free_function(chain_head, address);
+ return chain_head->free_function(chain_head, address, context);
+}
+
+ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->get_size_estimate_function(
+ chain_head, const_cast<void*>(address), context);
+}
+
+ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
+ void** results,
+ unsigned num_requested,
+ void* context) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->batch_malloc_function(chain_head, size, results,
+ num_requested, context);
+}
+
+ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
+ unsigned num_to_be_freed,
+ void* context) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->batch_free_function(chain_head, to_be_freed,
+ num_to_be_freed, context);
+}
+
+ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->free_definite_size_function(chain_head, ptr, size,
+ context);
}
} // extern "C"
-// Cpp symbols (new / delete) should always be routed through the shim layer.
+#if !defined(OS_WIN) && !defined(OS_MACOSX)
+// Cpp symbols (new / delete) should always be routed through the shim layer
+// except on Windows and macOS where the malloc intercept is deep enough that it
+// also catches the cpp calls.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
+#endif
+#if defined(OS_ANDROID) || defined(ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
-#if !defined(OS_ANDROID) && !defined(ANDROID)
-// Ditto for plain malloc() / calloc() / free() etc. symbols.
-#include "base/allocator/allocator_shim_override_libc_symbols.h"
-#else
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
+#elif defined(OS_WIN)
+// On Windows we use plain link-time overriding of the CRT symbols.
+#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
+#elif defined(OS_MACOSX)
+#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
+#include "base/allocator/allocator_shim_override_mac_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif
// In the case of tcmalloc we also want to plumb into the glibc hooks
@@ -248,6 +328,22 @@ void ShimFree(void* address) {
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif
+#if defined(OS_MACOSX)
+namespace base {
+namespace allocator {
+void InitializeAllocatorShim() {
+ // Prepares the default dispatch. After the intercepted malloc calls have
+ // traversed the shim this will route them to the default malloc zone.
+ InitializeDefaultDispatchToMacAllocator();
+
+ // This replaces the default malloc zone, causing calls to malloc & friends
+ // from the codebase to be routed to ShimMalloc() above.
+ OverrideMacSymbols();
+}
+} // namespace allocator
+} // namespace base
+#endif
+
// Cross-checks.
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
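Two of the Shim* routines above rest on classic bit tricks: ShimPosixMemalign()'s argument validation and ShimPvalloc()'s rounding to a page multiple. Restated as standalone helpers (a sketch; the helper names are illustrative):

#include <cstddef>

// posix_memalign requires a nonzero power-of-two alignment that is also a
// multiple of sizeof(void*); (x & (x - 1)) == 0 is the power-of-two test.
inline bool IsValidPosixMemalignAlignment(size_t alignment) {
  return alignment != 0 && (alignment & (alignment - 1)) == 0 &&
         alignment % sizeof(void*) == 0;
}

// Rounds |size| up to the next multiple of |pagesize|. The mask trick needs
// pagesize to be a power of two, which holds on the supported platforms.
inline size_t RoundUpToPageMultiple(size_t size, size_t pagesize) {
  return (size + pagesize - 1) & ~(pagesize - 1);
}
// e.g. RoundUpToPageMultiple(1, 4096) == 4096 and
//      RoundUpToPageMultiple(8192, 4096) == 8192.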
diff --git a/base/allocator/allocator_shim.h b/base/allocator/allocator_shim.h
index f1a1e3d7ce..65ac1eb7de 100644
--- a/base/allocator/allocator_shim.h
+++ b/base/allocator/allocator_shim.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
+#include "build/build_config.h"
namespace base {
namespace allocator {
@@ -45,23 +46,54 @@ namespace allocator {
// without introducing unnecessary perf hits.
struct AllocatorDispatch {
- using AllocFn = void*(const AllocatorDispatch* self, size_t size);
+ using AllocFn = void*(const AllocatorDispatch* self,
+ size_t size,
+ void* context);
using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
size_t n,
- size_t size);
+ size_t size,
+ void* context);
using AllocAlignedFn = void*(const AllocatorDispatch* self,
size_t alignment,
- size_t size);
+ size_t size,
+ void* context);
using ReallocFn = void*(const AllocatorDispatch* self,
void* address,
- size_t size);
- using FreeFn = void(const AllocatorDispatch* self, void* address);
+ size_t size,
+ void* context);
+ using FreeFn = void(const AllocatorDispatch* self,
+ void* address,
+ void* context);
+ // Returns the best available estimate for the actual amount of memory
+ // consumed by the allocation |address|. If possible, this should include
+ // heap overhead or at least a decent estimate of the full cost of the
+ // allocation. If no good estimate is possible, returns zero.
+ using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
+ void* address,
+ void* context);
+ using BatchMallocFn = unsigned(const AllocatorDispatch* self,
+ size_t size,
+ void** results,
+ unsigned num_requested,
+ void* context);
+ using BatchFreeFn = void(const AllocatorDispatch* self,
+ void** to_be_freed,
+ unsigned num_to_be_freed,
+ void* context);
+ using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
+ void* ptr,
+ size_t size,
+ void* context);
AllocFn* const alloc_function;
AllocZeroInitializedFn* const alloc_zero_initialized_function;
AllocAlignedFn* const alloc_aligned_function;
ReallocFn* const realloc_function;
FreeFn* const free_function;
+ GetSizeEstimateFn* const get_size_estimate_function;
+ BatchMallocFn* const batch_malloc_function;
+ BatchFreeFn* const batch_free_function;
+ FreeDefiniteSizeFn* const free_definite_size_function;
const AllocatorDispatch* next;
@@ -79,10 +111,10 @@ BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
// regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);
-// Inserts |dispatch| in front of the allocator chain. This method is NOT
+// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
-// The callers have the responsibility of linearizing the changes to the chain
-// (or more likely call these always on the same thread).
+// The callers are responsible for not inserting the same dispatch more
+// than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
@@ -90,6 +122,11 @@ BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
// in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+#if defined(OS_MACOSX)
+// On macOS, the allocator shim needs to be turned on during runtime.
+BASE_EXPORT void InitializeAllocatorShim();
+#endif // defined(OS_MACOSX)
+
} // namespace allocator
} // namespace base
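To make the widened AllocatorDispatch contract concrete: every hook receives |self|, so an interposing layer can do its own bookkeeping and then forward to self->next. A hedged sketch of such a layer (the counting names are illustrative and not part of this change; a real layer would also supply forwarding thunks for the remaining hooks, then register itself with InsertAllocatorDispatch()):

#include <atomic>
#include <cstddef>

#include "base/allocator/allocator_shim.h"

namespace {

std::atomic<size_t> g_live_allocations{0};

void* CountingAlloc(const base::allocator::AllocatorDispatch* self,
                    size_t size,
                    void* context) {
  g_live_allocations.fetch_add(1, std::memory_order_relaxed);
  const base::allocator::AllocatorDispatch* next = self->next;
  return next->alloc_function(next, size, context);
}

void CountingFree(const base::allocator::AllocatorDispatch* self,
                  void* address,
                  void* context) {
  g_live_allocations.fetch_sub(1, std::memory_order_relaxed);
  const base::allocator::AllocatorDispatch* next = self->next;
  next->free_function(next, address, context);
}

}  // namespace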
diff --git a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
index 02facbad2e..6f386d4cc0 100644
--- a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -4,6 +4,10 @@
#include "base/allocator/allocator_shim.h"
+#include <malloc.h>
+
+#include "base/compiler_specific.h"
+
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to libc functions.
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
@@ -20,33 +24,60 @@ namespace {
using base::allocator::AllocatorDispatch;
-void* GlibcMalloc(const AllocatorDispatch*, size_t size) {
+void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __libc_malloc(size);
}
-void* GlibcCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+void* GlibcCalloc(const AllocatorDispatch*,
+ size_t n,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __libc_calloc(n, size);
}
-void* GlibcRealloc(const AllocatorDispatch*, void* address, size_t size) {
+void* GlibcRealloc(const AllocatorDispatch*,
+ void* address,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __libc_realloc(address, size);
}
-void* GlibcMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+void* GlibcMemalign(const AllocatorDispatch*,
+ size_t alignment,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __libc_memalign(alignment, size);
}
-void GlibcFree(const AllocatorDispatch*, void* address) {
+void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
+ ALLOW_UNUSED_PARAM(context);
__libc_free(address);
}
+size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
+ void* address,
+ void* context) {
+ // TODO(siggi, primiano): malloc_usable_size may need redirection in the
+ // presence of interposing shims that divert allocations.
+ ALLOW_UNUSED_PARAM(context);
+ return malloc_usable_size(address);
+}
+
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
- &GlibcMalloc, /* alloc_function */
- &GlibcCalloc, /* alloc_zero_initialized_function */
- &GlibcMemalign, /* alloc_aligned_function */
- &GlibcRealloc, /* realloc_function */
- &GlibcFree, /* free_function */
- nullptr, /* next */
+ &GlibcMalloc, /* alloc_function */
+ &GlibcCalloc, /* alloc_zero_initialized_function */
+ &GlibcMemalign, /* alloc_aligned_function */
+ &GlibcRealloc, /* realloc_function */
+ &GlibcFree, /* free_function */
+ &GlibcGetSizeEstimate, /* get_size_estimate_function */
+ nullptr, /* batch_malloc_function */
+ nullptr, /* batch_free_function */
+ nullptr, /* free_definite_size_function */
+ nullptr, /* next */
};
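GlibcGetSizeEstimate() above leans on malloc_usable_size(). A quick standalone check of the behavior it relies on (glibc assumed): the usable size is at least the requested size, and often larger because of allocator size-class rounding.

#include <malloc.h>

#include <cstdio>
#include <cstdlib>

int main() {
  void* p = std::malloc(100);
  // On glibc this typically prints something like "requested 100, usable 104".
  std::printf("requested 100, usable %zu\n", malloc_usable_size(p));
  std::free(p);
  return 0;
}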
diff --git a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
index 7955cb7877..3ad13ef98f 100644
--- a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -2,7 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <malloc.h>
+
#include "base/allocator/allocator_shim.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+#include <dlfcn.h>
+#endif
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
@@ -25,33 +33,86 @@ namespace {
using base::allocator::AllocatorDispatch;
-void* RealMalloc(const AllocatorDispatch*, size_t size) {
+void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __real_malloc(size);
}
-void* RealCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+void* RealCalloc(const AllocatorDispatch*,
+ size_t n,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __real_calloc(n, size);
}
-void* RealRealloc(const AllocatorDispatch*, void* address, size_t size) {
+void* RealRealloc(const AllocatorDispatch*,
+ void* address,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __real_realloc(address, size);
}
-void* RealMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+void* RealMemalign(const AllocatorDispatch*,
+ size_t alignment,
+ size_t size,
+ void* context) {
+ ALLOW_UNUSED_PARAM(context);
return __real_memalign(alignment, size);
}
-void RealFree(const AllocatorDispatch*, void* address) {
+void RealFree(const AllocatorDispatch*, void* address, void* context) {
+ ALLOW_UNUSED_PARAM(context);
__real_free(address);
}
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+size_t DummyMallocUsableSize(const void*) { return 0; }
+#endif
+
+size_t RealSizeEstimate(const AllocatorDispatch*,
+ void* address,
+ void* context) {
+ ALLOW_UNUSED_PARAM(address);
+ ALLOW_UNUSED_PARAM(context);
+#if defined(OS_ANDROID)
+#if __ANDROID_API__ < 17
+ // malloc_usable_size() is available only starting from API 17.
+ // TODO(dskiba): remove once we start building against 17+.
+ using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
+ static MallocUsableSizeFunction usable_size_function = nullptr;
+ if (!usable_size_function) {
+ void* function_ptr = dlsym(RTLD_DEFAULT, "malloc_usable_size");
+ if (function_ptr) {
+ usable_size_function = reinterpret_cast<MallocUsableSizeFunction>(
+ function_ptr);
+ } else {
+ usable_size_function = &DummyMallocUsableSize;
+ }
+ }
+ return usable_size_function(address);
+#else
+ return malloc_usable_size(address);
+#endif
+#endif // OS_ANDROID
+
+ // TODO(primiano): This should be redirected to malloc_usable_size or
+ // the like.
+ return 0;
+}
+
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
- &RealMalloc, /* alloc_function */
- &RealCalloc, /* alloc_zero_initialized_function */
- &RealMemalign, /* alloc_aligned_function */
- &RealRealloc, /* realloc_function */
- &RealFree, /* free_function */
- nullptr, /* next */
+ &RealMalloc, /* alloc_function */
+ &RealCalloc, /* alloc_zero_initialized_function */
+ &RealMemalign, /* alloc_aligned_function */
+ &RealRealloc, /* realloc_function */
+ &RealFree, /* free_function */
+ &RealSizeEstimate, /* get_size_estimate_function */
+ nullptr, /* batch_malloc_function */
+ nullptr, /* batch_free_function */
+ nullptr, /* free_definite_size_function */
+ nullptr, /* next */
};
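The pre-API-17 branch of RealSizeEstimate() above is an instance of a general pattern: resolve an optional libc symbol once via dlsym() and fall back to a stub when it is absent. A standalone restatement (a sketch with illustrative names; it uses a function-local static for brevity where the shim deliberately uses a plain pointer check):

#include <dlfcn.h>

#include <cstddef>

using UsableSizeFn = size_t (*)(const void*);

size_t StubUsableSize(const void*) {
  return 0;  // conservative answer when the real symbol is unavailable
}

UsableSizeFn ResolveUsableSize() {
  // Resolved once; RTLD_DEFAULT searches the global symbol scope.
  static UsableSizeFn fn = [] {
    void* sym = dlsym(RTLD_DEFAULT, "malloc_usable_size");
    return sym ? reinterpret_cast<UsableSizeFn>(sym) : &StubUsableSize;
  }();
  return fn;
}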
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
index fc3624c596..82624ee45b 100644
--- a/base/allocator/allocator_shim_internals.h
+++ b/base/allocator/allocator_shim_internals.h
@@ -20,8 +20,6 @@
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
-#define SHIM_ALIAS_SYMBOL(fn) __attribute__((alias(#fn)))
-
#endif // __GNUC__
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/base/allocator/allocator_shim_override_cpp_symbols.h b/base/allocator/allocator_shim_override_cpp_symbols.h
index 616716fb96..3313687250 100644
--- a/base/allocator/allocator_shim_override_cpp_symbols.h
+++ b/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -7,36 +7,45 @@
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
-// Alias the default new/delete C++ symbols to the shim entry points.
-// This file is strongly inspired by tcmalloc's libc_override_redefine.h.
+// Preempt the default new/delete C++ symbols so they call the shim entry
+// points. This file is strongly inspired by tcmalloc's
+// libc_override_redefine.h.
#include <new>
#include "base/allocator/allocator_shim_internals.h"
-SHIM_ALWAYS_EXPORT void* operator new(size_t size)
- SHIM_ALIAS_SYMBOL(ShimCppNew);
+SHIM_ALWAYS_EXPORT void* operator new(size_t size) {
+ return ShimCppNew(size);
+}
-SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppDelete);
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW {
+ ShimCppDelete(p);
+}
-SHIM_ALWAYS_EXPORT void* operator new[](size_t size)
- SHIM_ALIAS_SYMBOL(ShimCppNew);
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size) {
+ return ShimCppNew(size);
+}
-SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppDelete);
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW {
+ ShimCppDelete(p);
+}
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
- const std::nothrow_t&) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppNew);
+ const std::nothrow_t&) __THROW {
+ return ShimCppNew(size);
+}
SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
- const std::nothrow_t&) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppNew);
+ const std::nothrow_t&) __THROW {
+ return ShimCppNew(size);
+}
-SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppDelete);
+SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW {
+ ShimCppDelete(p);
+}
SHIM_ALWAYS_EXPORT void operator delete[](void* p,
- const std::nothrow_t&) __THROW
- SHIM_ALIAS_SYMBOL(ShimCppDelete);
+ const std::nothrow_t&) __THROW {
+ ShimCppDelete(p);
+}
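The override header above works because C++ lets a program supply its own definitions of the global allocation functions: defining them in any translation unit replaces the defaults program-wide, which is exactly the preemption the comment describes. A toy standalone demo (not the shim; exceptions assumed enabled):

#include <cstddef>
#include <cstdlib>
#include <new>

static std::size_t g_new_calls = 0;

// Replaces the default global operator new/delete for the whole program.
void* operator new(std::size_t size) {
  ++g_new_calls;  // bookkeeping only; no allocation, to avoid re-entrancy
  if (void* ptr = std::malloc(size))
    return ptr;
  throw std::bad_alloc();
}

void operator delete(void* ptr) noexcept {
  std::free(ptr);
}

int main() {
  int* p = new int(42);  // routed through the replacement above
  delete p;
  return 0;
}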
diff --git a/base/allocator/allocator_shim_override_libc_symbols.h b/base/allocator/allocator_shim_override_libc_symbols.h
index 37b3b4eb12..b77cbb1fe9 100644
--- a/base/allocator/allocator_shim_override_libc_symbols.h
+++ b/base/allocator/allocator_shim_override_libc_symbols.h
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Its purpose is to SHIM_ALIAS_SYMBOL the Libc symbols for malloc/new to the
+// Its purpose is to preempt the Libc symbols for malloc/new so they call the
// shim layer entry points.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
@@ -16,32 +16,41 @@
extern "C" {
-SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW
- SHIM_ALIAS_SYMBOL(ShimMalloc);
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
+ return ShimMalloc(size, nullptr);
+}
-SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW
- SHIM_ALIAS_SYMBOL(ShimFree);
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
+ ShimFree(ptr, nullptr);
+}
-SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW
- SHIM_ALIAS_SYMBOL(ShimRealloc);
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
+ return ShimRealloc(ptr, size, nullptr);
+}
-SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW
- SHIM_ALIAS_SYMBOL(ShimCalloc);
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
+ return ShimCalloc(n, size, nullptr);
+}
-SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW
- SHIM_ALIAS_SYMBOL(ShimFree);
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
+ ShimFree(ptr, nullptr);
+}
-SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW
- SHIM_ALIAS_SYMBOL(ShimMemalign);
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
+ return ShimMemalign(align, s, nullptr);
+}
-SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW
- SHIM_ALIAS_SYMBOL(ShimValloc);
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
+ return ShimValloc(size, nullptr);
+}
-SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW
- SHIM_ALIAS_SYMBOL(ShimPvalloc);
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
+ return ShimPvalloc(size);
+}
-SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW
- SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
+ return ShimPosixMemalign(r, a, s);
+}
// The default dispatch translation unit has to define also the following
// symbols (unless they are ultimately routed to the system symbols):
diff --git a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
index 5b85d6ee2f..6bf73c39f2 100644
--- a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
+++ b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -17,28 +17,38 @@
extern "C" {
-SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t, size_t)
- SHIM_ALIAS_SYMBOL(ShimCalloc);
-
-SHIM_ALWAYS_EXPORT void __wrap_free(void*)
- SHIM_ALIAS_SYMBOL(ShimFree);
-
-SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t)
- SHIM_ALIAS_SYMBOL(ShimMalloc);
-
-SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t, size_t)
- SHIM_ALIAS_SYMBOL(ShimMemalign);
-
-SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void**, size_t, size_t)
- SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
-
-SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t)
- SHIM_ALIAS_SYMBOL(ShimPvalloc);
-
-SHIM_ALWAYS_EXPORT void* __wrap_realloc(void*, size_t)
- SHIM_ALIAS_SYMBOL(ShimRealloc);
-
-SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t)
- SHIM_ALIAS_SYMBOL(ShimValloc);
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
+ return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
+ ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
+ return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
+ return ShimMemalign(align, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
+ size_t align,
+ size_t size) {
+ return ShimPosixMemalign(res, align, size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
+ return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
+ return ShimRealloc(address, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
+ return ShimValloc(size, nullptr);
+}
} // extern "C"
diff --git a/base/at_exit.cc b/base/at_exit.cc
index cfe4cf9a58..5dcc83cb2f 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -22,6 +22,8 @@ namespace base {
// this for thread-safe access, since it will only be modified in testing.
static AtExitManager* g_top_manager = NULL;
+static bool g_disable_managers = false;
+
AtExitManager::AtExitManager()
: processing_callbacks_(false), next_manager_(g_top_manager) {
// If multiple modules instantiate AtExitManagers they'll end up living in this
@@ -39,7 +41,8 @@ AtExitManager::~AtExitManager() {
}
DCHECK_EQ(this, g_top_manager);
- ProcessCallbacksNow();
+ if (!g_disable_managers)
+ ProcessCallbacksNow();
g_top_manager = next_manager_;
}
@@ -88,6 +91,11 @@ void AtExitManager::ProcessCallbacksNow() {
DCHECK(g_top_manager->stack_.empty());
}
+void AtExitManager::DisableAllAtExitManagers() {
+ AutoLock lock(g_top_manager->lock_);
+ g_disable_managers = true;
+}
+
AtExitManager::AtExitManager(bool shadow)
: processing_callbacks_(false), next_manager_(g_top_manager) {
DCHECK(shadow || !g_top_manager);
diff --git a/base/at_exit.h b/base/at_exit.h
index 02e18ed9eb..6bf3f50350 100644
--- a/base/at_exit.h
+++ b/base/at_exit.h
@@ -49,6 +49,10 @@ class BASE_EXPORT AtExitManager {
// is possible to register new callbacks after calling this function.
static void ProcessCallbacksNow();
+ // Disable all registered at-exit callbacks. This is used only in
+ // single-process mode.
+ static void DisableAllAtExitManagers();
+
protected:
// This constructor will allow this instance of AtExitManager to be created
// even if one already exists. This should only be used for testing!
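For context on DisableAllAtExitManagers(): a minimal usage sketch (a hypothetical main.cc) of the AtExitManager lifecycle it short-circuits. Callbacks registered while a manager is live normally run in LIFO order when the manager is destroyed; the new hook makes the destructor skip them.

#include "base/at_exit.h"
#include "base/bind.h"

void SomeCleanup() {}

int main() {
  base::AtExitManager exit_manager;
  base::AtExitManager::RegisterTask(base::Bind(&SomeCleanup));
  // base::AtExitManager::DisableAllAtExitManagers();  // would skip SomeCleanup
  return 0;
}  // ~AtExitManager runs SomeCleanup here, unless managers were disabled.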
diff --git a/base/atomic_ref_count.h b/base/atomic_ref_count.h
index 2ab7242002..93c1f0dfd4 100644
--- a/base/atomic_ref_count.h
+++ b/base/atomic_ref_count.h
@@ -12,7 +12,7 @@
namespace base {
-typedef subtle::Atomic32 AtomicRefCount;
+typedef subtle::AtomicWord AtomicRefCount;
// Increment a reference count by "increment", which must exceed 0.
inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
diff --git a/base/base.gyp b/base/base.gyp
deleted file mode 100644
index a534d5ccb7..0000000000
--- a/base/base.gyp
+++ /dev/null
@@ -1,1801 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- '../build/win_precompile.gypi',
- 'base.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'base',
- 'type': '<(component)',
- 'toolsets': ['host', 'target'],
- 'variables': {
- 'base_target': 1,
- 'enable_wexit_time_destructors': 1,
- 'optimize': 'max',
- },
- 'dependencies': [
- 'allocator/allocator.gyp:allocator',
- 'allocator/allocator.gyp:allocator_features#target',
- 'base_debugging_flags#target',
- 'base_win_features#target',
- 'base_static',
- 'base_build_date#target',
- '../testing/gtest.gyp:gtest_prod',
- '../third_party/modp_b64/modp_b64.gyp:modp_b64',
- 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- ],
- # TODO(gregoryd): direct_dependent_settings should be shared with the
- # 64-bit target, but it doesn't work due to a bug in gyp
- 'direct_dependent_settings': {
- 'include_dirs': [
- '..',
- ],
- },
- 'conditions': [
- ['desktop_linux == 1 or chromeos == 1', {
- 'conditions': [
- ['chromeos==1', {
- 'sources/': [ ['include', '_chromeos\\.cc$'] ]
- }],
- ],
- 'dependencies': [
- 'symbolize',
- 'xdg_mime',
- ],
- 'defines': [
- 'USE_SYMBOLIZE',
- ],
- }, { # desktop_linux == 0 and chromeos == 0
- 'sources/': [
- ['exclude', '/xdg_user_dirs/'],
- ['exclude', '_nss\\.cc$'],
- ],
- }],
- ['use_glib==1', {
- 'dependencies': [
- '../build/linux/system.gyp:glib',
- ],
- 'export_dependent_settings': [
- '../build/linux/system.gyp:glib',
- ],
- }],
- ['OS == "android" and _toolset == "host"', {
- # Always build base as a static_library for host toolset, even if
- # we're doing a component build. Specifically, we only care about the
- # target toolset using components since that's what developers are
- # focusing on. In theory we should do this more generally for all
- # targets when building for host, but getting the gyp magic
- # per-toolset for the "component" variable is hard, and we really only
- # need base on host.
- 'type': 'static_library',
- # Base for host support is the minimum required to run the
- # ssl false start blacklist tool. It requires further changes
- # to generically support host builds (and tests).
- # Note: when building for host, gyp has OS == "android",
- # hence the *_android.cc files are included but the actual code
- # doesn't have OS_ANDROID / ANDROID defined.
- 'conditions': [
- ['host_os == "mac"', {
- 'sources/': [
- ['exclude', '^native_library_linux\\.cc$'],
- ['exclude', '^process_util_linux\\.cc$'],
- ['exclude', '^sys_info_linux\\.cc$'],
- ['exclude', '^sys_string_conversions_linux\\.cc$'],
- ['exclude', '^worker_pool_linux\\.cc$'],
- ],
- }],
- ],
- }],
- ['OS == "android" and _toolset == "target"', {
- 'dependencies': [
- 'base_java',
- 'base_jni_headers',
- '../build/android/ndk.gyp:cpu_features',
- '../third_party/ashmem/ashmem.gyp:ashmem',
- ],
- 'link_settings': {
- 'libraries': [
- '-llog',
- ],
- },
- 'sources!': [
- 'debug/stack_trace_posix.cc',
- ],
- }],
- ['os_bsd==1', {
- 'include_dirs': [
- '/usr/local/include',
- ],
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ],
- },
- }],
- ['OS == "linux"', {
- 'link_settings': {
- 'libraries': [
- # We need rt for clock_gettime().
- '-lrt',
- # For 'native_library_linux.cc'
- '-ldl',
- ],
- },
- 'conditions': [
- ['use_allocator!="tcmalloc"', {
- 'defines': [
- 'NO_TCMALLOC',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'NO_TCMALLOC',
- ],
- },
- }],
- ],
- }],
- ['use_sysroot==0 and (OS == "android" or OS == "linux")', {
- 'link_settings': {
- 'libraries': [
- # Needed for <atomic> when building with newer C++ library.
- '-latomic',
- ],
- },
- }],
- ['OS == "win"', {
- # Specify delayload for base.dll.
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'DelayLoadDLLs': [
- 'cfgmgr32.dll',
- 'powrprof.dll',
- 'setupapi.dll',
- ],
- 'AdditionalDependencies': [
- 'cfgmgr32.lib',
- 'powrprof.lib',
- 'setupapi.lib',
- 'userenv.lib',
- 'winmm.lib',
- ],
- },
- },
- # Specify delayload for components that link with base.lib.
- 'all_dependent_settings': {
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'DelayLoadDLLs': [
- 'cfgmgr32.dll',
- 'powrprof.dll',
- 'setupapi.dll',
- ],
- 'AdditionalDependencies': [
- 'cfgmgr32.lib',
- 'powrprof.lib',
- 'setupapi.lib',
- 'userenv.lib',
- 'winmm.lib',
- ],
- },
- },
- },
- 'dependencies': [
- 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
- ],
- }],
- ['OS == "mac" or (OS == "ios" and _toolset == "host")', {
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
- '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
- '$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
- '$(SDKROOT)/System/Library/Frameworks/Security.framework',
- '$(SDKROOT)/usr/lib/libbsm.dylib',
- ],
- },
- }],
- ['OS == "ios" and _toolset != "host"', {
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
- '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
- ],
- },
- }],
- ['OS != "win" and (OS != "ios" or _toolset == "host")', {
- 'dependencies': ['third_party/libevent/libevent.gyp:libevent'],
- },],
- ['component=="shared_library"', {
- 'conditions': [
- ['OS=="win"', {
- 'sources!': [
- 'debug/debug_on_start_win.cc',
- ],
- }],
- ],
- }],
- ['OS=="ios"', {
- 'sources!': [
- 'sync_socket.h',
- 'sync_socket_posix.cc',
- ]
- }],
- ['use_experimental_allocator_shim==1', {
- 'dependencies': [ 'allocator/allocator.gyp:unified_allocator_shim']
- }],
- ],
- 'sources': [
- 'auto_reset.h',
- 'linux_util.cc',
- 'linux_util.h',
- 'message_loop/message_pump_android.cc',
- 'message_loop/message_pump_android.h',
- 'message_loop/message_pump_glib.cc',
- 'message_loop/message_pump_glib.h',
- 'message_loop/message_pump_io_ios.cc',
- 'message_loop/message_pump_io_ios.h',
- 'message_loop/message_pump_libevent.cc',
- 'message_loop/message_pump_libevent.h',
- 'message_loop/message_pump_mac.h',
- 'message_loop/message_pump_mac.mm',
- 'metrics/field_trial.cc',
- 'metrics/field_trial.h',
- 'posix/file_descriptor_shuffle.cc',
- 'posix/file_descriptor_shuffle.h',
- 'sync_socket.h',
- 'sync_socket_posix.cc',
- 'sync_socket_win.cc',
- 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
- 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
- ],
- 'includes': [
- '../build/android/increase_size_for_speed.gypi',
- ],
- },
- {
- 'target_name': 'base_i18n',
- 'type': '<(component)',
- 'variables': {
- 'enable_wexit_time_destructors': 1,
- 'optimize': 'max',
- 'base_i18n_target': 1,
- },
- 'dependencies': [
- 'base',
- 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- '../third_party/icu/icu.gyp:icui18n',
- '../third_party/icu/icu.gyp:icuuc',
- ],
- 'conditions': [
- ['OS == "win"', {
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [
- 4267,
- ],
- }],
- ['icu_use_data_file_flag==1', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
- }, { # else icu_use_data_file_flag !=1
- 'conditions': [
- ['OS=="win"', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
- }, {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
- }],
- ],
- }],
- ['OS == "ios"', {
- 'toolsets': ['host', 'target'],
- }],
- ],
- 'export_dependent_settings': [
- 'base',
- '../third_party/icu/icu.gyp:icuuc',
- '../third_party/icu/icu.gyp:icui18n',
- ],
- 'includes': [
- '../build/android/increase_size_for_speed.gypi',
- ],
- },
- {
- 'target_name': 'base_message_loop_tests',
- 'type': 'static_library',
- 'dependencies': [
- 'base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'message_loop/message_loop_test.cc',
- 'message_loop/message_loop_test.h',
- ],
- },
- {
- # This is the subset of files from base that should not be used with a
- # dynamic library. Note that this library cannot depend on base because
- # base depends on base_static.
- 'target_name': 'base_static',
- 'type': 'static_library',
- 'variables': {
- 'enable_wexit_time_destructors': 1,
- 'optimize': 'max',
- },
- 'toolsets': ['host', 'target'],
- 'sources': [
- 'base_switches.cc',
- 'base_switches.h',
- 'win/pe_image.cc',
- 'win/pe_image.h',
- ],
- 'include_dirs': [
- '..',
- ],
- 'includes': [
- '../build/android/increase_size_for_speed.gypi',
- ],
- },
- # Include this target for a main() function that simply instantiates
- # and runs a base::TestSuite.
- {
- 'target_name': 'run_all_unittests',
- 'type': 'static_library',
- 'dependencies': [
- 'test_support_base',
- ],
- 'sources': [
- 'test/run_all_unittests.cc',
- ],
- },
- {
- 'target_name': 'base_unittests',
- 'type': '<(gtest_target_type)',
- 'sources': [
- 'allocator/tcmalloc_unittest.cc',
- 'android/application_status_listener_unittest.cc',
- 'android/content_uri_utils_unittest.cc',
- 'android/jni_android_unittest.cc',
- 'android/jni_array_unittest.cc',
- 'android/jni_string_unittest.cc',
- 'android/library_loader/library_prefetcher_unittest.cc',
- 'android/path_utils_unittest.cc',
- 'android/scoped_java_ref_unittest.cc',
- 'android/sys_utils_unittest.cc',
- 'at_exit_unittest.cc',
- 'atomicops_unittest.cc',
- 'barrier_closure_unittest.cc',
- 'base64_unittest.cc',
- 'base64url_unittest.cc',
- 'big_endian_unittest.cc',
- 'bind_unittest.cc',
- 'bind_unittest.nc',
- 'bit_cast_unittest.cc',
- 'bits_unittest.cc',
- 'build_time_unittest.cc',
- 'callback_helpers_unittest.cc',
- 'callback_list_unittest.cc',
- 'callback_list_unittest.nc',
- 'callback_unittest.cc',
- 'callback_unittest.nc',
- 'cancelable_callback_unittest.cc',
- 'command_line_unittest.cc',
- 'containers/adapters_unittest.cc',
- 'containers/hash_tables_unittest.cc',
- 'containers/linked_list_unittest.cc',
- 'containers/mru_cache_unittest.cc',
- 'containers/scoped_ptr_hash_map_unittest.cc',
- 'containers/small_map_unittest.cc',
- 'containers/stack_container_unittest.cc',
- 'cpu_unittest.cc',
- 'debug/crash_logging_unittest.cc',
- 'debug/debugger_unittest.cc',
- 'debug/leak_tracker_unittest.cc',
- 'debug/proc_maps_linux_unittest.cc',
- 'debug/stack_trace_unittest.cc',
- 'debug/task_annotator_unittest.cc',
- 'deferred_sequenced_task_runner_unittest.cc',
- 'environment_unittest.cc',
- 'feature_list_unittest.cc',
- 'file_version_info_win_unittest.cc',
- 'files/dir_reader_posix_unittest.cc',
- 'files/file_locking_unittest.cc',
- 'files/file_path_unittest.cc',
- 'files/file_path_watcher_unittest.cc',
- 'files/file_proxy_unittest.cc',
- 'files/file_unittest.cc',
- 'files/file_util_proxy_unittest.cc',
- 'files/file_util_unittest.cc',
- 'files/important_file_writer_unittest.cc',
- 'files/memory_mapped_file_unittest.cc',
- 'files/scoped_temp_dir_unittest.cc',
- 'gmock_unittest.cc',
- 'guid_unittest.cc',
- 'hash_unittest.cc',
- 'i18n/break_iterator_unittest.cc',
- 'i18n/case_conversion_unittest.cc',
- 'i18n/char_iterator_unittest.cc',
- 'i18n/file_util_icu_unittest.cc',
- 'i18n/icu_string_conversions_unittest.cc',
- 'i18n/message_formatter_unittest.cc',
- 'i18n/number_formatting_unittest.cc',
- 'i18n/rtl_unittest.cc',
- 'i18n/streaming_utf8_validator_unittest.cc',
- 'i18n/string_search_unittest.cc',
- 'i18n/time_formatting_unittest.cc',
- 'i18n/timezone_unittest.cc',
- 'id_map_unittest.cc',
- 'ios/crb_protocol_observers_unittest.mm',
- 'ios/device_util_unittest.mm',
- 'ios/weak_nsobject_unittest.mm',
- 'json/json_parser_unittest.cc',
- 'json/json_reader_unittest.cc',
- 'json/json_value_converter_unittest.cc',
- 'json/json_value_serializer_unittest.cc',
- 'json/json_writer_unittest.cc',
- 'json/string_escape_unittest.cc',
- 'lazy_instance_unittest.cc',
- 'logging_unittest.cc',
- 'mac/bind_objc_block_unittest.mm',
- 'mac/call_with_eh_frame_unittest.mm',
- 'mac/dispatch_source_mach_unittest.cc',
- 'mac/foundation_util_unittest.mm',
- 'mac/mac_util_unittest.mm',
- 'mac/mach_port_broker_unittest.cc',
- 'mac/objc_property_releaser_unittest.mm',
- 'mac/scoped_nsobject_unittest.mm',
- 'mac/scoped_objc_class_swizzler_unittest.mm',
- 'mac/scoped_sending_event_unittest.mm',
- 'md5_unittest.cc',
- 'memory/aligned_memory_unittest.cc',
- 'memory/discardable_shared_memory_unittest.cc',
- 'memory/linked_ptr_unittest.cc',
- 'memory/memory_pressure_listener_unittest.cc',
- 'memory/memory_pressure_monitor_chromeos_unittest.cc',
- 'memory/memory_pressure_monitor_mac_unittest.cc',
- 'memory/memory_pressure_monitor_win_unittest.cc',
- 'memory/ptr_util_unittest.cc',
- 'memory/ref_counted_memory_unittest.cc',
- 'memory/ref_counted_unittest.cc',
- 'memory/scoped_vector_unittest.cc',
- 'memory/shared_memory_mac_unittest.cc',
- 'memory/shared_memory_unittest.cc',
- 'memory/shared_memory_win_unittest.cc',
- 'memory/singleton_unittest.cc',
- 'memory/weak_ptr_unittest.cc',
- 'memory/weak_ptr_unittest.nc',
- 'message_loop/message_loop_task_runner_unittest.cc',
- 'message_loop/message_loop_unittest.cc',
- 'message_loop/message_pump_glib_unittest.cc',
- 'message_loop/message_pump_io_ios_unittest.cc',
- 'message_loop/message_pump_libevent_unittest.cc',
- 'metrics/bucket_ranges_unittest.cc',
- 'metrics/field_trial_unittest.cc',
- 'metrics/histogram_base_unittest.cc',
- 'metrics/histogram_delta_serialization_unittest.cc',
- 'metrics/histogram_macros_unittest.cc',
- 'metrics/histogram_snapshot_manager_unittest.cc',
- 'metrics/histogram_unittest.cc',
- 'metrics/metrics_hashes_unittest.cc',
- 'metrics/persistent_histogram_allocator_unittest.cc',
- 'metrics/persistent_memory_allocator_unittest.cc',
- 'metrics/persistent_sample_map_unittest.cc',
- 'metrics/sample_map_unittest.cc',
- 'metrics/sample_vector_unittest.cc',
- 'metrics/sparse_histogram_unittest.cc',
- 'metrics/statistics_recorder_unittest.cc',
- 'native_library_unittest.cc',
- 'numerics/safe_numerics_unittest.cc',
- 'observer_list_unittest.cc',
- 'optional_unittest.cc',
- 'os_compat_android_unittest.cc',
- 'path_service_unittest.cc',
- 'pickle_unittest.cc',
- 'posix/file_descriptor_shuffle_unittest.cc',
- 'posix/unix_domain_socket_linux_unittest.cc',
- 'power_monitor/power_monitor_unittest.cc',
- 'process/memory_unittest.cc',
- 'process/memory_unittest_mac.h',
- 'process/memory_unittest_mac.mm',
- 'process/process_metrics_unittest.cc',
- 'process/process_metrics_unittest_ios.cc',
- 'process/process_unittest.cc',
- 'process/process_util_unittest.cc',
- 'profiler/stack_sampling_profiler_unittest.cc',
- 'profiler/tracked_time_unittest.cc',
- 'rand_util_unittest.cc',
- 'run_loop_unittest.cc',
- 'scoped_clear_errno_unittest.cc',
- 'scoped_generic_unittest.cc',
- 'scoped_native_library_unittest.cc',
- 'security_unittest.cc',
- 'sequence_checker_unittest.cc',
- 'sha1_unittest.cc',
- 'stl_util_unittest.cc',
- 'strings/nullable_string16_unittest.cc',
- 'strings/pattern_unittest.cc',
- 'strings/safe_sprintf_unittest.cc',
- 'strings/string16_unittest.cc',
- 'strings/string_number_conversions_unittest.cc',
- 'strings/string_piece_unittest.cc',
- 'strings/string_split_unittest.cc',
- 'strings/string_tokenizer_unittest.cc',
- 'strings/string_util_unittest.cc',
- 'strings/stringize_macros_unittest.cc',
- 'strings/stringprintf_unittest.cc',
- 'strings/sys_string_conversions_mac_unittest.mm',
- 'strings/sys_string_conversions_unittest.cc',
- 'strings/utf_offset_string_conversions_unittest.cc',
- 'strings/utf_string_conversions_unittest.cc',
- 'supports_user_data_unittest.cc',
- 'sync_socket_unittest.cc',
- 'synchronization/cancellation_flag_unittest.cc',
- 'synchronization/condition_variable_unittest.cc',
- 'synchronization/lock_unittest.cc',
- 'synchronization/read_write_lock_unittest.cc',
- 'synchronization/waitable_event_unittest.cc',
- 'synchronization/waitable_event_watcher_unittest.cc',
- 'sys_byteorder_unittest.cc',
- 'sys_info_unittest.cc',
- 'system_monitor/system_monitor_unittest.cc',
- 'task/cancelable_task_tracker_unittest.cc',
- 'task_runner_util_unittest.cc',
- 'task_scheduler/delayed_task_manager_unittest.cc',
- 'task_scheduler/priority_queue_unittest.cc',
- 'task_scheduler/scheduler_lock_unittest.cc',
- 'task_scheduler/scheduler_service_thread_unittest.cc',
- 'task_scheduler/scheduler_worker_unittest.cc',
- 'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
- 'task_scheduler/scheduler_worker_stack_unittest.cc',
- 'task_scheduler/sequence_sort_key_unittest.cc',
- 'task_scheduler/sequence_unittest.cc',
- 'task_scheduler/task_scheduler_impl_unittest.cc',
- 'task_scheduler/task_tracker_unittest.cc',
- 'task_scheduler/test_task_factory.cc',
- 'task_scheduler/test_task_factory.h',
- 'task_scheduler/test_utils.h',
- 'template_util_unittest.cc',
- 'test/histogram_tester_unittest.cc',
- 'test/test_pending_task_unittest.cc',
- 'test/test_reg_util_win_unittest.cc',
- 'test/trace_event_analyzer_unittest.cc',
- 'test/user_action_tester_unittest.cc',
- 'threading/non_thread_safe_unittest.cc',
- 'threading/platform_thread_unittest.cc',
- 'threading/sequenced_worker_pool_unittest.cc',
- 'threading/sequenced_task_runner_handle_unittest.cc',
- 'threading/simple_thread_unittest.cc',
- 'threading/thread_checker_unittest.cc',
- 'threading/thread_collision_warner_unittest.cc',
- 'threading/thread_id_name_manager_unittest.cc',
- 'threading/thread_local_storage_unittest.cc',
- 'threading/thread_local_unittest.cc',
- 'threading/thread_unittest.cc',
- 'threading/watchdog_unittest.cc',
- 'threading/worker_pool_posix_unittest.cc',
- 'threading/worker_pool_unittest.cc',
- 'time/pr_time_unittest.cc',
- 'time/time_unittest.cc',
- 'time/time_win_unittest.cc',
- 'timer/hi_res_timer_manager_unittest.cc',
- 'timer/mock_timer_unittest.cc',
- 'timer/timer_unittest.cc',
- 'tools_sanity_unittest.cc',
- 'tracked_objects_unittest.cc',
- 'tuple_unittest.cc',
- 'values_unittest.cc',
- 'version_unittest.cc',
- 'vlog_unittest.cc',
- 'win/dllmain.cc',
- 'win/enum_variant_unittest.cc',
- 'win/event_trace_consumer_unittest.cc',
- 'win/event_trace_controller_unittest.cc',
- 'win/event_trace_provider_unittest.cc',
- 'win/i18n_unittest.cc',
- 'win/iunknown_impl_unittest.cc',
- 'win/message_window_unittest.cc',
- 'win/object_watcher_unittest.cc',
- 'win/pe_image_unittest.cc',
- 'win/registry_unittest.cc',
- 'win/scoped_bstr_unittest.cc',
- 'win/scoped_comptr_unittest.cc',
- 'win/scoped_handle_unittest.cc',
- 'win/scoped_process_information_unittest.cc',
- 'win/scoped_variant_unittest.cc',
- 'win/shortcut_unittest.cc',
- 'win/startup_information_unittest.cc',
- 'win/wait_chain_unittest.cc',
- 'win/win_util_unittest.cc',
- 'win/windows_version_unittest.cc',
- 'win/wrapped_window_proc_unittest.cc',
- '<@(trace_event_test_sources)',
- ],
- 'dependencies': [
- 'base',
- 'base_i18n',
- 'base_message_loop_tests',
- 'base_static',
- 'run_all_unittests',
- 'test_support_base',
- 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- '../testing/gmock.gyp:gmock',
- '../testing/gtest.gyp:gtest',
- '../third_party/icu/icu.gyp:icui18n',
- '../third_party/icu/icu.gyp:icuuc',
- ],
- 'includes': ['../build/nocompile.gypi'],
- 'variables': {
- # TODO(ajwong): Is there a way to autodetect this?
- 'module_dir': 'base'
- },
- 'conditions': [
- ['cfi_vptr==1 and cfi_cast==1', {
- 'defines': [
- # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
- 'CFI_CAST_CHECK',
- ],
- }],
- ['OS == "ios" or OS == "mac"', {
- 'dependencies': [
- 'base_unittests_arc',
- ],
- }],
- ['OS == "android"', {
- 'dependencies': [
- 'android/jni_generator/jni_generator.gyp:jni_generator_tests',
- '../testing/android/native_test.gyp:native_test_native_code',
- ],
- }],
- ['OS == "ios" and _toolset != "host"', {
- 'sources/': [
- # This test needs multiple processes.
- ['exclude', '^files/file_locking_unittest\\.cc$'],
- # iOS does not support FilePathWatcher.
- ['exclude', '^files/file_path_watcher_unittest\\.cc$'],
- # Only test the iOS-meaningful portion of memory and process_utils.
- ['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
- ['exclude', '^memory/shared_memory_unittest\\.cc$'],
- ['exclude', '^process/memory_unittest'],
- ['exclude', '^process/process_unittest\\.cc$'],
- ['exclude', '^process/process_util_unittest\\.cc$'],
- ['include', '^process/process_util_unittest_ios\\.cc$'],
- # iOS does not use message_pump_libevent.
- ['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
- ],
- 'actions': [
- {
- 'action_name': 'copy_test_data',
- 'variables': {
- 'test_data_files': [
- 'test/data',
- ],
- 'test_data_prefix': 'base',
- },
- 'includes': [ '../build/copy_test_data_ios.gypi' ],
- },
- ],
- }],
- ['desktop_linux == 1 or chromeos == 1', {
- 'defines': [
- 'USE_SYMBOLIZE',
- ],
- 'conditions': [
- [ 'desktop_linux==1', {
- 'sources': [
- 'nix/xdg_util_unittest.cc',
- ],
- }],
- ],
- }],
- ['use_glib == 1', {
- 'dependencies': [
- '../build/linux/system.gyp:glib',
- ],
- }, { # use_glib == 0
- 'sources!': [
- 'message_loop/message_pump_glib_unittest.cc',
- ]
- }],
- ['use_ozone == 1', {
- 'sources!': [
- 'message_loop/message_pump_glib_unittest.cc',
- ]
- }],
- ['OS == "linux"', {
- 'dependencies': [
- 'malloc_wrapper',
- ],
- }],
- [ 'OS == "win" and target_arch == "x64"', {
- 'sources': [
- 'profiler/win32_stack_frame_unwinder_unittest.cc',
- ],
- 'dependencies': [
- 'base_profiler_test_support_library',
- ],
- }],
- ['OS == "win"', {
- 'dependencies': [
- 'scoped_handle_test_dll'
- ],
- 'sources!': [
- 'file_descriptor_shuffle_unittest.cc',
- 'files/dir_reader_posix_unittest.cc',
- 'message_loop/message_pump_libevent_unittest.cc',
- 'threading/worker_pool_posix_unittest.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [
- 4267,
- ],
- 'conditions': [
- ['icu_use_data_file_flag==0', {
- # This is needed to trigger the dll copy step on windows.
- # TODO(mark): This should not be necessary.
- 'dependencies': [
- '../third_party/icu/icu.gyp:icudata',
- ],
- }],
- ],
- }, { # OS != "win"
- 'dependencies': [
- 'third_party/libevent/libevent.gyp:libevent'
- ],
- }],
- ['use_experimental_allocator_shim==1', {
- 'sources': [ 'allocator/allocator_shim_unittest.cc']
- }],
- ], # conditions
- 'target_conditions': [
- ['OS == "ios" and _toolset != "host"', {
- 'sources/': [
- # Pull in specific Mac files for iOS (which have been filtered out
- # by file name rules).
- ['include', '^mac/bind_objc_block_unittest\\.mm$'],
- ['include', '^mac/foundation_util_unittest\\.mm$',],
- ['include', '^mac/objc_property_releaser_unittest\\.mm$'],
- ['include', '^mac/scoped_nsobject_unittest\\.mm$'],
- ['include', '^sys_string_conversions_mac_unittest\\.mm$'],
- ],
- }],
- ['OS == "android"', {
- 'sources/': [
- ['include', '^debug/proc_maps_linux_unittest\\.cc$'],
- ],
- }],
- # Enable more direct string conversions on platforms with native utf8
- # strings
- ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
- 'defines': ['SYSTEM_NATIVE_UTF8'],
- }],
- # SyncSocket isn't used on iOS
- ['OS=="ios"', {
- 'sources!': [
- 'sync_socket_unittest.cc',
- ],
- }],
- ], # target_conditions
- },
- {
- # GN: //base:base_perftests
- 'target_name': 'base_perftests',
- 'type': '<(gtest_target_type)',
- 'dependencies': [
- 'base',
- 'test_support_base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'message_loop/message_pump_perftest.cc',
- 'test/run_all_unittests.cc',
- 'threading/thread_perftest.cc',
- '../testing/perf/perf_test.cc'
- ],
- 'conditions': [
- ['OS == "android"', {
- 'dependencies': [
- '../testing/android/native_test.gyp:native_test_native_code',
- ],
- }],
- ],
- },
- {
- # GN: //base:base_i18n_perftests
- 'target_name': 'base_i18n_perftests',
- 'type': '<(gtest_target_type)',
- 'dependencies': [
- 'test_support_base',
- 'test_support_perf',
- '../testing/gtest.gyp:gtest',
- 'base_i18n',
- 'base',
- ],
- 'sources': [
- 'i18n/streaming_utf8_validator_perftest.cc',
- ],
- },
- {
- # GN: //base/test:test_support
- 'target_name': 'test_support_base',
- 'type': 'static_library',
- 'dependencies': [
- 'base',
- 'base_static',
- 'base_i18n',
- '../testing/gmock.gyp:gmock',
- '../testing/gtest.gyp:gtest',
- '../third_party/icu/icu.gyp:icuuc',
- '../third_party/libxml/libxml.gyp:libxml',
- 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- ],
- 'export_dependent_settings': [
- 'base',
- ],
- 'conditions': [
- ['os_posix==0', {
- 'sources!': [
- 'test/scoped_locale.cc',
- 'test/scoped_locale.h',
- ],
- }],
- ['os_bsd==1', {
- 'sources!': [
- 'test/test_file_util_linux.cc',
- ],
- }],
- ['OS == "android"', {
- 'dependencies': [
- 'base_unittests_jni_headers',
- 'base_java_unittest_support',
- ],
- }],
- ['OS == "ios"', {
- 'toolsets': ['host', 'target'],
- }],
- ],
- 'sources': [
- 'test/gtest_util.cc',
- 'test/gtest_util.h',
- 'test/gtest_xml_unittest_result_printer.cc',
- 'test/gtest_xml_unittest_result_printer.h',
- 'test/gtest_xml_util.cc',
- 'test/gtest_xml_util.h',
- 'test/histogram_tester.cc',
- 'test/histogram_tester.h',
- 'test/icu_test_util.cc',
- 'test/icu_test_util.h',
- 'test/ios/wait_util.h',
- 'test/ios/wait_util.mm',
- 'test/launcher/test_launcher.cc',
- 'test/launcher/test_launcher.h',
- 'test/launcher/test_launcher_tracer.cc',
- 'test/launcher/test_launcher_tracer.h',
- 'test/launcher/test_result.cc',
- 'test/launcher/test_result.h',
- 'test/launcher/test_results_tracker.cc',
- 'test/launcher/test_results_tracker.h',
- 'test/launcher/unit_test_launcher.cc',
- 'test/launcher/unit_test_launcher.h',
- 'test/launcher/unit_test_launcher_ios.cc',
- 'test/mock_chrome_application_mac.h',
- 'test/mock_chrome_application_mac.mm',
- 'test/mock_devices_changed_observer.cc',
- 'test/mock_devices_changed_observer.h',
- 'test/mock_entropy_provider.cc',
- 'test/mock_entropy_provider.h',
- 'test/mock_log.cc',
- 'test/mock_log.h',
- 'test/multiprocess_test.cc',
- 'test/multiprocess_test.h',
- 'test/multiprocess_test_android.cc',
- 'test/null_task_runner.cc',
- 'test/null_task_runner.h',
- 'test/opaque_ref_counted.cc',
- 'test/opaque_ref_counted.h',
- 'test/perf_log.cc',
- 'test/perf_log.h',
- 'test/perf_test_suite.cc',
- 'test/perf_test_suite.h',
- 'test/perf_time_logger.cc',
- 'test/perf_time_logger.h',
- 'test/power_monitor_test_base.cc',
- 'test/power_monitor_test_base.h',
- 'test/scoped_command_line.cc',
- 'test/scoped_command_line.h',
- 'test/scoped_locale.cc',
- 'test/scoped_locale.h',
- 'test/scoped_path_override.cc',
- 'test/scoped_path_override.h',
- 'test/sequenced_task_runner_test_template.cc',
- 'test/sequenced_task_runner_test_template.h',
- 'test/sequenced_worker_pool_owner.cc',
- 'test/sequenced_worker_pool_owner.h',
- 'test/simple_test_clock.cc',
- 'test/simple_test_clock.h',
- 'test/simple_test_tick_clock.cc',
- 'test/simple_test_tick_clock.h',
- 'test/task_runner_test_template.cc',
- 'test/task_runner_test_template.h',
- 'test/test_discardable_memory_allocator.cc',
- 'test/test_discardable_memory_allocator.h',
- 'test/test_file_util.cc',
- 'test/test_file_util.h',
- 'test/test_file_util_android.cc',
- 'test/test_file_util_linux.cc',
- 'test/test_file_util_mac.cc',
- 'test/test_file_util_posix.cc',
- 'test/test_file_util_win.cc',
- 'test/test_io_thread.cc',
- 'test/test_io_thread.h',
- 'test/test_listener_ios.h',
- 'test/test_listener_ios.mm',
- 'test/test_message_loop.cc',
- 'test/test_message_loop.h',
- 'test/test_mock_time_task_runner.cc',
- 'test/test_mock_time_task_runner.h',
- 'test/test_pending_task.cc',
- 'test/test_pending_task.h',
- 'test/test_reg_util_win.cc',
- 'test/test_reg_util_win.h',
- 'test/test_shortcut_win.cc',
- 'test/test_shortcut_win.h',
- 'test/test_simple_task_runner.cc',
- 'test/test_simple_task_runner.h',
- 'test/test_suite.cc',
- 'test/test_suite.h',
- 'test/test_support_android.cc',
- 'test/test_support_android.h',
- 'test/test_support_ios.h',
- 'test/test_support_ios.mm',
- 'test/test_switches.cc',
- 'test/test_switches.h',
- 'test/test_timeouts.cc',
- 'test/test_timeouts.h',
- 'test/test_ui_thread_android.cc',
- 'test/test_ui_thread_android.h',
- 'test/thread_test_helper.cc',
- 'test/thread_test_helper.h',
- 'test/trace_event_analyzer.cc',
- 'test/trace_event_analyzer.h',
- 'test/trace_to_file.cc',
- 'test/trace_to_file.h',
- 'test/user_action_tester.cc',
- 'test/user_action_tester.h',
- 'test/values_test_util.cc',
- 'test/values_test_util.h',
- ],
- 'target_conditions': [
- ['OS == "ios"', {
- 'sources/': [
- # Pull in specific Mac files for iOS (which have been filtered out
- # by file name rules).
- ['include', '^test/test_file_util_mac\\.cc$'],
- ],
- }],
- ['OS == "ios" and _toolset == "target"', {
- 'sources!': [
- # iOS uses its own unit test launcher.
- 'test/launcher/unit_test_launcher.cc',
- ],
- }],
- ['OS == "ios" and _toolset == "host"', {
- 'sources!': [
- 'test/launcher/unit_test_launcher_ios.cc',
- 'test/test_support_ios.h',
- 'test/test_support_ios.mm',
- ],
- }],
- ], # target_conditions
- },
- {
- 'target_name': 'test_support_perf',
- 'type': 'static_library',
- 'dependencies': [
- 'base',
- 'test_support_base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'test/run_all_perftests.cc',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'PERF_TEST',
- ],
- },
- },
- {
- 'target_name': 'test_launcher_nacl_nonsfi',
- 'conditions': [
- ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
- 'type': 'static_library',
- 'sources': [
- 'test/launcher/test_launcher_nacl_nonsfi.cc',
- ],
- 'dependencies': [
- 'test_support_base',
- ],
- }, {
- 'type': 'none',
- }],
- ],
- },
- {
- # GN version: //base/debug:debugging_flags
- # Since this generates a file, it must only be referenced in the target
- # toolchain or there will be multiple rules that generate the header.
- # When referenced from a target that might be compiled in the host
- # toolchain, always refer to 'base_debugging_flags#target'.
- 'target_name': 'base_debugging_flags',
- 'includes': [ '../build/buildflag_header.gypi' ],
- 'variables': {
- 'buildflag_header_path': 'base/debug/debugging_flags.h',
- 'buildflag_flags': [
- 'ENABLE_PROFILING=<(profiling)',
- ],
- },
- },
- {
- # GN version: //base/win:base_win_features
- # Since this generates a file, it must only be referenced in the target
- # toolchain or there will be multiple rules that generate the header.
- # When referenced from a target that might be compiled in the host
- # toolchain, always refer to 'base_win_features#target'.
- 'target_name': 'base_win_features',
- 'conditions': [
- ['OS=="win"', {
- 'includes': [ '../build/buildflag_header.gypi' ],
- 'variables': {
- 'buildflag_header_path': 'base/win/base_features.h',
- 'buildflag_flags': [
- 'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
- ],
- },
- }, {
- 'type': 'none',
- }],
- ],
- },
- {
- 'type': 'none',
- 'target_name': 'base_build_date',
- 'hard_dependency': 1,
- 'actions': [{
- 'action_name': 'generate_build_date_headers',
- 'inputs': [
- '<(DEPTH)/build/write_build_date_header.py',
- '<(DEPTH)/build/util/LASTCHANGE'
- ],
- 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h' ],
- 'action': [
- 'python', '<(DEPTH)/build/write_build_date_header.py',
- '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h',
- '<(build_type)'
- ]
- }],
- 'conditions': [
- [ 'buildtype == "Official"', {
- 'variables': {
- 'build_type': 'official'
- }
- }, {
- 'variables': {
- 'build_type': 'default'
- }
- }],
- ]
- },
- ],
- 'conditions': [
- ['OS=="ios" and "<(GENERATOR)"=="ninja"', {
- 'targets': [
- {
- 'target_name': 'test_launcher',
- 'toolsets': ['host'],
- 'type': 'executable',
- 'dependencies': [
- 'test_support_base',
- ],
- 'sources': [
- 'test/launcher/test_launcher_ios.cc',
- ],
- },
- ],
- }],
- ['OS!="ios"', {
- 'targets': [
- {
- # GN: //base:check_example
- 'target_name': 'check_example',
- 'type': 'executable',
- 'sources': [
- 'check_example.cc',
- ],
- 'dependencies': [
- 'base',
- ],
- },
- {
- 'target_name': 'build_utf8_validator_tables',
- 'type': 'executable',
- 'toolsets': ['host'],
- 'dependencies': [
- 'base',
- '../third_party/icu/icu.gyp:icuuc',
- ],
- 'sources': [
- 'i18n/build_utf8_validator_tables.cc'
- ],
- },
- ],
- }],
- ['OS == "win" and target_arch=="ia32"', {
- 'targets': [
- # The base_win64 target here allows us to use base for Win64 targets
- # (the normal build is 32 bits).
- {
- 'target_name': 'base_win64',
- 'type': '<(component)',
- 'variables': {
- 'base_target': 1,
- },
- 'dependencies': [
- 'base_build_date',
- 'base_debugging_flags#target',
- 'base_static_win64',
- '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
- 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
- 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
- ],
- # TODO(gregoryd): direct_dependent_settings should be shared with the
- # 32-bit target, but it doesn't work due to a bug in gyp
- 'direct_dependent_settings': {
- 'include_dirs': [
- '..',
- ],
- },
- 'defines': [
- 'BASE_WIN64',
- '<@(nacl_win64_defines)',
- ],
- 'configurations': {
- 'Common_Base': {
- 'msvs_target_platform': 'x64',
- },
- },
- 'conditions': [
- ['component == "shared_library"', {
- 'sources!': [
- 'debug/debug_on_start_win.cc',
- ],
- }],
- ],
- # Specify delayload for base_win64.dll.
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'DelayLoadDLLs': [
- 'cfgmgr32.dll',
- 'powrprof.dll',
- 'setupapi.dll',
- ],
- 'AdditionalDependencies': [
- 'cfgmgr32.lib',
- 'powrprof.lib',
- 'setupapi.lib',
- 'userenv.lib',
- 'winmm.lib',
- ],
- },
- },
- # Specify delayload for components that link with base_win64.lib.
- 'all_dependent_settings': {
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'DelayLoadDLLs': [
- 'cfgmgr32.dll',
- 'powrprof.dll',
- 'setupapi.dll',
- ],
- 'AdditionalDependencies': [
- 'cfgmgr32.lib',
- 'powrprof.lib',
- 'setupapi.lib',
- 'userenv.lib',
- 'winmm.lib',
- ],
- },
- },
- },
- # TODO(rvargas): Bug 78117. Remove this.
- 'msvs_disabled_warnings': [
- 4244,
- 4996,
- 4267,
- ],
- 'sources': [
- 'auto_reset.h',
- 'linux_util.cc',
- 'linux_util.h',
- 'md5.cc',
- 'md5.h',
- 'message_loop/message_pump_libevent.cc',
- 'message_loop/message_pump_libevent.h',
- 'metrics/field_trial.cc',
- 'metrics/field_trial.h',
- 'posix/file_descriptor_shuffle.cc',
- 'posix/file_descriptor_shuffle.h',
- 'sync_socket.h',
- 'sync_socket_posix.cc',
- 'sync_socket_win.cc',
- 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
- 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
- ],
- },
- {
- 'target_name': 'base_i18n_nacl_win64',
- 'type': '<(component)',
- # TODO(gregoryd): direct_dependent_settings should be shared with the
- # 32-bit target, but it doesn't work due to a bug in gyp
- 'direct_dependent_settings': {
- 'include_dirs': [
- '..',
- ],
- },
- 'defines': [
- '<@(nacl_win64_defines)',
- 'BASE_I18N_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'sources': [
- 'i18n/icu_util_nacl_win64.cc',
- ],
- 'configurations': {
- 'Common_Base': {
- 'msvs_target_platform': 'x64',
- },
- },
- },
- {
- # TODO(rvargas): Remove this when gyp finally supports a clean model.
- # See bug 36232.
- 'target_name': 'base_static_win64',
- 'type': 'static_library',
- 'sources': [
- 'base_switches.cc',
- 'base_switches.h',
- 'win/pe_image.cc',
- 'win/pe_image.h',
- ],
- 'sources!': [
- # base64.cc depends on modp_b64.
- 'base64.cc',
- ],
- 'include_dirs': [
- '..',
- ],
- 'configurations': {
- 'Common_Base': {
- 'msvs_target_platform': 'x64',
- },
- },
- 'defines': [
- '<@(nacl_win64_defines)',
- ],
- # TODO(rvargas): Bug 78117. Remove this.
- 'msvs_disabled_warnings': [
- 4244,
- ],
- },
- ],
- }],
- ['OS == "win" and target_arch=="x64"', {
- 'targets': [
- {
- 'target_name': 'base_profiler_test_support_library',
- # Must be a shared library so that it can be unloaded during testing.
- 'type': 'shared_library',
- 'include_dirs': [
- '..',
- ],
- 'sources': [
- 'profiler/test_support_library.cc',
- ],
- },
- ]
- }],
- ['os_posix==1 and OS!="mac" and OS!="ios"', {
- 'targets': [
- {
- 'target_name': 'symbolize',
- 'type': 'static_library',
- 'toolsets': ['host', 'target'],
- 'variables': {
- 'chromium_code': 0,
- },
- 'conditions': [
- ['OS == "solaris"', {
- 'include_dirs': [
- '/usr/gnu/include',
- '/usr/gnu/include/libelf',
- ],
- },],
- ],
- 'cflags': [
- '-Wno-sign-compare',
- ],
- 'cflags!': [
- '-Wextra',
- ],
- 'defines': [
- 'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
- ],
- 'sources': [
- 'third_party/symbolize/config.h',
- 'third_party/symbolize/demangle.cc',
- 'third_party/symbolize/demangle.h',
- 'third_party/symbolize/glog/logging.h',
- 'third_party/symbolize/glog/raw_logging.h',
- 'third_party/symbolize/symbolize.cc',
- 'third_party/symbolize/symbolize.h',
- 'third_party/symbolize/utilities.h',
- ],
- 'include_dirs': [
- '..',
- ],
- 'includes': [
- '../build/android/increase_size_for_speed.gypi',
- ],
- },
- {
- 'target_name': 'xdg_mime',
- 'type': 'static_library',
- 'toolsets': ['host', 'target'],
- 'variables': {
- 'chromium_code': 0,
- },
- 'cflags!': [
- '-Wextra',
- ],
- 'sources': [
- 'third_party/xdg_mime/xdgmime.c',
- 'third_party/xdg_mime/xdgmime.h',
- 'third_party/xdg_mime/xdgmimealias.c',
- 'third_party/xdg_mime/xdgmimealias.h',
- 'third_party/xdg_mime/xdgmimecache.c',
- 'third_party/xdg_mime/xdgmimecache.h',
- 'third_party/xdg_mime/xdgmimeglob.c',
- 'third_party/xdg_mime/xdgmimeglob.h',
- 'third_party/xdg_mime/xdgmimeicon.c',
- 'third_party/xdg_mime/xdgmimeicon.h',
- 'third_party/xdg_mime/xdgmimeint.c',
- 'third_party/xdg_mime/xdgmimeint.h',
- 'third_party/xdg_mime/xdgmimemagic.c',
- 'third_party/xdg_mime/xdgmimemagic.h',
- 'third_party/xdg_mime/xdgmimeparent.c',
- 'third_party/xdg_mime/xdgmimeparent.h',
- ],
- 'includes': [
- '../build/android/increase_size_for_speed.gypi',
- ],
- },
- ],
- }],
- ['OS == "linux"', {
- 'targets': [
- {
- 'target_name': 'malloc_wrapper',
- 'type': 'shared_library',
- 'dependencies': [
- 'base',
- ],
- 'sources': [
- 'test/malloc_wrapper.cc',
- ],
- }
- ],
- }],
- ['OS == "android"', {
- 'targets': [
- {
- # GN: //base:base_jni_headers
- 'target_name': 'base_jni_headers',
- 'type': 'none',
- 'sources': [
- 'android/java/src/org/chromium/base/ApkAssets.java',
- 'android/java/src/org/chromium/base/ApplicationStatus.java',
- 'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
- 'android/java/src/org/chromium/base/BuildInfo.java',
- 'android/java/src/org/chromium/base/Callback.java',
- 'android/java/src/org/chromium/base/CommandLine.java',
- 'android/java/src/org/chromium/base/ContentUriUtils.java',
- 'android/java/src/org/chromium/base/ContextUtils.java',
- 'android/java/src/org/chromium/base/CpuFeatures.java',
- 'android/java/src/org/chromium/base/EventLog.java',
- 'android/java/src/org/chromium/base/FieldTrialList.java',
- 'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
- 'android/java/src/org/chromium/base/JNIUtils.java',
- 'android/java/src/org/chromium/base/JavaHandlerThread.java',
- 'android/java/src/org/chromium/base/LocaleUtils.java',
- 'android/java/src/org/chromium/base/MemoryPressureListener.java',
- 'android/java/src/org/chromium/base/PathService.java',
- 'android/java/src/org/chromium/base/PathUtils.java',
- 'android/java/src/org/chromium/base/PowerMonitor.java',
- 'android/java/src/org/chromium/base/SysUtils.java',
- 'android/java/src/org/chromium/base/SystemMessageHandler.java',
- 'android/java/src/org/chromium/base/ThreadUtils.java',
- 'android/java/src/org/chromium/base/TraceEvent.java',
- 'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
- 'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
- 'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
- ],
- 'variables': {
- 'jni_gen_package': 'base',
- },
- 'dependencies': [
- 'android_runtime_jni_headers',
- ],
- 'includes': [ '../build/jni_generator.gypi' ],
- },
- {
- # GN: //base:android_runtime_jni_headers
- 'target_name': 'android_runtime_jni_headers',
- 'type': 'none',
- 'variables': {
- 'jni_gen_package': 'base',
- 'input_java_class': 'java/lang/Runtime.class',
- },
- 'includes': [ '../build/jar_file_jni_generator.gypi' ],
- },
- {
- # GN: //base:base_unittests_jni_headers
- 'target_name': 'base_unittests_jni_headers',
- 'type': 'none',
- 'sources': [
- 'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
- 'test/android/java/src/org/chromium/base/TestUiThread.java',
- ],
- 'variables': {
- 'jni_gen_package': 'base',
- },
- 'includes': [ '../build/jni_generator.gypi' ],
- },
- {
- # GN: //base:base_native_libraries_gen
- 'target_name': 'base_native_libraries_gen',
- 'type': 'none',
- 'sources': [
- 'android/java/templates/NativeLibraries.template',
- ],
- 'variables': {
- 'package_name': 'org/chromium/base/library_loader',
- 'template_deps': [],
- },
- 'includes': [ '../build/android/java_cpp_template.gypi' ],
- },
- {
- # GN: //base:base_build_config_gen
- 'target_name': 'base_build_config_gen',
- 'type': 'none',
- 'sources': [
- 'android/java/templates/BuildConfig.template',
- ],
- 'variables': {
- 'package_name': 'org/chromium/base',
- 'template_deps': [],
- },
- 'includes': ['../build/android/java_cpp_template.gypi'],
- },
- {
- # GN: //base:base_android_java_enums_srcjar
- 'target_name': 'base_java_library_process_type',
- 'type': 'none',
- 'variables': {
- 'source_file': 'android/library_loader/library_loader_hooks.h',
- },
- 'includes': [ '../build/android/java_cpp_enum.gypi' ],
- },
- {
- # GN: //base:base_java
- 'target_name': 'base_java',
- 'type': 'none',
- 'variables': {
- 'java_in_dir': 'android/java',
- 'jar_excluded_classes': [
- '*/BuildConfig.class',
- '*/NativeLibraries.class',
- ],
- },
- 'dependencies': [
- 'base_java_application_state',
- 'base_java_library_load_from_apk_status_codes',
- 'base_java_library_process_type',
- 'base_java_memory_pressure_level',
- 'base_build_config_gen',
- 'base_native_libraries_gen',
- '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
- '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
- ],
- 'all_dependent_settings': {
- 'variables': {
- 'generate_build_config': 1,
- },
- },
- 'includes': [ '../build/java.gypi' ],
- },
- {
- # GN: //base:base_java_unittest_support
- 'target_name': 'base_java_unittest_support',
- 'type': 'none',
- 'dependencies': [
- 'base_java',
- ],
- 'variables': {
- 'java_in_dir': '../base/test/android/java',
- },
- 'includes': [ '../build/java.gypi' ],
- },
- {
- # GN: //base:base_android_java_enums_srcjar
- 'target_name': 'base_java_application_state',
- 'type': 'none',
- 'variables': {
- 'source_file': 'android/application_status_listener.h',
- },
- 'includes': [ '../build/android/java_cpp_enum.gypi' ],
- },
- {
- # GN: //base:base_android_java_enums_srcjar
- 'target_name': 'base_java_library_load_from_apk_status_codes',
- 'type': 'none',
- 'variables': {
- 'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
- },
- 'includes': [ '../build/android/java_cpp_enum.gypi' ],
- },
- {
- # GN: //base:base_android_java_enums_srcjar
- 'target_name': 'base_java_memory_pressure_level',
- 'type': 'none',
- 'variables': {
- 'source_file': 'memory/memory_pressure_listener.h',
- },
- 'includes': [ '../build/android/java_cpp_enum.gypi' ],
- },
- {
- # GN: //base:base_java_test_support
- 'target_name': 'base_java_test_support',
- 'type': 'none',
- 'dependencies': [
- 'base_java',
- '../testing/android/on_device_instrumentation.gyp:reporter_java',
- ],
- 'variables': {
- 'java_in_dir': '../base/test/android/javatests',
- },
- 'includes': [ '../build/java.gypi' ],
- },
- {
- # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
- # in the multidex shadow library. crbug.com/522043
- # GN: //base:base_junit_test_support
- 'target_name': 'base_junit_test_support',
- 'type': 'none',
- 'dependencies': [
- 'base_build_config_gen',
- '../testing/android/junit/junit_test.gyp:junit_test_support',
- '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
- ],
- 'variables': {
- 'src_paths': [
- '../base/test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java',
- ],
- },
- 'includes': [ '../build/host_jar.gypi' ]
- },
- {
- # GN: //base:base_junit_tests
- 'target_name': 'base_junit_tests',
- 'type': 'none',
- 'dependencies': [
- 'base_java',
- 'base_java_test_support',
- 'base_junit_test_support',
- '../testing/android/junit/junit_test.gyp:junit_test_support',
- ],
- 'variables': {
- 'main_class': 'org.chromium.testing.local.JunitTestMain',
- 'src_paths': [
- '../base/android/junit/',
- '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
- '../base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java',
- '../base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java',
- '../base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java',
- ],
- 'test_type': 'junit',
- 'wrapper_script_name': 'helper/<(_target_name)',
- },
- 'includes': [
- '../build/android/test_runner.gypi',
- '../build/host_jar.gypi',
- ],
- },
- {
- # GN: //base:base_javatests
- 'target_name': 'base_javatests',
- 'type': 'none',
- 'dependencies': [
- 'base_java',
- 'base_java_test_support',
- ],
- 'variables': {
- 'java_in_dir': '../base/android/javatests',
- },
- 'includes': [ '../build/java.gypi' ],
- },
- {
- # GN: //base/android/linker:chromium_android_linker
- 'target_name': 'chromium_android_linker',
- 'type': 'shared_library',
- 'sources': [
- 'android/linker/android_dlext.h',
- 'android/linker/legacy_linker_jni.cc',
- 'android/linker/legacy_linker_jni.h',
- 'android/linker/linker_jni.cc',
- 'android/linker/linker_jni.h',
- 'android/linker/modern_linker_jni.cc',
- 'android/linker/modern_linker_jni.h',
- ],
- # The crazy linker is never instrumented.
- 'cflags!': [
- '-finstrument-functions',
- ],
- 'dependencies': [
- # The NDK contains the crazy_linker here:
- # '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
- # However, we use our own fork. See bug 384700.
- '../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
- ],
- },
- {
- # GN: //base:base_perftests_apk
- 'target_name': 'base_perftests_apk',
- 'type': 'none',
- 'dependencies': [
- 'base_perftests',
- ],
- 'variables': {
- 'test_suite_name': 'base_perftests',
- },
- 'includes': [ '../build/apk_test.gypi' ],
- },
- {
- # GN: //base:base_unittests_apk
- 'target_name': 'base_unittests_apk',
- 'type': 'none',
- 'dependencies': [
- 'base_java',
- 'base_unittests',
- ],
- 'variables': {
- 'test_suite_name': 'base_unittests',
- 'isolate_file': 'base_unittests.isolate',
- },
- 'includes': [ '../build/apk_test.gypi' ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"',
- {
- 'targets': [
- {
- 'target_name': 'base_unittests_apk_run',
- 'type': 'none',
- 'dependencies': [
- 'base_unittests_apk',
- ],
- 'includes': [
- '../build/isolate.gypi',
- ],
- 'sources': [
- 'base_unittests_apk.isolate',
- ],
- },
- ]
- }
- ],
- ],
- }],
- ['OS == "win"', {
- 'targets': [
- {
-          # Target to manually rebuild pe_image_test.dll, which is checked
-          # into base/test/data/pe_image.
- 'target_name': 'pe_image_test',
- 'type': 'shared_library',
- 'sources': [
- 'win/pe_image_test.cc',
- ],
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
- 'DelayLoadDLLs': [
- 'cfgmgr32.dll',
- 'shell32.dll',
- ],
- 'AdditionalDependencies': [
- 'cfgmgr32.lib',
- 'shell32.lib',
- ],
- },
- },
- },
- {
- 'target_name': 'scoped_handle_test_dll',
- 'type': 'loadable_module',
- 'dependencies': [
- 'base',
- ],
- 'sources': [
- 'win/scoped_handle_test_dll.cc',
- ],
- },
- ],
- }],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'base_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'base_unittests',
- ],
- 'includes': [
- '../build/isolate.gypi',
- ],
- 'sources': [
- 'base_unittests.isolate',
- ],
- },
- ],
- }],
- ['OS == "ios" or OS == "mac"', {
- 'targets': [
- {
- 'target_name': 'base_unittests_arc',
- 'type': 'static_library',
- 'dependencies': [
- 'base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'mac/bind_objc_block_unittest_arc.mm',
- 'mac/scoped_nsobject_unittest_arc.mm'
- ],
- 'xcode_settings': {
- 'CLANG_ENABLE_OBJC_ARC': 'YES',
- },
- 'target_conditions': [
- ['OS == "ios" and _toolset != "host"', {
- 'sources/': [
- ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
- ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
- ],
- }]
- ],
- },
- ],
- }],
- ],
-}
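
Note on the deleted base.gyp above: the 'base_build_date' target shells out to
build/write_build_date_header.py with an output path and a build type
('official' or 'default'). As a rough Python sketch of a generator with that
command-line contract (an illustration only, not the actual Chromium script;
the BUILD_DATE macro name and the exact date policy are assumptions):

import datetime
import sys

def write_build_date_header(output_path, build_type):
    # Assumed policy: official builds bake in a coarse, day-granularity date
    # so rebuilds stay deterministic within a day; default builds may embed
    # the full current timestamp.
    now = datetime.datetime.now()
    if build_type == 'official':
        date_str = now.strftime('%b %d %Y')
    else:
        date_str = now.strftime('%b %d %Y %H:%M:%S')
    with open(output_path, 'w') as f:
        f.write('// Generated by write_build_date_header.py. Do not edit.\n')
        f.write('#ifndef BASE_GENERATED_BUILD_DATE_H_\n')
        f.write('#define BASE_GENERATED_BUILD_DATE_H_\n\n')
        f.write('#define BUILD_DATE "%s"\n\n' % date_str)
        f.write('#endif  // BASE_GENERATED_BUILD_DATE_H_\n')

if __name__ == '__main__':
    write_build_date_header(sys.argv[1], sys.argv[2])

The 'hard_dependency': 1 setting on that target exists for the same reason the
buildflag targets warn about toolchains: the header is generated, so dependent
targets must not start compiling until the action has run.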
diff --git a/base/base.gypi b/base/base.gypi
deleted file mode 100644
index cb41e79310..0000000000
--- a/base/base.gypi
+++ /dev/null
@@ -1,1106 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'includes': [
- 'trace_event/trace_event.gypi',
- ],
- 'target_defaults': {
- 'variables': {
- 'base_target': 0,
- 'base_i18n_target': 0,
- },
- 'target_conditions': [
- # This part is shared between the targets defined below.
- ['base_target==1', {
- 'sources': [
- '../build/build_config.h',
- 'allocator/allocator_check.cc',
- 'allocator/allocator_check.h',
- 'allocator/allocator_extension.cc',
- 'allocator/allocator_extension.h',
- 'android/animation_frame_time_histogram.cc',
- 'android/animation_frame_time_histogram.h',
- 'android/apk_assets.cc',
- 'android/apk_assets.h',
- 'android/application_status_listener.cc',
- 'android/application_status_listener.h',
- 'android/base_jni_onload.cc',
- 'android/base_jni_onload.h',
- 'android/base_jni_registrar.cc',
- 'android/base_jni_registrar.h',
- 'android/build_info.cc',
- 'android/build_info.h',
- 'android/callback_android.cc',
- 'android/callback_android.h',
- 'android/command_line_android.cc',
- 'android/command_line_android.h',
- 'android/content_uri_utils.cc',
- 'android/content_uri_utils.h',
- 'android/context_utils.cc',
- 'android/context_utils.h',
- 'android/cpu_features.cc',
- 'android/cxa_demangle_stub.cc',
- 'android/event_log.cc',
- 'android/event_log.h',
- 'android/field_trial_list.cc',
- 'android/field_trial_list.h',
- 'android/fifo_utils.cc',
- 'android/fifo_utils.h',
- 'android/important_file_writer_android.cc',
- 'android/important_file_writer_android.h',
- 'android/java_handler_thread.cc',
- 'android/java_handler_thread.h',
- 'android/java_runtime.cc',
- 'android/java_runtime.h',
- 'android/jni_android.cc',
- 'android/jni_android.h',
- 'android/jni_array.cc',
- 'android/jni_array.h',
- 'android/jni_registrar.cc',
- 'android/jni_registrar.h',
- 'android/jni_string.cc',
- 'android/jni_string.h',
- 'android/jni_utils.cc',
- 'android/jni_utils.h',
- 'android/jni_weak_ref.cc',
- 'android/jni_weak_ref.h',
- 'android/library_loader/library_load_from_apk_status_codes.h',
- 'android/library_loader/library_loader_hooks.cc',
- 'android/library_loader/library_loader_hooks.h',
- 'android/library_loader/library_prefetcher.cc',
- 'android/library_loader/library_prefetcher.h',
- 'android/locale_utils.cc',
- 'android/locale_utils.h',
- 'android/memory_pressure_listener_android.cc',
- 'android/memory_pressure_listener_android.h',
- 'android/path_service_android.cc',
- 'android/path_service_android.h',
- 'android/path_utils.cc',
- 'android/path_utils.h',
- 'android/record_histogram.cc',
- 'android/record_histogram.h',
- 'android/record_user_action.cc',
- 'android/record_user_action.h',
- 'android/scoped_java_ref.cc',
- 'android/scoped_java_ref.h',
- 'android/sys_utils.cc',
- 'android/sys_utils.h',
- 'android/thread_utils.h',
- 'android/trace_event_binding.cc',
- 'android/trace_event_binding.h',
- 'at_exit.cc',
- 'at_exit.h',
- 'atomic_ref_count.h',
- 'atomic_sequence_num.h',
- 'atomicops.h',
- 'atomicops_internals_portable.h',
- 'atomicops_internals_x86_msvc.h',
- 'barrier_closure.cc',
- 'barrier_closure.h',
- 'base64.cc',
- 'base64.h',
- 'base64url.cc',
- 'base64url.h',
- 'base_export.h',
- 'base_paths.cc',
- 'base_paths.h',
- 'base_paths_android.cc',
- 'base_paths_android.h',
- 'base_paths_mac.h',
- 'base_paths_mac.mm',
- 'base_paths_posix.cc',
- 'base_paths_posix.h',
- 'base_paths_win.cc',
- 'base_paths_win.h',
- 'base_switches.h',
- 'big_endian.cc',
- 'big_endian.h',
- 'bind.h',
- 'bind_helpers.cc',
- 'bind_helpers.h',
- 'bind_internal.h',
- 'bit_cast.h',
- 'bits.h',
- 'build_time.cc',
- 'build_time.h',
- 'callback.h',
- 'callback_helpers.cc',
- 'callback_helpers.h',
- 'callback_internal.cc',
- 'callback_internal.h',
- 'callback_list.h',
- 'cancelable_callback.h',
- 'command_line.cc',
- 'command_line.h',
- 'compiler_specific.h',
- 'containers/adapters.h',
- 'containers/hash_tables.h',
- 'containers/linked_list.h',
- 'containers/mru_cache.h',
- 'containers/scoped_ptr_hash_map.h',
- 'containers/small_map.h',
- 'containers/stack_container.h',
- 'cpu.cc',
- 'cpu.h',
- 'critical_closure.h',
- 'critical_closure_internal_ios.mm',
- 'debug/alias.cc',
- 'debug/alias.h',
- 'debug/asan_invalid_access.cc',
- 'debug/asan_invalid_access.h',
- 'debug/close_handle_hook_win.cc',
- 'debug/close_handle_hook_win.h',
- 'debug/crash_logging.cc',
- 'debug/crash_logging.h',
- 'debug/debugger.cc',
- 'debug/debugger.h',
- 'debug/debugger_posix.cc',
- 'debug/debugger_win.cc',
- 'debug/dump_without_crashing.cc',
- 'debug/dump_without_crashing.h',
- 'debug/gdi_debug_util_win.cc',
- 'debug/gdi_debug_util_win.h',
- # This file depends on files from the 'allocator' target,
- # but this target does not depend on 'allocator' (see
- # allocator.gyp for details).
- 'debug/leak_annotations.h',
- 'debug/leak_tracker.h',
- 'debug/proc_maps_linux.cc',
- 'debug/proc_maps_linux.h',
- 'debug/profiler.cc',
- 'debug/profiler.h',
- 'debug/stack_trace.cc',
- 'debug/stack_trace.h',
- 'debug/stack_trace_android.cc',
- 'debug/stack_trace_posix.cc',
- 'debug/stack_trace_win.cc',
- 'debug/task_annotator.cc',
- 'debug/task_annotator.h',
- 'deferred_sequenced_task_runner.cc',
- 'deferred_sequenced_task_runner.h',
- 'environment.cc',
- 'environment.h',
- 'feature_list.cc',
- 'feature_list.h',
- 'file_descriptor_posix.h',
- 'file_version_info.h',
- 'file_version_info_mac.h',
- 'file_version_info_mac.mm',
- 'file_version_info_win.cc',
- 'file_version_info_win.h',
- 'files/dir_reader_fallback.h',
- 'files/dir_reader_linux.h',
- 'files/dir_reader_posix.h',
- 'files/file.cc',
- 'files/file.h',
- 'files/file_enumerator.cc',
- 'files/file_enumerator.h',
- 'files/file_enumerator_posix.cc',
- 'files/file_enumerator_win.cc',
- 'files/file_path.cc',
- 'files/file_path.h',
- 'files/file_path_constants.cc',
- 'files/file_path_watcher.cc',
- 'files/file_path_watcher.h',
- 'files/file_path_watcher_fsevents.cc',
- 'files/file_path_watcher_fsevents.h',
- 'files/file_path_watcher_kqueue.cc',
- 'files/file_path_watcher_kqueue.h',
- 'files/file_path_watcher_linux.cc',
- 'files/file_path_watcher_mac.cc',
- 'files/file_path_watcher_stub.cc',
- 'files/file_path_watcher_win.cc',
- 'files/file_posix.cc',
- 'files/file_proxy.cc',
- 'files/file_proxy.h',
- 'files/file_tracing.cc',
- 'files/file_tracing.h',
- 'files/file_util.cc',
- 'files/file_util.h',
- 'files/file_util_android.cc',
- 'files/file_util_linux.cc',
- 'files/file_util_mac.mm',
- 'files/file_util_posix.cc',
- 'files/file_util_proxy.cc',
- 'files/file_util_proxy.h',
- 'files/file_util_win.cc',
- 'files/file_win.cc',
- 'files/important_file_writer.cc',
- 'files/important_file_writer.h',
- 'files/memory_mapped_file.cc',
- 'files/memory_mapped_file.h',
- 'files/memory_mapped_file_posix.cc',
- 'files/memory_mapped_file_win.cc',
- 'files/scoped_file.cc',
- 'files/scoped_file.h',
- 'files/scoped_temp_dir.cc',
- 'files/scoped_temp_dir.h',
- 'format_macros.h',
- 'gtest_prod_util.h',
- 'guid.cc',
- 'guid.h',
- 'hash.cc',
- 'hash.h',
- 'id_map.h',
- 'ios/block_types.h',
- 'ios/crb_protocol_observers.h',
- 'ios/crb_protocol_observers.mm',
- 'ios/device_util.h',
- 'ios/device_util.mm',
- 'ios/ios_util.h',
- 'ios/ios_util.mm',
- 'ios/ns_error_util.h',
- 'ios/ns_error_util.mm',
- 'ios/scoped_critical_action.h',
- 'ios/scoped_critical_action.mm',
- 'ios/weak_nsobject.h',
- 'ios/weak_nsobject.mm',
- 'json/json_file_value_serializer.cc',
- 'json/json_file_value_serializer.h',
- 'json/json_parser.cc',
- 'json/json_parser.h',
- 'json/json_reader.cc',
- 'json/json_reader.h',
- 'json/json_string_value_serializer.cc',
- 'json/json_string_value_serializer.h',
- 'json/json_value_converter.cc',
- 'json/json_value_converter.h',
- 'json/json_writer.cc',
- 'json/json_writer.h',
- 'json/string_escape.cc',
- 'json/string_escape.h',
- 'lazy_instance.cc',
- 'lazy_instance.h',
- 'location.cc',
- 'location.h',
- 'logging.cc',
- 'logging.h',
- 'logging_win.cc',
- 'logging_win.h',
- 'mac/authorization_util.h',
- 'mac/authorization_util.mm',
- 'mac/bind_objc_block.h',
- 'mac/bundle_locations.h',
- 'mac/bundle_locations.mm',
- 'mac/call_with_eh_frame.cc',
- 'mac/call_with_eh_frame.h',
- 'mac/call_with_eh_frame_asm.S',
- 'mac/close_nocancel.cc',
- 'mac/cocoa_protocols.h',
- 'mac/dispatch_source_mach.cc',
- 'mac/dispatch_source_mach.h',
- 'mac/foundation_util.h',
- 'mac/foundation_util.mm',
- 'mac/launch_services_util.cc',
- 'mac/launch_services_util.h',
- 'mac/launchd.cc',
- 'mac/launchd.h',
- 'mac/mac_logging.h',
- 'mac/mac_logging.mm',
- 'mac/mac_util.h',
- 'mac/mac_util.mm',
- 'mac/mach_logging.cc',
- 'mac/mach_logging.h',
- 'mac/mach_port_broker.h',
- 'mac/mach_port_broker.mm',
- 'mac/mach_port_util.cc',
- 'mac/mach_port_util.h',
- 'mac/objc_property_releaser.h',
- 'mac/objc_property_releaser.mm',
- 'mac/os_crash_dumps.cc',
- 'mac/os_crash_dumps.h',
- 'mac/scoped_aedesc.h',
- 'mac/scoped_authorizationref.h',
- 'mac/scoped_block.h',
- 'mac/scoped_cftyperef.h',
- 'mac/scoped_dispatch_object.h',
- 'mac/scoped_ioobject.h',
- 'mac/scoped_ioplugininterface.h',
- 'mac/scoped_launch_data.h',
- 'mac/scoped_mach_port.cc',
- 'mac/scoped_mach_port.h',
- 'mac/scoped_mach_vm.cc',
- 'mac/scoped_mach_vm.h',
- 'mac/scoped_nsautorelease_pool.h',
- 'mac/scoped_nsautorelease_pool.mm',
- 'mac/scoped_nsobject.h',
- 'mac/scoped_nsobject.mm',
- 'mac/scoped_objc_class_swizzler.h',
- 'mac/scoped_objc_class_swizzler.mm',
- 'mac/scoped_sending_event.h',
- 'mac/scoped_sending_event.mm',
- 'mac/scoped_typeref.h',
- 'mac/sdk_forward_declarations.h',
- 'mac/sdk_forward_declarations.mm',
- 'macros.h',
- 'md5.cc',
- 'md5.h',
- 'memory/aligned_memory.cc',
- 'memory/aligned_memory.h',
- 'memory/discardable_memory.cc',
- 'memory/discardable_memory.h',
- 'memory/discardable_memory_allocator.cc',
- 'memory/discardable_memory_allocator.h',
- 'memory/discardable_shared_memory.cc',
- 'memory/discardable_shared_memory.h',
- 'memory/free_deleter.h',
- 'memory/linked_ptr.h',
- 'memory/manual_constructor.h',
- 'memory/memory_pressure_listener.cc',
- 'memory/memory_pressure_listener.h',
- 'memory/memory_pressure_monitor.cc',
- 'memory/memory_pressure_monitor.h',
- 'memory/memory_pressure_monitor_chromeos.cc',
- 'memory/memory_pressure_monitor_chromeos.h',
- 'memory/memory_pressure_monitor_mac.cc',
- 'memory/memory_pressure_monitor_mac.h',
- 'memory/memory_pressure_monitor_win.cc',
- 'memory/memory_pressure_monitor_win.h',
- 'memory/ptr_util.h',
- 'memory/raw_scoped_refptr_mismatch_checker.h',
- 'memory/ref_counted.cc',
- 'memory/ref_counted.h',
- 'memory/ref_counted_delete_on_message_loop.h',
- 'memory/ref_counted_memory.cc',
- 'memory/ref_counted_memory.h',
- 'memory/scoped_policy.h',
- 'memory/scoped_vector.h',
- 'memory/shared_memory.h',
- 'memory/shared_memory_android.cc',
- 'memory/shared_memory_handle.h',
- 'memory/shared_memory_handle_mac.cc',
- 'memory/shared_memory_handle_win.cc',
- 'memory/shared_memory_mac.cc',
- 'memory/shared_memory_nacl.cc',
- 'memory/shared_memory_posix.cc',
- 'memory/shared_memory_win.cc',
- 'memory/singleton.cc',
- 'memory/singleton.h',
- 'memory/weak_ptr.cc',
- 'memory/weak_ptr.h',
- 'message_loop/incoming_task_queue.cc',
- 'message_loop/incoming_task_queue.h',
- 'message_loop/message_loop.cc',
- 'message_loop/message_loop.h',
- 'message_loop/message_loop_task_runner.cc',
- 'message_loop/message_loop_task_runner.h',
- 'message_loop/message_pump.cc',
- 'message_loop/message_pump.h',
- 'message_loop/message_pump_android.cc',
- 'message_loop/message_pump_android.h',
- 'message_loop/message_pump_default.cc',
- 'message_loop/message_pump_default.h',
- 'message_loop/message_pump_win.cc',
- 'message_loop/message_pump_win.h',
- 'message_loop/timer_slack.h',
- 'metrics/bucket_ranges.cc',
- 'metrics/bucket_ranges.h',
- 'metrics/histogram.cc',
- 'metrics/histogram.h',
- 'metrics/histogram_base.cc',
- 'metrics/histogram_base.h',
- 'metrics/histogram_delta_serialization.cc',
- 'metrics/histogram_delta_serialization.h',
- 'metrics/histogram_flattener.h',
- 'metrics/histogram_macros.h',
- 'metrics/histogram_samples.cc',
- 'metrics/histogram_samples.h',
- 'metrics/histogram_snapshot_manager.cc',
- 'metrics/histogram_snapshot_manager.h',
- 'metrics/metrics_hashes.cc',
- 'metrics/metrics_hashes.h',
- 'metrics/persistent_histogram_allocator.cc',
- 'metrics/persistent_histogram_allocator.h',
- 'metrics/persistent_memory_allocator.cc',
- 'metrics/persistent_memory_allocator.h',
- 'metrics/persistent_sample_map.cc',
- 'metrics/persistent_sample_map.h',
- 'metrics/sample_map.cc',
- 'metrics/sample_map.h',
- 'metrics/sample_vector.cc',
- 'metrics/sample_vector.h',
- 'metrics/sparse_histogram.cc',
- 'metrics/sparse_histogram.h',
- 'metrics/statistics_recorder.cc',
- 'metrics/statistics_recorder.h',
- 'metrics/user_metrics.cc',
- 'metrics/user_metrics.h',
- 'metrics/user_metrics_action.h',
- 'native_library.h',
- 'native_library_ios.mm',
- 'native_library_mac.mm',
- 'native_library_posix.cc',
- 'native_library_win.cc',
- 'nix/mime_util_xdg.cc',
- 'nix/mime_util_xdg.h',
- 'nix/xdg_util.cc',
- 'nix/xdg_util.h',
- 'numerics/safe_conversions.h',
- 'numerics/safe_conversions_impl.h',
- 'numerics/safe_math.h',
- 'numerics/safe_math_impl.h',
- 'observer_list.h',
- 'observer_list_threadsafe.h',
- 'optional.h',
- 'os_compat_android.cc',
- 'os_compat_android.h',
- 'os_compat_nacl.cc',
- 'os_compat_nacl.h',
- 'path_service.cc',
- 'path_service.h',
- 'pending_task.cc',
- 'pending_task.h',
- 'pickle.cc',
- 'pickle.h',
- 'posix/eintr_wrapper.h',
- 'posix/global_descriptors.cc',
- 'posix/global_descriptors.h',
- 'posix/safe_strerror.cc',
- 'posix/safe_strerror.h',
- 'posix/unix_domain_socket_linux.cc',
- 'posix/unix_domain_socket_linux.h',
- 'power_monitor/power_monitor.cc',
- 'power_monitor/power_monitor.h',
- 'power_monitor/power_monitor_device_source.cc',
- 'power_monitor/power_monitor_device_source.h',
- 'power_monitor/power_monitor_device_source_android.cc',
- 'power_monitor/power_monitor_device_source_android.h',
- 'power_monitor/power_monitor_device_source_chromeos.cc',
- 'power_monitor/power_monitor_device_source_ios.mm',
- 'power_monitor/power_monitor_device_source_mac.mm',
- 'power_monitor/power_monitor_device_source_posix.cc',
- 'power_monitor/power_monitor_device_source_win.cc',
- 'power_monitor/power_monitor_source.cc',
- 'power_monitor/power_monitor_source.h',
- 'power_monitor/power_observer.h',
- 'process/internal_linux.cc',
- 'process/internal_linux.h',
- 'process/kill.cc',
- 'process/kill.h',
- 'process/kill_mac.cc',
- 'process/kill_posix.cc',
- 'process/kill_win.cc',
- 'process/launch.cc',
- 'process/launch.h',
- 'process/launch_ios.cc',
- 'process/launch_mac.cc',
- 'process/launch_posix.cc',
- 'process/launch_win.cc',
- 'process/memory.cc',
- 'process/memory.h',
- 'process/memory_linux.cc',
- 'process/memory_mac.mm',
- 'process/memory_win.cc',
- 'process/port_provider_mac.cc',
- 'process/port_provider_mac.h',
- 'process/process.h',
- 'process/process_handle.cc',
- 'process/process_handle_freebsd.cc',
- 'process/process_handle_linux.cc',
- 'process/process_handle_mac.cc',
- 'process/process_handle_openbsd.cc',
- 'process/process_handle_posix.cc',
- 'process/process_handle_win.cc',
- 'process/process_info.h',
- 'process/process_info_linux.cc',
- 'process/process_info_mac.cc',
- 'process/process_info_win.cc',
- 'process/process_iterator.cc',
- 'process/process_iterator.h',
- 'process/process_iterator_freebsd.cc',
- 'process/process_iterator_linux.cc',
- 'process/process_iterator_mac.cc',
- 'process/process_iterator_openbsd.cc',
- 'process/process_iterator_win.cc',
- 'process/process_linux.cc',
- 'process/process_metrics.cc',
- 'process/process_metrics.h',
- 'process/process_metrics_freebsd.cc',
- 'process/process_metrics_ios.cc',
- 'process/process_metrics_linux.cc',
- 'process/process_metrics_mac.cc',
- 'process/process_metrics_nacl.cc',
- 'process/process_metrics_openbsd.cc',
- 'process/process_metrics_posix.cc',
- 'process/process_metrics_win.cc',
- 'process/process_posix.cc',
- 'process/process_win.cc',
- 'profiler/native_stack_sampler.cc',
- 'profiler/native_stack_sampler.h',
- 'profiler/native_stack_sampler_posix.cc',
- 'profiler/native_stack_sampler_win.cc',
- 'profiler/scoped_profile.cc',
- 'profiler/scoped_profile.h',
- 'profiler/scoped_tracker.cc',
- 'profiler/scoped_tracker.h',
- 'profiler/stack_sampling_profiler.cc',
- 'profiler/stack_sampling_profiler.h',
- 'profiler/tracked_time.cc',
- 'profiler/tracked_time.h',
- 'rand_util.cc',
- 'rand_util.h',
- 'rand_util_nacl.cc',
- 'rand_util_posix.cc',
- 'rand_util_win.cc',
- 'run_loop.cc',
- 'run_loop.h',
- 'scoped_generic.h',
- 'scoped_native_library.cc',
- 'scoped_native_library.h',
- 'scoped_observer.h',
- 'sequence_checker.h',
- 'sequence_checker_impl.cc',
- 'sequence_checker_impl.h',
- 'sequenced_task_runner.cc',
- 'sequenced_task_runner.h',
- 'sequenced_task_runner_helpers.h',
- 'sha1.cc',
- 'sha1.h',
- 'single_thread_task_runner.h',
- 'stl_util.h',
- 'strings/latin1_string_conversions.cc',
- 'strings/latin1_string_conversions.h',
- 'strings/nullable_string16.cc',
- 'strings/nullable_string16.h',
- 'strings/pattern.cc',
- 'strings/pattern.h',
- 'strings/safe_sprintf.cc',
- 'strings/safe_sprintf.h',
- 'strings/string16.cc',
- 'strings/string16.h',
- 'strings/string_number_conversions.cc',
- 'strings/string_number_conversions.h',
- 'strings/string_piece.cc',
- 'strings/string_piece.h',
- 'strings/string_split.cc',
- 'strings/string_split.h',
- 'strings/string_tokenizer.h',
- 'strings/string_util.cc',
- 'strings/string_util.h',
- 'strings/string_util_constants.cc',
- 'strings/string_util_posix.h',
- 'strings/string_util_win.h',
- 'strings/stringize_macros.h',
- 'strings/stringprintf.cc',
- 'strings/stringprintf.h',
- 'strings/sys_string_conversions.h',
- 'strings/sys_string_conversions_mac.mm',
- 'strings/sys_string_conversions_posix.cc',
- 'strings/sys_string_conversions_win.cc',
- 'strings/utf_offset_string_conversions.cc',
- 'strings/utf_offset_string_conversions.h',
- 'strings/utf_string_conversion_utils.cc',
- 'strings/utf_string_conversion_utils.h',
- 'strings/utf_string_conversions.cc',
- 'strings/utf_string_conversions.h',
- 'supports_user_data.cc',
- 'supports_user_data.h',
- 'synchronization/cancellation_flag.cc',
- 'synchronization/cancellation_flag.h',
- 'synchronization/condition_variable.h',
- 'synchronization/condition_variable_posix.cc',
- 'synchronization/condition_variable_win.cc',
- 'synchronization/lock.cc',
- 'synchronization/lock.h',
- 'synchronization/lock_impl.h',
- 'synchronization/lock_impl_posix.cc',
- 'synchronization/lock_impl_win.cc',
- 'synchronization/read_write_lock.h',
- 'synchronization/read_write_lock_nacl.cc',
- 'synchronization/read_write_lock_posix.cc',
- 'synchronization/read_write_lock_win.cc',
- 'synchronization/spin_wait.h',
- 'synchronization/waitable_event.h',
- 'synchronization/waitable_event_posix.cc',
- 'synchronization/waitable_event_watcher.h',
- 'synchronization/waitable_event_watcher_posix.cc',
- 'synchronization/waitable_event_watcher_win.cc',
- 'synchronization/waitable_event_win.cc',
- 'sys_byteorder.h',
- 'sys_info.cc',
- 'sys_info.h',
- 'sys_info_android.cc',
- 'sys_info_chromeos.cc',
- 'sys_info_freebsd.cc',
- 'sys_info_internal.h',
- 'sys_info_ios.mm',
- 'sys_info_linux.cc',
- 'sys_info_mac.mm',
- 'sys_info_openbsd.cc',
- 'sys_info_posix.cc',
- 'sys_info_win.cc',
- 'system_monitor/system_monitor.cc',
- 'system_monitor/system_monitor.h',
- 'task/cancelable_task_tracker.cc',
- 'task/cancelable_task_tracker.h',
- 'task_runner.cc',
- 'task_runner.h',
- 'task_runner_util.h',
- 'task_scheduler/delayed_task_manager.cc',
- 'task_scheduler/delayed_task_manager.h',
- 'task_scheduler/priority_queue.cc',
- 'task_scheduler/priority_queue.h',
- 'task_scheduler/scheduler_lock.h',
- 'task_scheduler/scheduler_lock_impl.cc',
- 'task_scheduler/scheduler_lock_impl.h',
- 'task_scheduler/scheduler_service_thread.cc',
- 'task_scheduler/scheduler_service_thread.h',
- 'task_scheduler/scheduler_worker.cc',
- 'task_scheduler/scheduler_worker.h',
- 'task_scheduler/scheduler_worker_pool.h',
- 'task_scheduler/scheduler_worker_pool_impl.cc',
- 'task_scheduler/scheduler_worker_pool_impl.h',
- 'task_scheduler/scheduler_worker_stack.cc',
- 'task_scheduler/scheduler_worker_stack.h',
- 'task_scheduler/sequence.cc',
- 'task_scheduler/sequence.h',
- 'task_scheduler/sequence_sort_key.cc',
- 'task_scheduler/sequence_sort_key.h',
- 'task_scheduler/task.cc',
- 'task_scheduler/task.h',
- 'task_scheduler/task_scheduler.cc',
- 'task_scheduler/task_scheduler.h',
- 'task_scheduler/task_scheduler_impl.cc',
- 'task_scheduler/task_scheduler_impl.h',
- 'task_scheduler/task_tracker.cc',
- 'task_scheduler/task_tracker.h',
- 'task_scheduler/task_traits.cc',
- 'task_scheduler/task_traits.h',
- 'template_util.h',
- 'third_party/dmg_fp/dmg_fp.h',
- 'third_party/dmg_fp/dtoa_wrapper.cc',
- 'third_party/dmg_fp/g_fmt.cc',
- 'third_party/icu/icu_utf.cc',
- 'third_party/icu/icu_utf.h',
- 'third_party/nspr/prtime.cc',
- 'third_party/nspr/prtime.h',
- 'third_party/superfasthash/superfasthash.c',
- 'third_party/xdg_mime/xdgmime.h',
- 'threading/non_thread_safe.h',
- 'threading/non_thread_safe_impl.cc',
- 'threading/non_thread_safe_impl.h',
- 'threading/platform_thread.h',
- 'threading/platform_thread_android.cc',
- 'threading/platform_thread_internal_posix.cc',
- 'threading/platform_thread_internal_posix.h',
- 'threading/platform_thread_linux.cc',
- 'threading/platform_thread_mac.mm',
- 'threading/platform_thread_posix.cc',
- 'threading/platform_thread_win.cc',
- 'threading/post_task_and_reply_impl.cc',
- 'threading/post_task_and_reply_impl.h',
- 'threading/sequenced_task_runner_handle.cc',
- 'threading/sequenced_task_runner_handle.h',
- 'threading/sequenced_worker_pool.cc',
- 'threading/sequenced_worker_pool.h',
- 'threading/simple_thread.cc',
- 'threading/simple_thread.h',
- 'threading/thread.cc',
- 'threading/thread.h',
- 'threading/thread_checker.h',
- 'threading/thread_checker_impl.cc',
- 'threading/thread_checker_impl.h',
- 'threading/thread_collision_warner.cc',
- 'threading/thread_collision_warner.h',
- 'threading/thread_id_name_manager.cc',
- 'threading/thread_id_name_manager.h',
- 'threading/thread_local.h',
- 'threading/thread_local_android.cc',
- 'threading/thread_local_posix.cc',
- 'threading/thread_local_storage.cc',
- 'threading/thread_local_storage.h',
- 'threading/thread_local_storage_posix.cc',
- 'threading/thread_local_storage_win.cc',
- 'threading/thread_local_win.cc',
- 'threading/thread_restrictions.cc',
- 'threading/thread_restrictions.h',
- 'threading/thread_task_runner_handle.cc',
- 'threading/thread_task_runner_handle.h',
- 'threading/watchdog.cc',
- 'threading/watchdog.h',
- 'threading/worker_pool.cc',
- 'threading/worker_pool.h',
- 'threading/worker_pool_posix.cc',
- 'threading/worker_pool_posix.h',
- 'threading/worker_pool_win.cc',
- 'time/clock.cc',
- 'time/clock.h',
- 'time/default_clock.cc',
- 'time/default_clock.h',
- 'time/default_tick_clock.cc',
- 'time/default_tick_clock.h',
- 'time/tick_clock.cc',
- 'time/tick_clock.h',
- 'time/time.cc',
- 'time/time.h',
- 'time/time_mac.cc',
- 'time/time_posix.cc',
- 'time/time_win.cc',
- 'timer/elapsed_timer.cc',
- 'timer/elapsed_timer.h',
- 'timer/hi_res_timer_manager.h',
- 'timer/hi_res_timer_manager_posix.cc',
- 'timer/hi_res_timer_manager_win.cc',
- 'timer/mock_timer.cc',
- 'timer/mock_timer.h',
- 'timer/timer.cc',
- 'timer/timer.h',
- 'tracked_objects.cc',
- 'tracked_objects.h',
- 'tracking_info.cc',
- 'tracking_info.h',
- 'tuple.h',
- 'value_conversions.cc',
- 'value_conversions.h',
- 'values.cc',
- 'values.h',
- 'version.cc',
- 'version.h',
- 'vlog.cc',
- 'vlog.h',
- 'win/enum_variant.cc',
- 'win/enum_variant.h',
- 'win/event_trace_consumer.h',
- 'win/event_trace_controller.cc',
- 'win/event_trace_controller.h',
- 'win/event_trace_provider.cc',
- 'win/event_trace_provider.h',
- 'win/i18n.cc',
- 'win/i18n.h',
- 'win/iat_patch_function.cc',
- 'win/iat_patch_function.h',
- 'win/iunknown_impl.cc',
- 'win/iunknown_impl.h',
- 'win/message_window.cc',
- 'win/message_window.h',
- 'win/object_watcher.cc',
- 'win/object_watcher.h',
- 'win/process_startup_helper.cc',
- 'win/process_startup_helper.h',
- 'win/registry.cc',
- 'win/registry.h',
- 'win/resource_util.cc',
- 'win/resource_util.h',
- 'win/scoped_bstr.cc',
- 'win/scoped_bstr.h',
- 'win/scoped_co_mem.h',
- 'win/scoped_com_initializer.h',
- 'win/scoped_comptr.h',
- 'win/scoped_gdi_object.h',
- 'win/scoped_handle.cc',
- 'win/scoped_handle.h',
- 'win/scoped_hdc.h',
- 'win/scoped_hglobal.h',
- 'win/scoped_process_information.cc',
- 'win/scoped_process_information.h',
- 'win/scoped_propvariant.h',
- 'win/scoped_select_object.h',
- 'win/scoped_variant.cc',
- 'win/scoped_variant.h',
- 'win/shortcut.cc',
- 'win/shortcut.h',
- 'win/startup_information.cc',
- 'win/startup_information.h',
- 'win/wait_chain.cc',
- 'win/wait_chain.h',
- 'win/win_util.cc',
- 'win/win_util.h',
- 'win/windows_version.cc',
- 'win/windows_version.h',
- 'win/wrapped_window_proc.cc',
- 'win/wrapped_window_proc.h',
- '<@(trace_event_sources)',
- ],
- 'defines': [
- 'BASE_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'target_conditions': [
- ['OS == "mac" or OS == "ios"', {
- 'sources!': [
- 'memory/shared_memory_posix.cc',
- ],
- }],
- ['OS == "ios"', {
- 'sources!': [
- 'memory/discardable_shared_memory.cc',
- 'memory/discardable_shared_memory.h',
- ],
- }],
- ['(<(desktop_linux) == 0 and <(chromeos) == 0) or >(nacl_untrusted_build)==1', {
- 'sources/': [
- ['exclude', '^nix/'],
- ],
- }],
- ['<(use_glib)==0 or >(nacl_untrusted_build)==1', {
- 'sources!': [
- 'message_loop/message_pump_glib.cc',
- ],
- }],
- ['(OS != "linux" and <(os_bsd) != 1 and OS != "android") or >(nacl_untrusted_build)==1', {
- 'sources!': [
- # Not automatically excluded by the *linux.cc rules.
- 'linux_util.cc',
- ],
- },
- ],
- ['>(nacl_untrusted_build)==1', {
- 'sources!': [
- 'base_paths.cc',
- 'cpu.cc',
- 'debug/stack_trace.cc',
- 'debug/stack_trace_posix.cc',
- 'files/file_enumerator_posix.cc',
- 'files/file_path_watcher_fsevents.cc',
- 'files/file_path_watcher_fsevents.h',
- 'files/file_path_watcher_kqueue.cc',
- 'files/file_path_watcher_kqueue.h',
- 'files/file_proxy.cc',
- 'files/file_util.cc',
- 'files/file_util_posix.cc',
- 'files/file_util_proxy.cc',
- 'files/important_file_writer.cc',
- 'files/scoped_temp_dir.cc',
- 'memory/shared_memory_posix.cc',
- 'native_library_posix.cc',
- 'path_service.cc',
- 'posix/unix_domain_socket_linux.cc',
- 'process/kill.cc',
- 'process/kill_posix.cc',
- 'process/launch.cc',
- 'process/launch_posix.cc',
- 'process/process_metrics.cc',
- 'process/process_metrics_posix.cc',
- 'process/process_posix.cc',
- 'rand_util_posix.cc',
- 'scoped_native_library.cc',
- 'synchronization/read_write_lock_posix.cc',
- 'sys_info.cc',
- 'sys_info_posix.cc',
- 'third_party/dynamic_annotations/dynamic_annotations.c',
- ],
- 'sources/': [
- ['include', '^threading/platform_thread_linux\\.cc$'],
- ],
- }],
- ['OS == "android" and >(nacl_untrusted_build)==0', {
- 'sources!': [
- 'base_paths_posix.cc',
- 'files/file_path_watcher_fsevents.cc',
- 'files/file_path_watcher_fsevents.h',
- 'files/file_path_watcher_kqueue.cc',
- 'files/file_path_watcher_kqueue.h',
- 'files/file_path_watcher_stub.cc',
- 'power_monitor/power_monitor_device_source_posix.cc',
- ],
- 'sources/': [
- ['include', '^debug/proc_maps_linux\\.cc$'],
- ['include', '^files/file_path_watcher_linux\\.cc$'],
- ['include', '^process/memory_linux\\.cc$'],
- ['include', '^process/internal_linux\\.cc$'],
- ['include', '^process/process_handle_linux\\.cc$'],
- ['include', '^process/process_iterator\\.cc$'],
- ['include', '^process/process_iterator_linux\\.cc$'],
- ['include', '^process/process_metrics_linux\\.cc$'],
- ['include', '^posix/unix_domain_socket_linux\\.cc$'],
- ['include', '^strings/sys_string_conversions_posix\\.cc$'],
- ['include', '^sys_info_linux\\.cc$'],
- ['include', '^worker_pool_linux\\.cc$'],
- ],
- }],
- ['OS == "android" and _toolset == "host" and host_os == "linux"', {
- 'sources/': [
- # Pull in specific files for host builds.
- ['include', '^threading/platform_thread_linux\\.cc$'],
- ],
- }],
- ['<(chromeos) == 1', {
- 'sources!': [
- 'power_monitor/power_monitor_device_source_posix.cc',
- ],
- }],
- ['OS == "ios" and _toolset != "host"', {
- 'sources/': [
- # Pull in specific Mac files for iOS (which have been filtered out
- # by file name rules).
- ['include', '^base_paths_mac\\.'],
- ['include', '^files/file_util_mac\\.'],
- ['include', '^file_version_info_mac\\.'],
- ['include', '^mac/bundle_locations\\.'],
- ['include', '^mac/call_with_eh_frame\\.'],
- ['include', '^mac/foundation_util\\.'],
- ['include', '^mac/mac_logging\\.'],
- ['include', '^mac/mach_logging\\.'],
- ['include', '^mac/objc_property_releaser\\.'],
- ['include', '^mac/scoped_block\\.'],
- ['include', '^mac/scoped_mach_port\\.'],
- ['include', '^mac/scoped_mach_vm\\.'],
- ['include', '^mac/scoped_nsautorelease_pool\\.'],
- ['include', '^mac/scoped_nsobject\\.'],
- ['include', '^mac/scoped_objc_class_swizzler\\.'],
- ['include', '^memory/shared_memory_posix\\.'],
- ['include', '^message_loop/message_pump_mac\\.'],
- ['include', '^strings/sys_string_conversions_mac\\.'],
- ['include', '^threading/platform_thread_mac\\.'],
- ['include', '^time/time_mac\\.'],
- ['include', '^worker_pool_mac\\.'],
- # Exclude all process/ except the minimal implementation
- # needed on iOS (mostly for unit tests).
- ['exclude', '^process/.*'],
- ['include', '^process/.*_ios\.(cc|mm)$'],
- ['include', '^process/memory_stubs\.cc$'],
- ['include', '^process/process_handle_posix\.cc$'],
- ['include', '^process/process_metrics\\.cc$'],
- # Exclude unsupported features on iOS.
- ['exclude', '^files/file_path_watcher.*'],
- ['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
- ['exclude', '^trace_event/malloc_dump_provider\\.(h|cc)$'],
- ],
- 'sources': [
- 'process/memory_stubs.cc',
- ],
- 'sources!': [
- 'message_loop/message_pump_libevent.cc'
- ],
- }],
- ['OS == "ios" and _toolset == "host"', {
- 'sources/': [
- # Copied filename_rules to switch from iOS to Mac inclusions.
- ['include', '_(cocoa|mac)(_unittest)?\\.(h|cc|mm?)$'],
- ['include', '(^|/)(cocoa|mac)/'],
- ['exclude', '_ios(_unittest)?\\.(h|cc|mm?)$'],
- ['exclude', '(^|/)ios/'],
- ['exclude', 'files/file_path_watcher_fsevents.cc'],
- ['exclude', 'files/file_path_watcher_fsevents.h'],
- ['include', 'files/file_path_watcher_mac.cc'],
- ]
- }],
- # For now, just test the *BSD platforms enough to exclude them.
-          # Subsequent changes will support them more fully.
- ['OS != "freebsd" or >(nacl_untrusted_build)==1', {
- 'sources/': [ ['exclude', '_freebsd\\.cc$'] ],
- },
- ],
- ['OS != "openbsd" or >(nacl_untrusted_build)==1', {
- 'sources/': [ ['exclude', '_openbsd\\.cc$'] ],
- },
- ],
- ['OS == "win" and >(nacl_untrusted_build)==0', {
- 'include_dirs': [
- '<(DEPTH)/third_party/wtl/include',
- ],
- 'sources': [
- 'profiler/win32_stack_frame_unwinder.cc',
- 'profiler/win32_stack_frame_unwinder.h',
- ],
- 'sources!': [
- 'files/file_path_watcher_fsevents.cc',
- 'files/file_path_watcher_fsevents.h',
- 'files/file_path_watcher_kqueue.cc',
- 'files/file_path_watcher_kqueue.h',
- 'files/file_path_watcher_stub.cc',
- 'message_loop/message_pump_libevent.cc',
- 'posix/file_descriptor_shuffle.cc',
- 'strings/string16.cc',
- ],
- },],
- ['<(use_ozone) == 1', {
- 'sources!': [
- 'message_loop/message_pump_glib.cc',
- ]
- }],
- ['OS == "linux" and >(nacl_untrusted_build)==0', {
- 'sources!': [
- 'files/file_path_watcher_fsevents.cc',
- 'files/file_path_watcher_fsevents.h',
- 'files/file_path_watcher_kqueue.cc',
- 'files/file_path_watcher_kqueue.h',
- 'files/file_path_watcher_stub.cc',
- ],
- }],
- ['(OS == "mac" or OS == "ios") and >(nacl_untrusted_build)==0', {
- 'sources/': [
- ['exclude', '^base_paths_posix\\.cc$'],
- ['exclude', '^files/file_path_watcher_stub\\.cc$'],
- ['exclude', '^native_library_posix\\.cc$'],
- ['exclude', '^strings/sys_string_conversions_posix\\.cc$'],
- ['exclude', '^threading/platform_thread_internal_posix\\.cc$'],
- ],
- }],
- ['<(os_bsd)==1 and >(nacl_untrusted_build)==0', {
- 'sources': [
- 'process/memory_stubs.cc',
- ],
- 'sources/': [
- ['exclude', '^files/file_path_watcher_linux\\.cc$'],
- ['exclude', '^files/file_path_watcher_stub\\.cc$'],
- ['exclude', '^files/file_util_linux\\.cc$'],
- ['exclude', '^process/process_linux\\.cc$'],
- ['exclude', '^sys_info_linux\\.cc$'],
- ],
- }],
-          # Remove all unnecessary files for build_nexe.py to avoid exceeding the
-          # command-line string length limit when building NaCl on Windows.
- ['OS == "win" and >(nacl_untrusted_build)==1', {
- 'sources/': [ ['exclude', '\\.h$'] ],
- }],
-          # Enable more direct string conversions on platforms with native
-          # UTF-8 strings.
- ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
- 'defines': ['SYSTEM_NATIVE_UTF8'],
- }],
- ],
- }],
- ['base_i18n_target==1', {
- 'defines': [
- 'BASE_I18N_IMPLEMENTATION',
- ],
- 'sources': [
- 'i18n/base_i18n_export.h',
- 'i18n/base_i18n_switches.cc',
- 'i18n/base_i18n_switches.h',
- 'i18n/bidi_line_iterator.cc',
- 'i18n/bidi_line_iterator.h',
- 'i18n/break_iterator.cc',
- 'i18n/break_iterator.h',
- 'i18n/case_conversion.cc',
- 'i18n/case_conversion.h',
- 'i18n/char_iterator.cc',
- 'i18n/char_iterator.h',
- 'i18n/file_util_icu.cc',
- 'i18n/file_util_icu.h',
- 'i18n/i18n_constants.cc',
- 'i18n/i18n_constants.h',
- 'i18n/icu_encoding_detection.cc',
- 'i18n/icu_encoding_detection.h',
- 'i18n/icu_string_conversions.cc',
- 'i18n/icu_string_conversions.h',
- 'i18n/icu_util.cc',
- 'i18n/icu_util.h',
- 'i18n/message_formatter.cc',
- 'i18n/message_formatter.h',
- 'i18n/number_formatting.cc',
- 'i18n/number_formatting.h',
- 'i18n/rtl.cc',
- 'i18n/rtl.h',
- 'i18n/streaming_utf8_validator.cc',
- 'i18n/streaming_utf8_validator.h',
- 'i18n/string_compare.cc',
- 'i18n/string_compare.h',
- 'i18n/string_search.cc',
- 'i18n/string_search.h',
- 'i18n/time_formatting.cc',
- 'i18n/time_formatting.h',
- 'i18n/timezone.cc',
- 'i18n/timezone.h',
- 'i18n/utf8_validator_tables.cc',
- 'i18n/utf8_validator_tables.h',
- ],
- }]
- ],
- },
-}
diff --git a/base/base_nacl.gyp b/base/base_nacl.gyp
deleted file mode 100644
index 30763d4813..0000000000
--- a/base/base_nacl.gyp
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- # base.gypi must be included before common_untrusted.gypi.
- #
- # TODO(sergeyu): Replace the target_defaults magic in base.gypi with a
- # sources variables lists. That way order of includes will not matter.
- 'base.gypi',
- '../build/common_untrusted.gypi',
- ],
- 'conditions': [
- ['disable_nacl==0 and disable_nacl_untrusted==0', {
- 'targets': [
- {
- 'target_name': 'base_nacl',
- 'type': 'none',
- 'variables': {
- 'base_target': 1,
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libbase_nacl.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 1,
- 'build_pnacl_newlib': 1,
- 'sources': [
- 'base_switches.cc',
- 'base_switches.h',
- 'strings/string16.cc',
- 'sync_socket_nacl.cc',
- 'time/time_posix.cc',
- ],
- 'compile_flags': [
- '-fno-strict-aliasing',
- ],
- },
- 'dependencies': [
- 'allocator/allocator.gyp:allocator_features#target',
- 'base.gyp:base_debugging_flags',
- 'base.gyp:base_build_date',
- ],
- },
- {
- 'target_name': 'base_i18n_nacl',
- 'type': 'none',
- 'variables': {
- 'base_i18n_target': 1,
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libbase_i18n_nacl.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 0,
- 'build_pnacl_newlib': 1,
- 'sources': [
- 'base_switches.cc',
- 'base_switches.h',
- 'strings/string16.cc',
- 'sync_socket_nacl.cc',
- 'time/time_posix.cc',
- ],
- },
- 'dependencies': [
- 'allocator/allocator.gyp:allocator_features#target',
- 'base.gyp:base_build_date',
- '../third_party/icu/icu_nacl.gyp:icudata_nacl',
- '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
- '../third_party/icu/icu_nacl.gyp:icuuc_nacl',
- ],
- },
- {
- 'target_name': 'base_nacl_nonsfi',
- 'type': 'none',
- 'variables': {
- 'base_target': 1,
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libbase_nacl_nonsfi.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 0,
- 'build_pnacl_newlib': 0,
- 'build_nonsfi_helper': 1,
-
- 'sources': [
- 'base_switches.cc',
- 'base_switches.h',
-
- # For PathExists and ReadFromFD.
- 'files/file_util.cc',
- 'files/file_util_posix.cc',
-
- # For MessageLoopForIO based on libevent.
- 'message_loop/message_pump_libevent.cc',
- 'message_loop/message_pump_libevent.h',
-
- # For UnixDomainSocket::SendMsg and RecvMsg.
- 'posix/unix_domain_socket_linux.cc',
-
- # For GetKnownDeadTerminationStatus and GetTerminationStatus.
- 'process/kill_posix.cc',
-
- # For ForkWithFlags.
- 'process/launch.h',
- 'process/launch_posix.cc',
-
- # Unlike libbase_nacl, for Non-SFI build, we need to use
- # rand_util_posix for random implementation, instead of
- # rand_util_nacl.cc, which is based on IRT. rand_util_nacl.cc is
- # excluded below.
- 'rand_util_posix.cc',
-
- # For CancelableSyncSocket.
- 'sync_socket_nacl.cc',
- ],
- },
- 'sources!': [
- 'rand_util_nacl.cc',
- ],
- 'dependencies': [
- 'allocator/allocator.gyp:allocator_features#target',
- 'base.gyp:base_debugging_flags',
- 'base.gyp:base_build_date',
- 'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
- ],
- },
- {
- 'target_name': 'test_support_base_nacl_nonsfi',
- 'type': 'none',
- 'variables': {
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libtest_support_base_nacl_nonsfi.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 0,
- 'build_pnacl_newlib': 0,
- 'build_nonsfi_helper': 1,
-
- 'sources': [
- 'test/gtest_util.cc',
- 'test/launcher/unit_test_launcher_nacl_nonsfi.cc',
- 'test/gtest_xml_unittest_result_printer.cc',
- 'test/test_switches.cc',
- ],
- },
- 'dependencies': [
- 'base.gyp:base_build_date',
- 'base_nacl_nonsfi',
- '../testing/gtest_nacl.gyp:gtest_nacl',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/base/base_switches.cc b/base/base_switches.cc
index f5c6eb3f59..e8aa5cbc4d 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -24,6 +24,12 @@ const char kEnableHeapProfiling[] = "enable-heap-profiling";
// derived from trace events are reported.
const char kEnableHeapProfilingModeNative[] = "native";
+// Report per-task heap usage and churn in the task profiler.
+// Unlike the default and native modes, this does not track individual
+// allocations; it only keeps summarized churn stats in the task profiler
+// (chrome://profiler).
+const char kEnableHeapProfilingTaskProfiler[] = "task-profiler";
+
// Generates full memory crash dump.
const char kFullMemoryCrashReport[] = "full-memory-crash-report";
diff --git a/base/base_switches.h b/base/base_switches.h
index 0585186038..04b0773057 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -16,6 +16,7 @@ extern const char kDisableLowEndDeviceMode[];
extern const char kEnableCrashReporter[];
extern const char kEnableHeapProfiling[];
extern const char kEnableHeapProfilingModeNative[];
+extern const char kEnableHeapProfilingTaskProfiler[];
extern const char kEnableLowEndDeviceMode[];
extern const char kForceFieldTrials[];
extern const char kFullMemoryCrashReport[];
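The new mode value piggybacks on the existing --enable-heap-profiling switch rather than adding a switch of its own. A minimal consumption sketch, assuming the usual switches:: declarations from base_switches.h; the helper name is invented, and the real consumer lives elsewhere in Chromium:

    #include "base/base_switches.h"
    #include "base/command_line.h"

    // Illustrative helper: true when --enable-heap-profiling=task-profiler
    // was passed on the command line.
    bool IsTaskProfilerHeapProfilingEnabled() {
      const base::CommandLine* cmd = base::CommandLine::ForCurrentProcess();
      return cmd->GetSwitchValueASCII(switches::kEnableHeapProfiling) ==
             switches::kEnableHeapProfilingTaskProfiler;
    }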
diff --git a/base/base_unittests.isolate b/base/base_unittests.isolate
deleted file mode 100644
index 208501fce8..0000000000
--- a/base/base_unittests.isolate
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- '../testing/test_env.py',
- '<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
- '--brave-new-test-launcher',
- '--test-launcher-bot-mode',
- '--asan=<(asan)',
- '--msan=<(msan)',
- '--tsan=<(tsan)',
- ],
- },
- 'conditions': [
- ['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
- 'variables': {
- 'files': [
- 'test/data/',
- ],
- },
- }],
- ['OS=="linux" or OS=="mac" or OS=="win"', {
- 'variables': {
- 'files': [
- '../testing/test_env.py',
- ],
- },
- }],
- ['OS=="linux"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/libmalloc_wrapper.so',
- ],
- },
- }],
- ['OS=="mac" and asan==1 and fastbuild==0', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/base_unittests.dSYM/',
- ],
- },
- }],
- ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/base_unittests.exe.pdb',
- ],
- },
- }],
- ],
- 'includes': [
- 'base.isolate',
- ],
-}
diff --git a/base/bind.h b/base/bind.h
index 9cf65b6776..ce717972e2 100644
--- a/base/bind.h
+++ b/base/bind.h
@@ -11,7 +11,7 @@
// Usage documentation
// -----------------------------------------------------------------------------
//
-// See base/callback.h for documentation.
+// See //docs/callback.md for documentation.
//
//
// -----------------------------------------------------------------------------
@@ -24,18 +24,58 @@
namespace base {
+// Bind as OnceCallback.
template <typename Functor, typename... Args>
-inline base::Callback<MakeUnboundRunType<Functor, Args...>> Bind(
- Functor&& functor,
- Args&&... args) {
+inline OnceCallback<MakeUnboundRunType<Functor, Args...>>
+BindOnce(Functor&& functor, Args&&... args) {
using BindState = internal::MakeBindStateType<Functor, Args...>;
using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
using Invoker = internal::Invoker<BindState, UnboundRunType>;
+ using CallbackType = OnceCallback<UnboundRunType>;
- using CallbackType = Callback<UnboundRunType>;
- return CallbackType(new BindState(std::forward<Functor>(functor),
- std::forward<Args>(args)...),
- &Invoker::Run);
+ // Store the invoke func into PolymorphicInvoke before casting it to
+ // InvokeFuncStorage, so that we can ensure its type matches to
+ // PolymorphicInvoke, to which CallbackType will cast back.
+ using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
+ PolymorphicInvoke invoke_func = &Invoker::RunOnce;
+
+ using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
+ return CallbackType(new BindState(
+ reinterpret_cast<InvokeFuncStorage>(invoke_func),
+ std::forward<Functor>(functor),
+ std::forward<Args>(args)...));
+}
+
+// Bind as RepeatingCallback.
+template <typename Functor, typename... Args>
+inline RepeatingCallback<MakeUnboundRunType<Functor, Args...>>
+BindRepeating(Functor&& functor, Args&&... args) {
+ using BindState = internal::MakeBindStateType<Functor, Args...>;
+ using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+ using Invoker = internal::Invoker<BindState, UnboundRunType>;
+ using CallbackType = RepeatingCallback<UnboundRunType>;
+
+ // Store the invoke func into PolymorphicInvoke before casting it to
+ // InvokeFuncStorage, so that we can ensure its type matches to
+ // PolymorphicInvoke, to which CallbackType will cast back.
+ using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
+ PolymorphicInvoke invoke_func = &Invoker::Run;
+
+ using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
+ return CallbackType(new BindState(
+ reinterpret_cast<InvokeFuncStorage>(invoke_func),
+ std::forward<Functor>(functor),
+ std::forward<Args>(args)...));
+}
+
+// Unannotated Bind.
+// TODO(tzik): Deprecate this and migrate to OnceCallback and
+// RepeatingCallback, once they get ready.
+template <typename Functor, typename... Args>
+inline Callback<MakeUnboundRunType<Functor, Args...>>
+Bind(Functor&& functor, Args&&... args) {
+ return BindRepeating(std::forward<Functor>(functor),
+ std::forward<Args>(args)...);
}
} // namespace base
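The three entry points now relate as follows: BindOnce() yields a single-shot OnceCallback, BindRepeating() yields a reusable RepeatingCallback, and unannotated Bind() simply forwards to BindRepeating(). A short usage sketch; Sum is a stand-in free function:

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"

    int Sum(int a, int b, int c) { return a + b + c; }

    void Demo() {
      // BindOnce() produces a move-only callback that runs at most once.
      base::OnceCallback<int(int)> once = base::BindOnce(&Sum, 1, 2);
      int six = std::move(once).Run(3);  // |once| is null afterwards.
      (void)six;

      // BindRepeating() produces a copyable callback that may run many times.
      base::RepeatingCallback<int(int)> rep = base::BindRepeating(&Sum, 1, 2);
      rep.Run(3);
      rep.Run(4);

      // Unannotated Bind() behaves exactly like BindRepeating() for now.
      base::Callback<int(int)> legacy = base::Bind(&Sum, 1, 2);
      legacy.Run(3);
    }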
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index 93d02e37a9..7b3d7d3474 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -21,7 +21,7 @@
// Owned() transfers ownership of an object to the Callback resulting from
// bind; the object will be deleted when the Callback is deleted.
//
-// Passed() is for transferring movable-but-not-copyable types (eg. scoped_ptr)
+// Passed() is for transferring movable-but-not-copyable types (e.g. unique_ptr)
// through a Callback. Logically, this signifies a destructive transfer of
// the state of the argument into the target function. Invoking
// Callback::Run() twice on a Callback that was created with a Passed()
@@ -174,8 +174,14 @@ namespace base {
template <typename T>
struct IsWeakReceiver;
+template <typename>
+struct BindUnwrapTraits;
+
namespace internal {
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
template <typename T>
class UnretainedWrapper {
public:
@@ -275,35 +281,12 @@ class PassedWrapper {
mutable T scoper_;
};
-// Unwrap the stored parameters for the wrappers above.
-template <typename T>
-T&& Unwrap(T&& o) {
- return std::forward<T>(o);
-}
-
-template <typename T>
-T* Unwrap(const UnretainedWrapper<T>& unretained) {
- return unretained.get();
-}
-
-template <typename T>
-const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
- return const_ref.get();
-}
-
template <typename T>
-T* Unwrap(const RetainedRefWrapper<T>& o) {
- return o.get();
-}
+using Unwrapper = BindUnwrapTraits<typename std::decay<T>::type>;
template <typename T>
-T* Unwrap(const OwnedWrapper<T>& o) {
- return o.get();
-}
-
-template <typename T>
-T Unwrap(const PassedWrapper<T>& o) {
- return o.Take();
+auto Unwrap(T&& o) -> decltype(Unwrapper<T>::Unwrap(std::forward<T>(o))) {
+ return Unwrapper<T>::Unwrap(std::forward<T>(o));
}
 // IsWeakMethod is a helper that determines if we are binding a WeakPtr<> to a
@@ -497,6 +480,92 @@ struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
template <typename T>
struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+// An injection point to control how bound objects are passed to the target
+// function. BindUnwrapTraits<>::Unwrap() is called for each bound object
+// right before the target function is invoked.
+template <typename>
+struct BindUnwrapTraits {
+ template <typename T>
+ static T&& Unwrap(T&& o) { return std::forward<T>(o); }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::UnretainedWrapper<T>> {
+ static T* Unwrap(const internal::UnretainedWrapper<T>& o) {
+ return o.get();
+ }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::ConstRefWrapper<T>> {
+ static const T& Unwrap(const internal::ConstRefWrapper<T>& o) {
+ return o.get();
+ }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::RetainedRefWrapper<T>> {
+ static T* Unwrap(const internal::RetainedRefWrapper<T>& o) {
+ return o.get();
+ }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::OwnedWrapper<T>> {
+ static T* Unwrap(const internal::OwnedWrapper<T>& o) {
+ return o.get();
+ }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::PassedWrapper<T>> {
+ static T Unwrap(const internal::PassedWrapper<T>& o) {
+ return o.Take();
+ }
+};
+
+// CallbackCancellationTraits allows customization of Callback's cancellation
+// semantics. By default, callbacks are not cancellable. A specialization should
+// set is_cancellable = true and implement an IsCancelled() that returns
+// whether the callback should be cancelled.
+template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
+struct CallbackCancellationTraits {
+ static constexpr bool is_cancellable = false;
+};
+
+// Specialization for method bound to weak pointer receiver.
+template <typename Functor, typename... BoundArgs>
+struct CallbackCancellationTraits<
+ Functor,
+ std::tuple<BoundArgs...>,
+ typename std::enable_if<
+ internal::IsWeakMethod<internal::FunctorTraits<Functor>::is_method,
+ BoundArgs...>::value>::type> {
+ static constexpr bool is_cancellable = true;
+
+ template <typename Receiver, typename... Args>
+ static bool IsCancelled(const Functor&,
+ const Receiver& receiver,
+ const Args&...) {
+ return !receiver;
+ }
+};
+
+// Specialization for a nested bind.
+template <typename Signature,
+ typename... BoundArgs,
+ internal::CopyMode copy_mode,
+ internal::RepeatMode repeat_mode>
+struct CallbackCancellationTraits<Callback<Signature, copy_mode, repeat_mode>,
+ std::tuple<BoundArgs...>> {
+ static constexpr bool is_cancellable = true;
+
+ template <typename Functor>
+ static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+ return functor.IsCancelled();
+ }
+};
+
} // namespace base
#endif // BASE_BIND_HELPERS_H_
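BindUnwrapTraits replaces the old closed set of Unwrap() overloads with an open customization point, so wrapper types outside //base can participate. A hedged sketch of a user-side specialization; DerefWrapper is invented for illustration:

    #include "base/bind.h"

    // Hypothetical wrapper type; not part of //base.
    template <typename T>
    struct DerefWrapper {
      T* ptr;
    };

    namespace base {

    // Teach Bind() to hand the pointee, not the wrapper, to the target.
    // Unwrap() runs for each bound argument right before invocation.
    template <typename T>
    struct BindUnwrapTraits<DerefWrapper<T>> {
      static T& Unwrap(const DerefWrapper<T>& w) { return *w.ptr; }
    };

    }  // namespace base

With this in place, Bind(&TakesInt, DerefWrapper<int>{&n}) would pass n itself to TakesInt (assuming a TakesInt(int) free function).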
diff --git a/base/bind_internal.h b/base/bind_internal.h
index 3d6ca09c41..8988bdca22 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -130,7 +130,7 @@ struct ForceVoidReturn<R(Args...)> {
// FunctorTraits<>
//
// See description at top of file.
-template <typename Functor, typename SFINAE = void>
+template <typename Functor, typename SFINAE>
struct FunctorTraits;
// For a callable type that is convertible to the corresponding function type.
@@ -244,14 +244,16 @@ struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
template <typename IgnoreResultType, typename... RunArgs>
static void Invoke(IgnoreResultType&& ignore_result_helper,
RunArgs&&... args) {
- FunctorTraits<T>::Invoke(ignore_result_helper.functor_,
- std::forward<RunArgs>(args)...);
+ FunctorTraits<T>::Invoke(
+ std::forward<IgnoreResultType>(ignore_result_helper).functor_,
+ std::forward<RunArgs>(args)...);
}
};
// For Callbacks.
-template <typename R, typename... Args, CopyMode copy_mode>
-struct FunctorTraits<Callback<R(Args...), copy_mode>> {
+template <typename R, typename... Args,
+ CopyMode copy_mode, RepeatMode repeat_mode>
+struct FunctorTraits<Callback<R(Args...), copy_mode, repeat_mode>> {
using RunType = R(Args...);
static constexpr bool is_method = false;
static constexpr bool is_nullable = true;
@@ -314,6 +316,19 @@ struct Invoker;
template <typename StorageType, typename R, typename... UnboundArgs>
struct Invoker<StorageType, R(UnboundArgs...)> {
+ static R RunOnce(BindStateBase* base, UnboundArgs&&... unbound_args) {
+ // Local references to make debugger stepping easier. If in a debugger,
+ // you really want to warp ahead and step through the
+ // InvokeHelper<>::MakeItSo() call below.
+ StorageType* storage = static_cast<StorageType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return RunImpl(std::move(storage->functor_),
+ std::move(storage->bound_args_),
+ MakeIndexSequence<num_bound_args>(),
+ std::forward<UnboundArgs>(unbound_args)...);
+ }
+
static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
@@ -372,27 +387,102 @@ IsNull(const Functor&) {
return false;
}
+// Used by ApplyCancellationTraits below.
+template <typename Functor, typename BoundArgsTuple, size_t... indices>
+bool ApplyCancellationTraitsImpl(const Functor& functor,
+ const BoundArgsTuple& bound_args,
+ IndexSequence<indices...>) {
+ return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
+ functor, base::get<indices>(bound_args)...);
+}
+
+// Relays |base| to the corresponding
+// CallbackCancellationTraits<>::IsCancelled(). Returns true if the callback
+// that |base| represents is cancelled.
+template <typename BindStateType>
+bool ApplyCancellationTraits(const BindStateBase* base) {
+ const BindStateType* storage = static_cast<const BindStateType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return ApplyCancellationTraitsImpl(storage->functor_, storage->bound_args_,
+ MakeIndexSequence<num_bound_args>());
+}
+
+// Template helpers to detect using Bind() on a base::Callback without any
+// additional arguments. In that case, the original base::Callback object should
+// just be directly used.
+template <typename Functor, typename... BoundArgs>
+struct BindingCallbackWithNoArgs {
+ static constexpr bool value = false;
+};
+
+template <typename Signature,
+ typename... BoundArgs,
+ CopyMode copy_mode,
+ RepeatMode repeat_mode>
+struct BindingCallbackWithNoArgs<Callback<Signature, copy_mode, repeat_mode>,
+ BoundArgs...> {
+ static constexpr bool value = sizeof...(BoundArgs) == 0;
+};
+
// BindState<>
//
// This stores all the state passed into Bind().
template <typename Functor, typename... BoundArgs>
struct BindState final : BindStateBase {
+ using IsCancellable = std::integral_constant<
+ bool,
+ CallbackCancellationTraits<Functor,
+ std::tuple<BoundArgs...>>::is_cancellable>;
+
template <typename ForwardFunctor, typename... ForwardBoundArgs>
- explicit BindState(ForwardFunctor&& functor, ForwardBoundArgs&&... bound_args)
- : BindStateBase(&Destroy),
- functor_(std::forward<ForwardFunctor>(functor)),
- bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
- DCHECK(!IsNull(functor_));
+ explicit BindState(BindStateBase::InvokeFuncStorage invoke_func,
+ ForwardFunctor&& functor,
+ ForwardBoundArgs&&... bound_args)
+ // IsCancellable is std::false_type if
+      // CallbackCancellationTraits<>::IsCancelled always returns false.
+ // Otherwise, it's std::true_type.
+ : BindState(IsCancellable{},
+ invoke_func,
+ std::forward<ForwardFunctor>(functor),
+ std::forward<ForwardBoundArgs>(bound_args)...) {
+ static_assert(!BindingCallbackWithNoArgs<Functor, BoundArgs...>::value,
+ "Attempting to bind a base::Callback with no additional "
+ "arguments: save a heap allocation and use the original "
+ "base::Callback object");
}
Functor functor_;
std::tuple<BoundArgs...> bound_args_;
private:
+ template <typename ForwardFunctor, typename... ForwardBoundArgs>
+ explicit BindState(std::true_type,
+ BindStateBase::InvokeFuncStorage invoke_func,
+ ForwardFunctor&& functor,
+ ForwardBoundArgs&&... bound_args)
+ : BindStateBase(invoke_func,
+ &Destroy,
+ &ApplyCancellationTraits<BindState>),
+ functor_(std::forward<ForwardFunctor>(functor)),
+ bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+ DCHECK(!IsNull(functor_));
+ }
+
+ template <typename ForwardFunctor, typename... ForwardBoundArgs>
+ explicit BindState(std::false_type,
+ BindStateBase::InvokeFuncStorage invoke_func,
+ ForwardFunctor&& functor,
+ ForwardBoundArgs&&... bound_args)
+ : BindStateBase(invoke_func, &Destroy),
+ functor_(std::forward<ForwardFunctor>(functor)),
+ bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+ DCHECK(!IsNull(functor_));
+ }
+
~BindState() {}
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
+ static void Destroy(const BindStateBase* self) {
+ delete static_cast<const BindState*>(self);
}
};
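One user-visible consequence of the reworked BindState constructor is the new static_assert: rebinding a base::Callback with no extra arguments is now a compile error instead of a silent extra heap allocation. A sketch of the outlawed pattern:

    #include "base/bind.h"
    #include "base/callback.h"

    void Tick() {}

    void Demo() {
      base::Closure cb = base::Bind(&Tick);
      // base::Closure copy = base::Bind(cb);  // Now fails to compile:
      //                                       // use |cb| directly instead.
      base::Closure copy = cb;  // Plain copy; no extra BindState allocated.
      copy.Run();
    }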
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index ba5113b507..a9ca9d2538 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -13,11 +13,14 @@
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/test/gtest_util.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::_;
using ::testing::Mock;
+using ::testing::ByMove;
using ::testing::Return;
using ::testing::StrictMock;
@@ -36,6 +39,9 @@ class NoRef {
MOCK_METHOD0(IntMethod0, int());
MOCK_CONST_METHOD0(IntConstMethod0, int());
+ MOCK_METHOD1(VoidMethodWithIntArg, void(int));
+ MOCK_METHOD0(UniquePtrMethod0, std::unique_ptr<int>());
+
private:
// Particularly important in this test to ensure no copies are made.
DISALLOW_COPY_AND_ASSIGN(NoRef);
@@ -345,53 +351,28 @@ class BindTest : public ::testing::Test {
};
StrictMock<NoRef>* BindTest::static_func_mock_ptr;
+StrictMock<NoRef>* g_func_mock_ptr;
-// Sanity check that we can instantiate a callback for each arity.
-TEST_F(BindTest, ArityTest) {
- Callback<int()> c0 = Bind(&Sum, 32, 16, 8, 4, 2, 1);
- EXPECT_EQ(63, c0.Run());
-
- Callback<int(int)> c1 = Bind(&Sum, 32, 16, 8, 4, 2);
- EXPECT_EQ(75, c1.Run(13));
-
- Callback<int(int,int)> c2 = Bind(&Sum, 32, 16, 8, 4);
- EXPECT_EQ(85, c2.Run(13, 12));
-
- Callback<int(int,int,int)> c3 = Bind(&Sum, 32, 16, 8);
- EXPECT_EQ(92, c3.Run(13, 12, 11));
-
- Callback<int(int,int,int,int)> c4 = Bind(&Sum, 32, 16);
- EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
-
- Callback<int(int,int,int,int,int)> c5 = Bind(&Sum, 32);
- EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
-
- Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
- EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
+void VoidFunc0() {
+ g_func_mock_ptr->VoidMethod0();
}
-// Test the Currying ability of the Callback system.
-TEST_F(BindTest, CurryingTest) {
- Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
- EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
-
- Callback<int(int,int,int,int,int)> c5 = Bind(c6, 32);
- EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
-
- Callback<int(int,int,int,int)> c4 = Bind(c5, 16);
- EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
+int IntFunc0() {
+ return g_func_mock_ptr->IntMethod0();
+}
- Callback<int(int,int,int)> c3 = Bind(c4, 8);
- EXPECT_EQ(92, c3.Run(13, 12, 11));
+TEST_F(BindTest, BasicTest) {
+ Callback<int(int, int, int)> cb = Bind(&Sum, 32, 16, 8);
+ EXPECT_EQ(92, cb.Run(13, 12, 11));
- Callback<int(int,int)> c2 = Bind(c3, 4);
- EXPECT_EQ(85, c2.Run(13, 12));
+ Callback<int(int, int, int, int, int, int)> c1 = Bind(&Sum);
+ EXPECT_EQ(69, c1.Run(14, 13, 12, 11, 10, 9));
- Callback<int(int)> c1 = Bind(c2, 2);
- EXPECT_EQ(75, c1.Run(13));
+ Callback<int(int, int, int)> c2 = Bind(c1, 32, 16, 8);
+ EXPECT_EQ(86, c2.Run(11, 10, 9));
- Callback<int()> c0 = Bind(c1, 1);
- EXPECT_EQ(63, c0.Run());
+ Callback<int()> c3 = Bind(c2, 4, 2, 1);
+ EXPECT_EQ(63, c3.Run());
}
// Test that currying the rvalue result of another Bind() works correctly.
@@ -399,7 +380,8 @@ TEST_F(BindTest, CurryingTest) {
// - multiple runs of resulting Callback remain valid.
TEST_F(BindTest, CurryingRvalueResultOfBind) {
int n = 0;
- Closure cb = base::Bind(&TakesACallback, base::Bind(&PtrArgSet, &n));
+ RepeatingClosure cb = BindRepeating(&TakesACallback,
+ BindRepeating(&PtrArgSet, &n));
// If we implement Bind() such that the return value has auto_ptr-like
// semantics, the second call here will fail because ownership of
@@ -413,76 +395,45 @@ TEST_F(BindTest, CurryingRvalueResultOfBind) {
EXPECT_EQ(2, n);
}
-// Function type support.
-// - Normal function.
-// - Normal function bound with non-refcounted first argument.
-// - Method bound to non-const object.
-// - Method bound to scoped_refptr.
-// - Const method bound to non-const object.
-// - Const method bound to const object.
-// - Derived classes can be used with pointers to non-virtual base functions.
-// - Derived classes can be used with pointers to virtual base functions (and
-// preserve virtual dispatch).
-TEST_F(BindTest, FunctionTypeSupport) {
- EXPECT_CALL(static_func_mock_, VoidMethod0());
- EXPECT_CALL(has_ref_, AddRef()).Times(4);
- EXPECT_CALL(has_ref_, Release()).Times(4);
- EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
- EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
-
- Closure normal_cb = Bind(&VoidFunc0);
- Callback<NoRef*()> normal_non_refcounted_cb =
- Bind(&PolymorphicIdentity<NoRef*>, &no_ref_);
- normal_cb.Run();
- EXPECT_EQ(&no_ref_, normal_non_refcounted_cb.Run());
-
- Closure method_cb = Bind(&HasRef::VoidMethod0, &has_ref_);
- Closure method_refptr_cb = Bind(&HasRef::VoidMethod0,
- make_scoped_refptr(&has_ref_));
- Closure const_method_nonconst_obj_cb = Bind(&HasRef::VoidConstMethod0,
- &has_ref_);
- Closure const_method_const_obj_cb = Bind(&HasRef::VoidConstMethod0,
- const_has_ref_ptr_);
- method_cb.Run();
- method_refptr_cb.Run();
- const_method_nonconst_obj_cb.Run();
- const_method_const_obj_cb.Run();
+TEST_F(BindTest, RepeatingCallbackBasicTest) {
+ RepeatingCallback<int(int)> c0 = BindRepeating(&Sum, 1, 2, 4, 8, 16);
- Child child;
- child.value = 0;
- Closure virtual_set_cb = Bind(&Parent::VirtualSet, &child);
- virtual_set_cb.Run();
- EXPECT_EQ(kChildValue, child.value);
+  // RepeatingCallback can run via an lvalue reference.
+ EXPECT_EQ(63, c0.Run(32));
- child.value = 0;
- Closure non_virtual_set_cb = Bind(&Parent::NonVirtualSet, &child);
- non_virtual_set_cb.Run();
- EXPECT_EQ(kParentValue, child.value);
+ // It is valid to call a RepeatingCallback more than once.
+ EXPECT_EQ(54, c0.Run(23));
+
+ // BindRepeating can handle a RepeatingCallback as the target functor.
+ RepeatingCallback<int()> c1 = BindRepeating(c0, 11);
+
+  // RepeatingCallback can also run via an rvalue reference.
+ EXPECT_EQ(42, std::move(c1).Run());
+
+  // BindRepeating can handle an rvalue reference to a RepeatingCallback.
+ EXPECT_EQ(32, BindRepeating(std::move(c0), 1).Run());
}
-// Return value support.
-// - Function with return value.
-// - Method with return value.
-// - Const method with return value.
-TEST_F(BindTest, ReturnValues) {
- EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
- EXPECT_CALL(has_ref_, AddRef()).Times(3);
- EXPECT_CALL(has_ref_, Release()).Times(3);
- EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(31337));
- EXPECT_CALL(has_ref_, IntConstMethod0())
- .WillOnce(Return(41337))
- .WillOnce(Return(51337));
+TEST_F(BindTest, OnceCallbackBasicTest) {
+ OnceCallback<int(int)> c0 = BindOnce(&Sum, 1, 2, 4, 8, 16);
+
+  // OnceCallback can run via an rvalue reference.
+ EXPECT_EQ(63, std::move(c0).Run(32));
+
+  // After running via an rvalue reference, the value of the OnceCallback
+ // is undefined. The implementation simply clears the instance after the
+ // invocation.
+ EXPECT_TRUE(c0.is_null());
- Callback<int()> normal_cb = Bind(&IntFunc0);
- Callback<int()> method_cb = Bind(&HasRef::IntMethod0, &has_ref_);
- Callback<int()> const_method_nonconst_obj_cb =
- Bind(&HasRef::IntConstMethod0, &has_ref_);
- Callback<int()> const_method_const_obj_cb =
- Bind(&HasRef::IntConstMethod0, const_has_ref_ptr_);
- EXPECT_EQ(1337, normal_cb.Run());
- EXPECT_EQ(31337, method_cb.Run());
- EXPECT_EQ(41337, const_method_nonconst_obj_cb.Run());
- EXPECT_EQ(51337, const_method_const_obj_cb.Run());
+ c0 = BindOnce(&Sum, 2, 3, 5, 7, 11);
+
+  // BindOnce can handle an rvalue reference to an OnceCallback as the
+  // target functor.
+ OnceCallback<int()> c1 = BindOnce(std::move(c0), 13);
+ EXPECT_EQ(41, std::move(c1).Run());
+
+ RepeatingCallback<int(int)> c2 = BindRepeating(&Sum, 2, 3, 5, 7, 11);
+ EXPECT_EQ(41, BindOnce(c2, 13).Run());
}
// IgnoreResult adapter test.
@@ -491,7 +442,7 @@ TEST_F(BindTest, ReturnValues) {
// - Const Method with return.
// - Method with return value bound to WeakPtr<>.
// - Const Method with return bound to WeakPtr<>.
-TEST_F(BindTest, IgnoreResult) {
+TEST_F(BindTest, IgnoreResultForRepeating) {
EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
EXPECT_CALL(has_ref_, AddRef()).Times(2);
EXPECT_CALL(has_ref_, Release()).Times(2);
@@ -500,26 +451,28 @@ TEST_F(BindTest, IgnoreResult) {
EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
- Closure normal_func_cb = Bind(IgnoreResult(&IntFunc0));
+ RepeatingClosure normal_func_cb = BindRepeating(IgnoreResult(&IntFunc0));
normal_func_cb.Run();
- Closure non_void_method_cb =
- Bind(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+ RepeatingClosure non_void_method_cb =
+ BindRepeating(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
non_void_method_cb.Run();
- Closure non_void_const_method_cb =
- Bind(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+ RepeatingClosure non_void_const_method_cb =
+ BindRepeating(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
non_void_const_method_cb.Run();
WeakPtrFactory<NoRef> weak_factory(&no_ref_);
WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
- Closure non_void_weak_method_cb =
- Bind(IgnoreResult(&NoRef::IntMethod0), weak_factory.GetWeakPtr());
+ RepeatingClosure non_void_weak_method_cb =
+ BindRepeating(IgnoreResult(&NoRef::IntMethod0),
+ weak_factory.GetWeakPtr());
non_void_weak_method_cb.Run();
- Closure non_void_weak_const_method_cb =
- Bind(IgnoreResult(&NoRef::IntConstMethod0), weak_factory.GetWeakPtr());
+ RepeatingClosure non_void_weak_const_method_cb =
+ BindRepeating(IgnoreResult(&NoRef::IntConstMethod0),
+ weak_factory.GetWeakPtr());
non_void_weak_const_method_cb.Run();
weak_factory.InvalidateWeakPtrs();
@@ -527,128 +480,86 @@ TEST_F(BindTest, IgnoreResult) {
non_void_weak_method_cb.Run();
}
-// Argument binding tests.
-// - Argument binding to primitive.
-// - Argument binding to primitive pointer.
-// - Argument binding to a literal integer.
-// - Argument binding to a literal string.
-// - Argument binding with template function.
-// - Argument binding to an object.
-// - Argument binding to pointer to incomplete type.
-// - Argument gets type converted.
-// - Pointer argument gets converted.
-// - Const Reference forces conversion.
-TEST_F(BindTest, ArgumentBinding) {
- int n = 2;
-
- Callback<int()> bind_primitive_cb = Bind(&Identity, n);
- EXPECT_EQ(n, bind_primitive_cb.Run());
-
- Callback<int*()> bind_primitive_pointer_cb =
- Bind(&PolymorphicIdentity<int*>, &n);
- EXPECT_EQ(&n, bind_primitive_pointer_cb.Run());
-
- Callback<int()> bind_int_literal_cb = Bind(&Identity, 3);
- EXPECT_EQ(3, bind_int_literal_cb.Run());
-
- Callback<const char*()> bind_string_literal_cb =
- Bind(&CStringIdentity, "hi");
- EXPECT_STREQ("hi", bind_string_literal_cb.Run());
-
- Callback<int()> bind_template_function_cb =
- Bind(&PolymorphicIdentity<int>, 4);
- EXPECT_EQ(4, bind_template_function_cb.Run());
-
- NoRefParent p;
- p.value = 5;
- Callback<int()> bind_object_cb = Bind(&UnwrapNoRefParent, p);
- EXPECT_EQ(5, bind_object_cb.Run());
+TEST_F(BindTest, IgnoreResultForOnce) {
+ EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref_, AddRef()).Times(2);
+ EXPECT_CALL(has_ref_, Release()).Times(2);
+ EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+ EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
- IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
- Callback<IncompleteType*()> bind_incomplete_ptr_cb =
- Bind(&PolymorphicIdentity<IncompleteType*>, incomplete_ptr);
- EXPECT_EQ(incomplete_ptr, bind_incomplete_ptr_cb.Run());
+ OnceClosure normal_func_cb = BindOnce(IgnoreResult(&IntFunc0));
+ std::move(normal_func_cb).Run();
- NoRefChild c;
- c.value = 6;
- Callback<int()> bind_promotes_cb = Bind(&UnwrapNoRefParent, c);
- EXPECT_EQ(6, bind_promotes_cb.Run());
+ OnceClosure non_void_method_cb =
+ BindOnce(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+ std::move(non_void_method_cb).Run();
- c.value = 7;
- Callback<int()> bind_pointer_promotes_cb =
- Bind(&UnwrapNoRefParentPtr, &c);
- EXPECT_EQ(7, bind_pointer_promotes_cb.Run());
+ OnceClosure non_void_const_method_cb =
+ BindOnce(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+ std::move(non_void_const_method_cb).Run();
- c.value = 8;
- Callback<int()> bind_const_reference_promotes_cb =
- Bind(&UnwrapNoRefParentConstRef, c);
- EXPECT_EQ(8, bind_const_reference_promotes_cb.Run());
-}
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-// Unbound argument type support tests.
-// - Unbound value.
-// - Unbound pointer.
-// - Unbound reference.
-// - Unbound const reference.
-// - Unbound unsized array.
-// - Unbound sized array.
-// - Unbound array-of-arrays.
-TEST_F(BindTest, UnboundArgumentTypeSupport) {
- Callback<void(int)> unbound_value_cb = Bind(&VoidPolymorphic<int>::Run);
- Callback<void(int*)> unbound_pointer_cb = Bind(&VoidPolymorphic<int*>::Run);
- Callback<void(int&)> unbound_ref_cb = Bind(&VoidPolymorphic<int&>::Run);
- Callback<void(const int&)> unbound_const_ref_cb =
- Bind(&VoidPolymorphic<const int&>::Run);
- Callback<void(int[])> unbound_unsized_array_cb =
- Bind(&VoidPolymorphic<int[]>::Run);
- Callback<void(int[2])> unbound_sized_array_cb =
- Bind(&VoidPolymorphic<int[2]>::Run);
- Callback<void(int[][2])> unbound_array_of_arrays_cb =
- Bind(&VoidPolymorphic<int[][2]>::Run);
-
- Callback<void(int&)> unbound_ref_with_bound_arg =
- Bind(&VoidPolymorphic<int, int&>::Run, 1);
-}
+ OnceClosure non_void_weak_method_cb =
+ BindOnce(IgnoreResult(&NoRef::IntMethod0),
+ weak_factory.GetWeakPtr());
+ OnceClosure non_void_weak_const_method_cb =
+ BindOnce(IgnoreResult(&NoRef::IntConstMethod0),
+ weak_factory.GetWeakPtr());
-// Function with unbound reference parameter.
-// - Original parameter is modified by callback.
-TEST_F(BindTest, UnboundReferenceSupport) {
- int n = 0;
- Callback<void(int&)> unbound_ref_cb = Bind(&RefArgSet);
- unbound_ref_cb.Run(n);
- EXPECT_EQ(2, n);
+ weak_factory.InvalidateWeakPtrs();
+ std::move(non_void_weak_const_method_cb).Run();
+ std::move(non_void_weak_method_cb).Run();
}
// Functions that take reference parameters.
// - Forced reference parameter type still stores a copy.
// - Forced const reference parameter type still stores a copy.
-TEST_F(BindTest, ReferenceArgumentBinding) {
+TEST_F(BindTest, ReferenceArgumentBindingForRepeating) {
int n = 1;
int& ref_n = n;
const int& const_ref_n = n;
- Callback<int()> ref_copies_cb = Bind(&Identity, ref_n);
+ RepeatingCallback<int()> ref_copies_cb = BindRepeating(&Identity, ref_n);
EXPECT_EQ(n, ref_copies_cb.Run());
n++;
EXPECT_EQ(n - 1, ref_copies_cb.Run());
- Callback<int()> const_ref_copies_cb = Bind(&Identity, const_ref_n);
+ RepeatingCallback<int()> const_ref_copies_cb =
+ BindRepeating(&Identity, const_ref_n);
EXPECT_EQ(n, const_ref_copies_cb.Run());
n++;
EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
}
+TEST_F(BindTest, ReferenceArgumentBindingForOnce) {
+ int n = 1;
+ int& ref_n = n;
+ const int& const_ref_n = n;
+
+ OnceCallback<int()> ref_copies_cb = BindOnce(&Identity, ref_n);
+ n++;
+ EXPECT_EQ(n - 1, std::move(ref_copies_cb).Run());
+
+ OnceCallback<int()> const_ref_copies_cb =
+ BindOnce(&Identity, const_ref_n);
+ n++;
+ EXPECT_EQ(n - 1, std::move(const_ref_copies_cb).Run());
+}
+
// Check that we can pass in arrays and have them be stored as a pointer.
// - Array of values stores a pointer.
// - Array of const values stores a pointer.
-TEST_F(BindTest, ArrayArgumentBinding) {
+TEST_F(BindTest, ArrayArgumentBindingForRepeating) {
int array[4] = {1, 1, 1, 1};
const int (*const_array_ptr)[4] = &array;
- Callback<int()> array_cb = Bind(&ArrayGet, array, 1);
+ RepeatingCallback<int()> array_cb = BindRepeating(&ArrayGet, array, 1);
EXPECT_EQ(1, array_cb.Run());
- Callback<int()> const_array_cb = Bind(&ArrayGet, *const_array_ptr, 1);
+ RepeatingCallback<int()> const_array_cb =
+ BindRepeating(&ArrayGet, *const_array_ptr, 1);
EXPECT_EQ(1, const_array_cb.Run());
array[1] = 3;
@@ -656,25 +567,17 @@ TEST_F(BindTest, ArrayArgumentBinding) {
EXPECT_EQ(3, const_array_cb.Run());
}
-// Unretained() wrapper support.
-// - Method bound to Unretained() non-const object.
-// - Const method bound to Unretained() non-const object.
-// - Const method bound to Unretained() const object.
-TEST_F(BindTest, Unretained) {
- EXPECT_CALL(no_ref_, VoidMethod0());
- EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
-
- Callback<void()> method_cb =
- Bind(&NoRef::VoidMethod0, Unretained(&no_ref_));
- method_cb.Run();
+TEST_F(BindTest, ArrayArgumentBindingForOnce) {
+ int array[4] = {1, 1, 1, 1};
+ const int (*const_array_ptr)[4] = &array;
- Callback<void()> const_method_cb =
- Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref_));
- const_method_cb.Run();
+ OnceCallback<int()> array_cb = BindOnce(&ArrayGet, array, 1);
+ OnceCallback<int()> const_array_cb =
+ BindOnce(&ArrayGet, *const_array_ptr, 1);
- Callback<void()> const_method_const_ptr_cb =
- Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr_));
- const_method_const_ptr_cb.Run();
+ array[1] = 3;
+ EXPECT_EQ(3, std::move(array_cb).Run());
+ EXPECT_EQ(3, std::move(const_array_cb).Run());
}
// WeakPtr() support.
@@ -683,27 +586,27 @@ TEST_F(BindTest, Unretained) {
// - Const method bound to WeakPtr<> to const object.
// - Normal Function with WeakPtr<> as P1 can have return type and is
// not canceled.
-TEST_F(BindTest, WeakPtr) {
+TEST_F(BindTest, WeakPtrForRepeating) {
EXPECT_CALL(no_ref_, VoidMethod0());
EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
WeakPtrFactory<NoRef> weak_factory(&no_ref_);
WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
- Closure method_cb =
- Bind(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+ RepeatingClosure method_cb =
+ BindRepeating(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
method_cb.Run();
- Closure const_method_cb =
- Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ RepeatingClosure const_method_cb =
+ BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
const_method_cb.Run();
- Closure const_method_const_ptr_cb =
- Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ RepeatingClosure const_method_const_ptr_cb =
+ BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
const_method_const_ptr_cb.Run();
- Callback<int(int)> normal_func_cb =
- Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+ RepeatingCallback<int(int)> normal_func_cb =
+ BindRepeating(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
EXPECT_EQ(1, normal_func_cb.Run(1));
weak_factory.InvalidateWeakPtrs();
@@ -717,15 +620,39 @@ TEST_F(BindTest, WeakPtr) {
EXPECT_EQ(2, normal_func_cb.Run(2));
}
+TEST_F(BindTest, WeakPtrForOnce) {
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+ OnceClosure method_cb =
+ BindOnce(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+ OnceClosure const_method_cb =
+ BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ OnceClosure const_method_const_ptr_cb =
+ BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ Callback<int(int)> normal_func_cb =
+ Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+
+ weak_factory.InvalidateWeakPtrs();
+ const_weak_factory.InvalidateWeakPtrs();
+
+ std::move(method_cb).Run();
+ std::move(const_method_cb).Run();
+ std::move(const_method_const_ptr_cb).Run();
+
+ // Still runs even after the pointers are invalidated.
+ EXPECT_EQ(2, std::move(normal_func_cb).Run(2));
+}
+
// ConstRef() wrapper support.
// - Binding w/o ConstRef takes a copy.
// - Binding a ConstRef takes a reference.
// - Binding ConstRef to a function ConstRef does not copy on invoke.
-TEST_F(BindTest, ConstRef) {
+TEST_F(BindTest, ConstRefForRepeating) {
int n = 1;
- Callback<int()> copy_cb = Bind(&Identity, n);
- Callback<int()> const_ref_cb = Bind(&Identity, ConstRef(n));
+ RepeatingCallback<int()> copy_cb = BindRepeating(&Identity, n);
+ RepeatingCallback<int()> const_ref_cb = BindRepeating(&Identity, ConstRef(n));
EXPECT_EQ(n, copy_cb.Run());
EXPECT_EQ(n, const_ref_cb.Run());
n++;
@@ -737,8 +664,8 @@ TEST_F(BindTest, ConstRef) {
int move_constructs = 0;
int move_assigns = 0;
CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
- Callback<int()> all_const_ref_cb =
- Bind(&GetCopies, ConstRef(counter));
+ RepeatingCallback<int()> all_const_ref_cb =
+ BindRepeating(&GetCopies, ConstRef(counter));
EXPECT_EQ(0, all_const_ref_cb.Run());
EXPECT_EQ(0, copies);
EXPECT_EQ(0, assigns);
@@ -746,25 +673,38 @@ TEST_F(BindTest, ConstRef) {
EXPECT_EQ(0, move_assigns);
}
-TEST_F(BindTest, ScopedRefptr) {
- EXPECT_CALL(has_ref_, AddRef()).Times(1);
- EXPECT_CALL(has_ref_, Release()).Times(1);
+TEST_F(BindTest, ConstRefForOnce) {
+ int n = 1;
+
+ OnceCallback<int()> copy_cb = BindOnce(&Identity, n);
+ OnceCallback<int()> const_ref_cb = BindOnce(&Identity, ConstRef(n));
+ n++;
+ EXPECT_EQ(n - 1, std::move(copy_cb).Run());
+ EXPECT_EQ(n, std::move(const_ref_cb).Run());
- const scoped_refptr<HasRef> refptr(&has_ref_);
- Callback<int()> scoped_refptr_const_ref_cb =
- Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
- EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
+ int copies = 0;
+ int assigns = 0;
+ int move_constructs = 0;
+ int move_assigns = 0;
+ CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+ OnceCallback<int()> all_const_ref_cb =
+ BindOnce(&GetCopies, ConstRef(counter));
+ EXPECT_EQ(0, std::move(all_const_ref_cb).Run());
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(0, move_constructs);
+ EXPECT_EQ(0, move_assigns);
}
// Test Owned() support.
-TEST_F(BindTest, Owned) {
+TEST_F(BindTest, OwnedForRepeating) {
int deletes = 0;
DeleteCounter* counter = new DeleteCounter(&deletes);
// If we don't capture, delete happens on Callback destruction/reset.
// return the same value.
- Callback<DeleteCounter*()> no_capture_cb =
- Bind(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+ RepeatingCallback<DeleteCounter*()> no_capture_cb =
+ BindRepeating(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
ASSERT_EQ(counter, no_capture_cb.Run());
ASSERT_EQ(counter, no_capture_cb.Run());
EXPECT_EQ(0, deletes);
@@ -773,18 +713,272 @@ TEST_F(BindTest, Owned) {
deletes = 0;
counter = new DeleteCounter(&deletes);
- base::Closure own_object_cb =
- Bind(&DeleteCounter::VoidMethod0, Owned(counter));
+ RepeatingClosure own_object_cb =
+ BindRepeating(&DeleteCounter::VoidMethod0, Owned(counter));
own_object_cb.Run();
EXPECT_EQ(0, deletes);
own_object_cb.Reset();
EXPECT_EQ(1, deletes);
}
-TEST_F(BindTest, UniquePtrReceiver) {
+TEST_F(BindTest, OwnedForOnce) {
+ int deletes = 0;
+ DeleteCounter* counter = new DeleteCounter(&deletes);
+
+  // If we don't capture, the delete happens on Callback destruction/reset.
+ OnceCallback<DeleteCounter*()> no_capture_cb =
+ BindOnce(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+ EXPECT_EQ(0, deletes);
+ no_capture_cb.Reset(); // This should trigger a delete.
+ EXPECT_EQ(1, deletes);
+
+ deletes = 0;
+ counter = new DeleteCounter(&deletes);
+ OnceClosure own_object_cb =
+ BindOnce(&DeleteCounter::VoidMethod0, Owned(counter));
+ EXPECT_EQ(0, deletes);
+ own_object_cb.Reset();
+ EXPECT_EQ(1, deletes);
+}
+
+template <typename T>
+class BindVariantsTest : public ::testing::Test {
+};
+
+struct RepeatingTestConfig {
+ template <typename Signature>
+ using CallbackType = RepeatingCallback<Signature>;
+ using ClosureType = RepeatingClosure;
+
+ template <typename F, typename... Args>
+ static CallbackType<MakeUnboundRunType<F, Args...>>
+ Bind(F&& f, Args&&... args) {
+ return BindRepeating(std::forward<F>(f), std::forward<Args>(args)...);
+ }
+};
+
+struct OnceTestConfig {
+ template <typename Signature>
+ using CallbackType = OnceCallback<Signature>;
+ using ClosureType = OnceClosure;
+
+ template <typename F, typename... Args>
+ static CallbackType<MakeUnboundRunType<F, Args...>>
+ Bind(F&& f, Args&&... args) {
+ return BindOnce(std::forward<F>(f), std::forward<Args>(args)...);
+ }
+};
+
+using BindVariantsTestConfig = ::testing::Types<
+ RepeatingTestConfig, OnceTestConfig>;
+TYPED_TEST_CASE(BindVariantsTest, BindVariantsTestConfig);
+
+template <typename TypeParam, typename Signature>
+using CallbackType = typename TypeParam::template CallbackType<Signature>;
+
+// Function type support.
+// - Normal function.
+// - Normal function bound with non-refcounted first argument.
+// - Method bound to non-const object.
+// - Method bound to scoped_refptr.
+// - Const method bound to non-const object.
+// - Const method bound to const object.
+// - Derived classes can be used with pointers to non-virtual base functions.
+// - Derived classes can be used with pointers to virtual base functions (and
+// preserve virtual dispatch).
+TYPED_TEST(BindVariantsTest, FunctionTypeSupport) {
+ using ClosureType = typename TypeParam::ClosureType;
+
+ StrictMock<HasRef> has_ref;
+ StrictMock<NoRef> no_ref;
+ StrictMock<NoRef> static_func_mock;
+ const HasRef* const_has_ref_ptr = &has_ref;
+ g_func_mock_ptr = &static_func_mock;
+
+ EXPECT_CALL(static_func_mock, VoidMethod0());
+ EXPECT_CALL(has_ref, AddRef()).Times(4);
+ EXPECT_CALL(has_ref, Release()).Times(4);
+ EXPECT_CALL(has_ref, VoidMethod0()).Times(2);
+ EXPECT_CALL(has_ref, VoidConstMethod0()).Times(2);
+
+ ClosureType normal_cb = TypeParam::Bind(&VoidFunc0);
+ CallbackType<TypeParam, NoRef*()> normal_non_refcounted_cb =
+ TypeParam::Bind(&PolymorphicIdentity<NoRef*>, &no_ref);
+ std::move(normal_cb).Run();
+ EXPECT_EQ(&no_ref, std::move(normal_non_refcounted_cb).Run());
+
+ ClosureType method_cb = TypeParam::Bind(&HasRef::VoidMethod0, &has_ref);
+ ClosureType method_refptr_cb = TypeParam::Bind(&HasRef::VoidMethod0,
+ make_scoped_refptr(&has_ref));
+ ClosureType const_method_nonconst_obj_cb =
+ TypeParam::Bind(&HasRef::VoidConstMethod0, &has_ref);
+ ClosureType const_method_const_obj_cb =
+ TypeParam::Bind(&HasRef::VoidConstMethod0, const_has_ref_ptr);
+ std::move(method_cb).Run();
+ std::move(method_refptr_cb).Run();
+ std::move(const_method_nonconst_obj_cb).Run();
+ std::move(const_method_const_obj_cb).Run();
+
+ Child child;
+ child.value = 0;
+ ClosureType virtual_set_cb = TypeParam::Bind(&Parent::VirtualSet, &child);
+ std::move(virtual_set_cb).Run();
+ EXPECT_EQ(kChildValue, child.value);
+
+ child.value = 0;
+ ClosureType non_virtual_set_cb =
+ TypeParam::Bind(&Parent::NonVirtualSet, &child);
+ std::move(non_virtual_set_cb).Run();
+ EXPECT_EQ(kParentValue, child.value);
+}
+
+// Return value support.
+// - Function with return value.
+// - Method with return value.
+// - Const method with return value.
+// - Move-only return value.
+TYPED_TEST(BindVariantsTest, ReturnValues) {
+ StrictMock<NoRef> static_func_mock;
+ StrictMock<HasRef> has_ref;
+ g_func_mock_ptr = &static_func_mock;
+ const HasRef* const_has_ref_ptr = &has_ref;
+
+ EXPECT_CALL(static_func_mock, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref, AddRef()).Times(4);
+ EXPECT_CALL(has_ref, Release()).Times(4);
+ EXPECT_CALL(has_ref, IntMethod0()).WillOnce(Return(31337));
+ EXPECT_CALL(has_ref, IntConstMethod0())
+ .WillOnce(Return(41337))
+ .WillOnce(Return(51337));
+ EXPECT_CALL(has_ref, UniquePtrMethod0())
+ .WillOnce(Return(ByMove(MakeUnique<int>(42))));
+
+ CallbackType<TypeParam, int()> normal_cb = TypeParam::Bind(&IntFunc0);
+ CallbackType<TypeParam, int()> method_cb =
+ TypeParam::Bind(&HasRef::IntMethod0, &has_ref);
+ CallbackType<TypeParam, int()> const_method_nonconst_obj_cb =
+ TypeParam::Bind(&HasRef::IntConstMethod0, &has_ref);
+ CallbackType<TypeParam, int()> const_method_const_obj_cb =
+ TypeParam::Bind(&HasRef::IntConstMethod0, const_has_ref_ptr);
+ CallbackType<TypeParam, std::unique_ptr<int>()> move_only_rv_cb =
+ TypeParam::Bind(&HasRef::UniquePtrMethod0, &has_ref);
+ EXPECT_EQ(1337, std::move(normal_cb).Run());
+ EXPECT_EQ(31337, std::move(method_cb).Run());
+ EXPECT_EQ(41337, std::move(const_method_nonconst_obj_cb).Run());
+ EXPECT_EQ(51337, std::move(const_method_const_obj_cb).Run());
+ EXPECT_EQ(42, *std::move(move_only_rv_cb).Run());
+}
+
+// Argument binding tests.
+// - Argument binding to primitive.
+// - Argument binding to primitive pointer.
+// - Argument binding to a literal integer.
+// - Argument binding to a literal string.
+// - Argument binding with template function.
+// - Argument binding to an object.
+// - Argument binding to pointer to incomplete type.
+// - Argument gets type converted.
+// - Pointer argument gets converted.
+// - Const Reference forces conversion.
+TYPED_TEST(BindVariantsTest, ArgumentBinding) {
+ int n = 2;
+
+ EXPECT_EQ(n, TypeParam::Bind(&Identity, n).Run());
+ EXPECT_EQ(&n, TypeParam::Bind(&PolymorphicIdentity<int*>, &n).Run());
+ EXPECT_EQ(3, TypeParam::Bind(&Identity, 3).Run());
+ EXPECT_STREQ("hi", TypeParam::Bind(&CStringIdentity, "hi").Run());
+ EXPECT_EQ(4, TypeParam::Bind(&PolymorphicIdentity<int>, 4).Run());
+
+ NoRefParent p;
+ p.value = 5;
+ EXPECT_EQ(5, TypeParam::Bind(&UnwrapNoRefParent, p).Run());
+
+ IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
+ EXPECT_EQ(incomplete_ptr,
+ TypeParam::Bind(&PolymorphicIdentity<IncompleteType*>,
+ incomplete_ptr).Run());
+
+ NoRefChild c;
+ c.value = 6;
+ EXPECT_EQ(6, TypeParam::Bind(&UnwrapNoRefParent, c).Run());
+
+ c.value = 7;
+ EXPECT_EQ(7, TypeParam::Bind(&UnwrapNoRefParentPtr, &c).Run());
+
+ c.value = 8;
+ EXPECT_EQ(8, TypeParam::Bind(&UnwrapNoRefParentConstRef, c).Run());
+}
+
+// Unbound argument type support tests.
+// - Unbound value.
+// - Unbound pointer.
+// - Unbound reference.
+// - Unbound const reference.
+// - Unbound unsized array.
+// - Unbound sized array.
+// - Unbound array-of-arrays.
+TYPED_TEST(BindVariantsTest, UnboundArgumentTypeSupport) {
+ CallbackType<TypeParam, void(int)> unbound_value_cb =
+ TypeParam::Bind(&VoidPolymorphic<int>::Run);
+ CallbackType<TypeParam, void(int*)> unbound_pointer_cb =
+ TypeParam::Bind(&VoidPolymorphic<int*>::Run);
+ CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+ TypeParam::Bind(&VoidPolymorphic<int&>::Run);
+ CallbackType<TypeParam, void(const int&)> unbound_const_ref_cb =
+ TypeParam::Bind(&VoidPolymorphic<const int&>::Run);
+ CallbackType<TypeParam, void(int[])> unbound_unsized_array_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[]>::Run);
+ CallbackType<TypeParam, void(int[2])> unbound_sized_array_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[2]>::Run);
+ CallbackType<TypeParam, void(int[][2])> unbound_array_of_arrays_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[][2]>::Run);
+ CallbackType<TypeParam, void(int&)> unbound_ref_with_bound_arg =
+ TypeParam::Bind(&VoidPolymorphic<int, int&>::Run, 1);
+}
+
+// Function with unbound reference parameter.
+// - Original parameter is modified by callback.
+TYPED_TEST(BindVariantsTest, UnboundReferenceSupport) {
+ int n = 0;
+ CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+ TypeParam::Bind(&RefArgSet);
+ std::move(unbound_ref_cb).Run(n);
+ EXPECT_EQ(2, n);
+}
+
+// Unretained() wrapper support.
+// - Method bound to Unretained() non-const object.
+// - Const method bound to Unretained() non-const object.
+// - Const method bound to Unretained() const object.
+TYPED_TEST(BindVariantsTest, Unretained) {
+ StrictMock<NoRef> no_ref;
+ const NoRef* const_no_ref_ptr = &no_ref;
+
+ EXPECT_CALL(no_ref, VoidMethod0());
+ EXPECT_CALL(no_ref, VoidConstMethod0()).Times(2);
+
+ TypeParam::Bind(&NoRef::VoidMethod0, Unretained(&no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr)).Run();
+}
+
+TYPED_TEST(BindVariantsTest, ScopedRefptr) {
+ StrictMock<HasRef> has_ref;
+ EXPECT_CALL(has_ref, AddRef()).Times(1);
+ EXPECT_CALL(has_ref, Release()).Times(1);
+
+ const scoped_refptr<HasRef> refptr(&has_ref);
+ CallbackType<TypeParam, int()> scoped_refptr_const_ref_cb =
+ TypeParam::Bind(&FunctionWithScopedRefptrFirstParam,
+ base::ConstRef(refptr), 1);
+ EXPECT_EQ(1, std::move(scoped_refptr_const_ref_cb).Run());
+}
+
+TYPED_TEST(BindVariantsTest, UniquePtrReceiver) {
std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
- Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
}
// Tests for Passed() wrapper support:
@@ -803,7 +997,6 @@ struct CustomDeleter {
using MoveOnlyTypesToTest =
::testing::Types<std::unique_ptr<DeleteCounter>,
- std::unique_ptr<DeleteCounter>,
std::unique_ptr<DeleteCounter, CustomDeleter>>;
TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
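The move-only coverage above boils down to this common pattern. A minimal sketch using Passed(), whose semantics the bind_helpers.h comment describes:

    #include <memory>

    #include "base/bind.h"
    #include "base/callback.h"

    void Consume(std::unique_ptr<int>) {}

    void PassedDemo() {
      std::unique_ptr<int> p(new int(42));
      // Passed() destructively transfers |p| into the callback state, so
      // running the resulting callback a second time is an API usage error.
      base::Closure cb = base::Bind(&Consume, base::Passed(&p));
      cb.Run();
    }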
@@ -1054,7 +1247,7 @@ TEST_F(BindTest, CapturelessLambda) {
EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
int i = 0;
- auto g = [i]() {};
+ auto g = [i]() { (void)i; };
EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
auto h = [](int, double) { return 'k'; };
@@ -1074,6 +1267,126 @@ TEST_F(BindTest, CapturelessLambda) {
EXPECT_EQ(42, x);
}
+TEST_F(BindTest, Cancellation) {
+ EXPECT_CALL(no_ref_, VoidMethodWithIntArg(_)).Times(2);
+
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ RepeatingCallback<void(int)> cb =
+ BindRepeating(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+ RepeatingClosure cb2 = BindRepeating(cb, 8);
+ OnceClosure cb3 = BindOnce(cb, 8);
+
+ OnceCallback<void(int)> cb4 =
+ BindOnce(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+ EXPECT_FALSE(cb4.IsCancelled());
+
+ OnceClosure cb5 = BindOnce(std::move(cb4), 8);
+
+ EXPECT_FALSE(cb.IsCancelled());
+ EXPECT_FALSE(cb2.IsCancelled());
+ EXPECT_FALSE(cb3.IsCancelled());
+ EXPECT_FALSE(cb5.IsCancelled());
+
+ cb.Run(6);
+ cb2.Run();
+
+ weak_factory.InvalidateWeakPtrs();
+
+ EXPECT_TRUE(cb.IsCancelled());
+ EXPECT_TRUE(cb2.IsCancelled());
+ EXPECT_TRUE(cb3.IsCancelled());
+ EXPECT_TRUE(cb5.IsCancelled());
+
+ cb.Run(6);
+ cb2.Run();
+ std::move(cb3).Run();
+ std::move(cb5).Run();
+}
+
+TEST_F(BindTest, OnceCallback) {
+  // Check that the Callback variants declare the expected conversions.
+ // Copy constructor and assignment of RepeatingCallback.
+ static_assert(std::is_constructible<
+ RepeatingClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be copyable.");
+ static_assert(is_assignable<
+ RepeatingClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be copy-assignable.");
+
+ // Move constructor and assignment of RepeatingCallback.
+ static_assert(std::is_constructible<
+ RepeatingClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be movable.");
+ static_assert(is_assignable<
+ RepeatingClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be move-assignable");
+
+ // Conversions from OnceCallback to RepeatingCallback.
+ static_assert(!std::is_constructible<
+ RepeatingClosure, const OnceClosure&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!is_assignable<
+ RepeatingClosure, const OnceClosure&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
+
+ // Destructive conversions from OnceCallback to RepeatingCallback.
+ static_assert(!std::is_constructible<
+ RepeatingClosure, OnceClosure&&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!is_assignable<
+ RepeatingClosure, OnceClosure&&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
+
+ // Copy constructor and assignment of OnceCallback.
+ static_assert(!std::is_constructible<
+ OnceClosure, const OnceClosure&>::value,
+ "OnceClosure should not be copyable.");
+ static_assert(!is_assignable<
+ OnceClosure, const OnceClosure&>::value,
+ "OnceClosure should not be copy-assignable");
+
+ // Move constructor and assignment of OnceCallback.
+ static_assert(std::is_constructible<
+ OnceClosure, OnceClosure&&>::value,
+ "OnceClosure should be movable.");
+ static_assert(is_assignable<
+ OnceClosure, OnceClosure&&>::value,
+ "OnceClosure should be move-assignable.");
+
+ // Conversions from RepeatingCallback to OnceCallback.
+ static_assert(std::is_constructible<
+ OnceClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(is_assignable<
+ OnceClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
+
+ // Destructive conversions from RepeatingCallback to OnceCallback.
+ static_assert(std::is_constructible<
+ OnceClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(is_assignable<
+ OnceClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be covretible to OnceClosure.");
+
+ OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
+ std::move(cb).Run();
+
+ // RepeatingCallback should be convertible to OnceCallback.
+ OnceClosure cb2 = BindRepeating(&VoidPolymorphic<>::Run);
+ std::move(cb2).Run();
+
+ RepeatingClosure cb3 = BindRepeating(&VoidPolymorphic<>::Run);
+ cb = cb3;
+ std::move(cb).Run();
+
+ cb = std::move(cb2);
+
+ OnceCallback<void(int)> cb4 = BindOnce(
+ &VoidPolymorphic<std::unique_ptr<int>, int>::Run, MakeUnique<int>(0));
+ BindOnce(std::move(cb4), 1).Run();
+}
+
// Callback construction and assignment tests.
// - Construction from an InvokerStorageHolder should not cause ref/deref.
// - Assignment from other callback should only cause one ref
@@ -1101,17 +1414,12 @@ TEST_F(BindTest, WindowsCallingConventions) {
}
#endif
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
-
// Test null callbacks cause a DCHECK.
TEST(BindDeathTest, NullCallback) {
base::Callback<void(int)> null_cb;
ASSERT_TRUE(null_cb.is_null());
- EXPECT_DEATH(base::Bind(null_cb, 42), "");
+ EXPECT_DCHECK_DEATH(base::Bind(null_cb, 42));
}
-#endif // (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) &&
- // GTEST_HAS_DEATH_TEST
-
} // namespace
} // namespace base
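The static_asserts above pin down the conversion matrix between the two callback flavors. A minimal sketch of what that means at a call site (Ping is a hypothetical free function, not part of this change):

  void Ping() {}

  base::RepeatingClosure repeating = base::BindRepeating(&Ping);
  base::OnceClosure once = repeating;              // OK: repeating converts to once.
  base::OnceClosure once2 = std::move(repeating);  // OK: destructive conversion.
  // base::RepeatingClosure bad = std::move(once); // Error: once never converts back.
  std::move(once).Run();                           // A OnceClosure is consumed by Run().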
diff --git a/base/bit_cast.h b/base/bit_cast.h
index c9514bceef..90dd925e86 100644
--- a/base/bit_cast.h
+++ b/base/bit_cast.h
@@ -9,6 +9,7 @@
#include <type_traits>
#include "base/compiler_specific.h"
+#include "base/template_util.h"
#include "build/build_config.h"
// bit_cast<Dest,Source> is a template function that implements the equivalent
@@ -63,34 +64,10 @@ template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
static_assert(sizeof(Dest) == sizeof(Source),
"bit_cast requires source and destination to be the same size");
-
-#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
- (defined(__clang__) && defined(_LIBCPP_VERSION)))
- // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
- // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
- // However, with libc++ when GCC is the compiler the trait is buggy, see
- // crbug.com/607158, so fall back to the less strict variant for non-clang.
- static_assert(std::is_trivially_copyable<Dest>::value,
- "non-trivially-copyable bit_cast is undefined");
- static_assert(std::is_trivially_copyable<Source>::value,
- "non-trivially-copyable bit_cast is undefined");
-#elif HAS_FEATURE(is_trivially_copyable)
- // The compiler supports an equivalent intrinsic.
- static_assert(__is_trivially_copyable(Dest),
- "non-trivially-copyable bit_cast is undefined");
- static_assert(__is_trivially_copyable(Source),
- "non-trivially-copyable bit_cast is undefined");
-#elif COMPILER_GCC
- // Fallback to compiler intrinsic on GCC and clang (which pretends to be
- // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
- // our purpose.
- static_assert(__has_trivial_copy(Dest),
- "non-trivially-copyable bit_cast is undefined");
- static_assert(__has_trivial_copy(Source),
- "non-trivially-copyable bit_cast is undefined");
-#else
- // Do nothing, let the bots handle it.
-#endif
+ static_assert(base::is_trivially_copyable<Dest>::value,
+ "bit_cast requires the destination type to be copyable");
+ static_assert(base::is_trivially_copyable<Source>::value,
+ "bit_cast requires the source type to be copyable");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
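With the compiler-version special cases gone, bit_cast's contract is just the two traits asserted above. A small sketch of the canonical use, assuming <stdint.h> is in scope:

  float f = 1.0f;
  // Same size, trivially copyable on both sides, so the static_asserts pass;
  // the bytes of the float are copied verbatim into the integer.
  uint32_t bits = bit_cast<uint32_t>(f);  // 0x3f800000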
diff --git a/base/bits.h b/base/bits.h
index a3a59d1dfa..d101cb731a 100644
--- a/base/bits.h
+++ b/base/bits.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,8 +10,13 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/compiler_specific.h"
#include "base/logging.h"
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
namespace base {
namespace bits {
@@ -49,6 +54,58 @@ inline size_t Align(size_t size, size_t alignment) {
return (size + alignment - 1) & ~(alignment - 1);
}
+// These functions count the number of leading zeros in a binary value, starting
+// with the most significant bit. C does not have an operator to do this, but
+// fortunately the various compilers have built-ins that map to fast underlying
+// processor instructions.
+#if defined(COMPILER_MSVC)
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ unsigned long index;
+ return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32;
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ unsigned long index;
+ return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64;
+}
+
+#endif
+
+#elif defined(COMPILER_GCC)
+
+// This is very annoying. __builtin_clz has undefined behaviour for an input of
+// 0, even though there's clearly a return value that makes sense, and even
+// though some processor clz instructions have defined behaviour for 0. We could
+// drop to raw __asm__ to do better, but we'll avoid doing that unless we see
+// proof that we need to.
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return LIKELY(x) ? __builtin_clz(x) : 32;
+}
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ return LIKELY(x) ? __builtin_clzll(x) : 64;
+}
+
+#endif
+
+#if defined(ARCH_CPU_64_BITS)
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+ return CountLeadingZeroBits64(x);
+}
+
+#else
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+ return CountLeadingZeroBits32(x);
+}
+
+#endif
+
} // namespace bits
} // namespace base
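Because the wrappers above give the zero input a defined result (32 or 64), they compose cleanly into derived helpers. A hypothetical example, not part of this change:

  // Number of bits needed to represent |n|; BitWidth(0) == 0 because
  // CountLeadingZeroBits32(0) is defined to return 32.
  inline uint32_t BitWidth(uint32_t n) {
    return 32 - base::bits::CountLeadingZeroBits32(n);
  }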
diff --git a/base/bits_unittest.cc b/base/bits_unittest.cc
index 4f5b6ea49e..270b8ef7d3 100644
--- a/base/bits_unittest.cc
+++ b/base/bits_unittest.cc
@@ -61,5 +61,25 @@ TEST(BitsTest, Align) {
EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
}
+TEST(BitsTest, CLZWorks) {
+ EXPECT_EQ(32u, CountLeadingZeroBits32(0u));
+ EXPECT_EQ(31u, CountLeadingZeroBits32(1u));
+ EXPECT_EQ(1u, CountLeadingZeroBits32(1u << 30));
+ EXPECT_EQ(0u, CountLeadingZeroBits32(1u << 31));
+
+#if defined(ARCH_CPU_64_BITS)
+ EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(0ull));
+ EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(1ull));
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(1ull << 31));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1ull << 62));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1ull << 63));
+#else
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(0u));
+ EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(1u));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1u << 30));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1u << 31));
+#endif
+}
+
} // namespace bits
} // namespace base
diff --git a/base/callback.h b/base/callback.h
index e087c731d1..40bd5208a8 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -12,382 +12,130 @@
// Closure should #include "base/callback_forward.h" instead of this file.
// -----------------------------------------------------------------------------
-// Introduction
+// Usage documentation
// -----------------------------------------------------------------------------
//
-// The templated Callback class is a generalized function object. Together
-// with the Bind() function in bind.h, they provide a type-safe method for
-// performing partial application of functions.
-//
-// Partial application (or "currying") is the process of binding a subset of
-// a function's arguments to produce another function that takes fewer
-// arguments. This can be used to pass around a unit of delayed execution,
-// much like lexical closures are used in other languages. For example, it
-// is used in Chromium code to schedule tasks on different MessageLoops.
-//
-// A callback with no unbound input parameters (base::Callback<void()>)
-// is called a base::Closure. Note that this is NOT the same as what other
-// languages refer to as a closure -- it does not retain a reference to its
-// enclosing environment.
-//
-// MEMORY MANAGEMENT AND PASSING
-//
-// The Callback objects themselves should be passed by const-reference, and
-// stored by copy. They internally store their state via a refcounted class
-// and thus do not need to be deleted.
-//
-// The reason to pass via a const-reference is to avoid unnecessary
-// AddRef/Release pairs to the internal state.
-//
-//
-// -----------------------------------------------------------------------------
-// Quick reference for basic stuff
-// -----------------------------------------------------------------------------
-//
-// BINDING A BARE FUNCTION
-//
-// int Return5() { return 5; }
-// base::Callback<int()> func_cb = base::Bind(&Return5);
-// LOG(INFO) << func_cb.Run(); // Prints 5.
-//
-// BINDING A CLASS METHOD
-//
-// The first argument to bind is the member function to call, the second is
-// the object on which to call it.
-//
-// class Ref : public base::RefCountedThreadSafe<Ref> {
-// public:
-// int Foo() { return 3; }
-// void PrintBye() { LOG(INFO) << "bye."; }
-// };
-// scoped_refptr<Ref> ref = new Ref();
-// base::Callback<void()> ref_cb = base::Bind(&Ref::Foo, ref);
-// LOG(INFO) << ref_cb.Run(); // Prints out 3.
-//
-// By default the object must support RefCounted or you will get a compiler
-// error. If you're passing between threads, be sure it's
-// RefCountedThreadSafe! See "Advanced binding of member functions" below if
-// you don't want to use reference counting.
-//
-// RUNNING A CALLBACK
-//
-// Callbacks can be run with their "Run" method, which has the same
-// signature as the template argument to the callback.
-//
-// void DoSomething(const base::Callback<void(int, std::string)>& callback) {
-// callback.Run(5, "hello");
-// }
-//
-// Callbacks can be run more than once (they don't get deleted or marked when
-// run). However, this precludes using base::Passed (see below).
-//
-// void DoSomething(const base::Callback<double(double)>& callback) {
-// double myresult = callback.Run(3.14159);
-// myresult += callback.Run(2.71828);
-// }
-//
-// PASSING UNBOUND INPUT PARAMETERS
-//
-// Unbound parameters are specified at the time a callback is Run(). They are
-// specified in the Callback template type:
-//
-// void MyFunc(int i, const std::string& str) {}
-// base::Callback<void(int, const std::string&)> cb = base::Bind(&MyFunc);
-// cb.Run(23, "hello, world");
-//
-// PASSING BOUND INPUT PARAMETERS
-//
-// Bound parameters are specified when you create thee callback as arguments
-// to Bind(). They will be passed to the function and the Run()ner of the
-// callback doesn't see those values or even know that the function it's
-// calling.
-//
-// void MyFunc(int i, const std::string& str) {}
-// base::Callback<void()> cb = base::Bind(&MyFunc, 23, "hello world");
-// cb.Run();
-//
-// A callback with no unbound input parameters (base::Callback<void()>)
-// is called a base::Closure. So we could have also written:
-//
-// base::Closure cb = base::Bind(&MyFunc, 23, "hello world");
-//
-// When calling member functions, bound parameters just go after the object
-// pointer.
-//
-// base::Closure cb = base::Bind(&MyClass::MyFunc, this, 23, "hello world");
-//
-// PARTIAL BINDING OF PARAMETERS
-//
-// You can specify some parameters when you create the callback, and specify
-// the rest when you execute the callback.
-//
-// void MyFunc(int i, const std::string& str) {}
-// base::Callback<void(const std::string&)> cb = base::Bind(&MyFunc, 23);
-// cb.Run("hello world");
-//
-// When calling a function bound parameters are first, followed by unbound
-// parameters.
-//
-//
-// -----------------------------------------------------------------------------
-// Quick reference for advanced binding
-// -----------------------------------------------------------------------------
-//
-// BINDING A CLASS METHOD WITH WEAK POINTERS
-//
-// base::Bind(&MyClass::Foo, GetWeakPtr());
-//
-// The callback will not be run if the object has already been destroyed.
-// DANGER: weak pointers are not threadsafe, so don't use this
-// when passing between threads!
-//
-// BINDING A CLASS METHOD WITH MANUAL LIFETIME MANAGEMENT
-//
-// base::Bind(&MyClass::Foo, base::Unretained(this));
-//
-// This disables all lifetime management on the object. You're responsible
-// for making sure the object is alive at the time of the call. You break it,
-// you own it!
-//
-// BINDING A CLASS METHOD AND HAVING THE CALLBACK OWN THE CLASS
-//
-// MyClass* myclass = new MyClass;
-// base::Bind(&MyClass::Foo, base::Owned(myclass));
-//
-// The object will be deleted when the callback is destroyed, even if it's
-// not run (like if you post a task during shutdown). Potentially useful for
-// "fire and forget" cases.
-//
-// IGNORING RETURN VALUES
-//
-// Sometimes you want to call a function that returns a value in a callback
-// that doesn't expect a return value.
-//
-// int DoSomething(int arg) { cout << arg << endl; }
-// base::Callback<void(int)> cb =
-// base::Bind(base::IgnoreResult(&DoSomething));
-//
-//
-// -----------------------------------------------------------------------------
-// Quick reference for binding parameters to Bind()
-// -----------------------------------------------------------------------------
-//
-// Bound parameters are specified as arguments to Bind() and are passed to the
-// function. A callback with no parameters or no unbound parameters is called a
-// Closure (base::Callback<void()> and base::Closure are the same thing).
-//
-// PASSING PARAMETERS OWNED BY THE CALLBACK
-//
-// void Foo(int* arg) { cout << *arg << endl; }
-// int* pn = new int(1);
-// base::Closure foo_callback = base::Bind(&foo, base::Owned(pn));
-//
-// The parameter will be deleted when the callback is destroyed, even if it's
-// not run (like if you post a task during shutdown).
-//
-// PASSING PARAMETERS AS A scoped_ptr
-//
-// void TakesOwnership(std::unique_ptr<Foo> arg) {}
-// std::unique_ptr<Foo> f(new Foo);
-// // f becomes null during the following call.
-// base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
-//
-// Ownership of the parameter will be with the callback until the it is run,
-// when ownership is passed to the callback function. This means the callback
-// can only be run once. If the callback is never run, it will delete the
-// object when it's destroyed.
-//
-// PASSING PARAMETERS AS A scoped_refptr
-//
-// void TakesOneRef(scoped_refptr<Foo> arg) {}
-// scoped_refptr<Foo> f(new Foo)
-// base::Closure cb = base::Bind(&TakesOneRef, f);
-//
-// This should "just work." The closure will take a reference as long as it
-// is alive, and another reference will be taken for the called function.
-//
-// PASSING PARAMETERS BY REFERENCE
-//
-// Const references are *copied* unless ConstRef is used. Example:
-//
-// void foo(const int& arg) { printf("%d %p\n", arg, &arg); }
-// int n = 1;
-// base::Closure has_copy = base::Bind(&foo, n);
-// base::Closure has_ref = base::Bind(&foo, base::ConstRef(n));
-// n = 2;
-// foo(n); // Prints "2 0xaaaaaaaaaaaa"
-// has_copy.Run(); // Prints "1 0xbbbbbbbbbbbb"
-// has_ref.Run(); // Prints "2 0xaaaaaaaaaaaa"
-//
-// Normally parameters are copied in the closure. DANGER: ConstRef stores a
-// const reference instead, referencing the original parameter. This means
-// that you must ensure the object outlives the callback!
-//
-//
-// -----------------------------------------------------------------------------
-// Implementation notes
-// -----------------------------------------------------------------------------
-//
-// WHERE IS THIS DESIGN FROM:
-//
-// The design Callback and Bind is heavily influenced by C++'s
-// tr1::function/tr1::bind, and by the "Google Callback" system used inside
-// Google.
-//
-//
-// HOW THE IMPLEMENTATION WORKS:
-//
-// There are three main components to the system:
-// 1) The Callback classes.
-// 2) The Bind() functions.
-// 3) The arguments wrappers (e.g., Unretained() and ConstRef()).
-//
-// The Callback classes represent a generic function pointer. Internally,
-// it stores a refcounted piece of state that represents the target function
-// and all its bound parameters. Each Callback specialization has a templated
-// constructor that takes an BindState<>*. In the context of the constructor,
-// the static type of this BindState<> pointer uniquely identifies the
-// function it is representing, all its bound parameters, and a Run() method
-// that is capable of invoking the target.
-//
-// Callback's constructor takes the BindState<>* that has the full static type
-// and erases the target function type as well as the types of the bound
-// parameters. It does this by storing a pointer to the specific Run()
-// function, and upcasting the state of BindState<>* to a
-// BindStateBase*. This is safe as long as this BindStateBase pointer
-// is only used with the stored Run() pointer.
-//
-// To BindState<> objects are created inside the Bind() functions.
-// These functions, along with a set of internal templates, are responsible for
-//
-// - Unwrapping the function signature into return type, and parameters
-// - Determining the number of parameters that are bound
-// - Creating the BindState storing the bound parameters
-// - Performing compile-time asserts to avoid error-prone behavior
-// - Returning an Callback<> with an arity matching the number of unbound
-// parameters and that knows the correct refcounting semantics for the
-// target object if we are binding a method.
-//
-// The Bind functions do the above using type-inference, and template
-// specializations.
-//
-// By default Bind() will store copies of all bound parameters, and attempt
-// to refcount a target object if the function being bound is a class method.
-// These copies are created even if the function takes parameters as const
-// references. (Binding to non-const references is forbidden, see bind.h.)
-//
-// To change this behavior, we introduce a set of argument wrappers
-// (e.g., Unretained(), and ConstRef()). These are simple container templates
-// that are passed by value, and wrap a pointer to argument. See the
-// file-level comment in base/bind_helpers.h for more info.
-//
-// These types are passed to the Unwrap() functions, and the MaybeRefcount()
-// functions respectively to modify the behavior of Bind(). The Unwrap()
-// and MaybeRefcount() functions change behavior by doing partial
-// specialization based on whether or not a parameter is a wrapper type.
-//
-// ConstRef() is similar to tr1::cref. Unretained() is specific to Chromium.
-//
-//
-// WHY NOT TR1 FUNCTION/BIND?
-//
-// Direct use of tr1::function and tr1::bind was considered, but ultimately
-// rejected because of the number of copy constructors invocations involved
-// in the binding of arguments during construction, and the forwarding of
-// arguments during invocation. These copies will no longer be an issue in
-// C++0x because C++0x will support rvalue reference allowing for the compiler
-// to avoid these copies. However, waiting for C++0x is not an option.
-//
-// Measured with valgrind on gcc version 4.4.3 (Ubuntu 4.4.3-4ubuntu5), the
-// tr1::bind call itself will invoke a non-trivial copy constructor three times
-// for each bound parameter. Also, each when passing a tr1::function, each
-// bound argument will be copied again.
-//
-// In addition to the copies taken at binding and invocation, copying a
-// tr1::function causes a copy to be made of all the bound parameters and
-// state.
-//
-// Furthermore, in Chromium, it is desirable for the Callback to take a
-// reference on a target object when representing a class method call. This
-// is not supported by tr1.
-//
-// Lastly, tr1::function and tr1::bind has a more general and flexible API.
-// This includes things like argument reordering by use of
-// tr1::bind::placeholder, support for non-const reference parameters, and some
-// limited amount of subtyping of the tr1::function object (e.g.,
-// tr1::function<int(int)> is convertible to tr1::function<void(int)>).
-//
-// These are not features that are required in Chromium. Some of them, such as
-// allowing for reference parameters, and subtyping of functions, may actually
-// become a source of errors. Removing support for these features actually
-// allows for a simpler implementation, and a terser Currying API.
-//
-//
-// WHY NOT GOOGLE CALLBACKS?
-//
-// The Google callback system also does not support refcounting. Furthermore,
-// its implementation has a number of strange edge cases with respect to type
-// conversion of its arguments. In particular, the argument's constness must
-// at times match exactly the function signature, or the type-inference might
-// break. Given the above, writing a custom solution was easier.
-//
-//
-// MISSING FUNCTIONALITY
-// - Invoking the return of Bind. Bind(&foo).Run() does not work;
-// - Binding arrays to functions that take a non-const pointer.
-// Example:
-// void Foo(const char* ptr);
-// void Bar(char* ptr);
-// Bind(&Foo, "test");
-// Bind(&Bar, "test"); // This fails because ptr is not const.
-//
-// If you are thinking of forward declaring Callback in your own header file,
-// please include "base/callback_forward.h" instead.
+// See //docs/callback.md for documentation.
namespace base {
-template <typename R, typename... Args, internal::CopyMode copy_mode>
-class Callback<R(Args...), copy_mode>
- : public internal::CallbackBase<copy_mode> {
+namespace internal {
+
+template <typename CallbackType>
+struct IsOnceCallback : std::false_type {};
+
+template <typename Signature>
+struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
+
+// RunMixin provides different variants of `Run()` function to `Callback<>`
+// based on the type of callback.
+template <typename CallbackType>
+class RunMixin;
+
+// Specialization for OnceCallback.
+template <typename R, typename... Args>
+class RunMixin<OnceCallback<R(Args...)>> {
+ private:
+ using CallbackType = OnceCallback<R(Args...)>;
+
+ public:
+ using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
+
+ R Run(Args... /* args */) const & {
+ // Note: even though this static_assert will trivially always fail, it
+ // cannot be simply replaced with static_assert(false, ...) because:
+ // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
+ // argument does not evaluate to true.
+ // - Per [temp.res]/p8, if no valid specialization can be generated for a
+ // template definition, and that template is not instantiated, the
+ // template definition is ill-formed, no diagnostic required.
+ // These two clauses, taken together, would allow a conforming C++ compiler
+ // to immediately reject static_assert(false, ...), even inside an
+ // uninstantiated template.
+ static_assert(!IsOnceCallback<CallbackType>::value,
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+ }
+
+ R Run(Args... args) && {
+ // Move the callback instance into a local variable before the invocation;
+ // this ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ CallbackType cb = static_cast<CallbackType&&>(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
+};
+
+// Specialization for RepeatingCallback.
+template <typename R, typename... Args>
+class RunMixin<RepeatingCallback<R(Args...)>> {
private:
- using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+ using CallbackType = RepeatingCallback<R(Args...)>;
+
+ public:
+ using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
+
+ R Run(Args... args) const {
+ const CallbackType& cb = static_cast<const CallbackType&>(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
+};
+
+template <typename From, typename To>
+struct IsCallbackConvertible : std::false_type {};
+
+template <typename Signature>
+struct IsCallbackConvertible<RepeatingCallback<Signature>,
+ OnceCallback<Signature>> : std::true_type {};
+} // namespace internal
+
+template <typename R,
+ typename... Args,
+ internal::CopyMode copy_mode,
+ internal::RepeatMode repeat_mode>
+class Callback<R(Args...), copy_mode, repeat_mode>
+ : public internal::CallbackBase<copy_mode>,
+ public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
public:
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef R RunType(Args...);
+ static_assert(repeat_mode != internal::RepeatMode::Once ||
+ copy_mode == internal::CopyMode::MoveOnly,
+ "OnceCallback must be MoveOnly.");
+
+ using RunType = R(Args...);
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
- Callback(internal::BindStateBase* bind_state,
- PolymorphicInvoke invoke_func)
+ explicit Callback(internal::BindStateBase* bind_state)
: internal::CallbackBase<copy_mode>(bind_state) {
- using InvokeFuncStorage =
- typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
- this->polymorphic_invoke_ =
- reinterpret_cast<InvokeFuncStorage>(invoke_func);
+ }
+
+ template <typename OtherCallback,
+ typename = typename std::enable_if<
+ internal::IsCallbackConvertible<OtherCallback, Callback>::value
+ >::type>
+ Callback(OtherCallback other)
+ : internal::CallbackBase<copy_mode>(std::move(other)) {}
+
+ template <typename OtherCallback,
+ typename = typename std::enable_if<
+ internal::IsCallbackConvertible<OtherCallback, Callback>::value
+ >::type>
+ Callback& operator=(OtherCallback other) {
+ static_cast<internal::CallbackBase<copy_mode>&>(*this) = std::move(other);
+ return *this;
}
bool Equals(const Callback& other) const {
return this->EqualsInternal(other);
}
- // Run() makes an extra copy compared to directly calling the bound function
- // if an argument is passed-by-value and is copyable-but-not-movable:
- // i.e. below copies CopyableNonMovableType twice.
- // void F(CopyableNonMovableType) {}
- // Bind(&F).Run(CopyableNonMovableType());
- //
- // We can not fully apply Perfect Forwarding idiom to the callchain from
- // Callback::Run() to the target function. Perfect Forwarding requires
- // knowing how the caller will pass the arguments. However, the signature of
- // InvokerType::Run() needs to be fixed in the callback constructor, so Run()
- // cannot template its arguments based on how it's called.
- R Run(Args... args) const {
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
- return f(this->bind_state_.get(), std::forward<Args>(args)...);
- }
+ friend class internal::RunMixin<Callback>;
};
} // namespace base
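The RunMixin split above turns misuse into a compile-time error rather than a runtime surprise. A minimal sketch (Ping is a hypothetical function):

  void Ping(int) {}

  base::OnceCallback<void(int)> once = base::BindOnce(&Ping);
  // once.Run(1);          // Rejected by the lvalue Run() static_assert above.
  std::move(once).Run(1);  // OK: the rvalue Run() consumes the callback.

  base::RepeatingCallback<void(int)> repeating = base::BindRepeating(&Ping);
  repeating.Run(1);  // OK: the const Run() may be invoked any number of times.
  repeating.Run(2);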
diff --git a/base/callback_forward.h b/base/callback_forward.h
index 8b9b89cdc2..13eed0eb0d 100644
--- a/base/callback_forward.h
+++ b/base/callback_forward.h
@@ -12,19 +12,37 @@ namespace internal {
// MoveOnly indicates the Callback is not copyable but movable, and Copyable
// indicates it is copyable and movable.
enum class CopyMode {
- MoveOnly, Copyable,
+ MoveOnly,
+ Copyable,
+};
+
+enum class RepeatMode {
+ Once,
+ Repeating,
};
} // namespace internal
template <typename Signature,
- internal::CopyMode copy_mode = internal::CopyMode::Copyable>
+ internal::CopyMode copy_mode = internal::CopyMode::Copyable,
+ internal::RepeatMode repeat_mode = internal::RepeatMode::Repeating>
class Callback;
// Syntactic sugar to make Callback<void()> easier to declare since it
// will be used in a lot of APIs with delayed execution.
using Closure = Callback<void()>;
+template <typename Signature>
+using OnceCallback = Callback<Signature,
+ internal::CopyMode::MoveOnly,
+ internal::RepeatMode::Once>;
+template <typename Signature>
+using RepeatingCallback = Callback<Signature,
+ internal::CopyMode::Copyable,
+ internal::RepeatMode::Repeating>;
+using OnceClosure = OnceCallback<void()>;
+using RepeatingClosure = RepeatingCallback<void()>;
+
} // namespace base
#endif // BASE_CALLBACK_FORWARD_H_
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index 782371f6e7..ec3d6cbf16 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -20,10 +20,13 @@
namespace base {
-template <typename Sig>
-base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
- base::Callback<Sig> ret(*cb);
- cb->Reset();
+template <typename Signature,
+ internal::CopyMode copy_mode,
+ internal::RepeatMode repeat_mode>
+base::Callback<Signature, copy_mode, repeat_mode> ResetAndReturn(
+ base::Callback<Signature, copy_mode, repeat_mode>* cb) {
+ base::Callback<Signature, copy_mode, repeat_mode> ret(std::move(*cb));
+ DCHECK(!*cb);
return ret;
}
diff --git a/base/callback_helpers_unittest.cc b/base/callback_helpers_unittest.cc
index 8283996379..6c48d7ce4e 100644
--- a/base/callback_helpers_unittest.cc
+++ b/base/callback_helpers_unittest.cc
@@ -14,6 +14,24 @@ void Increment(int* value) {
(*value)++;
}
+TEST(CallbackHelpersTest, TestResetAndReturn) {
+ int run_count = 0;
+
+ base::Closure cb = base::Bind(&Increment, &run_count);
+ EXPECT_EQ(0, run_count);
+ base::ResetAndReturn(&cb).Run();
+ EXPECT_EQ(1, run_count);
+ EXPECT_FALSE(cb);
+
+ run_count = 0;
+
+ base::OnceClosure cb2 = base::BindOnce(&Increment, &run_count);
+ EXPECT_EQ(0, run_count);
+ base::ResetAndReturn(&cb2).Run();
+ EXPECT_EQ(1, run_count);
+ EXPECT_FALSE(cb2);
+}
+
TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
int run_count = 0;
{
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 4c8ccae932..4330e9cce5 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -9,40 +9,75 @@
namespace base {
namespace internal {
-void BindStateBase::AddRef() {
+namespace {
+
+bool ReturnFalse(const BindStateBase*) {
+ return false;
+}
+
+} // namespace
+
+BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*))
+ : BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
+}
+
+BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*),
+ bool (*is_cancelled)(const BindStateBase*))
+ : polymorphic_invoke_(polymorphic_invoke),
+ ref_count_(0),
+ destructor_(destructor),
+ is_cancelled_(is_cancelled) {}
+
+void BindStateBase::AddRef() const {
AtomicRefCountInc(&ref_count_);
}
-void BindStateBase::Release() {
+void BindStateBase::Release() const {
if (!AtomicRefCountDec(&ref_count_))
destructor_(this);
}
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c)
- : bind_state_(std::move(c.bind_state_)),
- polymorphic_invoke_(c.polymorphic_invoke_) {
- c.polymorphic_invoke_ = nullptr;
-}
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
CallbackBase<CopyMode::MoveOnly>&
-CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) {
+CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) = default;
+
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+ const CallbackBase<CopyMode::Copyable>& c)
+ : bind_state_(c.bind_state_) {}
+
+CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
+ const CallbackBase<CopyMode::Copyable>& c) {
+ bind_state_ = c.bind_state_;
+ return *this;
+}
+
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+ CallbackBase<CopyMode::Copyable>&& c)
+ : bind_state_(std::move(c.bind_state_)) {}
+
+CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
+ CallbackBase<CopyMode::Copyable>&& c) {
bind_state_ = std::move(c.bind_state_);
- polymorphic_invoke_ = c.polymorphic_invoke_;
- c.polymorphic_invoke_ = nullptr;
return *this;
}
void CallbackBase<CopyMode::MoveOnly>::Reset() {
- polymorphic_invoke_ = nullptr;
// NULL the bind_state_ last, since it may be holding the last ref to whatever
// object owns us, and we may be deleted after that.
bind_state_ = nullptr;
}
+bool CallbackBase<CopyMode::MoveOnly>::IsCancelled() const {
+ DCHECK(bind_state_);
+ return bind_state_->IsCancelled();
+}
+
bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
const CallbackBase& other) const {
- return bind_state_.get() == other.bind_state_.get() &&
- polymorphic_invoke_ == other.polymorphic_invoke_;
+ return bind_state_ == other.bind_state_;
}
CallbackBase<CopyMode::MoveOnly>::CallbackBase(
@@ -57,24 +92,18 @@ CallbackBase<CopyMode::Copyable>::CallbackBase(
const CallbackBase& c)
: CallbackBase<CopyMode::MoveOnly>(nullptr) {
bind_state_ = c.bind_state_;
- polymorphic_invoke_ = c.polymorphic_invoke_;
}
-CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c)
- : CallbackBase<CopyMode::MoveOnly>(std::move(c)) {}
+CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c) = default;
CallbackBase<CopyMode::Copyable>&
CallbackBase<CopyMode::Copyable>::operator=(const CallbackBase& c) {
bind_state_ = c.bind_state_;
- polymorphic_invoke_ = c.polymorphic_invoke_;
return *this;
}
CallbackBase<CopyMode::Copyable>&
-CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) {
- *static_cast<CallbackBase<CopyMode::MoveOnly>*>(this) = std::move(c);
- return *this;
-}
+CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) = default;
template class CallbackBase<CopyMode::MoveOnly>;
template class CallbackBase<CopyMode::Copyable>;
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 0fe0b2d9e1..d6dcfeb3c0 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -30,10 +30,16 @@ class CallbackBase;
// Creating a vtable for every BindState template instantiation results in a lot
// of bloat. Its only task is to call the destructor which can be done with a
// function pointer.
-class BindStateBase {
+class BASE_EXPORT BindStateBase {
+ public:
+ using InvokeFuncStorage = void(*)();
+
protected:
- explicit BindStateBase(void (*destructor)(BindStateBase*))
- : ref_count_(0), destructor_(destructor) {}
+ BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*));
+ BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*),
+ bool (*is_cancelled)(const BindStateBase*));
~BindStateBase() = default;
private:
@@ -41,13 +47,24 @@ class BindStateBase {
template <CopyMode copy_mode>
friend class CallbackBase;
- void AddRef();
- void Release();
+ bool IsCancelled() const {
+ return is_cancelled_(this);
+ }
+
+ void AddRef() const;
+ void Release() const;
+
+ // In C++, it is safe to cast function pointers to function pointers of
+ // another type. It is not okay to use void*. We create an InvokeFuncStorage
+ // that can store our function pointer, and then cast it back to
+ // the original type on usage.
+ InvokeFuncStorage polymorphic_invoke_;
- AtomicRefCount ref_count_;
+ mutable AtomicRefCount ref_count_;
// Pointer to a function that will properly destroy |this|.
- void (*destructor_)(BindStateBase*);
+ void (*destructor_)(const BindStateBase*);
+ bool (*is_cancelled_)(const BindStateBase*);
DISALLOW_COPY_AND_ASSIGN(BindStateBase);
};
@@ -62,36 +79,43 @@ class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
CallbackBase(CallbackBase&& c);
CallbackBase& operator=(CallbackBase&& c);
+ explicit CallbackBase(const CallbackBase<CopyMode::Copyable>& c);
+ CallbackBase& operator=(const CallbackBase<CopyMode::Copyable>& c);
+
+ explicit CallbackBase(CallbackBase<CopyMode::Copyable>&& c);
+ CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
+
// Returns true if Callback is null (doesn't refer to anything).
bool is_null() const { return bind_state_.get() == NULL; }
explicit operator bool() const { return !is_null(); }
+ // Returns true if the callback invocation will be a no-op due to a
+ // cancellation. It's invalid to call this on an uninitialized callback.
+ bool IsCancelled() const;
+
// Returns the Callback into an uninitialized state.
void Reset();
protected:
- // In C++, it is safe to cast function pointers to function pointers of
- // another type. It is not okay to use void*. We create a InvokeFuncStorage
- // that that can store our function pointer, and then cast it back to
- // the original type on usage.
- using InvokeFuncStorage = void(*)();
+ using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
// Returns true if this callback equals |other|. |other| may be null.
bool EqualsInternal(const CallbackBase& other) const;
// Allow initializing of |bind_state_| via the constructor to avoid default
- // initialization of the scoped_refptr. We do not also initialize
- // |polymorphic_invoke_| here because doing a normal assignment in the
- // derived Callback templates makes for much nicer compiler errors.
+ // initialization of the scoped_refptr.
explicit CallbackBase(BindStateBase* bind_state);
+ InvokeFuncStorage polymorphic_invoke() const {
+ return bind_state_->polymorphic_invoke_;
+ }
+
// Force the destructor to be instantiated inside this translation unit so
// that our subclasses will not get inlined versions. Avoids more template
// bloat.
~CallbackBase();
scoped_refptr<BindStateBase> bind_state_;
- InvokeFuncStorage polymorphic_invoke_ = nullptr;
};
// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index ce453a1075..a41736946a 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -14,7 +14,7 @@
namespace base {
-void NopInvokeFunc(internal::BindStateBase*) {}
+void NopInvokeFunc() {}
// White-box testpoints to inject into a Callback<> object for checking
// comparators and emptiness APIs. Use a BindState that is specialized
@@ -22,20 +22,26 @@ void NopInvokeFunc(internal::BindStateBase*) {}
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
struct FakeBindState1 : internal::BindStateBase {
- FakeBindState1() : BindStateBase(&Destroy) {}
+ FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
~FakeBindState1() {}
- static void Destroy(internal::BindStateBase* self) {
- delete static_cast<FakeBindState1*>(self);
+ static void Destroy(const internal::BindStateBase* self) {
+ delete static_cast<const FakeBindState1*>(self);
+ }
+ static bool IsCancelled(const internal::BindStateBase*) {
+ return false;
}
};
struct FakeBindState2 : internal::BindStateBase {
- FakeBindState2() : BindStateBase(&Destroy) {}
+ FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
~FakeBindState2() {}
- static void Destroy(internal::BindStateBase* self) {
- delete static_cast<FakeBindState2*>(self);
+ static void Destroy(const internal::BindStateBase* self) {
+ delete static_cast<const FakeBindState2*>(self);
+ }
+ static bool IsCancelled(const internal::BindStateBase*) {
+ return false;
}
};
@@ -44,8 +50,8 @@ namespace {
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1(), &NopInvokeFunc),
- callback_b_(new FakeBindState2(), &NopInvokeFunc) {
+ : callback_a_(new FakeBindState1()),
+ callback_b_(new FakeBindState2()) {
}
~CallbackTest() override {}
@@ -88,7 +94,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
+ Callback<void()> callback_c(new FakeBindState1());
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -109,6 +115,17 @@ TEST_F(CallbackTest, Reset) {
EXPECT_TRUE(callback_a_.Equals(null_callback_));
}
+TEST_F(CallbackTest, Move) {
+ // Moving should reset the callback.
+ ASSERT_FALSE(callback_a_.is_null());
+ ASSERT_FALSE(callback_a_.Equals(null_callback_));
+
+ auto tmp = std::move(callback_a_);
+
+ EXPECT_TRUE(callback_a_.is_null());
+ EXPECT_TRUE(callback_a_.Equals(null_callback_));
+}
+
struct TestForReentrancy {
TestForReentrancy()
: cb_already_run(false),
diff --git a/base/cancelable_callback.h b/base/cancelable_callback.h
index 0034fddccd..13cbd0c213 100644
--- a/base/cancelable_callback.h
+++ b/base/cancelable_callback.h
@@ -26,16 +26,18 @@
// to the message loop, the intensive test runs, the message loop is run,
// then the callback is cancelled.
//
+// RunLoop run_loop;
+//
// void TimeoutCallback(const std::string& timeout_message) {
// FAIL() << timeout_message;
-// MessageLoop::current()->QuitWhenIdle();
+// run_loop.QuitWhenIdle();
// }
//
// CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
-// MessageLoop::current()->PostDelayedTask(FROM_HERE, timeout.callback(),
-// 4000) // 4 seconds to run.
+// ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, timeout.callback(),
+// TimeDelta::FromSeconds(4));
// RunIntensiveTest();
-// MessageLoop::current()->Run();
+// run_loop.Run();
// timeout.Cancel(); // Hopefully this is hit before the timeout callback runs.
//
diff --git a/base/command_line.cc b/base/command_line.cc
index 099bb185a4..3033fcfc6e 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -149,7 +149,10 @@ string16 QuoteForCommandLineToArgvW(const string16& arg,
} // namespace
-CommandLine::CommandLine(NoProgram) : argv_(1), begin_args_(1) {}
+CommandLine::CommandLine(NoProgram /* no_program */)
+ : argv_(1),
+ begin_args_(1) {
+}
CommandLine::CommandLine(const FilePath& program)
: argv_(1),
@@ -452,9 +455,7 @@ CommandLine::StringType CommandLine::GetCommandLineStringInternal(
CommandLine::StringType CommandLine::GetArgumentsStringInternal(
bool quote_placeholders) const {
-#if !defined(OS_WIN)
- (void)quote_placeholders; // Avoid an unused warning.
-#endif
+ ALLOW_UNUSED_PARAM(quote_placeholders);
StringType params;
// Append switches and arguments.
bool parse_switches = true;
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index c2a02dee01..358a5c9ca3 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -85,6 +85,9 @@
// ALLOW_UNUSED_LOCAL(x);
#define ALLOW_UNUSED_LOCAL(x) false ? (void)x : (void)0
+// Used for Arc++, which is built with -Wno-unused-parameter.
+#define ALLOW_UNUSED_PARAM(x) false ? (void)x : (void)0
+
// Annotate a typedef or function indicating it's ok if it's not used.
// Use like:
// typedef Foo Bar ALLOW_UNUSED_TYPE;
@@ -105,6 +108,14 @@
#define NOINLINE
#endif
+#if COMPILER_GCC && defined(NDEBUG)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif COMPILER_MSVC && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
// Specify memory alignment for structs, classes, etc.
// Use like:
// class ALIGNAS(16) MyClass { ... }
@@ -154,6 +165,16 @@
// If available, it would look like:
// __attribute__((format(wprintf, format_param, dots_param)))
+// Sanitizers annotations.
+#if defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
// MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
#include <sanitizer/msan_interface.h>
@@ -174,6 +195,15 @@
#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
#endif // MEMORY_SANITIZER
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(OFFICIAL_BUILD)
+#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
+#else
+#define DISABLE_CFI_PERF
+#endif
+#endif
+
// Macro useful for writing cross-platform function pointers.
#if !defined(CDECL)
#if defined(OS_WIN)
@@ -192,6 +222,14 @@
#endif // defined(COMPILER_GCC)
#endif // !defined(UNLIKELY)
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define LIKELY(x) (x)
+#endif // defined(COMPILER_GCC)
+#endif // !defined(LIKELY)
+
// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#if defined(__has_feature)
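A short sketch of how the new macros are meant to combine, mirroring their use in bits.h above:

  // ALWAYS_INLINE forces inlining in NDEBUG builds; LIKELY biases codegen
  // toward the common branch on GCC/clang and compiles to a plain condition
  // elsewhere.
  ALWAYS_INLINE int SafeDiv(int num, int den) {
    if (LIKELY(den != 0))
      return num / den;
    return 0;
  }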
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 6c1d6260f5..4005489d4b 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -209,10 +209,12 @@ class MRUCacheBase {
// A container that does not do anything to free its data. Use this when storing
// value types (as opposed to pointers) in the list.
-template <class KeyType, class PayloadType>
-class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
+template <class KeyType,
+ class PayloadType,
+ class CompareType = std::less<KeyType>>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
private:
- using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
+ using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
public:
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
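The new CompareType parameter lets callers order the cache with something other than operator< on the key. A sketch with a hypothetical comparator:

  struct CaseInsensitiveLess {
    bool operator()(const std::string& a, const std::string& b) const;
  };

  // The third argument was previously hard-wired to std::less<KeyType>.
  base::MRUCache<std::string, int, CaseInsensitiveLess> cache(10);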
diff --git a/base/containers/scoped_ptr_hash_map.h b/base/containers/scoped_ptr_hash_map.h
deleted file mode 100644
index f513f06ac2..0000000000
--- a/base/containers/scoped_ptr_hash_map.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
-#define BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
-
-#include <stddef.h>
-
-#include <algorithm>
-#include <memory>
-#include <utility>
-
-#include "base/containers/hash_tables.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/stl_util.h"
-
-namespace base {
-
-// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
-//
-// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
-// base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
-// structure.
-template <typename Key, typename ScopedPtr>
-class ScopedPtrHashMap {
- typedef base::hash_map<Key, typename ScopedPtr::element_type*> Container;
-
- public:
- typedef typename Container::key_type key_type;
- typedef typename Container::mapped_type mapped_type;
- typedef typename Container::value_type value_type;
- typedef typename Container::iterator iterator;
- typedef typename Container::const_iterator const_iterator;
-
- ScopedPtrHashMap() {}
-
- ~ScopedPtrHashMap() { clear(); }
-
- void swap(ScopedPtrHashMap<Key, ScopedPtr>& other) {
- data_.swap(other.data_);
- }
-
- // Replaces value but not key if key is already present.
- iterator set(const Key& key, ScopedPtr data) {
- iterator it = find(key);
- if (it != end()) {
- // Let ScopedPtr decide how to delete. For example, it may use custom
- // deleter.
- ScopedPtr(it->second).reset();
- it->second = data.release();
- return it;
- }
-
- return data_.insert(std::make_pair(key, data.release())).first;
- }
-
- // Does nothing if key is already present
- std::pair<iterator, bool> add(const Key& key, ScopedPtr data) {
- std::pair<iterator, bool> result =
- data_.insert(std::make_pair(key, data.get()));
- if (result.second)
- ::ignore_result(data.release());
- return result;
- }
-
- void erase(iterator it) {
- // Let ScopedPtr decide how to delete.
- ScopedPtr(it->second).reset();
- data_.erase(it);
- }
-
- size_t erase(const Key& k) {
- iterator it = data_.find(k);
- if (it == data_.end())
- return 0;
- erase(it);
- return 1;
- }
-
- ScopedPtr take(iterator it) {
- DCHECK(it != data_.end());
- if (it == data_.end())
- return ScopedPtr();
-
- ScopedPtr ret(it->second);
- it->second = NULL;
- return ret;
- }
-
- ScopedPtr take(const Key& k) {
- iterator it = find(k);
- if (it == data_.end())
- return ScopedPtr();
-
- return take(it);
- }
-
- ScopedPtr take_and_erase(iterator it) {
- DCHECK(it != data_.end());
- if (it == data_.end())
- return ScopedPtr();
-
- ScopedPtr ret(it->second);
- data_.erase(it);
- return ret;
- }
-
- ScopedPtr take_and_erase(const Key& k) {
- iterator it = find(k);
- if (it == data_.end())
- return ScopedPtr();
-
- return take_and_erase(it);
- }
-
- // Returns the element in the hash_map that matches the given key.
- // If no such element exists it returns NULL.
- typename ScopedPtr::element_type* get(const Key& k) const {
- const_iterator it = find(k);
- if (it == end())
- return NULL;
- return it->second;
- }
-
- inline bool contains(const Key& k) const { return data_.count(k) > 0; }
-
- inline void clear() {
- auto it = data_.begin();
- while (it != data_.end()) {
- // NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
- // Deleting the value does not always invalidate the iterator, but it may
- // do so if the key is a pointer into the value object.
- auto temp = it;
- ++it;
- // Let ScopedPtr decide how to delete.
- ScopedPtr(temp->second).reset();
- }
- data_.clear();
- }
-
- inline const_iterator find(const Key& k) const { return data_.find(k); }
- inline iterator find(const Key& k) { return data_.find(k); }
-
- inline size_t count(const Key& k) const { return data_.count(k); }
- inline std::pair<const_iterator, const_iterator> equal_range(
- const Key& k) const {
- return data_.equal_range(k);
- }
- inline std::pair<iterator, iterator> equal_range(const Key& k) {
- return data_.equal_range(k);
- }
-
- inline size_t size() const { return data_.size(); }
- inline size_t max_size() const { return data_.max_size(); }
-
- inline bool empty() const { return data_.empty(); }
-
- inline size_t bucket_count() const { return data_.bucket_count(); }
- inline void resize(size_t size) { return data_.resize(size); }
-
- inline iterator begin() { return data_.begin(); }
- inline const_iterator begin() const { return data_.begin(); }
- inline iterator end() { return data_.end(); }
- inline const_iterator end() const { return data_.end(); }
-
- private:
- Container data_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedPtrHashMap);
-};
-
-} // namespace base
-
-#endif // BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
index 82ed6c5473..2945d58769 100644
--- a/base/containers/small_map.h
+++ b/base/containers/small_map.h
@@ -32,7 +32,7 @@ namespace base {
//
// - If you only ever keep a couple of items and have very simple usage,
// consider whether a using a vector and brute-force searching it will be
-// the most efficient. It's not a lot of generated code (less then a
+// the most efficient. It's not a lot of generated code (less than a
// red-black tree if your key is "weird" and not eliminated as duplicate of
// something else) and will probably be faster and do fewer heap allocations
// than std::map if you have just a couple of items.
@@ -510,8 +510,8 @@ class SmallMap {
size_ = 0;
}
- // Invalidates iterators.
- void erase(const iterator& position) {
+ // Invalidates iterators. Returns the iterator following the last removed element.
+ iterator erase(const iterator& position) {
if (size_ >= 0) {
int i = position.array_iter_ - array_;
array_[i].Destroy();
@@ -519,10 +519,11 @@ class SmallMap {
if (i != size_) {
array_[i].InitFromMove(std::move(array_[size_]));
array_[size_].Destroy();
+ return iterator(array_ + i);
}
- } else {
- map_->erase(position.hash_iter_);
+ return end();
}
+ return iterator(map_->erase(position.hash_iter_));
}
size_t erase(const key_type& key) {
@@ -574,17 +575,13 @@ class SmallMap {
// We want to call constructors and destructors manually, but we don't
// want to allocate and deallocate the memory used for them separately.
- // So, we use this crazy ManualConstructor class.
+ // So, we use this crazy ManualConstructor class. Since C++11 it's possible
+ // to use objects in unions like this, but the ManualConstructor syntax is
+ // a bit better and doesn't have limitations on object type.
//
// Since array_ and map_ are mutually exclusive, we'll put them in a
- // union, too. We add in a dummy_ value which quiets MSVC from otherwise
- // giving an erroneous "union member has copy constructor" error message
- // (C2621). This dummy member has to come before array_ to quiet the
- // compiler.
- //
- // TODO(brettw) remove this and use C++11 unions when we require C++11.
+ // union.
union {
- ManualConstructor<value_type> dummy_;
ManualConstructor<value_type> array_[kArraySize];
ManualConstructor<NormalMap> map_;
};
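With erase() now returning the follower, the standard erase-while-iterating idiom works on SmallMap in both its array and map states. A sketch:

  base::SmallMap<std::map<int, int>> m;
  for (auto it = m.begin(); it != m.end();) {
    if (it->second == 0)
      it = m.erase(it);  // Returns the iterator after the removed element.
    else
      ++it;
  }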
diff --git a/base/cpu.cc b/base/cpu.cc
index de4a001f7f..848208f7c1 100644
--- a/base/cpu.cc
+++ b/base/cpu.cc
@@ -16,7 +16,6 @@
#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
#include "base/files/file_util.h"
-#include "base/lazy_instance.h"
#endif
#if defined(ARCH_CPU_X86_FAMILY)
@@ -43,6 +42,7 @@ CPU::CPU()
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
+ has_popcnt_(false),
has_avx_(false),
has_avx2_(false),
has_aesni_(false),
@@ -59,23 +59,22 @@ namespace {
#if defined(__pic__) && defined(__i386__)
void __cpuid(int cpu_info[4], int info_type) {
- __asm__ volatile (
- "mov %%ebx, %%edi\n"
- "cpuid\n"
- "xchg %%edi, %%ebx\n"
- : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
+ __asm__ volatile(
+ "mov %%ebx, %%edi\n"
+ "cpuid\n"
+ "xchg %%edi, %%ebx\n"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
+ "=d"(cpu_info[3])
+ : "a"(info_type), "c"(0));
}
#else
void __cpuid(int cpu_info[4], int info_type) {
- __asm__ volatile (
- "cpuid\n"
- : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
+ __asm__ volatile("cpuid\n"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+ "=d"(cpu_info[3])
+ : "a"(info_type), "c"(0));
}
#endif
@@ -94,9 +93,8 @@ uint64_t _xgetbv(uint32_t xcr) {
#endif // ARCH_CPU_X86_FAMILY
#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
-class LazyCpuInfoValue {
- public:
- LazyCpuInfoValue() {
+std::string* CpuInfoBrand() {
+ static std::string* brand = []() {
// This function finds the value from /proc/cpuinfo under the key "model
// name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
// and later for arm64) and is shown once per CPU. "Processor" is used in
@@ -109,30 +107,23 @@ class LazyCpuInfoValue {
ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
DCHECK(!contents.empty());
if (contents.empty()) {
- return;
+ return new std::string();
}
std::istringstream iss(contents);
std::string line;
while (std::getline(iss, line)) {
- if (brand_.empty() &&
- (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
+ if ((line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
- brand_.assign(line.substr(strlen(kModelNamePrefix)));
+ return new std::string(line.substr(strlen(kModelNamePrefix)));
}
}
- }
-
- const std::string& brand() const { return brand_; }
- private:
- std::string brand_;
- DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
-};
-
-base::LazyInstance<LazyCpuInfoValue>::Leaky g_lazy_cpuinfo =
- LAZY_INSTANCE_INITIALIZER;
+ return new std::string();
+ }();
+ return brand;
+}
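+
+// (The function-local static above is a C++11 "magic static": its lambda
+// initializer runs exactly once, thread-safely, replacing the LazyInstance
+// machinery removed here. The std::string is heap-allocated and never freed,
+// matching the old Leaky behavior.)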
#endif // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
// defined(OS_LINUX))
@@ -177,6 +168,8 @@ void CPU::Initialize() {
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
+
// AVX instructions will generate an illegal instruction exception unless
// a) they are supported by the CPU,
// b) XSAVE is supported by the CPU and
@@ -219,7 +212,7 @@ void CPU::Initialize() {
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
- cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
+ cpu_brand_.assign(*CpuInfoBrand());
#endif
}
diff --git a/base/cpu.h b/base/cpu.h
index 0e4303bfa0..0e24df61dd 100644
--- a/base/cpu.h
+++ b/base/cpu.h
@@ -46,6 +46,7 @@ class BASE_EXPORT CPU {
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
+ bool has_popcnt() const { return has_popcnt_; }
bool has_avx() const { return has_avx_; }
bool has_avx2() const { return has_avx2_; }
bool has_aesni() const { return has_aesni_; }
@@ -74,6 +75,7 @@ class BASE_EXPORT CPU {
bool has_ssse3_;
bool has_sse41_;
bool has_sse42_;
+ bool has_popcnt_;
bool has_avx_;
bool has_avx2_;
bool has_aesni_;
diff --git a/base/cpu_unittest.cc b/base/cpu_unittest.cc
index ec14620f98..9cabfd6998 100644
--- a/base/cpu_unittest.cc
+++ b/base/cpu_unittest.cc
@@ -57,6 +57,11 @@ TEST(CPU, RunExtendedInstructions) {
__asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
}
+ if (cpu.has_popcnt()) {
+ // Execute a POPCNT instruction.
+ __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
+ }
+
if (cpu.has_avx()) {
// Execute an AVX instruction.
__asm__ __volatile__("vzeroupper\n" : : : "xmm0");
@@ -100,6 +105,11 @@ TEST(CPU, RunExtendedInstructions) {
__asm crc32 eax, eax;
}
+ if (cpu.has_popcnt()) {
+ // Execute a POPCNT instruction.
+ __asm popcnt eax, eax;
+ }
+
// Visual C 2012 required for AVX.
#if _MSC_VER >= 1700
if (cpu.has_avx()) {
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 6ebd7afa50..1b10cde7ce 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -25,21 +25,15 @@ bool IsMultiTaskingSupported();
// This class wraps a closure so it can continue to run for a period of time
// when the application goes to the background by using
// |ios::ScopedCriticalAction|.
-template <typename R>
class CriticalClosure {
public:
- explicit CriticalClosure(const Callback<R(void)>& closure)
- : closure_(closure) {}
-
- ~CriticalClosure() {}
-
- R Run() {
- return closure_.Run();
- }
+ explicit CriticalClosure(const Closure& closure);
+ ~CriticalClosure();
+ void Run();
private:
ios::ScopedCriticalAction critical_action_;
- Callback<R(void)> closure_;
+ Closure closure_;
DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
};
@@ -47,8 +41,7 @@ class CriticalClosure {
} // namespace internal
-// Returns a closure (which may return a result, but must not require any extra
-// arguments) that will continue to run for a period of time when the
+// Returns a closure that will continue to run for a period of time when the
// application goes to the background if possible on platforms where
// applications don't execute while backgrounded, otherwise the original task is
// returned.
@@ -62,15 +55,13 @@ class CriticalClosure {
// background running time, |MakeCriticalClosure| should be applied to them
// before posting.
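+// A hypothetical call site (sketch; |task_runner| and |cleanup_closure| are
+// placeholders, not names from this file):
+//   task_runner->PostTask(FROM_HERE, MakeCriticalClosure(cleanup_closure));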
#if defined(OS_IOS)
-template <typename R>
-Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
+inline Closure MakeCriticalClosure(const Closure& closure) {
DCHECK(internal::IsMultiTaskingSupported());
- return base::Bind(&internal::CriticalClosure<R>::Run,
- Owned(new internal::CriticalClosure<R>(closure)));
+ return base::Bind(&internal::CriticalClosure::Run,
+ Owned(new internal::CriticalClosure(closure)));
}
#else // defined(OS_IOS)
-template <typename R>
-inline Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
+inline Closure MakeCriticalClosure(const Closure& closure) {
// No-op for platforms where the application does not need to acquire
// background time for closures to finish when it goes into the background.
return closure;
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
new file mode 100644
index 0000000000..40e9b9537c
--- /dev/null
+++ b/base/debug/activity_tracker.cc
@@ -0,0 +1,1389 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_tracker.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/debug/stack_trace.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/pending_task.h"
+#include "base/pickle.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// A number that identifies the memory as having been initialized. It's
+// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
+// A version number is added on so that major structure changes won't try to
+// read an older version (since the cookie won't match).
+const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
+
+// The minimum depth a stack should support.
+const int kMinStackDepth = 2;
+
+// The amount of memory set aside for holding arbitrary user data (key/value
+// pairs) globally or associated with ActivityData entries.
+const size_t kUserDataSize = 1 << 10; // 1 KiB
+const size_t kGlobalDataSize = 16 << 10; // 16 KiB
+const size_t kMaxUserDataNameLength =
+ static_cast<size_t>(std::numeric_limits<uint8_t>::max());
+
+// A constant used to indicate that module information is changing.
+const uint32_t kModuleInformationChanging = 0x80000000;
+
+union ThreadRef {
+ int64_t as_id;
+#if defined(OS_WIN)
+ // On Windows, the handle itself is often a pseudo-handle with a common
+ // value meaning "this thread" and so the thread-id is used. The former
+ // can be converted to a thread-id with a system call.
+ PlatformThreadId as_tid;
+#elif defined(OS_POSIX)
+ // On Posix, the handle is always a unique identifier so no conversion
+  // needs to be done. However, its value is officially opaque so there
+ // is no one correct way to convert it to a numerical identifier.
+ PlatformThreadHandle::Handle as_handle;
+#endif
+};
+
+// Determines the previous aligned index.
+size_t RoundDownToAlignment(size_t index, size_t alignment) {
+ return index & (0 - alignment);
+}
+
+// Determines the next aligned index.
+size_t RoundUpToAlignment(size_t index, size_t alignment) {
+ return (index + (alignment - 1)) & (0 - alignment);
+}
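+
+// (Worked example: alignments here are powers of two, so 0 - alignment is
+// the mask ~(alignment - 1); e.g. RoundDownToAlignment(13, 8) == 8 and
+// RoundUpToAlignment(13, 8) == 16.)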
+
+} // namespace
+
+
+// It doesn't matter what this object contains (though it will be all zeros)
+// as only its address is important.
+const ActivityData kNullActivityData = {};
+
+ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
+ ThreadRef thread_ref;
+  thread_ref.as_id = 0;  // Zero the union in case the other member is smaller.
+#if defined(OS_WIN)
+ thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
+#elif defined(OS_POSIX)
+ thread_ref.as_handle = handle.platform_handle();
+#endif
+ return ForThread(thread_ref.as_id);
+}
+
+ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
+ PersistentMemoryAllocator* allocator,
+ uint32_t object_type,
+ uint32_t object_free_type,
+ size_t object_size,
+ size_t cache_size,
+ bool make_iterable)
+ : allocator_(allocator),
+ object_type_(object_type),
+ object_free_type_(object_free_type),
+ object_size_(object_size),
+ cache_size_(cache_size),
+ make_iterable_(make_iterable),
+ iterator_(allocator),
+ cache_values_(new Reference[cache_size]),
+ cache_used_(0) {
+ DCHECK(allocator);
+}
+
+ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
+
+ActivityTrackerMemoryAllocator::Reference
+ActivityTrackerMemoryAllocator::GetObjectReference() {
+ // First see if there is a cached value that can be returned. This is much
+ // faster than searching the memory system for free blocks.
+ while (cache_used_ > 0) {
+ Reference cached = cache_values_[--cache_used_];
+ // Change the type of the cached object to the proper type and return it.
+ // If the type-change fails that means another thread has taken this from
+ // under us (via the search below) so ignore it and keep trying. Don't
+ // clear the memory because that was done when the type was made "free".
+ if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
+ return cached;
+ }
+
+ // Fetch the next "free" object from persistent memory. Rather than restart
+ // the iterator at the head each time and likely waste time going again
+ // through objects that aren't relevant, the iterator continues from where
+ // it last left off and is only reset when the end is reached. If the
+ // returned reference matches |last|, then it has wrapped without finding
+ // anything.
+ const Reference last = iterator_.GetLast();
+ while (true) {
+ uint32_t type;
+ Reference found = iterator_.GetNext(&type);
+ if (found && type == object_free_type_) {
+ // Found a free object. Change it to the proper type and return it. If
+ // the type-change fails that means another thread has taken this from
+ // under us so ignore it and keep trying.
+ if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
+ return found;
+ }
+ if (found == last) {
+ // Wrapped. No desired object was found.
+ break;
+ }
+ if (!found) {
+ // Reached end; start over at the beginning.
+ iterator_.Reset();
+ }
+ }
+
+ // No free block was found so instead allocate a new one.
+ Reference allocated = allocator_->Allocate(object_size_, object_type_);
+ if (allocated && make_iterable_)
+ allocator_->MakeIterable(allocated);
+ return allocated;
+}
+
+void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
+ // Mark object as free.
+ bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
+ /*clear=*/true);
+ DCHECK(success);
+
+ // Add this reference to our "free" cache if there is space. If not, the type
+ // has still been changed to indicate that it is free so this (or another)
+ // thread can find it, albeit more slowly, using the iteration method above.
+ if (cache_used_ < cache_size_)
+ cache_values_[cache_used_++] = ref;
+}
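+
+// (Together, GetObjectReference() and ReleaseObjectReference() form a simple
+// free list over the persistent memory: freed blocks are retyped to
+// |object_free_type_| and cached, and allocation retypes a free block back
+// via ChangeType(), whose compare-and-swap semantics resolve races with
+// other threads that find the same block through iteration.)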
+
+// static
+void Activity::FillFrom(Activity* activity,
+ const void* program_counter,
+ const void* origin,
+ Type type,
+ const ActivityData& data) {
+ activity->time_internal = base::TimeTicks::Now().ToInternalValue();
+ activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
+ activity->origin_address = reinterpret_cast<uintptr_t>(origin);
+ activity->activity_type = type;
+ activity->data = data;
+
+#if defined(SYZYASAN)
+ // Create a stacktrace from the current location and get the addresses.
+ StackTrace stack_trace;
+ size_t stack_depth;
+ const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
+ // Copy the stack addresses, ignoring the first one (here).
+ size_t i;
+ for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
+ activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
+ }
+ activity->call_stack[i - 1] = 0;
+#else
+ activity->call_stack[0] = 0;
+#endif
+}
+
+ActivityUserData::TypedValue::TypedValue() {}
+ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
+ActivityUserData::TypedValue::~TypedValue() {}
+
+StringPiece ActivityUserData::TypedValue::Get() const {
+ DCHECK_EQ(RAW_VALUE, type_);
+ return long_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetString() const {
+ DCHECK_EQ(STRING_VALUE, type_);
+ return long_value_;
+}
+
+bool ActivityUserData::TypedValue::GetBool() const {
+ DCHECK_EQ(BOOL_VALUE, type_);
+ return short_value_ != 0;
+}
+
+char ActivityUserData::TypedValue::GetChar() const {
+ DCHECK_EQ(CHAR_VALUE, type_);
+ return static_cast<char>(short_value_);
+}
+
+int64_t ActivityUserData::TypedValue::GetInt() const {
+ DCHECK_EQ(SIGNED_VALUE, type_);
+ return static_cast<int64_t>(short_value_);
+}
+
+uint64_t ActivityUserData::TypedValue::GetUint() const {
+ DCHECK_EQ(UNSIGNED_VALUE, type_);
+ return static_cast<uint64_t>(short_value_);
+}
+
+StringPiece ActivityUserData::TypedValue::GetReference() const {
+ DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
+ return ref_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetStringReference() const {
+ DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
+ return ref_value_;
+}
+
+ActivityUserData::ValueInfo::ValueInfo() {}
+ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
+ActivityUserData::ValueInfo::~ValueInfo() {}
+
+StaticAtomicSequenceNumber ActivityUserData::next_id_;
+
+ActivityUserData::ActivityUserData(void* memory, size_t size)
+ : memory_(reinterpret_cast<char*>(memory)),
+ available_(RoundDownToAlignment(size, kMemoryAlignment)),
+ id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ DCHECK_LT(kMemoryAlignment, available_);
+ if (id_->load(std::memory_order_relaxed) == 0) {
+ // Generate a new ID and store it in the first 32-bit word of memory_.
+ // |id_| must be non-zero for non-sink instances.
+ uint32_t id;
+ while ((id = next_id_.GetNext()) == 0)
+ ;
+ id_->store(id, std::memory_order_relaxed);
+ DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
+ }
+ memory_ += kMemoryAlignment;
+ available_ -= kMemoryAlignment;
+
+ // If there is already data present, load that. This allows the same class
+ // to be used for analysis through snapshots.
+ ImportExistingData();
+}
+
+ActivityUserData::~ActivityUserData() {}
+
+void ActivityUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
+ DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
+ size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
+ size);
+
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ // The storage of a name is limited so use that limit during lookup.
+ if (name.length() > kMaxUserDataNameLength)
+ name.set(name.data(), kMaxUserDataNameLength);
+
+ ValueInfo* info;
+ auto existing = values_.find(name);
+ if (existing != values_.end()) {
+ info = &existing->second;
+ } else {
+ // The name size is limited to what can be held in a single byte but
+    // because there are no alignment constraints on strings, it's set tight
+ // against the header. Its extent (the reserved space, even if it's not
+ // all used) is calculated so that, when pressed against the header, the
+ // following field will be aligned properly.
+ size_t name_size = name.length();
+ size_t name_extent =
+ RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
+ sizeof(Header);
+ size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
+
+ // The "base size" is the size of the header and (padded) string key. Stop
+ // now if there's not room enough for even this.
+ size_t base_size = sizeof(Header) + name_extent;
+ if (base_size > available_)
+ return;
+
+ // The "full size" is the size for storing the entire value.
+ size_t full_size = std::min(base_size + value_extent, available_);
+
+ // If the value is actually a single byte, see if it can be stuffed at the
+ // end of the name extent rather than wasting kMemoryAlignment bytes.
+ if (size == 1 && name_extent > name_size) {
+ full_size = base_size;
+ --name_extent;
+ --base_size;
+ }
+
+ // Truncate the stored size to the amount of available memory. Stop now if
+ // there's not any room for even part of the value.
+ if (size != 0) {
+ size = std::min(full_size - base_size, size);
+ if (size == 0)
+ return;
+ }
+
+ // Allocate a chunk of memory.
+ Header* header = reinterpret_cast<Header*>(memory_);
+ memory_ += full_size;
+ available_ -= full_size;
+
+ // Datafill the header and name records. Memory must be zeroed. The |type|
+ // is written last, atomically, to release all the other values.
+ DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
+ header->name_size = static_cast<uint8_t>(name_size);
+ header->record_size = full_size;
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+ void* value_memory =
+ reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+ memcpy(name_memory, name.data(), name_size);
+ header->type.store(type, std::memory_order_release);
+
+ // Create an entry in |values_| so that this field can be found and changed
+ // later on without having to allocate new entries.
+ StringPiece persistent_name(name_memory, name_size);
+ auto inserted =
+ values_.insert(std::make_pair(persistent_name, ValueInfo()));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ info = &inserted.first->second;
+ info->name = persistent_name;
+ info->memory = value_memory;
+ info->size_ptr = &header->value_size;
+ info->extent = full_size - sizeof(Header) - name_extent;
+ info->type = type;
+ }
+
+ // Copy the value data to storage. The |size| is written last, atomically, to
+ // release the copied data. Until then, a parallel reader will just ignore
+ // records with a zero size.
+ DCHECK_EQ(type, info->type);
+ size = std::min(size, info->extent);
+ info->size_ptr->store(0, std::memory_order_seq_cst);
+ memcpy(info->memory, memory, size);
+ info->size_ptr->store(size, std::memory_order_release);
+}
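+
+// (Sketch of the record layout Set() produces:
+//   [Header][name bytes..pad][value bytes...............pad]
+// The name is packed tight against the header; the value starts at the next
+// kMemoryAlignment boundary, except that a single-byte value may be tucked
+// into the last byte of the name padding.)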
+
+void ActivityUserData::SetReference(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
+ ReferenceRecord rec;
+ rec.address = reinterpret_cast<uintptr_t>(memory);
+ rec.size = size;
+ Set(name, type, &rec, sizeof(rec));
+}
+
+void ActivityUserData::ImportExistingData() const {
+ while (available_ > sizeof(Header)) {
+ Header* header = reinterpret_cast<Header*>(memory_);
+ ValueType type =
+ static_cast<ValueType>(header->type.load(std::memory_order_acquire));
+ if (type == END_OF_VALUES)
+ return;
+ if (header->record_size > available_)
+ return;
+
+ size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
+ kMemoryAlignment);
+ if (header->record_size == value_offset &&
+ header->value_size.load(std::memory_order_relaxed) == 1) {
+ value_offset -= 1;
+ }
+ if (value_offset + header->value_size > header->record_size)
+ return;
+
+ ValueInfo info;
+ info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+ info.type = type;
+ info.memory = memory_ + value_offset;
+ info.size_ptr = &header->value_size;
+ info.extent = header->record_size - value_offset;
+
+ StringPiece key(info.name);
+ values_.insert(std::make_pair(key, std::move(info)));
+
+ memory_ += header->record_size;
+ available_ -= header->record_size;
+ }
+}
+
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+ DCHECK(output_snapshot->empty());
+
+ // Find any new data that may have been added by an active instance of this
+ // class that is adding records.
+ ImportExistingData();
+
+ for (const auto& entry : values_) {
+ TypedValue value;
+ value.type_ = entry.second.type;
+ DCHECK_GE(entry.second.extent,
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+
+ switch (entry.second.type) {
+ case RAW_VALUE:
+ case STRING_VALUE:
+ value.long_value_ =
+ std::string(reinterpret_cast<char*>(entry.second.memory),
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+ break;
+ case RAW_VALUE_REFERENCE:
+ case STRING_VALUE_REFERENCE: {
+ ReferenceRecord* ref =
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+ value.ref_value_ = StringPiece(
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+ static_cast<size_t>(ref->size));
+ } break;
+ case BOOL_VALUE:
+ case CHAR_VALUE:
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+ break;
+ case SIGNED_VALUE:
+ case UNSIGNED_VALUE:
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+ break;
+ case END_OF_VALUES: // Included for completeness purposes.
+ NOTREACHED();
+ }
+ auto inserted = output_snapshot->insert(
+ std::make_pair(entry.second.name.as_string(), std::move(value)));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ }
+
+ return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() {
+ // The |memory_| pointer advances as elements are written but the |id_|
+ // value is always at the start of the block so just return that.
+ return id_;
+}
+
+// This information is kept for every thread that is tracked. It is filled
+// the very first time the thread is seen. All fields must be of exact sizes
+// so there is no issue moving between 32 and 64-bit builds.
+struct ThreadActivityTracker::Header {
+ // Defined in .h for analyzer access. Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId =
+ GlobalActivityTracker::kTypeIdActivityTracker;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 80;
+
+ // This unique number indicates a valid initialization of the memory.
+ std::atomic<uint32_t> cookie;
+
+ // The number of Activity slots (spaces that can hold an Activity) that
+ // immediately follow this structure in memory.
+ uint32_t stack_slots;
+
+ // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
+ // These identifiers are not guaranteed to mean anything but are unique, in
+ // combination, among all active trackers. It would be nice to always have
+ // the process_id be a 64-bit value but the necessity of having it atomic
+ // (for the memory barriers it provides) limits it to the natural word size
+ // of the machine.
+#ifdef ARCH_CPU_64_BITS
+ std::atomic<int64_t> process_id;
+#else
+ std::atomic<int32_t> process_id;
+ int32_t process_id_padding;
+#endif
+ ThreadRef thread_ref;
+
+ // The start-time and start-ticks when the data was created. Each activity
+ // record has a |time_internal| value that can be converted to a "wall time"
+ // with these two values.
+ int64_t start_time;
+ int64_t start_ticks;
+
+ // The current depth of the stack. This may be greater than the number of
+ // slots. If the depth exceeds the number of slots, the newest entries
+ // won't be recorded.
+ std::atomic<uint32_t> current_depth;
+
+ // A memory location used to indicate if changes have been made to the stack
+ // that would invalidate an in-progress read of its contents. The active
+ // tracker will zero the value whenever something gets popped from the
+ // stack. A monitoring tracker can write a non-zero value here, copy the
+ // stack contents, and read the value to know, if it is still non-zero, that
+ // the contents didn't change while being copied. This can handle concurrent
+ // snapshot operations only if each snapshot writes a different bit (which
+ // is not the current implementation so no parallel snapshots allowed).
+ std::atomic<uint32_t> stack_unchanged;
+
+ // The name of the thread (up to a maximum length). Dynamic-length names
+ // are not practical since the memory has to come from the same persistent
+ // allocator that holds this structure and to which this object has no
+ // reference.
+ char thread_name[32];
+};
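+
+// (Arithmetic behind kExpectedInstanceSize above: cookie(4) + stack_slots(4)
+// + process_id(8, atomic or atomic-plus-padding) + thread_ref(8) +
+// start_time(8) + start_ticks(8) + current_depth(4) + stack_unchanged(4) +
+// thread_name(32) == 80 bytes on both 32-bit and 64-bit builds.)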
+
+ThreadActivityTracker::Snapshot::Snapshot() {}
+ThreadActivityTracker::Snapshot::~Snapshot() {}
+
+ThreadActivityTracker::ScopedActivity::ScopedActivity(
+ ThreadActivityTracker* tracker,
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data)
+ : tracker_(tracker) {
+ if (tracker_)
+ activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
+}
+
+ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
+ if (tracker_)
+ tracker_->PopActivity(activity_id_);
+}
+
+void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
+ Activity::Type type,
+ const ActivityData& data) {
+ if (tracker_)
+ tracker_->ChangeActivity(activity_id_, type, data);
+}
+
+ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
+ : header_(static_cast<Header*>(base)),
+ stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
+ sizeof(Header))),
+ stack_slots_(
+ static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Verify the parameters but fail gracefully if they're not valid so that
+ // production code based on external inputs will not crash. IsValid() will
+ // return false in this case.
+ if (!base ||
+ // Ensure there is enough space for the header and at least a few records.
+ size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
+ // Ensure that the |stack_slots_| calculation didn't overflow.
+ (size - sizeof(Header)) / sizeof(Activity) >
+ std::numeric_limits<uint32_t>::max()) {
+ NOTREACHED();
+ return;
+ }
+
+ // Ensure that the thread reference doesn't exceed the size of the ID number.
+ // This won't compile at the global scope because Header is a private struct.
+ static_assert(
+ sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
+ "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
+
+  // Ensure that Activity.data is aligned to a 64-bit boundary so there are
+  // no interoperability issues across CPU architectures.
+ static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
+ "ActivityData.data is not 64-bit aligned");
+
+ // Provided memory should either be completely initialized or all zeros.
+ if (header_->cookie.load(std::memory_order_relaxed) == 0) {
+ // This is a new file. Double-check other fields and then initialize.
+ DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header_->thread_ref.as_id);
+ DCHECK_EQ(0, header_->start_time);
+ DCHECK_EQ(0, header_->start_ticks);
+ DCHECK_EQ(0U, header_->stack_slots);
+ DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, stack_[0].time_internal);
+ DCHECK_EQ(0U, stack_[0].origin_address);
+ DCHECK_EQ(0U, stack_[0].call_stack[0]);
+ DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
+
+#if defined(OS_WIN)
+ header_->thread_ref.as_tid = PlatformThread::CurrentId();
+#elif defined(OS_POSIX)
+ header_->thread_ref.as_handle =
+ PlatformThread::CurrentHandle().platform_handle();
+#endif
+ header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
+
+ header_->start_time = base::Time::Now().ToInternalValue();
+ header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
+ header_->stack_slots = stack_slots_;
+ strlcpy(header_->thread_name, PlatformThread::GetName(),
+ sizeof(header_->thread_name));
+
+ // This is done last so as to guarantee that everything above is "released"
+ // by the time this value gets written.
+ header_->cookie.store(kHeaderCookie, std::memory_order_release);
+
+ valid_ = true;
+ DCHECK(IsValid());
+ } else {
+ // This is a file with existing data. Perform basic consistency checks.
+ valid_ = true;
+ valid_ = IsValid();
+ }
+}
+
+ThreadActivityTracker::~ThreadActivityTracker() {}
+
+ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
+ thread_checker_.CalledOnValidThread());
+
+ // Get the current depth of the stack. No access to other memory guarded
+ // by this variable is done here so a "relaxed" load is acceptable.
+ uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
+
+ // Handle the case where the stack depth has exceeded the storage capacity.
+ // Extra entries will be lost leaving only the base of the stack.
+ if (depth >= stack_slots_) {
+ // Since no other threads modify the data, no compare/exchange is needed.
+ // Since no other memory is being modified, a "relaxed" store is acceptable.
+ header_->current_depth.store(depth + 1, std::memory_order_relaxed);
+ return depth;
+ }
+
+ // Get a pointer to the next activity and load it. No atomicity is required
+ // here because the memory is known only to this thread. It will be made
+ // known to other threads once the depth is incremented.
+ Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
+
+ // Save the incremented depth. Because this guards |activity| memory filled
+ // above that may be read by another thread once the recorded depth changes,
+ // a "release" store is required.
+ header_->current_depth.store(depth + 1, std::memory_order_release);
+
+ // The current depth is used as the activity ID because it simply identifies
+ // an entry. Once an entry is pop'd, it's okay to reuse the ID.
+ return depth;
+}
+
+void ThreadActivityTracker::ChangeActivity(ActivityId id,
+ Activity::Type type,
+ const ActivityData& data) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
+ DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
+
+ // Update the information if it is being recorded (i.e. within slot limit).
+ if (id < stack_slots_) {
+ Activity* activity = &stack_[id];
+
+ if (type != Activity::ACT_NULL) {
+ DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
+ type & Activity::ACT_CATEGORY_MASK);
+ activity->activity_type = type;
+ }
+
+ if (&data != &kNullActivityData)
+ activity->data = data;
+ }
+}
+
+void ThreadActivityTracker::PopActivity(ActivityId id) {
+ // Do an atomic decrement of the depth. No changes to stack entries guarded
+ // by this variable are done here so a "relaxed" operation is acceptable.
+ // |depth| will receive the value BEFORE it was modified which means the
+ // return value must also be decremented. The slot will be "free" after
+ // this call but since only a single thread can access this object, the
+ // data will remain valid until this method returns or calls outside.
+ uint32_t depth =
+ header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
+
+ // Validate that everything is running correctly.
+ DCHECK_EQ(id, depth);
+
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
+ thread_checker_.CalledOnValidThread());
+
+ // The stack has shrunk meaning that some other thread trying to copy the
+ // contents for reporting purposes could get bad data. That thread would
+ // have written a non-zero value into |stack_unchanged|; clearing it here
+ // will let that thread detect that something did change. This needs to
+ // happen after the atomic |depth| operation above so a "release" store
+ // is required.
+ header_->stack_unchanged.store(0, std::memory_order_release);
+}
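+
+// (Reader side of the |stack_unchanged| protocol: CreateSnapshot() below
+// stores 1, copies the stack, then re-reads the flag; the store of 0 in
+// PopActivity() above is what invalidates an in-flight copy and forces a
+// retry.)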
+
+std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator) {
+ // User-data is only stored for activities actually held in the stack.
+ if (id < stack_slots_) {
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>(nullptr, 0);
+ }
+
+ // Get (or reuse) a block of memory and create a real UserData object
+ // on it.
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+ void* memory =
+ allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ stack_[id].user_data_ref = ref;
+ stack_[id].user_data_id = user_data->id();
+ return user_data;
+ }
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>(nullptr, 0);
+}
+
+bool ThreadActivityTracker::HasUserData(ActivityId id) {
+ // User-data is only stored for activities actually held in the stack.
+ return (id < stack_slots_ && stack_[id].user_data_ref);
+}
+
+void ThreadActivityTracker::ReleaseUserData(
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator) {
+ // User-data is only stored for activities actually held in the stack.
+ if (id < stack_slots_ && stack_[id].user_data_ref) {
+ allocator->ReleaseObjectReference(stack_[id].user_data_ref);
+ stack_[id].user_data_ref = 0;
+ }
+}
+
+bool ThreadActivityTracker::IsValid() const {
+ if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
+ header_->process_id.load(std::memory_order_relaxed) == 0 ||
+ header_->thread_ref.as_id == 0 ||
+ header_->start_time == 0 ||
+ header_->start_ticks == 0 ||
+ header_->stack_slots != stack_slots_ ||
+ header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
+ return false;
+ }
+
+ return valid_;
+}
+
+bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+
+ // There is no "called on valid thread" check for this method as it can be
+ // called from other threads or even other processes. It is also the reason
+ // why atomic operations must be used in certain places above.
+
+ // It's possible for the data to change while reading it in such a way that it
+ // invalidates the read. Make several attempts but don't try forever.
+ const int kMaxAttempts = 10;
+ uint32_t depth;
+
+ // Stop here if the data isn't valid.
+ if (!IsValid())
+ return false;
+
+ // Allocate the maximum size for the stack so it doesn't have to be done
+ // during the time-sensitive snapshot operation. It is shrunk once the
+ // actual size is known.
+ output_snapshot->activity_stack.reserve(stack_slots_);
+
+ for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
+ // Remember the process and thread IDs to ensure they aren't replaced
+ // during the snapshot operation. Use "acquire" to ensure that all the
+ // non-atomic fields of the structure are valid (at least at the current
+ // moment in time).
+ const int64_t starting_process_id =
+ header_->process_id.load(std::memory_order_acquire);
+ const int64_t starting_thread_id = header_->thread_ref.as_id;
+
+ // Write a non-zero value to |stack_unchanged| so it's possible to detect
+ // at the end that nothing has changed since copying the data began. A
+ // "cst" operation is required to ensure it occurs before everything else.
+ // Using "cst" memory ordering is relatively expensive but this is only
+ // done during analysis so doesn't directly affect the worker threads.
+ header_->stack_unchanged.store(1, std::memory_order_seq_cst);
+
+ // Fetching the current depth also "acquires" the contents of the stack.
+ depth = header_->current_depth.load(std::memory_order_acquire);
+ uint32_t count = std::min(depth, stack_slots_);
+ output_snapshot->activity_stack.resize(count);
+ if (count > 0) {
+ // Copy the existing contents. Memcpy is used for speed.
+ memcpy(&output_snapshot->activity_stack[0], stack_,
+ count * sizeof(Activity));
+ }
+
+ // Retry if something changed during the copy. A "cst" operation ensures
+ // it must happen after all the above operations.
+ if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
+ continue;
+
+    // Stack copied. Record its full depth.
+ output_snapshot->activity_stack_depth = depth;
+
+ // TODO(bcwhite): Snapshot other things here.
+
+ // Get the general thread information. Loading of "process_id" is guaranteed
+ // to be last so that it's possible to detect below if any content has
+    // changed while reading it. It's technically possible for a thread to
+    // end, have its data cleared, a new thread to be created with the same
+    // IDs, and for it to perform an action that starts tracking, all in the
+    // time since the ID reads above, but the chance is so unlikely that it's
+    // not worth the effort and complexity of protecting against it (perhaps
+    // with an "unchanged" field like the one used for the stack).
+ output_snapshot->thread_name =
+ std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
+ output_snapshot->thread_id = header_->thread_ref.as_id;
+ output_snapshot->process_id =
+ header_->process_id.load(std::memory_order_seq_cst);
+
+ // All characters of the thread-name buffer were copied so as to not break
+ // if the trailing NUL were missing. Now limit the length if the actual
+ // name is shorter.
+ output_snapshot->thread_name.resize(
+ strlen(output_snapshot->thread_name.c_str()));
+
+ // If the process or thread ID has changed then the tracker has exited and
+ // the memory reused by a new one. Try again.
+ if (output_snapshot->process_id != starting_process_id ||
+ output_snapshot->thread_id != starting_thread_id) {
+ continue;
+ }
+
+ // Only successful if the data is still valid once everything is done since
+ // it's possible for the thread to end somewhere in the middle and all its
+ // values become garbage.
+ if (!IsValid())
+ return false;
+
+ // Change all the timestamps in the activities from "ticks" to "wall" time.
+ const Time start_time = Time::FromInternalValue(header_->start_time);
+ const int64_t start_ticks = header_->start_ticks;
+ for (Activity& activity : output_snapshot->activity_stack) {
+ activity.time_internal =
+ (start_time +
+ TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
+ .ToInternalValue();
+ }
+
+ // Success!
+ return true;
+ }
+
+ // Too many attempts.
+ return false;
+}
+
+// static
+size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
+ return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
+}
+
+// The instantiation of the GlobalActivityTracker object.
+// The object held here will obviously not be destructed at process exit
+// but that's best since PersistentMemoryAllocator objects (that underlie
+// GlobalActivityTracker objects) are explicitly forbidden from doing anything
+// essential at exit anyway due to the fact that they depend on data managed
+// elsewhere and which could be destructed first. An AtomicWord is used instead
+// of std::atomic because the latter can create global ctors and dtors.
+subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
+
+GlobalActivityTracker::ModuleInfo::ModuleInfo() {}
+GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
+GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
+GlobalActivityTracker::ModuleInfo::~ModuleInfo() {}
+
+GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
+ ModuleInfo&& rhs) = default;
+GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
+ const ModuleInfo& rhs) = default;
+
+GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() {}
+GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() {}
+
+bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
+ GlobalActivityTracker::ModuleInfo* info,
+ size_t record_size) const {
+ // Get the current "changes" indicator, acquiring all the other values.
+ uint32_t current_changes = changes.load(std::memory_order_acquire);
+
+ // Copy out the dynamic information.
+ info->is_loaded = loaded != 0;
+ info->address = static_cast<uintptr_t>(address);
+ info->load_time = load_time;
+
+ // Check to make sure no information changed while being read. A "seq-cst"
+ // operation is expensive but is only done during analysis and it's the only
+ // way to ensure this occurs after all the accesses above. If changes did
+ // occur then return a "not loaded" result so that |size| and |address|
+ // aren't expected to be accurate.
+ if ((current_changes & kModuleInformationChanging) != 0 ||
+ changes.load(std::memory_order_seq_cst) != current_changes) {
+ info->is_loaded = false;
+ }
+
+ // Copy out the static information. These never change so don't have to be
+ // protected by the atomic |current_changes| operations.
+ info->size = static_cast<size_t>(size);
+ info->timestamp = timestamp;
+ info->age = age;
+ memcpy(info->identifier, identifier, sizeof(info->identifier));
+
+ if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
+ return false;
+ Pickle pickler(pickle, pickle_size);
+ PickleIterator iter(pickler);
+ return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
+}
+
+bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
+ const GlobalActivityTracker::ModuleInfo& info,
+ size_t record_size) {
+ Pickle pickler;
+ bool okay =
+ pickler.WriteString(info.file) && pickler.WriteString(info.debug_file);
+ if (!okay) {
+ NOTREACHED();
+ return false;
+ }
+ if (offsetof(ModuleInfoRecord, pickle) + pickler.size() > record_size) {
+ NOTREACHED();
+ return false;
+ }
+
+  // These fields never change and are set before the record is made
+  // iterable, so no thread protection is necessary.
+ size = info.size;
+ timestamp = info.timestamp;
+ age = info.age;
+ memcpy(identifier, info.identifier, sizeof(identifier));
+ memcpy(pickle, pickler.data(), pickler.size());
+ pickle_size = pickler.size();
+ changes.store(0, std::memory_order_relaxed);
+
+ // Now set those fields that can change.
+ return UpdateFrom(info);
+}
+
+bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
+ const GlobalActivityTracker::ModuleInfo& info) {
+ // Updates can occur after the record is made visible so make changes atomic.
+ // A "strong" exchange ensures no false failures.
+ uint32_t old_changes = changes.load(std::memory_order_relaxed);
+ uint32_t new_changes = old_changes | kModuleInformationChanging;
+ if ((old_changes & kModuleInformationChanging) != 0 ||
+ !changes.compare_exchange_strong(old_changes, new_changes,
+ std::memory_order_acquire,
+ std::memory_order_acquire)) {
+ NOTREACHED() << "Multiple sources are updating module information.";
+ return false;
+ }
+
+ loaded = info.is_loaded ? 1 : 0;
+ address = info.address;
+ load_time = Time::Now().ToInternalValue();
+
+ bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ DCHECK(success);
+ return true;
+}
+
+// static
+size_t GlobalActivityTracker::ModuleInfoRecord::EncodedSize(
+ const GlobalActivityTracker::ModuleInfo& info) {
+ PickleSizer sizer;
+ sizer.AddString(info.file);
+ sizer.AddString(info.debug_file);
+
+ return offsetof(ModuleInfoRecord, pickle) + sizeof(Pickle::Header) +
+ sizer.payload_size();
+}
+
+GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data,
+ bool lock_allowed)
+ : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
+ program_counter,
+ origin,
+ type,
+ data) {}
+
+GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
+ if (tracker_ && tracker_->HasUserData(activity_id_)) {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ AutoLock lock(global->user_data_allocator_lock_);
+ tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
+ }
+}
+
+ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
+ if (!user_data_) {
+ if (tracker_) {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ AutoLock lock(global->user_data_allocator_lock_);
+ user_data_ =
+ tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
+ } else {
+ user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ }
+ }
+ return *user_data_;
+}
+
+GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
+ : ActivityUserData(memory, size) {}
+
+GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
+
+void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
+ AutoLock lock(data_lock_);
+ ActivityUserData::Set(name, type, memory, size);
+}
+
+GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
+ PersistentMemoryAllocator::Reference mem_reference,
+ void* base,
+ size_t size)
+ : ThreadActivityTracker(base, size),
+ mem_reference_(mem_reference),
+ mem_base_(base) {}
+
+GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
+ // The global |g_tracker_| must point to the owner of this class since all
+ // objects of this type must be destructed before |g_tracker_| can be changed
+ // (something that only occurs in tests).
+ DCHECK(g_tracker_);
+ GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
+}
+
+void GlobalActivityTracker::CreateWithAllocator(
+ std::unique_ptr<PersistentMemoryAllocator> allocator,
+ int stack_depth) {
+ // There's no need to do anything with the result. It is self-managing.
+ GlobalActivityTracker* global_tracker =
+ new GlobalActivityTracker(std::move(allocator), stack_depth);
+ // Create a tracker for this thread since it is known.
+ global_tracker->CreateTrackerForCurrentThread();
+}
+
+#if !defined(OS_NACL)
+// static
+void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth) {
+ DCHECK(!file_path.empty());
+ DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
+
+ // Create and map the file into memory and make it globally available.
+ std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
+ bool success =
+ mapped_file->Initialize(File(file_path,
+ File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
+ File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
+ {0, static_cast<int64_t>(size)},
+ MemoryMappedFile::READ_WRITE_EXTEND);
+ DCHECK(success);
+ CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
+ std::move(mapped_file), size, id, name, false),
+ stack_depth);
+}
+#endif // !defined(OS_NACL)
+
+// static
+void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth) {
+ CreateWithAllocator(
+ MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
+}
+
+ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
+ DCHECK(!this_thread_tracker_.Get());
+
+ PersistentMemoryAllocator::Reference mem_reference;
+
+ {
+ base::AutoLock autolock(thread_tracker_allocator_lock_);
+ mem_reference = thread_tracker_allocator_.GetObjectReference();
+ }
+
+ if (!mem_reference) {
+ // Failure. This shouldn't happen. But be graceful if it does, probably
+    // because the underlying allocator wasn't given enough memory to
+    // satisfy all possible requests.
+ NOTREACHED();
+ // Report the thread-count at which the allocator was full so that the
+ // failure can be seen and underlying memory resized appropriately.
+ UMA_HISTOGRAM_COUNTS_1000(
+ "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
+ thread_tracker_count_.load(std::memory_order_relaxed));
+ // Return null, just as if tracking wasn't enabled.
+ return nullptr;
+ }
+
+ // Convert the memory block found above into an actual memory address.
+ // Doing the conversion as a Header object enacts the 32/64-bit size
+ // consistency checks which would not otherwise be done. Unfortunately,
+ // some older compilers and MSVC don't have standard-conforming definitions
+ // of std::atomic which cause it not to be plain-old-data. Don't check on
+ // those platforms assuming that the checks on other platforms will be
+ // sufficient.
+ // TODO(bcwhite): Review this after major compiler releases.
+ DCHECK(mem_reference);
+ void* mem_base;
+ mem_base =
+ allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
+
+ DCHECK(mem_base);
+ DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
+
+ // Create a tracker with the acquired memory and set it as the tracker
+ // for this particular thread in thread-local-storage.
+ ManagedActivityTracker* tracker =
+ new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
+ DCHECK(tracker->IsValid());
+ this_thread_tracker_.Set(tracker);
+ int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
+
+ UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count",
+ old_count + 1, kMaxThreadCount);
+ return tracker;
+}
+
+void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
+ ThreadActivityTracker* tracker =
+ reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
+ if (tracker)
+ delete tracker;
+}
+
+void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
+ // Allocate at least one extra byte so the string is NUL terminated. All
+ // memory returned by the allocator is guaranteed to be zeroed.
+ PersistentMemoryAllocator::Reference ref =
+ allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
+ char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
+ message.size() + 1);
+ if (memory) {
+ memcpy(memory, message.data(), message.size());
+ allocator_->MakeIterable(ref);
+ }
+}
+
+void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
+ AutoLock lock(modules_lock_);
+ auto found = modules_.find(info.file);
+ if (found != modules_.end()) {
+ ModuleInfoRecord* record = found->second;
+ DCHECK(record);
+
+    // Update the basic state of module information that has already been
+    // recorded. It is assumed that the string information (identifier,
+    // version, etc.) remains unchanged, which means there's no need
+    // to create a new record to accommodate a possibly longer length.
+ record->UpdateFrom(info);
+ return;
+ }
+
+ size_t required_size = ModuleInfoRecord::EncodedSize(info);
+ ModuleInfoRecord* record = allocator_->New<ModuleInfoRecord>(required_size);
+ if (!record)
+ return;
+
+ bool success = record->EncodeFrom(info, required_size);
+ DCHECK(success);
+ allocator_->MakeIterable(record);
+ modules_.insert(std::make_pair(info.file, record));
+}
+
+void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
+ StringPiece group_name) {
+ const std::string key = std::string("FieldTrial.") + trial_name;
+ global_data_.SetString(key, group_name);
+}
+
+GlobalActivityTracker::GlobalActivityTracker(
+ std::unique_ptr<PersistentMemoryAllocator> allocator,
+ int stack_depth)
+ : allocator_(std::move(allocator)),
+ stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
+ this_thread_tracker_(&OnTLSDestroy),
+ thread_tracker_count_(0),
+ thread_tracker_allocator_(allocator_.get(),
+ kTypeIdActivityTracker,
+ kTypeIdActivityTrackerFree,
+ stack_memory_size_,
+ kCachedThreadMemories,
+ /*make_iterable=*/true),
+ user_data_allocator_(allocator_.get(),
+ kTypeIdUserDataRecord,
+ kTypeIdUserDataRecordFree,
+ kUserDataSize,
+ kCachedUserDataMemories,
+ /*make_iterable=*/false),
+ global_data_(
+ allocator_->GetAsArray<char>(
+ allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
+ kTypeIdGlobalDataRecord,
+ PersistentMemoryAllocator::kSizeAny),
+ kGlobalDataSize) {
+ // Ensure the passed memory is valid and empty (iterator finds nothing).
+ uint32_t type;
+ DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
+
+ // Ensure that there is no other global object and then make this one such.
+ DCHECK(!g_tracker_);
+ subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
+
+ // The global records must be iterable in order to be found by an analyzer.
+ allocator_->MakeIterable(allocator_->GetAsReference(
+ global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
+
+ // Fetch and record all activated field trials.
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ for (auto& group : active_groups)
+ RecordFieldTrial(group.trial_name, group.group_name);
+}
+
+GlobalActivityTracker::~GlobalActivityTracker() {
+ DCHECK_EQ(Get(), this);
+ DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
+ subtle::Release_Store(&g_tracker_, 0);
+}
+
+void GlobalActivityTracker::ReturnTrackerMemory(
+ ManagedActivityTracker* tracker) {
+ PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
+ void* mem_base = tracker->mem_base_;
+ DCHECK(mem_reference);
+ DCHECK(mem_base);
+
+ // Remove the destructed tracker from the set of known ones.
+ DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
+ thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
+
+ // Release this memory for re-use at a later time.
+ base::AutoLock autolock(thread_tracker_allocator_lock_);
+ thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
+}
+
+// static
+void GlobalActivityTracker::OnTLSDestroy(void* value) {
+ delete reinterpret_cast<ManagedActivityTracker*>(value);
+}
+
+ScopedActivity::ScopedActivity(const void* program_counter,
+ uint8_t action,
+ uint32_t id,
+ int32_t info)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ nullptr,
+ static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+ ActivityData::ForGeneric(id, info),
+ /*lock_allowed=*/true),
+ id_(id) {
+ // The action must not affect the category bits of the activity type.
+ DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+}
+
+void ScopedActivity::ChangeAction(uint8_t action) {
+ DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+ ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+ kNullActivityData);
+}
+
+void ScopedActivity::ChangeInfo(int32_t info) {
+ ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
+}
+
+void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
+ DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+ ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+ ActivityData::ForGeneric(id_, info));
+}
+
+ScopedTaskRunActivity::ScopedTaskRunActivity(
+ const void* program_counter,
+ const base::PendingTask& task)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ task.posted_from.program_counter(),
+ Activity::ACT_TASK_RUN,
+ ActivityData::ForTask(task.sequence_num),
+ /*lock_allowed=*/true) {}
+
+ScopedLockAcquireActivity::ScopedLockAcquireActivity(
+ const void* program_counter,
+ const base::internal::LockImpl* lock)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ nullptr,
+ Activity::ACT_LOCK_ACQUIRE,
+ ActivityData::ForLock(lock),
+ /*lock_allowed=*/false) {}
+
+ScopedEventWaitActivity::ScopedEventWaitActivity(
+ const void* program_counter,
+ const base::WaitableEvent* event)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ nullptr,
+ Activity::ACT_EVENT_WAIT,
+ ActivityData::ForEvent(event),
+ /*lock_allowed=*/true) {}
+
+ScopedThreadJoinActivity::ScopedThreadJoinActivity(
+ const void* program_counter,
+ const base::PlatformThreadHandle* thread)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ nullptr,
+ Activity::ACT_THREAD_JOIN,
+ ActivityData::ForThread(*thread),
+ /*lock_allowed=*/true) {}
+
+#if !defined(OS_NACL) && !defined(OS_IOS)
+ScopedProcessWaitActivity::ScopedProcessWaitActivity(
+ const void* program_counter,
+ const base::Process* process)
+ : GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
+ nullptr,
+ Activity::ACT_PROCESS_WAIT,
+ ActivityData::ForProcess(process->Pid()),
+ /*lock_allowed=*/true) {}
+#endif
+
+} // namespace debug
+} // namespace base
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
new file mode 100644
index 0000000000..719a31865c
--- /dev/null
+++ b/base/debug/activity_tracker.h
@@ -0,0 +1,1102 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Activity tracking provides a low-overhead method of collecting information
+// about the state of the application for analysis both while it is running
+// and after it has terminated unexpectedly. Its primary purpose is to help
+// locate reasons the browser becomes unresponsive by providing insight into
+// what all the various threads and processes are (or were) doing.
+
+#ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_
+#define BASE_DEBUG_ACTIVITY_TRACKER_H_
+
+// std::atomic is undesired for global variables due to performance issues,
+// but there are no such uses here. This module uses the
+// PersistentMemoryAllocator, which also uses std::atomic and is written
+// by the same author.
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+
+struct PendingTask;
+
+class FilePath;
+class Lock;
+class PlatformThreadHandle;
+class Process;
+class StaticAtomicSequenceNumber;
+class WaitableEvent;
+
+namespace debug {
+
+class ThreadActivityTracker;
+
+
+enum : int {
+ // The maximum number of call-stack addresses stored per activity. This
+ // cannot be changed without also changing the version number of the
+ // structure. See kTypeIdActivityTracker in GlobalActivityTracker.
+ kActivityCallStackSize = 10,
+};
+
+// The data associated with an activity is dependent upon the activity type.
+// This union defines all of the various fields. All fields must be explicitly
+// sized types to ensure no interoperability problems between 32-bit and
+// 64-bit systems.
+union ActivityData {
+ // Generic activities don't have any defined structure.
+ struct {
+ uint32_t id; // An arbitrary identifier used for association.
+ int32_t info; // An arbitrary value used for information purposes.
+ } generic;
+ struct {
+ uint64_t sequence_id; // The sequence identifier of the posted task.
+ } task;
+ struct {
+ uint64_t lock_address; // The memory address of the lock object.
+ } lock;
+ struct {
+ uint64_t event_address; // The memory address of the event object.
+ } event;
+ struct {
+ int64_t thread_id; // A unique identifier for a thread within a process.
+ } thread;
+ struct {
+ int64_t process_id; // A unique identifier for a process.
+ } process;
+
+ // These methods create an ActivityData object from the appropriate
+ // parameters. Objects of this type should always be created this way to
+ // ensure that no fields remain unpopulated should the set of recorded
+ // fields change. They're defined inline where practical because they
+ // reduce to loading a small local structure with a few values, roughly
+ // the same as loading all those values into parameters.
+
+ static ActivityData ForGeneric(uint32_t id, int32_t info) {
+ ActivityData data;
+ data.generic.id = id;
+ data.generic.info = info;
+ return data;
+ }
+
+ static ActivityData ForTask(uint64_t sequence) {
+ ActivityData data;
+ data.task.sequence_id = sequence;
+ return data;
+ }
+
+ static ActivityData ForLock(const void* lock) {
+ ActivityData data;
+ data.lock.lock_address = reinterpret_cast<uintptr_t>(lock);
+ return data;
+ }
+
+ static ActivityData ForEvent(const void* event) {
+ ActivityData data;
+ data.event.event_address = reinterpret_cast<uintptr_t>(event);
+ return data;
+ }
+
+ static ActivityData ForThread(const PlatformThreadHandle& handle);
+ static ActivityData ForThread(const int64_t id) {
+ ActivityData data;
+ data.thread.thread_id = id;
+ return data;
+ }
+
+ static ActivityData ForProcess(const int64_t id) {
+ ActivityData data;
+ data.process.process_id = id;
+ return data;
+ }
+};
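+
+// For illustration only (not part of the API): constructing the data for a
+// generic activity, assuming an arbitrary id/info pair:
+//
+//   ActivityData data = ActivityData::ForGeneric(/*id=*/0x12345678,
+//                                                /*info=*/42);
+//   // data.generic.id == 0x12345678, data.generic.info == 42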
+
+// A "null" activity-data that can be passed to indicate "do not change".
+extern const ActivityData kNullActivityData;
+
+
+// A helper class that is used for managing memory allocations within a
+// persistent memory allocator. Instances of this class are NOT thread-safe.
+// Use from a single thread or protect access with a lock.
+class BASE_EXPORT ActivityTrackerMemoryAllocator {
+ public:
+ using Reference = PersistentMemoryAllocator::Reference;
+
+ // Creates an instance for allocating objects of a fixed |object_type|, a
+ // corresponding |object_free_type|, and the |object_size|. An internal
+ // cache of the last |cache_size| released references will be kept for
+ // quick future fetches. If |make_iterable| then allocated objects will
+ // be marked "iterable" in the allocator.
+ ActivityTrackerMemoryAllocator(PersistentMemoryAllocator* allocator,
+ uint32_t object_type,
+ uint32_t object_free_type,
+ size_t object_size,
+ size_t cache_size,
+ bool make_iterable);
+ ~ActivityTrackerMemoryAllocator();
+
+ // Gets a reference to an object of the configured type. This can return
+ // a null reference if it was not possible to allocate the memory.
+ Reference GetObjectReference();
+
+ // Returns an object to the "free" pool.
+ void ReleaseObjectReference(Reference ref);
+
+ // Helper function to access an object allocated using this instance.
+ template <typename T>
+ T* GetAsObject(Reference ref) {
+ return allocator_->GetAsObject<T>(ref);
+ }
+
+ // Similar to GetAsObject() but converts references to arrays of objects.
+ template <typename T>
+ T* GetAsArray(Reference ref, size_t count) {
+ return allocator_->GetAsArray<T>(ref, object_type_, count);
+ }
+
+ // The current "used size" of the internal cache, visible for testing.
+ size_t cache_used() const { return cache_used_; }
+
+ private:
+ PersistentMemoryAllocator* const allocator_;
+ const uint32_t object_type_;
+ const uint32_t object_free_type_;
+ const size_t object_size_;
+ const size_t cache_size_;
+ const bool make_iterable_;
+
+ // An iterator for going through persistent memory looking for freed objects.
+ PersistentMemoryAllocator::Iterator iterator_;
+
+ // The cache of released object memories.
+ std::unique_ptr<Reference[]> cache_values_;
+ size_t cache_used_;
+
+ DISALLOW_COPY_AND_ASSIGN(ActivityTrackerMemoryAllocator);
+};
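+
+// A minimal usage sketch; |allocator| is assumed to be an existing
+// PersistentMemoryAllocator, and the MyObject type and its type IDs are
+// hypothetical, caller-defined values:
+//
+//   ActivityTrackerMemoryAllocator my_allocator(
+//       allocator, kMyObjectType, kMyObjectFreeType, sizeof(MyObject),
+//       /*cache_size=*/10, /*make_iterable=*/false);
+//   ActivityTrackerMemoryAllocator::Reference ref =
+//       my_allocator.GetObjectReference();
+//   if (ref) {
+//     MyObject* object = my_allocator.GetAsObject<MyObject>(ref);
+//     // ... use |object| ...
+//     my_allocator.ReleaseObjectReference(ref);
+//   }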
+
+
+// This structure is the full contents recorded for every activity pushed
+// onto the stack. The |activity_type| indicates what is actually stored in
+// the |data| field. All fields must be explicitly sized types to ensure no
+// interoperability problems between 32-bit and 64-bit systems.
+struct Activity {
+ // SHA1(base::debug::Activity): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x99425159 + 1;
+ // Expected size for 32/64-bit check. Update this if structure changes!
+ static constexpr size_t kExpectedInstanceSize =
+ 48 + 8 * kActivityCallStackSize;
+
+ // The type of an activity on the stack. Activities are broken into
+ // categories with the category ID taking the top 4 bits and the lower
+ // bits representing an action within that category. This combination
+ // makes it easy to "switch" based on the type during analysis.
+ enum Type : uint8_t {
+ // This "null" constant is used to indicate "do not change" in calls.
+ ACT_NULL = 0,
+
+ // Task activities involve callbacks posted to a thread or thread-pool
+ // using the PostTask() method or any of its friends.
+ ACT_TASK = 1 << 4,
+ ACT_TASK_RUN = ACT_TASK,
+
+ // Lock activities involve the acquisition of "mutex" locks.
+ ACT_LOCK = 2 << 4,
+ ACT_LOCK_ACQUIRE = ACT_LOCK,
+ ACT_LOCK_RELEASE,
+
+ // Event activities involve operations on a WaitableEvent.
+ ACT_EVENT = 3 << 4,
+ ACT_EVENT_WAIT = ACT_EVENT,
+ ACT_EVENT_SIGNAL,
+
+ // Thread activities involve the life management of threads.
+ ACT_THREAD = 4 << 4,
+ ACT_THREAD_START = ACT_THREAD,
+ ACT_THREAD_JOIN,
+
+ // Process activities involve the life management of processes.
+ ACT_PROCESS = 5 << 4,
+ ACT_PROCESS_START = ACT_PROCESS,
+ ACT_PROCESS_WAIT,
+
+ // Generic activities are user defined and can be anything.
+ ACT_GENERIC = 15 << 4,
+
+ // These constants can be used to separate the category and action from
+ // a combined activity type.
+ ACT_CATEGORY_MASK = 0xF << 4,
+ ACT_ACTION_MASK = 0xF
+ };
+
+ // Internal representation of time. During collection, this is in "ticks"
+ // but when returned in a snapshot, it is "wall time".
+ int64_t time_internal;
+
+ // The address that pushed the activity onto the stack as a raw number.
+ uint64_t calling_address;
+
+ // The address that is the origin of the activity if it is not obvious from
+ // the call stack. This is useful for things like tasks that are posted
+ // from a completely different thread though most activities will leave
+ // it null.
+ uint64_t origin_address;
+
+ // Array of program-counters that make up the top of the call stack.
+ // Despite the fixed size, this list is always null-terminated. Entries
+ // after the terminator have no meaning and may or may not also be null.
+ // The list will be completely empty if call-stack collection is not
+ // enabled.
+ uint64_t call_stack[kActivityCallStackSize];
+
+ // Reference to arbitrary user data within the persistent memory segment
+ // and a unique identifier for it.
+ uint32_t user_data_ref;
+ uint32_t user_data_id;
+
+ // The (enumerated) type of the activity. This defines what fields of the
+ // |data| record are valid.
+ uint8_t activity_type;
+
+ // Padding to ensure that the next member begins on a 64-bit boundary
+ // even on 32-bit builds which ensures inter-operability between CPU
+ // architectures. New fields can be taken from this space.
+ uint8_t padding[7];
+
+ // Information specific to the |activity_type|.
+ ActivityData data;
+
+ static void FillFrom(Activity* activity,
+ const void* program_counter,
+ const void* origin,
+ Type type,
+ const ActivityData& data);
+};
+
+// This class manages arbitrary user data that can be associated with activities
+// done by a thread by supporting key/value pairs of any type. This can provide
+// additional information during debugging. It is also used to store arbitrary
+// global data. All updates must be done from the same thread.
+class BASE_EXPORT ActivityUserData {
+ public:
+ // List of known value types. REFERENCE types must immediately follow the non-
+ // external types.
+ enum ValueType : uint8_t {
+ END_OF_VALUES = 0,
+ RAW_VALUE,
+ RAW_VALUE_REFERENCE,
+ STRING_VALUE,
+ STRING_VALUE_REFERENCE,
+ CHAR_VALUE,
+ BOOL_VALUE,
+ SIGNED_VALUE,
+ UNSIGNED_VALUE,
+ };
+
+ class BASE_EXPORT TypedValue {
+ public:
+ TypedValue();
+ TypedValue(const TypedValue& other);
+ ~TypedValue();
+
+ ValueType type() const { return type_; }
+
+ // These methods return the extracted value in the correct format.
+ StringPiece Get() const;
+ StringPiece GetString() const;
+ bool GetBool() const;
+ char GetChar() const;
+ int64_t GetInt() const;
+ uint64_t GetUint() const;
+
+ // These methods return references to process memory as originally provided
+ // to corresponding Set calls. USE WITH CAUTION! There is no guarantee that
+ // the referenced memory is accessible or useful. It's possible that:
+ // - the memory was freed and reallocated for a different purpose
+ // - the memory has been released back to the OS
+ // - the memory belongs to a different process's address space
+ // Dereferencing the returned StringPiece when the memory is not accessible
+ // will cause the program to SEGV!
+ StringPiece GetReference() const;
+ StringPiece GetStringReference() const;
+
+ private:
+ friend class ActivityUserData;
+
+ ValueType type_;
+ uint64_t short_value_; // Used to hold copy of numbers, etc.
+ std::string long_value_; // Used to hold copy of raw/string data.
+ StringPiece ref_value_; // Used to hold reference to external data.
+ };
+
+ using Snapshot = std::map<std::string, TypedValue>;
+
+ ActivityUserData(void* memory, size_t size);
+ virtual ~ActivityUserData();
+
+ // Gets the unique ID number for this user data. If this changes then the
+ // contents have been overwritten by another thread. The return value is
+ // always non-zero unless it's actually just a data "sink".
+ uint32_t id() const {
+ return memory_ ? id_->load(std::memory_order_relaxed) : 0;
+ }
+
+ // Writes a |value| (as part of a key/value pair) that will be included with
+ // the activity in any reports. The same |name| can be written multiple times
+ // with each successive call overwriting the previously stored |value|. For
+ // raw and string values, the maximum size of successive writes is limited by
+ // the first call. The length of "name" is limited to 255 characters.
+ //
+ // This information is stored on a "best effort" basis. It may be dropped if
+ // the memory buffer is full or the associated activity is beyond the maximum
+ // recording depth.
+ void Set(StringPiece name, const void* memory, size_t size) {
+ Set(name, RAW_VALUE, memory, size);
+ }
+ void SetString(StringPiece name, StringPiece value) {
+ Set(name, STRING_VALUE, value.data(), value.length());
+ }
+ void SetString(StringPiece name, StringPiece16 value) {
+ SetString(name, UTF16ToUTF8(value));
+ }
+ void SetBool(StringPiece name, bool value) {
+ char cvalue = value ? 1 : 0;
+ Set(name, BOOL_VALUE, &cvalue, sizeof(cvalue));
+ }
+ void SetChar(StringPiece name, char value) {
+ Set(name, CHAR_VALUE, &value, sizeof(value));
+ }
+ void SetInt(StringPiece name, int64_t value) {
+ Set(name, SIGNED_VALUE, &value, sizeof(value));
+ }
+ void SetUint(StringPiece name, uint64_t value) {
+ Set(name, UNSIGNED_VALUE, &value, sizeof(value));
+ }
+
+ // These function as above but don't actually copy the data into the
+ // persistent memory. They store unaltered pointers along with a size. These
+ // can be used in conjunction with a memory dump to find certain large pieces
+ // of information.
+ void SetReference(StringPiece name, const void* memory, size_t size) {
+ SetReference(name, RAW_VALUE_REFERENCE, memory, size);
+ }
+ void SetStringReference(StringPiece name, StringPiece value) {
+ SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
+ }
+
+ // Creates a snapshot of the key/value pairs contained within. The returned
+ // data will be fixed, independent of whatever changes afterward. There is
+ // protection against concurrent modification of the values but no protection
+ // against a complete overwrite of the contents; the caller must ensure that
+ // the memory segment is not going to be re-initialized while this runs.
+ bool CreateSnapshot(Snapshot* output_snapshot) const;
+
+ // Gets the base memory address used for storing data.
+ const void* GetBaseAddress();
+
+ protected:
+ virtual void Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
+
+ enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
+
+ // A structure used to reference data held outside of persistent memory.
+ struct ReferenceRecord {
+ uint64_t address;
+ uint64_t size;
+ };
+
+ // Header to a key/value record held in persistent memory.
+ struct Header {
+ std::atomic<uint8_t> type; // Encoded ValueType
+ uint8_t name_size; // Length of "name" key.
+ std::atomic<uint16_t> value_size; // Actual size of the stored value.
+ uint16_t record_size; // Total storage of name, value, header.
+ };
+
+ // This record is used to hold known values in a map so that they can be
+ // found and overwritten later.
+ struct ValueInfo {
+ ValueInfo();
+ ValueInfo(ValueInfo&&);
+ ~ValueInfo();
+
+ StringPiece name; // The "key" of the record.
+ ValueType type; // The type of the value.
+ void* memory; // Where the "value" is held.
+ std::atomic<uint16_t>* size_ptr; // Address of the actual size of value.
+ size_t extent; // The total storage of the value,
+ }; // typically rounded up for alignment.
+
+ void SetReference(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size);
+
+ // Loads any data already in the memory segment. This allows for accessing
+ // records created previously.
+ void ImportExistingData() const;
+
+ // A map of all the values within the memory block, keyed by name for quick
+ // updates of the values. This is "mutable" because it changes on "const"
+ // objects even when the actual data values can't change.
+ mutable std::map<StringPiece, ValueInfo> values_;
+
+ // Information about the memory block in which new data can be stored. These
+ // are "mutable" because they change even on "const" objects that are just
+ // skipping already set values.
+ mutable char* memory_;
+ mutable size_t available_;
+
+ // A pointer to the unique ID for this instance.
+ std::atomic<uint32_t>* const id_;
+
+ // This ID is used to create unique identifiers for user data so that it's
+ // possible to tell if the information has been overwritten.
+ static StaticAtomicSequenceNumber next_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
+};
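+
+// A usage sketch, assuming |memory| points to a caller-provided buffer of
+// |size| bytes (all updates must come from a single thread):
+//
+//   ActivityUserData data(memory, size);
+//   data.SetInt("pid", 1234);
+//   data.SetString("state", "running");
+//
+//   ActivityUserData::Snapshot snapshot;
+//   if (data.CreateSnapshot(&snapshot))
+//     int64_t pid = snapshot["pid"].GetInt();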
+
+// This class manages tracking a stack of activities for a single thread in
+// a persistent manner, implementing a bounded-size stack in a fixed-size
+// memory allocation. In order to support an operational mode where another
+// thread is analyzing this data in real-time, atomic operations are used
+// where necessary to guarantee a consistent view from the outside.
+//
+// This class is not generally used directly but instead managed by the
+// GlobalActivityTracker instance and updated using Scoped*Activity local
+// objects.
+class BASE_EXPORT ThreadActivityTracker {
+ public:
+ using ActivityId = uint32_t;
+
+ // This structure contains all the common information about the thread so
+ // it doesn't have to be repeated in every entry on the stack. It is defined
+ // and used completely within the .cc file.
+ struct Header;
+
+ // This structure holds a copy of all the internal data at the moment the
+ // "snapshot" operation is done. It is disconnected from the live tracker
+ // so that continued operation of the thread will not cause changes here.
+ struct BASE_EXPORT Snapshot {
+ // Explicit constructor/destructor are needed because of complex types
+ // with non-trivial default constructors and destructors.
+ Snapshot();
+ ~Snapshot();
+
+ // The name of the thread as set when it was created. The name may be
+ // truncated due to internal length limitations.
+ std::string thread_name;
+
+ // The process and thread IDs. These values have no meaning other than
+ // they uniquely identify a running process and a running thread within
+ // that process. Thread-IDs can be re-used across different processes
+ // and both can be re-used after the process/thread exits.
+ int64_t process_id = 0;
+ int64_t thread_id = 0;
+
+ // The current stack of activities that are underway for this thread. It
+ // is limited in its maximum size with later entries being left off.
+ std::vector<Activity> activity_stack;
+
+ // The current total depth of the activity stack, including those later
+ // entries not recorded in the |activity_stack| vector.
+ uint32_t activity_stack_depth = 0;
+ };
+
+ // This is the base class for having the compiler manage an activity on the
+ // tracker's stack. It does nothing but call methods on the passed |tracker|
+ // if it is not null, making it safe (and cheap) to create these objects
+ // even if activity tracking is not enabled.
+ class BASE_EXPORT ScopedActivity {
+ public:
+ ScopedActivity(ThreadActivityTracker* tracker,
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+ ~ScopedActivity();
+
+ // Changes some basic metadata about the activity.
+ void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
+
+ protected:
+ // The thread tracker to which this object reports. It can be null if
+ // activity tracking is not (yet) enabled.
+ ThreadActivityTracker* const tracker_;
+
+ // An identifier that indicates a specific activity on the stack.
+ ActivityId activity_id_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
+ };
+
+ // A ThreadActivityTracker runs on top of memory that is managed externally.
+ // It must be large enough for the internal header and a few Activity
+ // blocks. See SizeForStackDepth().
+ ThreadActivityTracker(void* base, size_t size);
+ virtual ~ThreadActivityTracker();
+
+ // Indicates that an activity has started from a given |origin| address in
+ // the code, though it can be null if the creator's address is not known.
+ // The |type| and |data| describe the activity. |program_counter| should be
+ // the result of GetProgramCounter() where push is called. Returned is an
+ // ID that can be used to adjust the pushed activity.
+ ActivityId PushActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+
+ // An inlined version of the above that gets the program counter where it
+ // is called.
+ ALWAYS_INLINE
+ ActivityId PushActivity(const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ return PushActivity(::tracked_objects::GetProgramCounter(), origin, type,
+ data);
+ }
+
+ // Changes the activity |type| and |data| of the top-most entry on the stack.
+ // This is useful if the information has changed and it is desirable to
+ // track that change without creating a new stack entry. If the type is
+ // ACT_NULL or the data is kNullActivityData then that value will remain
+ // unchanged. The type, if changed, must remain in the same category.
+ // Changing both is not atomic so a snapshot operation could occur between
+ // the update of |type| and |data| or between updates of individual |data|
+ // fields.
+ void ChangeActivity(ActivityId id,
+ Activity::Type type,
+ const ActivityData& data);
+
+ // Indicates that an activity has completed.
+ void PopActivity(ActivityId id);
+
+ // Gets the user-data information for an activity.
+ std::unique_ptr<ActivityUserData> GetUserData(
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator);
+
+ // Returns whether there is real user-data associated with a given
+ // ActivityId, since it's possible that any returned object is just a sink.
+ bool HasUserData(ActivityId id);
+
+ // Release the user-data information for an activity.
+ void ReleaseUserData(ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator);
+
+ // Returns whether the current data is valid or not. It is not valid if
+ // corruption has been detected in the header or other data structures.
+ bool IsValid() const;
+
+ // Gets a copy of the tracker contents for analysis. Returns false if a
+ // snapshot was not possible, perhaps because the data is not valid; the
+ // contents of |output_snapshot| are undefined in that case. The current
+ // implementation does not support concurrent snapshot operations.
+ bool CreateSnapshot(Snapshot* output_snapshot) const;
+
+ // Calculates the memory size required for a given stack depth, including
+ // the internal header structure for the stack.
+ static size_t SizeForStackDepth(int stack_depth);
+
+ private:
+ friend class ActivityTrackerTest;
+
+ Header* const header_; // Pointer to the Header structure.
+ Activity* const stack_; // The stack of activities.
+ const uint32_t stack_slots_; // The total number of stack slots.
+
+ bool valid_ = false; // Tracks whether the data is valid or not.
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadActivityTracker);
+};
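+
+// A minimal sketch of stand-alone use; these objects are normally managed
+// by the GlobalActivityTracker. |memory| is an assumed caller-owned buffer
+// sized via SizeForStackDepth():
+//
+//   ThreadActivityTracker tracker(
+//       memory, ThreadActivityTracker::SizeForStackDepth(4));
+//   ThreadActivityTracker::ActivityId id = tracker.PushActivity(
+//       nullptr, Activity::ACT_TASK, ActivityData::ForTask(42));
+//   // ... the tracked work ...
+//   tracker.PopActivity(id);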
+
+
+// The global tracker manages all the individual thread trackers. Memory for
+// the thread trackers is taken from a PersistentMemoryAllocator which allows
+// for the data to be analyzed by a parallel process or even post-mortem.
+class BASE_EXPORT GlobalActivityTracker {
+ public:
+ // Type identifiers used when storing in persistent memory so they can be
+ // identified during extraction; the first 4 bytes of the SHA1 of the name
+ // are used as a unique integer. A "version number" is added to the base
+ // so that, if the structure of that object changes, stored older versions
+ // will be safely ignored. These are public so that an external process
+ // can recognize records of this type within an allocator.
+ enum : uint32_t {
+ kTypeIdActivityTracker = 0x5D7381AF + 3, // SHA1(ActivityTracker) v3
+ kTypeIdUserDataRecord = 0x615EDDD7 + 2, // SHA1(UserDataRecord) v2
+ kTypeIdGlobalLogMessage = 0x4CF434F9 + 1, // SHA1(GlobalLogMessage) v1
+ kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
+
+ kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
+ kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
+ };
+
+ // This structure contains information about a loaded module, as shown to
+ // users of the tracker.
+ struct BASE_EXPORT ModuleInfo {
+ ModuleInfo();
+ ModuleInfo(ModuleInfo&& rhs);
+ ModuleInfo(const ModuleInfo& rhs);
+ ~ModuleInfo();
+
+ ModuleInfo& operator=(ModuleInfo&& rhs);
+ ModuleInfo& operator=(const ModuleInfo& rhs);
+
+ // Information about where and when the module was loaded/unloaded.
+ bool is_loaded = false; // Was the last operation a load or unload?
+ uintptr_t address = 0; // Address of the last load operation.
+ int64_t load_time = 0; // Time of last change; set automatically.
+
+ // Information about the module itself. These never change no matter how
+ // many times a module may be loaded and unloaded.
+ size_t size = 0; // The size of the loaded module.
+ uint32_t timestamp = 0; // Opaque "timestamp" for the module.
+ uint32_t age = 0; // Opaque "age" for the module.
+ uint8_t identifier[16]; // Opaque identifier (GUID, etc.) for the module.
+ std::string file; // The full path to the file. (UTF-8)
+ std::string debug_file; // The full path to the debug file.
+ };
+
+ // This is a thin wrapper around the thread-tracker's ScopedActivity that
+ // accesses the global tracker to provide some of the information, notably
+ // which thread-tracker to use. It is safe to create even if activity
+ // tracking is not enabled.
+ class BASE_EXPORT ScopedThreadActivity
+ : public ThreadActivityTracker::ScopedActivity {
+ public:
+ ScopedThreadActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data,
+ bool lock_allowed);
+ ~ScopedThreadActivity();
+
+ // Returns an object for manipulating user data.
+ ActivityUserData& user_data();
+
+ private:
+ // Gets (or creates) a tracker for the current thread. If locking is not
+ // allowed (because a lock is being tracked, which would cause recursion),
+ // the attempt to create a tracker when none exists is skipped. Once the
+ // tracker for this thread has been created for other reasons, locks will
+ // be tracked. The thread-tracker itself uses locks.
+ static ThreadActivityTracker* GetOrCreateTracker(bool lock_allowed) {
+ GlobalActivityTracker* global_tracker = Get();
+ if (!global_tracker)
+ return nullptr;
+ if (lock_allowed)
+ return global_tracker->GetOrCreateTrackerForCurrentThread();
+ else
+ return global_tracker->GetTrackerForCurrentThread();
+ }
+
+ // An object that manages additional user data, created only upon request.
+ std::unique_ptr<ActivityUserData> user_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedThreadActivity);
+ };
+
+ ~GlobalActivityTracker();
+
+ // Creates a global tracker using a given persistent-memory |allocator| and
+ // providing the given |stack_depth| to each thread tracker it manages. The
+ // created object is activated so tracking will begin immediately upon return.
+ static void CreateWithAllocator(
+ std::unique_ptr<PersistentMemoryAllocator> allocator,
+ int stack_depth);
+
+#if !defined(OS_NACL)
+ // Like above but internally creates an allocator around a disk file with
+ // the specified |size| at the given |file_path|. Any existing file will be
+ // overwritten. The |id| and |name| are arbitrary and stored in the allocator
+ // for reference by whatever process reads it.
+ static void CreateWithFile(const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth);
+#endif // !defined(OS_NACL)
+
+ // Like above but internally creates an allocator using local heap memory of
+ // the specified size. This is used primarily for unit tests.
+ static void CreateWithLocalMemory(size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth);
+
+ // Gets the global activity-tracker or null if none exists.
+ static GlobalActivityTracker* Get() {
+ return reinterpret_cast<GlobalActivityTracker*>(
+ subtle::Acquire_Load(&g_tracker_));
+ }
+
+ // Convenience method for determining if a global tracker is active.
+ static bool IsEnabled() { return Get() != nullptr; }
+
+ // Gets the persistent-memory-allocator in which data is stored. Callers
+ // can store additional records here to pass more information to the
+ // analysis process.
+ PersistentMemoryAllocator* allocator() { return allocator_.get(); }
+
+ // Gets the thread's activity-tracker if it exists. This is inline for
+ // performance reasons and it uses thread-local-storage (TLS) so that there
+ // is no significant lookup time required to find the one for the calling
+ // thread. Ownership remains with the global tracker.
+ ThreadActivityTracker* GetTrackerForCurrentThread() {
+ return reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
+ }
+
+ // Gets the thread's activity-tracker or creates one if none exists. This
+ // is inline for performance reasons. Ownership remains with the global
+ // tracker.
+ ThreadActivityTracker* GetOrCreateTrackerForCurrentThread() {
+ ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+ if (tracker)
+ return tracker;
+ return CreateTrackerForCurrentThread();
+ }
+
+ // Creates an activity-tracker for the current thread.
+ ThreadActivityTracker* CreateTrackerForCurrentThread();
+
+ // Releases the activity-tracker for the current thread (for testing only).
+ void ReleaseTrackerForCurrentThreadForTesting();
+
+ // Records a log message. The current implementation does NOT recycle these,
+ // so only store critical messages such as FATAL ones.
+ void RecordLogMessage(StringPiece message);
+ static void RecordLogMessageIfEnabled(StringPiece message) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordLogMessage(message);
+ }
+
+ // Records a module load/unload event. This is safe to call multiple times
+ // even with the same information.
+ void RecordModuleInfo(const ModuleInfo& info);
+ static void RecordModuleInfoIfEnabled(const ModuleInfo& info) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordModuleInfo(info);
+ }
+
+ // Record field trial information. This call is thread-safe. In addition to
+ // this, construction of a GlobalActivityTracker will cause all existing
+ // active field trials to be fetched and recorded.
+ void RecordFieldTrial(const std::string& trial_name, StringPiece group_name);
+ static void RecordFieldTrialIfEnabled(const std::string& trial_name,
+ StringPiece group_name) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordFieldTrial(trial_name, group_name);
+ }
+
+ // Accesses the global data record for storing arbitrary key/value pairs.
+ ActivityUserData& global_data() { return global_data_; }
+
+ private:
+ friend class GlobalActivityAnalyzer;
+ friend class ScopedThreadActivity;
+ friend class ActivityTrackerTest;
+
+ enum : int {
+ // The maximum number of threads that can be tracked within a process. If
+ // more than this number run concurrently, tracking of new ones may cease.
+ kMaxThreadCount = 100,
+ kCachedThreadMemories = 10,
+ kCachedUserDataMemories = 10,
+ };
+
+ // A wrapper around ActivityUserData that is thread-safe and thus can be used
+ // in the global scope without the requirement of being called from only one
+ // thread.
+ class GlobalUserData : public ActivityUserData {
+ public:
+ GlobalUserData(void* memory, size_t size);
+ ~GlobalUserData() override;
+
+ private:
+ void Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) override;
+
+ Lock data_lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
+ };
+
+ // State of a module as stored in persistent memory. This supports a single
+ // loading of a module only. If modules are loaded multiple times at
+ // different addresses, only the last will be recorded and an unload will
+ // not revert to the information of any other addresses.
+ struct BASE_EXPORT ModuleInfoRecord {
+ // SHA1(ModuleInfoRecord): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
+
+ // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+ static constexpr size_t kExpectedInstanceSize = 56;
+
+ // The atomic unfortunately makes this a "complex" class on some compilers
+ // and thus requires an out-of-line constructor & destructor even though
+ // they do nothing.
+ ModuleInfoRecord();
+ ~ModuleInfoRecord();
+
+ uint64_t address; // The base address of the module.
+ uint64_t load_time; // Time of last load/unload.
+ uint64_t size; // The size of the module in bytes.
+ uint32_t timestamp; // Opaque timestamp of the module.
+ uint32_t age; // Opaque "age" associated with the module.
+ uint8_t identifier[16]; // Opaque identifier for the module.
+ std::atomic<uint32_t> changes; // Number of load/unload actions.
+ uint16_t pickle_size; // The size of the following pickle.
+ uint8_t loaded; // Flag if module is loaded or not.
+ char pickle[1]; // Other strings; may allocate larger.
+
+ // Decodes/encodes storage structure from more generic info structure.
+ bool DecodeTo(GlobalActivityTracker::ModuleInfo* info,
+ size_t record_size) const;
+ bool EncodeFrom(const GlobalActivityTracker::ModuleInfo& info,
+ size_t record_size);
+
+ // Updates the core information without changing the encoded strings. This
+ // is useful when a known module changes state (i.e. new load or unload).
+ bool UpdateFrom(const GlobalActivityTracker::ModuleInfo& info);
+
+ // Determines the required memory size for the encoded storage.
+ static size_t EncodedSize(const GlobalActivityTracker::ModuleInfo& info);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ModuleInfoRecord);
+ };
+
+ // A thin wrapper around the main thread-tracker that keeps additional
+ // information that the global tracker needs to handle joined threads.
+ class ManagedActivityTracker : public ThreadActivityTracker {
+ public:
+ ManagedActivityTracker(PersistentMemoryAllocator::Reference mem_reference,
+ void* base,
+ size_t size);
+ ~ManagedActivityTracker() override;
+
+ // The reference into persistent memory from which the thread-tracker's
+ // memory was created.
+ const PersistentMemoryAllocator::Reference mem_reference_;
+
+ // The physical address used for the thread-tracker's memory.
+ void* const mem_base_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ManagedActivityTracker);
+ };
+
+ // Creates a global tracker using a given persistent-memory |allocator| and
+ // providing the given |stack_depth| to each thread tracker it manages. The
+ // created object is activated so tracking has already started upon return.
+ GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
+ int stack_depth);
+
+ // Returns the memory used by an activity-tracker managed by this class.
+ // It is called during the destruction of a ManagedActivityTracker object.
+ void ReturnTrackerMemory(ManagedActivityTracker* tracker);
+
+ // Releases the activity-tracker associated with a thread. It is called
+ // automatically when a thread is joined and thus there is nothing more to
+ // be tracked. |value| is a pointer to a ManagedActivityTracker.
+ static void OnTLSDestroy(void* value);
+
+ // The persistent-memory allocator from which the memory for all trackers
+ // is taken.
+ std::unique_ptr<PersistentMemoryAllocator> allocator_;
+
+ // The size (in bytes) of memory required by a ThreadActivityTracker to
+ // provide the stack-depth requested during construction.
+ const size_t stack_memory_size_;
+
+ // The activity tracker for the currently executing thread.
+ base::ThreadLocalStorage::Slot this_thread_tracker_;
+
+ // The number of thread trackers currently active.
+ std::atomic<int> thread_tracker_count_;
+
+ // A caching memory allocator for thread-tracker objects.
+ ActivityTrackerMemoryAllocator thread_tracker_allocator_;
+ base::Lock thread_tracker_allocator_lock_;
+
+ // A caching memory allocator for user data attached to activity data.
+ ActivityTrackerMemoryAllocator user_data_allocator_;
+ base::Lock user_data_allocator_lock_;
+
+ // An object for holding global arbitrary key value pairs. Values must always
+ // be written from the main UI thread.
+ GlobalUserData global_data_;
+
+ // A map of global module information, keyed by module path.
+ std::map<const std::string, ModuleInfoRecord*> modules_;
+ base::Lock modules_lock_;
+
+ // The active global activity tracker.
+ static subtle::AtomicWord g_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
+};
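+
+// A typical initialization sketch; the size, id, name, and stack depth
+// below are arbitrary example values:
+//
+//   GlobalActivityTracker::CreateWithLocalMemory(
+//       1 << 20, /*id=*/0, /*name=*/"", /*stack_depth=*/4);
+//   // From here on, Scoped*Activity objects on any thread are recorded.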
+
+
+// Record entry in to and out of an arbitrary block of code.
+class BASE_EXPORT ScopedActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ // Track activity at the specified FROM_HERE location for an arbitrary
+ // 4-bit |action|, an arbitrary 32-bit |id|, and 32-bits of arbitrary
+ // |info|. None of these values affect operation; they're all purely
+ // for association and analysis. To have unique identifiers across a
+ // diverse code-base, create the number by taking the first 8 characters
+ // of the hash of the activity being tracked.
+ //
+ // For example:
+ // Tracking method: void MayNeverExit(int32_t foo) {...}
+ // echo -n "MayNeverExit" | sha1sum => e44873ccab21e2b71270da24aa1...
+ //
+ // void MayNeverExit(int32_t foo) {
+ // base::debug::ScopedActivity track_me(0, 0xE44873CC, foo);
+ // ...
+ // }
+ ALWAYS_INLINE
+ ScopedActivity(uint8_t action, uint32_t id, int32_t info)
+ : ScopedActivity(::tracked_objects::GetProgramCounter(),
+ action,
+ id,
+ info) {}
+ ScopedActivity() : ScopedActivity(0, 0, 0) {}
+
+ // Changes the |action| and/or |info| of this activity on the stack. This
+ // is useful for tracking progress through a function, updating the action
+ // to indicate "milestones" in the block (max 16 milestones: 0-15) or the
+ // info to reflect other changes. Changing both is not atomic so a snapshot
+ // operation could occur between the update of |action| and |info|.
+ void ChangeAction(uint8_t action);
+ void ChangeInfo(int32_t info);
+ void ChangeActionAndInfo(uint8_t action, int32_t info);
+
+ private:
+ // Constructs the object using a passed-in program-counter.
+ ScopedActivity(const void* program_counter,
+ uint8_t action,
+ uint32_t id,
+ int32_t info);
+
+ // A copy of the ID code so it doesn't have to be passed by the caller when
+ // changing the |info| field.
+ uint32_t id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
+};
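+
+// For example, marking milestones within a long-running block (the action
+// and id values here are arbitrary):
+//
+//   base::debug::ScopedActivity activity(/*action=*/0, 0xE44873CC, 0);
+//   // ... first phase ...
+//   activity.ChangeAction(1);
+//   // ... second phase ...
+//   activity.ChangeActionAndInfo(2, /*info=*/42);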
+
+
+// These "scoped" classes provide easy tracking of various blocking actions.
+
+class BASE_EXPORT ScopedTaskRunActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ ALWAYS_INLINE
+ explicit ScopedTaskRunActivity(const base::PendingTask& task)
+ : ScopedTaskRunActivity(::tracked_objects::GetProgramCounter(),
+ task) {}
+
+ private:
+ ScopedTaskRunActivity(const void* program_counter,
+ const base::PendingTask& task);
+ DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunActivity);
+};
+
+class BASE_EXPORT ScopedLockAcquireActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ ALWAYS_INLINE
+ explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock)
+ : ScopedLockAcquireActivity(::tracked_objects::GetProgramCounter(),
+ lock) {}
+
+ private:
+ ScopedLockAcquireActivity(const void* program_counter,
+ const base::internal::LockImpl* lock);
+ DISALLOW_COPY_AND_ASSIGN(ScopedLockAcquireActivity);
+};
+
+class BASE_EXPORT ScopedEventWaitActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ ALWAYS_INLINE
+ explicit ScopedEventWaitActivity(const base::WaitableEvent* event)
+ : ScopedEventWaitActivity(::tracked_objects::GetProgramCounter(),
+ event) {}
+
+ private:
+ ScopedEventWaitActivity(const void* program_counter,
+ const base::WaitableEvent* event);
+ DISALLOW_COPY_AND_ASSIGN(ScopedEventWaitActivity);
+};
+
+class BASE_EXPORT ScopedThreadJoinActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ ALWAYS_INLINE
+ explicit ScopedThreadJoinActivity(const base::PlatformThreadHandle* thread)
+ : ScopedThreadJoinActivity(::tracked_objects::GetProgramCounter(),
+ thread) {}
+
+ private:
+ ScopedThreadJoinActivity(const void* program_counter,
+ const base::PlatformThreadHandle* thread);
+ DISALLOW_COPY_AND_ASSIGN(ScopedThreadJoinActivity);
+};
+
+// Some systems don't have base::Process
+#if !defined(OS_NACL) && !defined(OS_IOS)
+class BASE_EXPORT ScopedProcessWaitActivity
+ : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+ ALWAYS_INLINE
+ explicit ScopedProcessWaitActivity(const base::Process* process)
+ : ScopedProcessWaitActivity(::tracked_objects::GetProgramCounter(),
+ process) {}
+
+ private:
+ ScopedProcessWaitActivity(const void* program_counter,
+ const base::Process* process);
+ DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
+};
+#endif
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_ACTIVITY_TRACKER_H_
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
new file mode 100644
index 0000000000..aced4fb36a
--- /dev/null
+++ b/base/debug/activity_tracker_unittest.cc
@@ -0,0 +1,340 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_tracker.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/rand_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class TestActivityTracker : public ThreadActivityTracker {
+ public:
+ TestActivityTracker(std::unique_ptr<char[]> memory, size_t mem_size)
+ : ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
+ mem_segment_(std::move(memory)) {}
+
+ ~TestActivityTracker() override {}
+
+ private:
+ std::unique_ptr<char[]> mem_segment_;
+};
+
+} // namespace
+
+
+class ActivityTrackerTest : public testing::Test {
+ public:
+ const int kMemorySize = 1 << 20; // 1MiB
+ const int kStackSize = 1 << 10; // 1KiB
+
+ using ActivityId = ThreadActivityTracker::ActivityId;
+
+ ActivityTrackerTest() {}
+
+ ~ActivityTrackerTest() override {
+ GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+ if (global_tracker) {
+ global_tracker->ReleaseTrackerForCurrentThreadForTesting();
+ delete global_tracker;
+ }
+ }
+
+ std::unique_ptr<ThreadActivityTracker> CreateActivityTracker() {
+ std::unique_ptr<char[]> memory(new char[kStackSize]);
+ return MakeUnique<TestActivityTracker>(std::move(memory), kStackSize);
+ }
+
+ size_t GetGlobalActiveTrackerCount() {
+ GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+ if (!global_tracker)
+ return 0;
+ return global_tracker->thread_tracker_count_.load(
+ std::memory_order_relaxed);
+ }
+
+ size_t GetGlobalInactiveTrackerCount() {
+ GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+ if (!global_tracker)
+ return 0;
+ base::AutoLock autolock(global_tracker->thread_tracker_allocator_lock_);
+ return global_tracker->thread_tracker_allocator_.cache_used();
+ }
+
+ size_t GetGlobalUserDataMemoryCacheUsed() {
+ return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
+ }
+
+ static void DoNothing() {}
+};
+
+TEST_F(ActivityTrackerTest, UserDataTest) {
+ char buffer[256];
+ memset(buffer, 0, sizeof(buffer));
+ ActivityUserData data(buffer, sizeof(buffer));
+ const size_t space = sizeof(buffer) - 8;
+ ASSERT_EQ(space, data.available_);
+
+ data.SetInt("foo", 1);
+ ASSERT_EQ(space - 24, data.available_);
+
+ data.SetUint("b", 1U); // Small names fit beside header in a word.
+ ASSERT_EQ(space - 24 - 16, data.available_);
+
+ data.Set("c", buffer, 10);
+ ASSERT_EQ(space - 24 - 16 - 24, data.available_);
+
+ data.SetString("dear john", "it's been fun");
+ ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+
+ data.Set("c", buffer, 20);
+ ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetString("dear john", "but we're done together");
+ ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetString("dear john", "bye");
+ ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetChar("d", 'x');
+ ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8, data.available_);
+
+ data.SetBool("ee", true);
+ ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
+
+ data.SetString("f", "");
+ ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16 - 8, data.available_);
+}
+
+TEST_F(ActivityTrackerTest, PushPopTest) {
+ std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
+ ThreadActivityTracker::Snapshot snapshot;
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.activity_stack_depth);
+ ASSERT_EQ(0U, snapshot.activity_stack.size());
+
+ char origin1;
+ ActivityId id1 = tracker->PushActivity(&origin1, Activity::ACT_TASK,
+ ActivityData::ForTask(11));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(1U, snapshot.activity_stack_depth);
+ ASSERT_EQ(1U, snapshot.activity_stack.size());
+ EXPECT_NE(0, snapshot.activity_stack[0].time_internal);
+ EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
+ snapshot.activity_stack[0].origin_address);
+ EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
+
+ char origin2;
+ char lock2;
+ ActivityId id2 = tracker->PushActivity(&origin2, Activity::ACT_LOCK,
+ ActivityData::ForLock(&lock2));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(2U, snapshot.activity_stack_depth);
+ ASSERT_EQ(2U, snapshot.activity_stack.size());
+ EXPECT_LE(snapshot.activity_stack[0].time_internal,
+ snapshot.activity_stack[1].time_internal);
+ EXPECT_EQ(Activity::ACT_LOCK, snapshot.activity_stack[1].activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin2),
+ snapshot.activity_stack[1].origin_address);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&lock2),
+ snapshot.activity_stack[1].data.lock.lock_address);
+
+ tracker->PopActivity(id2);
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(1U, snapshot.activity_stack_depth);
+ ASSERT_EQ(1U, snapshot.activity_stack.size());
+ EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
+ snapshot.activity_stack[0].origin_address);
+ EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
+
+ tracker->PopActivity(id1);
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.activity_stack_depth);
+ ASSERT_EQ(0U, snapshot.activity_stack.size());
+}
+
+TEST_F(ActivityTrackerTest, ScopedTaskTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ ThreadActivityTracker::Snapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.activity_stack_depth);
+ ASSERT_EQ(0U, snapshot.activity_stack.size());
+
+ {
+ PendingTask task1(FROM_HERE, base::Bind(&DoNothing));
+ ScopedTaskRunActivity activity1(task1);
+ ActivityUserData& user_data1 = activity1.user_data();
+ (void)user_data1; // Tell compiler it's been used.
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(1U, snapshot.activity_stack_depth);
+ ASSERT_EQ(1U, snapshot.activity_stack.size());
+ EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+
+ {
+ PendingTask task2(FROM_HERE, base::Bind(&DoNothing));
+ ScopedTaskRunActivity activity2(task2);
+ ActivityUserData& user_data2 = activity2.user_data();
+ (void)user_data2; // Tell compiler it's been used.
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(2U, snapshot.activity_stack_depth);
+ ASSERT_EQ(2U, snapshot.activity_stack.size());
+ EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[1].activity_type);
+ }
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(1U, snapshot.activity_stack_depth);
+ ASSERT_EQ(1U, snapshot.activity_stack.size());
+ EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+ }
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.activity_stack_depth);
+ ASSERT_EQ(0U, snapshot.activity_stack.size());
+ ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
+}
+
+TEST_F(ActivityTrackerTest, CreateWithFileTest) {
+ const char temp_name[] = "CreateWithFileTest";
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+ const size_t temp_size = 64 << 10; // 64 KiB
+
+ // Create a global tracker on a new file.
+ ASSERT_FALSE(PathExists(temp_file));
+ GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "foo", 3);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ EXPECT_EQ(std::string("foo"), global->allocator()->Name());
+ global->ReleaseTrackerForCurrentThreadForTesting();
+ delete global;
+
+ // Create a global tracker over an existing file, replacing it. If the
+ // replacement doesn't work, the name will remain as it was first created.
+ ASSERT_TRUE(PathExists(temp_file));
+ GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "bar", 3);
+ global = GlobalActivityTracker::Get();
+ EXPECT_EQ(std::string("bar"), global->allocator()->Name());
+ global->ReleaseTrackerForCurrentThreadForTesting();
+ delete global;
+}
+
+
+// GlobalActivityTracker tests below.
+
+class SimpleActivityThread : public SimpleThread {
+ public:
+ SimpleActivityThread(const std::string& name,
+ const void* origin,
+ Activity::Type activity,
+ const ActivityData& data)
+ : SimpleThread(name, Options()),
+ origin_(origin),
+ activity_(activity),
+ data_(data),
+ exit_condition_(&lock_) {}
+
+ ~SimpleActivityThread() override {}
+
+ void Run() override {
+ ThreadActivityTracker::ActivityId id =
+ GlobalActivityTracker::Get()
+ ->GetOrCreateTrackerForCurrentThread()
+ ->PushActivity(origin_, activity_, data_);
+
+ {
+ AutoLock auto_lock(lock_);
+ ready_ = true;
+ while (!exit_)
+ exit_condition_.Wait();
+ }
+
+ GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
+ }
+
+ void Exit() {
+ AutoLock auto_lock(lock_);
+ exit_ = true;
+ exit_condition_.Signal();
+ }
+
+ void WaitReady() {
+ SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_);
+ }
+
+ private:
+ const void* origin_;
+ Activity::Type activity_;
+ ActivityData data_;
+
+ bool ready_ = false;
+ bool exit_ = false;
+ Lock lock_;
+ ConditionVariable exit_condition_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleActivityThread);
+};
+
+TEST_F(ActivityTrackerTest, ThreadDeathTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ const size_t starting_active = GetGlobalActiveTrackerCount();
+ const size_t starting_inactive = GetGlobalInactiveTrackerCount();
+
+ SimpleActivityThread t1("t1", nullptr, Activity::ACT_TASK,
+ ActivityData::ForTask(11));
+ t1.Start();
+ t1.WaitReady();
+ EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
+ EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
+
+ t1.Exit();
+ t1.Join();
+ EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
+ EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
+
+ // Start another thread and ensure it re-uses the existing memory.
+
+ SimpleActivityThread t2("t2", nullptr, Activity::ACT_TASK,
+ ActivityData::ForTask(22));
+ t2.Start();
+ t2.WaitReady();
+ EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
+ EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
+
+ t2.Exit();
+ t2.Join();
+ EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
+ EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/debug/alias.cc b/base/debug/alias.cc
index ff3557450f..d49808491b 100644
--- a/base/debug/alias.cc
+++ b/base/debug/alias.cc
@@ -12,7 +12,8 @@ namespace debug {
#pragma optimize("", off)
#endif
-void Alias(const void*) {}
+void Alias(const void* /* var */) {
+}
#if defined(COMPILER_MSVC)
#pragma optimize("", on)
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index a157d9ad3f..ebe9d611f7 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -18,6 +18,8 @@
#include <vector>
#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#if defined(__GLIBCXX__)
diff --git a/base/debug/debugging_flags.h b/base/debug/debugging_flags.h
index 1ea435fd71..e6ae1ee192 100644
--- a/base/debug/debugging_flags.h
+++ b/base/debug/debugging_flags.h
@@ -1,11 +1,8 @@
// Generated by build/write_buildflag_header.py
// From "base_debugging_flags"
-
#ifndef BASE_DEBUG_DEBUGGING_FLAGS_H_
#define BASE_DEBUG_DEBUGGING_FLAGS_H_
-
#include "build/buildflag.h"
-
#define BUILDFLAG_INTERNAL_ENABLE_PROFILING() (0)
-
+#define BUILDFLAG_INTERNAL_ENABLE_MEMORY_TASK_PROFILER() (0)
#endif // BASE_DEBUG_DEBUGGING_FLAGS_H_
diff --git a/base/debug/dump_without_crashing.cc b/base/debug/dump_without_crashing.cc
new file mode 100644
index 0000000000..4b338ca293
--- /dev/null
+++ b/base/debug/dump_without_crashing.cc
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/dump_without_crashing.h"
+
+#include "base/logging.h"
+
+namespace {
+
+// Pointer to the function that's called by DumpWithoutCrashing() to dump the
+// process's memory.
+void (CDECL *dump_without_crashing_function_)() = NULL;
+
+} // namespace
+
+namespace base {
+
+namespace debug {
+
+bool DumpWithoutCrashing() {
+ if (dump_without_crashing_function_) {
+ (*dump_without_crashing_function_)();
+ return true;
+ }
+ return false;
+}
+
+void SetDumpWithoutCrashingFunction(void (CDECL *function)()) {
+ dump_without_crashing_function_ = function;
+}
+
+} // namespace debug
+
+} // namespace base
diff --git a/base/debug/dump_without_crashing.h b/base/debug/dump_without_crashing.h
new file mode 100644
index 0000000000..a5c85d5ebe
--- /dev/null
+++ b/base/debug/dump_without_crashing.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
+#define BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace debug {
+
+// Handler to silently dump the current process without crashing.
+// Before calling this function, call SetDumpWithoutCrashingFunction to pass a
+// function pointer, typically chrome!DumpProcessWithoutCrash. See example code
+// in chrome_main.cc that does this for chrome.dll.
+// Returns false if called before SetDumpWithoutCrashingFunction.
+BASE_EXPORT bool DumpWithoutCrashing();
+
+// Sets a function that'll be invoked to dump the current process when
+// DumpWithoutCrashing() is called.
+BASE_EXPORT void SetDumpWithoutCrashingFunction(void (CDECL *function)());
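+
+// Illustrative wiring; |MyDumpFunction| is a hypothetical application-
+// supplied routine, not part of this interface:
+//
+//   void CDECL MyDumpFunction() { /* write a minidump */ }
+//   base::debug::SetDumpWithoutCrashingFunction(&MyDumpFunction);
+//   ...
+//   base::debug::DumpWithoutCrashing();  // Returns true once set.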
+
+} // namespace debug
+
+} // namespace base
+
+#endif // BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
diff --git a/base/debug/leak_tracker_unittest.cc b/base/debug/leak_tracker_unittest.cc
index 8b4c5681e0..b9ecdcf3c9 100644
--- a/base/debug/leak_tracker_unittest.cc
+++ b/base/debug/leak_tracker_unittest.cc
@@ -30,7 +30,7 @@ TEST(LeakTrackerTest, NotEnabled) {
EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
- // Use scoped_ptr so compiler doesn't complain about unused variables.
+ // Use unique_ptr so compiler doesn't complain about unused variables.
std::unique_ptr<ClassA> a1(new ClassA);
std::unique_ptr<ClassB> b1(new ClassB);
std::unique_ptr<ClassB> b2(new ClassB);
diff --git a/base/debug/profiler.cc b/base/debug/profiler.cc
new file mode 100644
index 0000000000..e303c2891a
--- /dev/null
+++ b/base/debug/profiler.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/profiler.h"
+
+#include <string>
+
+#include "base/debug/debugging_flags.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/current_module.h"
+#include "base/win/pe_image.h"
+#endif // defined(OS_WIN)
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
+#endif
+
+namespace base {
+namespace debug {
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+
+static int profile_count = 0;
+
+void StartProfiling(const std::string& name) {
+ ++profile_count;
+ std::string full_name(name);
+ std::string pid = IntToString(GetCurrentProcId());
+ std::string count = IntToString(profile_count);
+ ReplaceSubstringsAfterOffset(&full_name, 0, "{pid}", pid);
+ ReplaceSubstringsAfterOffset(&full_name, 0, "{count}", count);
+ ProfilerStart(full_name.c_str());
+}
+
+void StopProfiling() {
+ ProfilerFlush();
+ ProfilerStop();
+}
+
+void FlushProfiling() {
+ ProfilerFlush();
+}
+
+bool BeingProfiled() {
+ return ProfilingIsEnabledForAllThreads();
+}
+
+void RestartProfilingAfterFork() {
+ ProfilerRegisterThread();
+}
+
+bool IsProfilingSupported() {
+ return true;
+}
+
+#else
+
+void StartProfiling(const std::string&) {
+}
+
+void StopProfiling() {
+}
+
+void FlushProfiling() {
+}
+
+bool BeingProfiled() {
+ return false;
+}
+
+void RestartProfilingAfterFork() {
+}
+
+bool IsProfilingSupported() {
+ return false;
+}
+
+#endif
+
+#if !defined(OS_WIN)
+
+bool IsBinaryInstrumented() {
+ return false;
+}
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+ return NULL;
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+ return NULL;
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+ return NULL;
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+ return NULL;
+}
+
+#else // defined(OS_WIN)
+
+bool IsBinaryInstrumented() {
+ enum InstrumentationCheckState {
+ UNINITIALIZED,
+ INSTRUMENTED_IMAGE,
+ NON_INSTRUMENTED_IMAGE,
+ };
+
+ static InstrumentationCheckState state = UNINITIALIZED;
+
+ if (state == UNINITIALIZED) {
+ base::win::PEImage image(CURRENT_MODULE());
+
+ // Check to be sure our image is structured as we'd expect.
+ DCHECK(image.VerifyMagic());
+
+ // Syzygy-instrumented binaries contain a PE image section named ".thunks",
+ // and all Syzygy-modified binaries contain the ".syzygy" image section.
+ // This is a very fast check, as it only looks at the image header.
+ if ((image.GetImageSectionHeaderByName(".thunks") != NULL) &&
+ (image.GetImageSectionHeaderByName(".syzygy") != NULL)) {
+ state = INSTRUMENTED_IMAGE;
+ } else {
+ state = NON_INSTRUMENTED_IMAGE;
+ }
+ }
+ DCHECK(state != UNINITIALIZED);
+
+ return state == INSTRUMENTED_IMAGE;
+}
+
+namespace {
+
+struct FunctionSearchContext {
+ const char* name;
+ FARPROC function;
+};
+
+// Callback function to PEImage::EnumImportChunks.
+bool FindResolutionFunctionInImports(
+ const base::win::PEImage &image, const char* module_name,
+ PIMAGE_THUNK_DATA unused_name_table, PIMAGE_THUNK_DATA import_address_table,
+ PVOID cookie) {
+ FunctionSearchContext* context =
+ reinterpret_cast<FunctionSearchContext*>(cookie);
+
+ DCHECK(context);
+ DCHECK(!context->function);
+
+ // Our import address table contains pointers to the functions we import
+ // at this point. Let's retrieve the first such function and use it to
+ // find the module this import was resolved to by the loader.
+ const wchar_t* function_in_module =
+ reinterpret_cast<const wchar_t*>(import_address_table->u1.Function);
+
+ // Retrieve the module by a function in the module.
+ const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
+ HMODULE module = NULL;
+ if (!::GetModuleHandleEx(kFlags, function_in_module, &module)) {
+ // This can happen if someone IAT patches us to a thunk.
+ return true;
+ }
+
+ // See whether this module exports the function we're looking for.
+ FARPROC exported_func = ::GetProcAddress(module, context->name);
+ if (exported_func != NULL) {
+ // We found it, return the function and terminate the enumeration.
+ context->function = exported_func;
+ return false;
+ }
+
+ // Keep going.
+ return true;
+}
+
+template <typename FunctionType>
+FunctionType FindFunctionInImports(const char* function_name) {
+ if (!IsBinaryInstrumented())
+ return NULL;
+
+ base::win::PEImage image(CURRENT_MODULE());
+
+ FunctionSearchContext ctx = { function_name, NULL };
+ image.EnumImportChunks(FindResolutionFunctionInImports, &ctx);
+
+ return reinterpret_cast<FunctionType>(ctx.function);
+}
+
+} // namespace
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+ return FindFunctionInImports<ReturnAddressLocationResolver>(
+ "ResolveReturnAddressLocation");
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+ return FindFunctionInImports<DynamicFunctionEntryHook>(
+ "OnDynamicFunctionEntry");
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+ return FindFunctionInImports<AddDynamicSymbol>(
+ "AddDynamicSymbol");
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+ return FindFunctionInImports<MoveDynamicSymbol>(
+ "MoveDynamicSymbol");
+}
+
+#endif // defined(OS_WIN)
+
+} // namespace debug
+} // namespace base
diff --git a/base/debug/profiler.h b/base/debug/profiler.h
new file mode 100644
index 0000000000..ea81b13c6a
--- /dev/null
+++ b/base/debug/profiler.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROFILER_H_
+#define BASE_DEBUG_PROFILER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+// The Profiler functions allow usage of the underlying sampling based
+// profiler. If the application has not been built with the necessary
+// flags (-DENABLE_PROFILING and not -DNO_TCMALLOC) then these functions
+// are noops.
+namespace base {
+namespace debug {
+
+// Start profiling with the supplied name.
+// {pid} will be replaced by the process' pid and {count} will be replaced
+// by the count of the profile run (starts at 1 with each process).
+BASE_EXPORT void StartProfiling(const std::string& name);
+
+// Stop profiling and write out data.
+BASE_EXPORT void StopProfiling();
+
+// Force data to be written to file.
+BASE_EXPORT void FlushProfiling();
+
+// Returns true if process is being profiled.
+BASE_EXPORT bool BeingProfiled();
+
+// Reset profiling after a fork, which disables timers.
+BASE_EXPORT void RestartProfilingAfterFork();
+
+// Returns true iff this executable is instrumented with the Syzygy profiler.
+BASE_EXPORT bool IsBinaryInstrumented();
+
+// Returns true iff this executable supports profiling.
+BASE_EXPORT bool IsProfilingSupported();
+
+// There's a class of profilers that use "return address swizzling" to get a
+// hook on function exits. This class of profilers uses some form of entry hook,
+// like e.g. binary instrumentation, or a compiler flag, that calls a hook each
+// time a function is invoked. The hook then switches the return address on the
+// stack for the address of an exit hook function, and pushes the original
+// return address to a shadow stack of some type. When in due course the CPU
+// executes a return to the exit hook, the exit hook will do whatever work it
+// does on function exit, then arrange to return to the original return address.
+// This class of profiler does not play well with programs that look at the
+// return address, as does e.g. V8. V8 uses the return address passed to
+// certain runtime functions to find the JIT code that called them, and from
+// there finds the V8 data structures associated with the JS function involved.
+// A return address resolution function is used to fix this. It allows such
+// programs to resolve a location on stack where a return address originally
+// resided, to the shadow stack location where the profiler stashed it.
+typedef uintptr_t (*ReturnAddressLocationResolver)(
+ uintptr_t return_addr_location);
+
+// The functions below here are to support profiling V8-generated code.
+// V8 has provisions for generating a call to an entry hook for newly generated
+// JIT code, and it can push symbol information on code generation and advise
+// when the garbage collector moves code. The function declarations below here
+// make glue between V8's facilities and a profiler.
+
+// This type declaration must match V8's FunctionEntryHook.
+typedef void (*DynamicFunctionEntryHook)(uintptr_t function,
+ uintptr_t return_addr_location);
+
+typedef void (*AddDynamicSymbol)(const void* address,
+ size_t length,
+ const char* name,
+ size_t name_len);
+typedef void (*MoveDynamicSymbol)(const void* address, const void* new_address);
+
+
+// If this binary is instrumented and the instrumentation supplies a function
+// for each of those purposes, find and return the function in question.
+// Otherwise returns NULL.
+BASE_EXPORT ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc();
+BASE_EXPORT DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc();
+BASE_EXPORT AddDynamicSymbol GetProfilerAddDynamicSymbolFunc();
+BASE_EXPORT MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc();
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_PROFILER_H_
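A sketch of the intended profiling flow (not part of the patch); DoStartupWork() is a hypothetical workload. When built with ENABLE_PROFILING and tcmalloc, the {pid} and {count} placeholders are substituted before the profile file is opened; otherwise every call below is a no-op:

#include "base/debug/profiler.h"

void DoStartupWork();  // hypothetical

void ProfileStartup() {
  // Might produce e.g. "startup-12345-1.prof" for pid 12345, first run.
  base::debug::StartProfiling("startup-{pid}-{count}.prof");
  DoStartupWork();
  base::debug::StopProfiling();  // flushes and writes out the data
}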
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index ac0ead76be..af4a6efc3e 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -9,47 +9,146 @@
#include <algorithm>
#include <sstream>
+#include "base/logging.h"
#include "base/macros.h"
-#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <pthread.h>
#include "base/process/process_handle.h"
#include "base/threading/platform_thread.h"
#endif
+#if defined(OS_MACOSX)
+#include <pthread.h>
+#endif
+
+#if defined(OS_LINUX) && defined(__GLIBC__)
+extern "C" void* __libc_stack_end;
+#endif
+
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+
namespace base {
namespace debug {
-StackTrace::StackTrace(const void* const* trace, size_t count) {
- count = std::min(count, arraysize(trace_));
- if (count)
- memcpy(trace_, trace, count * sizeof(trace_[0]));
- count_ = count;
+namespace {
+
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+// GCC and LLVM generate slightly different frames on ARM, see
+// https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+// x86-compatible frame, while GCC needs adjustment.
+constexpr size_t kStackFrameAdjustment = sizeof(uintptr_t);
+#else
+constexpr size_t kStackFrameAdjustment = 0;
+#endif
+
+uintptr_t GetNextStackFrame(uintptr_t fp) {
+ return reinterpret_cast<const uintptr_t*>(fp)[0] - kStackFrameAdjustment;
}
-StackTrace::~StackTrace() {
+uintptr_t GetStackFramePC(uintptr_t fp) {
+ return reinterpret_cast<const uintptr_t*>(fp)[1];
}
-const void *const *StackTrace::Addresses(size_t* count) const {
- *count = count_;
- if (count_)
- return trace_;
- return NULL;
+bool IsStackFrameValid(uintptr_t fp, uintptr_t prev_fp, uintptr_t stack_end) {
+ // With the stack growing downwards, an older stack frame must be
+ // at a greater address than the current one.
+ if (fp <= prev_fp) return false;
+
+ // Assume huge stack frames are bogus.
+ if (fp - prev_fp > 100000) return false;
+
+ // Check alignment.
+ if (fp & (sizeof(uintptr_t) - 1)) return false;
+
+ if (stack_end) {
+ // Both fp[0] and fp[1] must be within the stack.
+ if (fp > stack_end - 2 * sizeof(uintptr_t)) return false;
+
+ // Additional check to filter out false positives.
+ if (GetStackFramePC(fp) < 32768) return false;
+ }
+
+ return true;
+}
+
+// ScanStackForNextFrame() scans the stack for a valid frame to allow unwinding
+// past system libraries. Only supported on Linux where system libraries are
+// usually in the middle of the trace:
+//
+// TraceStackFramePointers
+// <more frames from Chrome>
+// base::WorkSourceDispatch <-- unwinding stops (next frame is invalid),
+// g_main_context_dispatch ScanStackForNextFrame() is called
+// <more frames from glib>
+// g_main_context_iteration
+// base::MessagePumpGlib::Run <-- ScanStackForNextFrame() finds valid frame,
+// base::RunLoop::Run unwinding resumes
+// <more frames from Chrome>
+// __libc_start_main
+//
+// For stack scanning to be efficient it's very important for the thread to
+// be started by Chrome. In that case we naturally terminate unwinding once
+// we reach the origin of the stack (i.e. GetStackEnd()). If the thread is
+// not started by Chrome (e.g. Android's main thread), then we end up always
+// scanning area at the origin of the stack, wasting time and not finding any
+// frames (since Android libraries don't have frame pointers).
+//
+// ScanStackForNextFrame() returns 0 if it couldn't find a valid frame
+// (or if stack scanning is not supported on the current platform).
+uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
+#if defined(OS_LINUX)
+ // Enough to resume almost all prematurely terminated traces.
+ constexpr size_t kMaxStackScanArea = 8192;
+
+ if (!stack_end) {
+ // Too dangerous to scan without knowing where the stack ends.
+ return 0;
+ }
+
+ fp += sizeof(uintptr_t); // current frame is known to be invalid
+ uintptr_t last_fp_to_scan = std::min(fp + kMaxStackScanArea, stack_end) -
+ sizeof(uintptr_t);
+ for (; fp <= last_fp_to_scan; fp += sizeof(uintptr_t)) {
+ uintptr_t next_fp = GetNextStackFrame(fp);
+ if (IsStackFrameValid(next_fp, fp, stack_end)) {
+ // Check two frames deep. Since stack frame is just a pointer to
+ // a higher address on the stack, it's relatively easy to find
+ // something that looks like one. However two linked frames are
+ // far less likely to be bogus.
+ uintptr_t next2_fp = GetNextStackFrame(next_fp);
+ if (IsStackFrameValid(next2_fp, next_fp, stack_end)) {
+ return fp;
+ }
+ }
+ }
+#endif // defined(OS_LINUX)
+
+ return 0;
}
-std::string StackTrace::ToString() const {
- std::stringstream stream;
-#if !defined(__UCLIBC__)
- OutputToStream(&stream);
-#endif
- return stream.str();
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// Returns previous stack frame |fp| was linked to.
+void* LinkStackFrames(void* fpp, void* parent_fp) {
+ uintptr_t fp = reinterpret_cast<uintptr_t>(fpp) - kStackFrameAdjustment;
+ void* prev_parent_fp = reinterpret_cast<void**>(fp)[0];
+ reinterpret_cast<void**>(fp)[0] = parent_fp;
+ return prev_parent_fp;
}
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS
-#if defined(OS_ANDROID)
+} // namespace
-static uintptr_t GetStackEnd() {
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+uintptr_t GetStackEnd() {
+#if defined(OS_ANDROID)
// Bionic reads proc/maps on every call to pthread_getattr_np() when called
// from the main thread. So we need to cache end of stack in that case to get
// acceptable performance.
@@ -58,7 +157,6 @@ static uintptr_t GetStackEnd() {
static uintptr_t main_stack_end = 0;
bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
-
if (is_main_thread && main_stack_end) {
return main_stack_end;
}
@@ -69,9 +167,7 @@ static uintptr_t GetStackEnd() {
int error = pthread_getattr_np(pthread_self(), &attributes);
if (!error) {
error = pthread_attr_getstack(
- &attributes,
- reinterpret_cast<void**>(&stack_begin),
- &stack_size);
+ &attributes, reinterpret_cast<void**>(&stack_begin), &stack_size);
pthread_attr_destroy(&attributes);
}
DCHECK(!error);
@@ -80,67 +176,101 @@ static uintptr_t GetStackEnd() {
if (is_main_thread) {
main_stack_end = stack_end;
}
- return stack_end;
+ return stack_end; // 0 in case of error
+
+#elif defined(OS_LINUX) && defined(__GLIBC__)
+
+ if (GetCurrentProcId() == PlatformThread::CurrentId()) {
+ // For the main thread we have a shortcut.
+ return reinterpret_cast<uintptr_t>(__libc_stack_end);
+ }
+
+// No easy way to get end of the stack for non-main threads,
+// see crbug.com/617730.
+#elif defined(OS_MACOSX)
+ return reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(pthread_self()));
+#endif
+
+ // Don't know how to get end of the stack.
+ return 0;
}
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS
-#endif // defined(OS_ANDROID)
+StackTrace::StackTrace() : StackTrace(arraysize(trace_)) {}
+
+StackTrace::StackTrace(const void* const* trace, size_t count) {
+ count = std::min(count, arraysize(trace_));
+ if (count)
+ memcpy(trace_, trace, count * sizeof(trace_[0]));
+ count_ = count;
+}
+
+const void *const *StackTrace::Addresses(size_t* count) const {
+ *count = count_;
+ if (count_)
+ return trace_;
+ return NULL;
+}
+
+std::string StackTrace::ToString() const {
+ std::stringstream stream;
+#if !defined(__UCLIBC__)
+ OutputToStream(&stream);
+#endif
+ return stream.str();
+}
+
+#if HAVE_TRACE_STACK_FRAME_POINTERS
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial) {
// Usage of __builtin_frame_address() enables frame pointers in this
- // function even if they are not enabled globally. So 'sp' will always
+ // function even if they are not enabled globally. So 'fp' will always
// be valid.
- uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+ uintptr_t fp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) -
+ kStackFrameAdjustment;
-#if defined(OS_ANDROID)
uintptr_t stack_end = GetStackEnd();
-#endif
size_t depth = 0;
while (depth < max_depth) {
-#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
- // GCC and LLVM generate slightly different frames on ARM, see
- // https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
- // x86-compatible frame, while GCC needs adjustment.
- sp -= sizeof(uintptr_t);
-#endif
-
-#if defined(OS_ANDROID)
- // Both sp[0] and s[1] must be valid.
- if (sp + 2 * sizeof(uintptr_t) > stack_end) {
- break;
- }
-#endif
-
if (skip_initial != 0) {
skip_initial--;
} else {
- out_trace[depth++] = reinterpret_cast<const void**>(sp)[1];
+ out_trace[depth++] = reinterpret_cast<const void*>(GetStackFramePC(fp));
}
- // Find out next frame pointer
- // (heuristics are from TCMalloc's stacktrace functions)
- {
- uintptr_t next_sp = reinterpret_cast<const uintptr_t*>(sp)[0];
-
- // With the stack growing downwards, older stack frame must be
- // at a greater address that the current one.
- if (next_sp <= sp) break;
-
- // Assume stack frames larger than 100,000 bytes are bogus.
- if (next_sp - sp > 100000) break;
-
- // Check alignment.
- if (sp & (sizeof(void*) - 1)) break;
+ uintptr_t next_fp = GetNextStackFrame(fp);
+ if (IsStackFrameValid(next_fp, fp, stack_end)) {
+ fp = next_fp;
+ continue;
+ }
- sp = next_sp;
+ next_fp = ScanStackForNextFrame(fp, stack_end);
+ if (next_fp) {
+ fp = next_fp;
+ continue;
}
+
+ // Failed to find next frame.
+ break;
}
return depth;
}
+ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
+ : fp_(fp),
+ parent_fp_(parent_fp),
+ original_parent_fp_(LinkStackFrames(fp, parent_fp)) {}
+
+ScopedStackFrameLinker::~ScopedStackFrameLinker() {
+ void* previous_parent_fp = LinkStackFrames(fp_, original_parent_fp_);
+ CHECK_EQ(parent_fp_, previous_parent_fp)
+ << "Stack frame's parent pointer has changed!";
+}
+
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
} // namespace debug
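A sketch of driving the frame-pointer unwinder above (not part of the patch); kMaxFrames and the skip count are illustrative:

#include "base/debug/stack_trace.h"

#if HAVE_TRACE_STACK_FRAME_POINTERS
void CaptureCurrentStack() {
  constexpr size_t kMaxFrames = 32;
  const void* frames[kMaxFrames];
  // Skip one initial frame so CaptureCurrentStack() itself is not recorded.
  size_t depth = base::debug::TraceStackFramePointers(frames, kMaxFrames, 1);
  base::debug::StackTrace trace(frames, depth);  // can be symbolized later
}
#endif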
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 23e7b5164b..4c9b73e87d 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/macros.h"
#include "build/build_config.h"
#if defined(OS_POSIX)
@@ -44,6 +45,11 @@ namespace debug {
// done in official builds because it has security implications).
BASE_EXPORT bool EnableInProcessStackDumping();
+// Returns end of the stack, or 0 if we couldn't get it.
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+BASE_EXPORT uintptr_t GetStackEnd();
+#endif
+
// A stacktrace can be helpful in debugging. For example, you can include a
// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
// can later see where the given object was created from.
@@ -52,9 +58,13 @@ class BASE_EXPORT StackTrace {
// Creates a stacktrace from the current location.
StackTrace();
+ // Creates a stacktrace from the current location, of up to |count| entries.
+ // |count| will be limited to at most |kMaxTraces|.
+ explicit StackTrace(size_t count);
+
// Creates a stacktrace from an existing array of instruction
// pointers (such as returned by Addresses()). |count| will be
- // trimmed to |kMaxTraces|.
+ // limited to at most |kMaxTraces|.
StackTrace(const void* const* trace, size_t count);
#if defined(OS_WIN)
@@ -67,8 +77,6 @@ class BASE_EXPORT StackTrace {
// Copying and assignment are allowed with the default functions.
- ~StackTrace();
-
// Gets an array of instruction pointer values. |*count| will be set to the
// number of elements in the returned array.
const void* const* Addresses(size_t* count) const;
@@ -113,6 +121,57 @@ class BASE_EXPORT StackTrace {
BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial);
+
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// Destructor restores original linkage of |fp| to avoid corrupting caller's
+// frame register on return.
+//
+// This class can be used to repair broken stack frame chain in cases
+// when execution flow goes into code built without frame pointers:
+//
+// void DoWork() {
+// Call_SomeLibrary();
+// }
+// static __thread void* g_saved_fp;
+// void Call_SomeLibrary() {
+// g_saved_fp = __builtin_frame_address(0);
+// some_library_call(...); // indirectly calls SomeLibrary_Callback()
+// }
+// void SomeLibrary_Callback() {
+// ScopedStackFrameLinker linker(__builtin_frame_address(0), g_saved_fp);
+// ...
+// TraceStackFramePointers(...);
+// }
+//
+// This produces the following trace:
+//
+// #0 SomeLibrary_Callback()
+// #1 <address of the code inside SomeLibrary that called #0>
+// #2 DoWork()
+// ...rest of the trace...
+//
+// SomeLibrary doesn't use frame pointers, so when SomeLibrary_Callback()
+// is called, the stack frame register contains a bogus value that becomes the
+// callback's parent frame address. Without ScopedStackFrameLinker, unwinding
+// would've stopped at that bogus frame address, yielding just the first two
+// frames (#0, #1). ScopedStackFrameLinker overwrites the callback's parent
+// frame address with Call_SomeLibrary's frame, so the unwinder produces the
+// full trace without even noticing that the stack frame chain was broken.
+class BASE_EXPORT ScopedStackFrameLinker {
+ public:
+ ScopedStackFrameLinker(void* fp, void* parent_fp);
+ ~ScopedStackFrameLinker();
+
+ private:
+ void* fp_;
+ void* parent_fp_;
+ void* original_parent_fp_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
+};
+
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
namespace internal {
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index 3c0299cb41..78bc650c79 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -16,13 +16,14 @@
#include <sys/types.h>
#include <unistd.h>
+#include <algorithm>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <vector>
-#if defined(__GLIBCXX__)
+#if !defined(USE_SYMBOLIZE)
#include <cxxabi.h>
#endif
#if !defined(__UCLIBC__)
@@ -33,8 +34,11 @@
#include <AvailabilityMacros.h>
#endif
-#include "base/debug/debugger.h"
+#if defined(OS_LINUX)
#include "base/debug/proc_maps_linux.h"
+#endif
+
+#include "base/debug/debugger.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/free_deleter.h"
@@ -66,16 +70,18 @@ const char kSymbolCharacters[] =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
#endif // !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
-#if !defined(USE_SYMBOLIZE)
+#if !defined(USE_SYMBOLIZE) && !defined(__UCLIBC__)
// Demangles C++ symbols in the given text. Example:
//
// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
// =>
// "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
-#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
void DemangleSymbols(std::string* text) {
// Note: code in this function is NOT async-signal safe (std::string uses
// malloc internally).
+ ALLOW_UNUSED_PARAM(text);
+#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
+
std::string::size_type search_from = 0;
while (search_from < text->size()) {
// Look for the start of a mangled symbol, from search_from.
@@ -110,11 +116,8 @@ void DemangleSymbols(std::string* text) {
search_from = mangled_start + 2;
}
}
-}
-#elif !defined(__UCLIBC__)
-void DemangleSymbols(std::string* /* text */) {}
#endif // defined(__GLIBCXX__) && !defined(__UCLIBC__)
-
+}
#endif // !defined(USE_SYMBOLIZE)
class BacktraceOutputHandler {
@@ -125,7 +128,7 @@ class BacktraceOutputHandler {
virtual ~BacktraceOutputHandler() {}
};
-#if defined(USE_SYMBOLIZE) || !defined(__UCLIBC__)
+#if !defined(__UCLIBC__)
void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
// This should be more than enough to store a 64-bit number in hex:
// 16 hex digits + 1 for null-terminator.
@@ -135,7 +138,6 @@ void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
buf, sizeof(buf), 16, 12);
handler->HandleOutput(buf);
}
-#endif // defined(USE_SYMBOLIZE) || !defined(__UCLIBC__)
#if defined(USE_SYMBOLIZE)
void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
@@ -149,13 +151,9 @@ void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
}
#endif // defined(USE_SYMBOLIZE)
-#if !defined(__UCLIBC__)
-void ProcessBacktrace(void *const * trace,
+void ProcessBacktrace(void *const *trace,
size_t size,
BacktraceOutputHandler* handler) {
- (void)trace; // unused based on build context below.
- (void)size; // unusud based on build context below.
- (void)handler; // unused based on build context below.
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
@@ -216,7 +214,7 @@ void PrintToStderr(const char* output) {
}
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
- (void)void_context; // unused depending on build context
+ ALLOW_UNUSED_PARAM(void_context); // unused depending on build context
// NOTE: This code MUST be async-signal safe.
// NO malloc or stdio is allowed here.
@@ -386,6 +384,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
// Non-Mac OSes should probably reraise the signal as well, but the Linux
// sandbox tests break on CrOS devices.
// https://code.google.com/p/chromium/issues/detail?id=551681
+ PrintToStderr("Calling _exit(1). Core file will not be generated.\n");
_exit(1);
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
}
@@ -450,8 +449,6 @@ void WarmUpBacktrace() {
StackTrace stack_trace;
}
-} // namespace
-
#if defined(USE_SYMBOLIZE)
// class SandboxSymbolizeHelper.
@@ -467,7 +464,8 @@ class SandboxSymbolizeHelper {
public:
// Returns the singleton instance.
static SandboxSymbolizeHelper* GetInstance() {
- return Singleton<SandboxSymbolizeHelper>::get();
+ return Singleton<SandboxSymbolizeHelper,
+ LeakySingletonTraits<SandboxSymbolizeHelper>>::get();
}
private:
@@ -683,6 +681,8 @@ class SandboxSymbolizeHelper {
};
#endif // USE_SYMBOLIZE
+} // namespace
+
bool EnableInProcessStackDumping() {
#if defined(USE_SYMBOLIZE)
SandboxSymbolizeHelper::GetInstance();
@@ -719,15 +719,18 @@ bool EnableInProcessStackDumping() {
return success;
}
-StackTrace::StackTrace() {
- // NOTE: This code MUST be async-signal safe (it's used by in-process
- // stack dumping signal handler). NO malloc or stdio is allowed here.
+StackTrace::StackTrace(size_t count) {
+// NOTE: This code MUST be async-signal safe (it's used by in-process
+// stack dumping signal handler). NO malloc or stdio is allowed here.
#if !defined(__UCLIBC__)
+ count = std::min(arraysize(trace_), count);
+
// Though the backtrace API man page does not list any possible negative
// return values, we take no chance.
- count_ = base::saturated_cast<size_t>(backtrace(trace_, arraysize(trace_)));
+ count_ = base::saturated_cast<size_t>(backtrace(trace_, count));
#else
+ ALLOW_UNUSED_PARAM(count);
count_ = 0;
#endif
}
diff --git a/base/debug/task_annotator.cc b/base/debug/task_annotator.cc
index 4ba4d91b88..46969f28ca 100644
--- a/base/debug/task_annotator.cc
+++ b/base/debug/task_annotator.cc
@@ -4,6 +4,9 @@
#include "base/debug/task_annotator.h"
+#include <array>
+
+#include "base/debug/activity_tracker.h"
#include "base/debug/alias.h"
#include "base/pending_task.h"
#include "base/trace_event/trace_event.h"
@@ -27,32 +30,37 @@ void TaskAnnotator::DidQueueTask(const char* queue_function,
}
void TaskAnnotator::RunTask(const char* queue_function,
- const PendingTask& pending_task) {
+ PendingTask* pending_task) {
+ ScopedTaskRunActivity task_activity(*pending_task);
+
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
tracked_objects::Duration queue_duration =
- stopwatch.StartTime() - pending_task.EffectiveTimePosted();
+ stopwatch.StartTime() - pending_task->EffectiveTimePosted();
- TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- queue_function,
- TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
- TRACE_EVENT_FLAG_FLOW_IN,
- "queue_duration",
- queue_duration.InMilliseconds());
+ TRACE_EVENT_WITH_FLOW1(
+ TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), queue_function,
+ TRACE_ID_MANGLE(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN,
+ "queue_duration", queue_duration.InMilliseconds());
- // Before running the task, store the program counter where it was posted
- // and deliberately alias it to ensure it is on the stack if the task
- // crashes. Be careful not to assume that the variable itself will have the
- // expected value when displayed by the optimizer in an optimized build.
- // Look at a memory dump of the stack.
- const void* program_counter = pending_task.posted_from.program_counter();
- debug::Alias(&program_counter);
+ // Before running the task, store the task backtrace with the chain of
+ // PostTasks that resulted in this call and deliberately alias it to ensure
+ // it is on the stack if the task crashes. Be careful not to assume that the
+ // variable itself will have the expected value when displayed by the
+ // optimizer in an optimized build. Look at a memory dump of the stack.
+ static constexpr int kStackTaskTraceSnapshotSize =
+ std::tuple_size<decltype(pending_task->task_backtrace)>::value + 1;
+ std::array<const void*, kStackTaskTraceSnapshotSize> task_backtrace;
+ task_backtrace[0] = pending_task->posted_from.program_counter();
+ std::copy(pending_task->task_backtrace.begin(),
+ pending_task->task_backtrace.end(), task_backtrace.begin() + 1);
+ debug::Alias(&task_backtrace);
- pending_task.task.Run();
+ std::move(pending_task->task).Run();
stopwatch.Stop();
- tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
- pending_task, stopwatch);
+ tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(*pending_task,
+ stopwatch);
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
diff --git a/base/debug/task_annotator.h b/base/debug/task_annotator.h
index 2687c5c930..34115d8f3d 100644
--- a/base/debug/task_annotator.h
+++ b/base/debug/task_annotator.h
@@ -28,7 +28,7 @@ class BASE_EXPORT TaskAnnotator {
// Run a previously queued task. |queue_function| should match what was
// passed into |DidQueueTask| for this task.
- void RunTask(const char* queue_function, const PendingTask& pending_task);
+ void RunTask(const char* queue_function, PendingTask* pending_task);
private:
// Creates a process-wide unique ID to represent this task in trace events.
diff --git a/base/debug/task_annotator_unittest.cc b/base/debug/task_annotator_unittest.cc
index 9f5c442327..8a1c8bdc87 100644
--- a/base/debug/task_annotator_unittest.cc
+++ b/base/debug/task_annotator_unittest.cc
@@ -24,7 +24,7 @@ TEST(TaskAnnotatorTest, QueueAndRunTask) {
TaskAnnotator annotator;
annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
EXPECT_EQ(0, result);
- annotator.RunTask("TaskAnnotatorTest::Queue", pending_task);
+ annotator.RunTask("TaskAnnotatorTest::Queue", &pending_task);
EXPECT_EQ(123, result);
}
diff --git a/base/debug/thread_heap_usage_tracker.h b/base/debug/thread_heap_usage_tracker.h
new file mode 100644
index 0000000000..508a0a3973
--- /dev/null
+++ b/base/debug/thread_heap_usage_tracker.h
@@ -0,0 +1,117 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+#define BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+
+#include <stdint.h>
+
+#include "base/allocator/features.h"
+#include "base/base_export.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace allocator {
+struct AllocatorDispatch;
+} // namespace allocator
+
+namespace debug {
+
+// Used to store the heap allocator usage in a scope.
+struct ThreadHeapUsage {
+ // The cumulative number of allocation operations.
+ uint64_t alloc_ops;
+
+ // The cumulative number of allocated bytes. Where available, this is
+ // inclusive of heap padding and estimated or actual heap overhead.
+ uint64_t alloc_bytes;
+
+ // Where available, cumulative number of heap padding and overhead bytes.
+ uint64_t alloc_overhead_bytes;
+
+ // The cumulative number of free operations.
+ uint64_t free_ops;
+
+ // The cumulative number of bytes freed.
+ // Only recorded if the underlying heap shim can return the size of an
+ // allocation.
+ uint64_t free_bytes;
+
+ // The maximal value of |alloc_bytes| - |free_bytes| seen for this thread.
+ // Only recorded if the underlying heap shim supports returning the size of
+ // an allocation.
+ uint64_t max_allocated_bytes;
+};
+
+// By keeping a tally on heap operations, it's possible to track:
+// - the number of alloc/free operations, where a realloc is zero or one
+// of each, depending on the input parameters (see man realloc).
+// - the number of bytes allocated/freed.
+// - the number of estimated bytes of heap overhead used.
+// - the high-watermark amount of bytes allocated in the scope.
+// This in turn allows measuring the memory usage and memory usage churn over
+// a scope. Scopes must be cleanly nested, and each scope must be
+// destroyed on the thread where it's created.
+//
+// Note that this depends on the capabilities of the underlying heap shim. If
+// that shim can not yield a size estimate for an allocation, it's not possible
+// to keep track of overhead, freed bytes and the allocation high water mark.
+class BASE_EXPORT ThreadHeapUsageTracker {
+ public:
+ ThreadHeapUsageTracker();
+ ~ThreadHeapUsageTracker();
+
+ // Start tracking heap usage on this thread.
+ // This may only be called on the thread where the instance is created.
+ // Note IsHeapTrackingEnabled() must be true.
+ void Start();
+
+ // Stop tracking heap usage on this thread and store the usage tallied.
+ // If |usage_is_exclusive| is true, the usage tallied won't be added to the
+ // outer scope's usage. If |usage_is_exclusive| is false, the usage tallied
+ // in this scope will also tally to any outer scope.
+ // This may only be called on the thread where the instance is created.
+ void Stop(bool usage_is_exclusive);
+
+ // After Stop(), returns the usage tallied from Start() to Stop().
+ const ThreadHeapUsage& usage() const { return usage_; }
+
+ // Returns this thread's heap usage from the start of the innermost
+ // enclosing ThreadHeapUsageTracker instance, if any.
+ static ThreadHeapUsage GetUsageSnapshot();
+
+ // Enables the heap intercept. May only be called once, and only if the heap
+ // shim is available, e.g. if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) is
+ // true.
+ static void EnableHeapTracking();
+
+ // Returns true iff heap tracking is enabled.
+ static bool IsHeapTrackingEnabled();
+
+ protected:
+ // Exposed for testing only - note that it's safe to re-EnableHeapTracking()
+ // after calling this function in tests.
+ static void DisableHeapTrackingForTesting();
+
+ // Exposed for testing only.
+ static void EnsureTLSInitialized();
+
+ // Exposed to allow testing the shim without inserting it in the allocator
+ // shim chain.
+ static base::allocator::AllocatorDispatch* GetDispatchForTesting();
+
+ private:
+ ThreadChecker thread_checker_;
+
+ // The heap usage at Start(), or the difference from Start() to Stop().
+ ThreadHeapUsage usage_;
+
+ // This thread's heap usage, non-null from Start() to Stop().
+ ThreadHeapUsage* thread_usage_;
+};
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
\ No newline at end of file
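A sketch of the scoped measurement pattern described above (not part of the patch); DoAllocationHeavyWork() is a hypothetical workload, and EnableHeapTracking() is assumed to have been called once at startup:

#include "base/debug/thread_heap_usage_tracker.h"

void DoAllocationHeavyWork();  // hypothetical

void MeasureAllocations() {
  if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
    return;
  base::debug::ThreadHeapUsageTracker tracker;
  tracker.Start();
  DoAllocationHeavyWork();
  tracker.Stop(false /* usage_is_exclusive: also tally to outer scope */);
  // tracker.usage().alloc_ops / .alloc_bytes now describe this scope.
}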
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 435165e10c..353136c12b 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -10,7 +10,9 @@
#include <vector>
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
+#include "base/pickle.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@@ -26,6 +28,42 @@ FeatureList* g_instance = nullptr;
// Tracks whether the FeatureList instance was initialized via an accessor.
bool g_initialized_from_accessor = false;
+// An allocator entry for a feature in shared memory. The FeatureEntry is
+// followed by a base::Pickle object that contains the feature and trial name.
+struct FeatureEntry {
+ // SHA1(FeatureEntry): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 8;
+
+ // Specifies whether a feature override enables or disables the feature. Same
+ // values as the OverrideState enum in feature_list.h
+ uint32_t override_state;
+
+ // Size of the pickled structure, NOT the total size of this entry.
+ uint32_t pickle_size;
+
+ // Reads the feature and trial name from the pickle. Calling this is only
+ // valid on an initialized entry that's in shared memory.
+ bool GetFeatureAndTrialName(StringPiece* feature_name,
+ StringPiece* trial_name) const {
+ const char* src =
+ reinterpret_cast<const char*>(this) + sizeof(FeatureEntry);
+
+ Pickle pickle(src, pickle_size);
+ PickleIterator pickle_iter(pickle);
+
+ if (!pickle_iter.ReadStringPiece(feature_name))
+ return false;
+
+ // Return true because we are not guaranteed to have a trial name anyway.
+ auto sink = pickle_iter.ReadStringPiece(trial_name);
+ ALLOW_UNUSED_LOCAL(sink);
+ return true;
+ }
+};
+
// Some characters are not allowed to appear in feature names or the associated
// field trial names, as they are used as special characters for command-line
// serialization. This function checks that the strings are ASCII (since they
@@ -55,6 +93,26 @@ void FeatureList::InitializeFromCommandLine(
initialized_from_command_line_ = true;
}
+void FeatureList::InitializeFromSharedMemory(
+ PersistentMemoryAllocator* allocator) {
+ DCHECK(!initialized_);
+
+ PersistentMemoryAllocator::Iterator iter(allocator);
+ const FeatureEntry* entry;
+ while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
+ OverrideState override_state =
+ static_cast<OverrideState>(entry->override_state);
+
+ StringPiece feature_name;
+ StringPiece trial_name;
+ if (!entry->GetFeatureAndTrialName(&feature_name, &trial_name))
+ continue;
+
+ FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
+ RegisterOverride(feature_name, override_state, trial);
+ }
+}
+
bool FeatureList::IsFeatureOverriddenFromCommandLine(
const std::string& feature_name,
OverrideState state) const {
@@ -97,6 +155,30 @@ void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
RegisterOverride(feature_name, override_state, field_trial);
}
+void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
+ DCHECK(initialized_);
+
+ for (const auto& override : overrides_) {
+ Pickle pickle;
+ pickle.WriteString(override.first);
+ if (override.second.field_trial)
+ pickle.WriteString(override.second.field_trial->trial_name());
+
+ size_t total_size = sizeof(FeatureEntry) + pickle.size();
+ FeatureEntry* entry = allocator->New<FeatureEntry>(total_size);
+ if (!entry)
+ return;
+
+ entry->override_state = override.second.overridden_state;
+ entry->pickle_size = pickle.size();
+
+ char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ allocator->MakeIterable(entry);
+ }
+}
+
void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
std::string* disable_overrides) {
DCHECK(initialized_);
@@ -197,10 +279,19 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
}
// static
-void FeatureList::ClearInstanceForTesting() {
- delete g_instance;
+std::unique_ptr<FeatureList> FeatureList::ClearInstanceForTesting() {
+ FeatureList* old_instance = g_instance;
g_instance = nullptr;
g_initialized_from_accessor = false;
+ return base::WrapUnique(old_instance);
+}
+
+// static
+void FeatureList::RestoreInstanceForTesting(
+ std::unique_ptr<FeatureList> instance) {
+ DCHECK(!g_instance);
+ // Note: Intentional leak of global singleton.
+ g_instance = instance.release();
}
void FeatureList::FinalizeInitialization() {
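For reference, the shared-memory layout produced by AddFeaturesToAllocator() above is one iterable allocation per override (a sketch inferred from the code, not normative):

  +--------------------+--------------------------------------------+
  | FeatureEntry (8 B) | Pickle: feature name [+ field trial name]  |
  +--------------------+--------------------------------------------+

pickle_size records the size of the serialized Pickle only; the total entry size is sizeof(FeatureEntry) + pickle_size, which is what is passed to allocator->New&lt;FeatureEntry&gt;().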
diff --git a/base/feature_list.h b/base/feature_list.h
index e9ed00a124..09e8408aa8 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -13,6 +13,7 @@
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
@@ -31,6 +32,8 @@ enum FeatureState {
// for a given feature name - generally defined as a constant global variable or
// file static.
struct BASE_EXPORT Feature {
+ constexpr Feature(const char* name, FeatureState default_state)
+ : name(name), default_state(default_state) {}
// The name of the feature. This should be unique to each feature and is used
// for enabling/disabling features via command line flags and experiments.
const char* const name;
@@ -92,6 +95,11 @@ class BASE_EXPORT FeatureList {
void InitializeFromCommandLine(const std::string& enable_features,
const std::string& disable_features);
+ // Initializes feature overrides through the field trial allocator, which
+ // we're using to store the feature names, their override state, and the name
+ // of the associated field trial.
+ void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
+
// Specifies whether a feature override enables or disables the feature.
enum OverrideState {
OVERRIDE_USE_DEFAULT,
@@ -124,6 +132,9 @@ class BASE_EXPORT FeatureList {
OverrideState override_state,
FieldTrial* field_trial);
+ // Loops through feature overrides and serializes them all into |allocator|.
+ void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
+
// Returns comma-separated lists of feature names (in the same format that is
// accepted by InitializeFromCommandLine()) corresponding to features that
// have been overridden - either through command-line or via FieldTrials. For
@@ -163,13 +174,27 @@ class BASE_EXPORT FeatureList {
// Registers the given |instance| to be the singleton feature list for this
// process. This should only be called once and |instance| must not be null.
+ // Note: If you are considering using this for the purposes of testing, take
+ // a look at using base/test/scoped_feature_list.h instead.
static void SetInstance(std::unique_ptr<FeatureList> instance);
- // Clears the previously-registered singleton instance for tests.
- static void ClearInstanceForTesting();
+ // Clears the previously-registered singleton instance for tests and returns
+ // the old instance.
+ // Note: Most tests should never call this directly. Instead consider using
+ // base::test::ScopedFeatureList.
+ static std::unique_ptr<FeatureList> ClearInstanceForTesting();
+
+ // Sets a given (initialized) |instance| to be the singleton feature list,
+ // for testing. Existing instance must be null. This is primarily intended
+ // to support base::test::ScopedFeatureList helper class.
+ static void RestoreInstanceForTesting(std::unique_ptr<FeatureList> instance);
private:
FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+ FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+ StoreAndRetrieveFeaturesFromSharedMemory);
+ FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+ StoreAndRetrieveAssociatedFeaturesFromSharedMemory);
struct OverrideEntry {
// The overridden enable (on/off) state of the feature.
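With the new constexpr constructor, a feature is declared and queried like this (a minimal sketch, not part of the patch; the feature name is illustrative):

#include "base/feature_list.h"

const base::Feature kMyFeature{"MyFeature", base::FEATURE_DISABLED_BY_DEFAULT};

bool UseMyFeature() {
  // Consults command-line/field-trial overrides, then the default state.
  return base::FeatureList::IsEnabled(kMyFeature);
}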
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index 9d1dcb72f3..fb3b320ae9 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -13,6 +13,7 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -21,12 +22,12 @@ namespace base {
namespace {
-const char kFeatureOnByDefaultName[] = "OnByDefault";
+constexpr char kFeatureOnByDefaultName[] = "OnByDefault";
struct Feature kFeatureOnByDefault {
kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
};
-const char kFeatureOffByDefaultName[] = "OffByDefault";
+constexpr char kFeatureOffByDefaultName[] = "OffByDefault";
struct Feature kFeatureOffByDefault {
kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
};
@@ -468,4 +469,68 @@ TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
}
+TEST_F(FeatureListTest, StoreAndRetrieveFeaturesFromSharedMemory) {
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+ // Create some overrides.
+ feature_list->RegisterOverride(kFeatureOffByDefaultName,
+ FeatureList::OVERRIDE_ENABLE_FEATURE, nullptr);
+ feature_list->RegisterOverride(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, nullptr);
+ feature_list->FinalizeInitialization();
+
+ // Create an allocator and store the overrides.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ shm->CreateAndMapAnonymous(4 << 10);
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+ feature_list->AddFeaturesToAllocator(&allocator);
+
+ std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+
+ // Check that the new feature list is empty.
+ EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+
+ feature_list2->InitializeFromSharedMemory(&allocator);
+ // Check that the new feature list now has 2 overrides.
+ EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+}
+
+TEST_F(FeatureListTest, StoreAndRetrieveAssociatedFeaturesFromSharedMemory) {
+ FieldTrialList field_trial_list(nullptr);
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+ // Create some overrides.
+ FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+ FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+ feature_list->FinalizeInitialization();
+
+ // Create an allocator and store the overrides.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ shm->CreateAndMapAnonymous(4 << 10);
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+ feature_list->AddFeaturesToAllocator(&allocator);
+
+ std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+ feature_list2->InitializeFromSharedMemory(&allocator);
+ feature_list2->FinalizeInitialization();
+
+ // Check that the field trials are still associated.
+ FieldTrial* associated_trial1 =
+ feature_list2->GetAssociatedFieldTrial(kFeatureOnByDefault);
+ FieldTrial* associated_trial2 =
+ feature_list2->GetAssociatedFieldTrial(kFeatureOffByDefault);
+ EXPECT_EQ(associated_trial1, trial1);
+ EXPECT_EQ(associated_trial2, trial2);
+}
+
} // namespace base
diff --git a/base/file_version_info_unittest.cc b/base/file_version_info_unittest.cc
deleted file mode 100644
index 67edc7737f..0000000000
--- a/base/file_version_info_unittest.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/file_version_info.h"
-
-#include <stddef.h>
-
-#include <memory>
-
-#include "base/files/file_path.h"
-#include "base/macros.h"
-
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_WIN)
-#include "base/path_service.h"
-#include "base/file_version_info_win.h"
-#endif
-
-using base::FilePath;
-
-namespace {
-
-#if defined(OS_WIN)
-FilePath GetTestDataPath() {
- FilePath path;
- PathService::Get(base::DIR_SOURCE_ROOT, &path);
- path = path.AppendASCII("base");
- path = path.AppendASCII("test");
- path = path.AppendASCII("data");
- path = path.AppendASCII("file_version_info_unittest");
- return path;
-}
-#endif
-
-} // namespace
-
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, HardCodedProperties) {
- const wchar_t kDLLName[] = {L"FileVersionInfoTest1.dll"};
-
- const wchar_t* const kExpectedValues[15] = {
- // FileVersionInfoTest.dll
- L"Goooooogle", // company_name
- L"Google", // company_short_name
- L"This is the product name", // product_name
- L"This is the product short name", // product_short_name
- L"The Internal Name", // internal_name
- L"4.3.2.1", // product_version
- L"Private build property", // private_build
- L"Special build property", // special_build
- L"This is a particularly interesting comment", // comments
- L"This is the original filename", // original_filename
- L"This is my file description", // file_description
- L"1.2.3.4", // file_version
- L"This is the legal copyright", // legal_copyright
- L"This is the legal trademarks", // legal_trademarks
- L"This is the last change", // last_change
- };
-
- FilePath dll_path = GetTestDataPath();
- dll_path = dll_path.Append(kDLLName);
-
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
-
- int j = 0;
- EXPECT_EQ(kExpectedValues[j++], version_info->company_name());
- EXPECT_EQ(kExpectedValues[j++], version_info->company_short_name());
- EXPECT_EQ(kExpectedValues[j++], version_info->product_name());
- EXPECT_EQ(kExpectedValues[j++], version_info->product_short_name());
- EXPECT_EQ(kExpectedValues[j++], version_info->internal_name());
- EXPECT_EQ(kExpectedValues[j++], version_info->product_version());
- EXPECT_EQ(kExpectedValues[j++], version_info->private_build());
- EXPECT_EQ(kExpectedValues[j++], version_info->special_build());
- EXPECT_EQ(kExpectedValues[j++], version_info->comments());
- EXPECT_EQ(kExpectedValues[j++], version_info->original_filename());
- EXPECT_EQ(kExpectedValues[j++], version_info->file_description());
- EXPECT_EQ(kExpectedValues[j++], version_info->file_version());
- EXPECT_EQ(kExpectedValues[j++], version_info->legal_copyright());
- EXPECT_EQ(kExpectedValues[j++], version_info->legal_trademarks());
- EXPECT_EQ(kExpectedValues[j++], version_info->last_change());
-}
-#endif
-
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, IsOfficialBuild) {
- const wchar_t* kDLLNames[] = {
- L"FileVersionInfoTest1.dll",
- L"FileVersionInfoTest2.dll"
- };
-
- const bool kExpected[] = {
- true,
- false,
- };
-
- // Test consistency check.
- ASSERT_EQ(arraysize(kDLLNames), arraysize(kExpected));
-
- for (size_t i = 0; i < arraysize(kDLLNames); ++i) {
- FilePath dll_path = GetTestDataPath();
- dll_path = dll_path.Append(kDLLNames[i]);
-
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
-
- EXPECT_EQ(kExpected[i], version_info->is_official_build());
- }
-}
-#endif
-
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, CustomProperties) {
- FilePath dll_path = GetTestDataPath();
- dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
-
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
-
- // Test few existing properties.
- std::wstring str;
- FileVersionInfoWin* version_info_win =
- static_cast<FileVersionInfoWin*>(version_info.get());
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1", &str));
- EXPECT_EQ(L"Un", str);
- EXPECT_EQ(L"Un", version_info_win->GetStringValue(L"Custom prop 1"));
-
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2", &str));
- EXPECT_EQ(L"Deux", str);
- EXPECT_EQ(L"Deux", version_info_win->GetStringValue(L"Custom prop 2"));
-
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3", &str));
- EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043", str);
- EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043",
- version_info_win->GetStringValue(L"Custom prop 3"));
-
- // Test an non-existing property.
- EXPECT_FALSE(version_info_win->GetValue(L"Unknown property", &str));
- EXPECT_EQ(L"", version_info_win->GetStringValue(L"Unknown property"));
-}
-#endif
diff --git a/base/files/dir_reader_posix_unittest.cc b/base/files/dir_reader_posix_unittest.cc
index a75858feeb..5d7fd8b139 100644
--- a/base/files/dir_reader_posix_unittest.cc
+++ b/base/files/dir_reader_posix_unittest.cc
@@ -29,7 +29,7 @@ TEST(DirReaderPosixUnittest, Read) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- const char* dir = temp_dir.path().value().c_str();
+ const char* dir = temp_dir.GetPath().value().c_str();
ASSERT_TRUE(dir);
const int prev_wd = open(".", O_RDONLY | O_DIRECTORY);
diff --git a/base/files/file.cc b/base/files/file.cc
index ab05630062..1b2224e323 100644
--- a/base/files/file.cc
+++ b/base/files/file.cc
@@ -138,12 +138,4 @@ std::string File::ErrorToString(Error error) {
return "";
}
-bool File::Flush() {
- ElapsedTimer timer;
- SCOPED_FILE_TRACE("Flush");
- bool return_value = DoFlush();
- UMA_HISTOGRAM_TIMES("PlatformFile.FlushTime", timer.Elapsed());
- return return_value;
-}
-
} // namespace base
diff --git a/base/files/file.h b/base/files/file.h
index ae2bd1b61b..94a9d5cf49 100644
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -63,28 +63,31 @@ class BASE_EXPORT File {
// FLAG_EXCLUSIVE_(READ|WRITE) only grant exclusive access to the file on
// creation on POSIX; for existing files, consider using Lock().
enum Flags {
- FLAG_OPEN = 1 << 0, // Opens a file, only if it exists.
- FLAG_CREATE = 1 << 1, // Creates a new file, only if it does not
- // already exist.
- FLAG_OPEN_ALWAYS = 1 << 2, // May create a new file.
- FLAG_CREATE_ALWAYS = 1 << 3, // May overwrite an old file.
- FLAG_OPEN_TRUNCATED = 1 << 4, // Opens a file and truncates it, only if it
- // exists.
+ FLAG_OPEN = 1 << 0, // Opens a file, only if it exists.
+ FLAG_CREATE = 1 << 1, // Creates a new file, only if it does not
+ // already exist.
+ FLAG_OPEN_ALWAYS = 1 << 2, // May create a new file.
+ FLAG_CREATE_ALWAYS = 1 << 3, // May overwrite an old file.
+ FLAG_OPEN_TRUNCATED = 1 << 4, // Opens a file and truncates it, only if it
+ // exists.
FLAG_READ = 1 << 5,
FLAG_WRITE = 1 << 6,
FLAG_APPEND = 1 << 7,
- FLAG_EXCLUSIVE_READ = 1 << 8, // EXCLUSIVE is opposite of Windows SHARE.
+ FLAG_EXCLUSIVE_READ = 1 << 8, // EXCLUSIVE is opposite of Windows SHARE.
FLAG_EXCLUSIVE_WRITE = 1 << 9,
FLAG_ASYNC = 1 << 10,
- FLAG_TEMPORARY = 1 << 11, // Used on Windows only.
- FLAG_HIDDEN = 1 << 12, // Used on Windows only.
+ FLAG_TEMPORARY = 1 << 11, // Used on Windows only.
+ FLAG_HIDDEN = 1 << 12, // Used on Windows only.
FLAG_DELETE_ON_CLOSE = 1 << 13,
- FLAG_WRITE_ATTRIBUTES = 1 << 14, // Used on Windows only.
- FLAG_SHARE_DELETE = 1 << 15, // Used on Windows only.
- FLAG_TERMINAL_DEVICE = 1 << 16, // Serial port flags.
- FLAG_BACKUP_SEMANTICS = 1 << 17, // Used on Windows only.
- FLAG_EXECUTE = 1 << 18, // Used on Windows only.
- FLAG_SEQUENTIAL_SCAN = 1 << 19, // Used on Windows only.
+ FLAG_WRITE_ATTRIBUTES = 1 << 14, // Used on Windows only.
+ FLAG_SHARE_DELETE = 1 << 15, // Used on Windows only.
+ FLAG_TERMINAL_DEVICE = 1 << 16, // Serial port flags.
+ FLAG_BACKUP_SEMANTICS = 1 << 17, // Used on Windows only.
+ FLAG_EXECUTE = 1 << 18, // Used on Windows only.
+ FLAG_SEQUENTIAL_SCAN = 1 << 19, // Used on Windows only.
+ FLAG_CAN_DELETE_ON_CLOSE = 1 << 20, // Requests permission to delete a file
+ // via DeleteOnClose() (Windows only).
+ // See DeleteOnClose() for details.
};
// This enum has been recorded in multiple histograms. If the order of the
@@ -290,11 +293,41 @@ class BASE_EXPORT File {
// object that was created or initialized with this flag will have unlinked
// the underlying file when it was created or opened. On Windows, the
// underlying file is deleted when the last handle to it is closed.
- File Duplicate();
+ File Duplicate() const;
bool async() const { return async_; }
#if defined(OS_WIN)
+ // Sets or clears the DeleteFile disposition on the handle. Returns true if
+ // the disposition was set or cleared, as indicated by |delete_on_close|.
+ //
+ // Microsoft Windows deletes a file only when the last handle to the
+ // underlying kernel object is closed and the DeleteFile disposition has been
+ // set by any handle holder. This disposition may be set by:
+ // - Calling the Win32 DeleteFile function with the path to a file.
+ // - Opening/creating a file with FLAG_DELETE_ON_CLOSE.
+ // - Opening/creating a file with FLAG_CAN_DELETE_ON_CLOSE and subsequently
+ // calling DeleteOnClose(true).
+ //
+ // In all cases, all pre-existing handles to the file must have been opened
+ // with FLAG_SHARE_DELETE.
+ //
+ // So:
+ // - Use FLAG_SHARE_DELETE when creating/opening a file to allow another
+ // entity on the system to cause it to be deleted when it is closed. (Note:
+ // another entity can delete the file the moment after it is closed, so not
+ // using this permission doesn't provide any protections.)
+ // - Use FLAG_DELETE_ON_CLOSE for any file that is to be deleted after use.
+ // The OS will ensure it is deleted even in the face of process termination.
+ // - Use FLAG_CAN_DELETE_ON_CLOSE in conjunction with DeleteOnClose() to alter
+ // the DeleteFile disposition on an open handle. This fine-grained control
+ // allows for marking a file for deletion during processing so that it is
+ // deleted in the event of untimely process termination, and then clearing
+ // this state once the file is suitable for persistence.
+ bool DeleteOnClose(bool delete_on_close);
+#endif
+
+#if defined(OS_WIN)
static Error OSErrorToFileError(DWORD last_error);
#elif defined(OS_POSIX)
static Error OSErrorToFileError(int saved_errno);
@@ -310,10 +343,6 @@ class BASE_EXPORT File {
// traversal ('..') components.
void DoInitialize(const FilePath& path, uint32_t flags);
- // TODO(tnagel): Reintegrate into Flush() once histogram isn't needed anymore,
- // cf. issue 473337.
- bool DoFlush();
-
void SetPlatformFile(PlatformFile file);
#if defined(OS_WIN)
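For reference, a minimal sketch of how the FLAG_CAN_DELETE_ON_CLOSE / DeleteOnClose() pair documented above might be used on Windows. The path and the surrounding logic are illustrative assumptions, not part of this change:

// Sketch only, assuming the semantics documented for DeleteOnClose() above.
// |temp_path| is a hypothetical base::FilePath supplied by the caller.
base::File file(temp_path,
                base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE |
                    base::File::FLAG_SHARE_DELETE |
                    base::File::FLAG_CAN_DELETE_ON_CLOSE);
if (file.IsValid()) {
  // Mark the file for deletion so the OS cleans it up even if the process
  // terminates while intermediate data is being written.
  file.DeleteOnClose(true);
  // ... produce the file's contents ...
  // The file turned out to be worth keeping: clear the disposition so it
  // survives the handle being closed.
  file.DeleteOnClose(false);
}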
diff --git a/base/files/file_descriptor_watcher_posix.cc b/base/files/file_descriptor_watcher_posix.cc
new file mode 100644
index 0000000000..9746e35ea7
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix.cc
@@ -0,0 +1,210 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+// MessageLoopForIO used to watch file descriptors for which callbacks are
+// registered from a given thread.
+LazyInstance<ThreadLocalPointer<MessageLoopForIO>>::Leaky
+ tls_message_loop_for_io = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+FileDescriptorWatcher::Controller::~Controller() {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+
+ // Delete |watcher_| on the MessageLoopForIO.
+ //
+ // If the MessageLoopForIO is deleted before Watcher::StartWatching() runs,
+ // |watcher_| is leaked. If the MessageLoopForIO is deleted after
+ // Watcher::StartWatching() runs but before the DeleteSoon task runs,
+ // |watcher_| is deleted from Watcher::WillDestroyCurrentMessageLoop().
+ message_loop_for_io_task_runner_->DeleteSoon(FROM_HERE, watcher_.release());
+
+ // Since WeakPtrs are invalidated by the destructor, RunCallback() won't be
+ // invoked after this returns.
+}
+
+class FileDescriptorWatcher::Controller::Watcher
+ : public MessageLoopForIO::Watcher,
+ public MessageLoop::DestructionObserver {
+ public:
+ Watcher(WeakPtr<Controller> controller, MessageLoopForIO::Mode mode, int fd);
+ ~Watcher() override;
+
+ void StartWatching();
+
+ private:
+ friend class FileDescriptorWatcher;
+
+ // MessageLoopForIO::Watcher:
+ void OnFileCanReadWithoutBlocking(int fd) override;
+ void OnFileCanWriteWithoutBlocking(int fd) override;
+
+ // MessageLoop::DestructionObserver:
+ void WillDestroyCurrentMessageLoop() override;
+
+ // Used to instruct the MessageLoopForIO to stop watching the file descriptor.
+ MessageLoopForIO::FileDescriptorWatcher file_descriptor_watcher_;
+
+ // Runs tasks on the sequence on which this was instantiated (i.e. the
+ // sequence on which the callback must run).
+ const scoped_refptr<SequencedTaskRunner> callback_task_runner_ =
+ SequencedTaskRunnerHandle::Get();
+
+ // The Controller that created this Watcher.
+ WeakPtr<Controller> controller_;
+
+ // Whether this Watcher is notified when |fd_| becomes readable or writable
+ // without blocking.
+ const MessageLoopForIO::Mode mode_;
+
+ // The watched file descriptor.
+ const int fd_;
+
+ // Except for the constructor, every method of this class must run on the same
+ // MessageLoopForIO thread.
+ ThreadChecker thread_checker_;
+
+ // Whether this Watcher was registered as a DestructionObserver on the
+ // MessageLoopForIO thread.
+ bool registered_as_destruction_observer_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(Watcher);
+};
+
+FileDescriptorWatcher::Controller::Watcher::Watcher(
+ WeakPtr<Controller> controller,
+ MessageLoopForIO::Mode mode,
+ int fd)
+ : file_descriptor_watcher_(FROM_HERE),
+ controller_(controller),
+ mode_(mode),
+ fd_(fd) {
+ DCHECK(callback_task_runner_);
+ thread_checker_.DetachFromThread();
+}
+
+FileDescriptorWatcher::Controller::Watcher::~Watcher() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ MessageLoopForIO::current()->RemoveDestructionObserver(this);
+}
+
+void FileDescriptorWatcher::Controller::Watcher::StartWatching() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ MessageLoopForIO::current()->WatchFileDescriptor(
+ fd_, false, mode_, &file_descriptor_watcher_, this);
+
+ if (!registered_as_destruction_observer_) {
+ MessageLoopForIO::current()->AddDestructionObserver(this);
+ registered_as_destruction_observer_ = true;
+ }
+}
+
+void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
+ int fd) {
+ DCHECK_EQ(fd_, fd);
+ DCHECK_EQ(MessageLoopForIO::WATCH_READ, mode_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Run the callback on the sequence on which the watch was initiated.
+ callback_task_runner_->PostTask(FROM_HERE,
+ Bind(&Controller::RunCallback, controller_));
+}
+
+void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
+ int fd) {
+ DCHECK_EQ(fd_, fd);
+ DCHECK_EQ(MessageLoopForIO::WATCH_WRITE, mode_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Run the callback on the sequence on which the watch was initiated.
+ callback_task_runner_->PostTask(FROM_HERE,
+ Bind(&Controller::RunCallback, controller_));
+}
+
+void FileDescriptorWatcher::Controller::Watcher::
+ WillDestroyCurrentMessageLoop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // A Watcher is owned by a Controller. When the Controller is deleted, it
+ // transfers ownership of the Watcher to a delete task posted to the
+ // MessageLoopForIO. If the MessageLoopForIO is deleted before the delete task
+ // runs, the following line takes care of deleting the Watcher.
+ delete this;
+}
+
+FileDescriptorWatcher::Controller::Controller(MessageLoopForIO::Mode mode,
+ int fd,
+ const Closure& callback)
+ : callback_(callback),
+ message_loop_for_io_task_runner_(
+ tls_message_loop_for_io.Get().Get()->task_runner()),
+ weak_factory_(this) {
+ DCHECK(!callback_.is_null());
+ DCHECK(message_loop_for_io_task_runner_);
+ watcher_ = MakeUnique<Watcher>(weak_factory_.GetWeakPtr(), mode, fd);
+ StartWatching();
+}
+
+void FileDescriptorWatcher::Controller::StartWatching() {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ // It is safe to use Unretained() below because |watcher_| can only be deleted
+ // by a delete task posted to |message_loop_for_io_task_runner_| by this
+ // Controller's destructor. Since this delete task hasn't been posted yet, it
+ // can't run before the task posted below.
+ message_loop_for_io_task_runner_->PostTask(
+ FROM_HERE, Bind(&Watcher::StartWatching, Unretained(watcher_.get())));
+}
+
+void FileDescriptorWatcher::Controller::RunCallback() {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+
+ WeakPtr<Controller> weak_this = weak_factory_.GetWeakPtr();
+
+ callback_.Run();
+
+ // If |this| wasn't deleted, re-enable the watch.
+ if (weak_this)
+ StartWatching();
+}
+
+FileDescriptorWatcher::FileDescriptorWatcher(
+ MessageLoopForIO* message_loop_for_io) {
+ DCHECK(message_loop_for_io);
+ DCHECK(!tls_message_loop_for_io.Get().Get());
+ tls_message_loop_for_io.Get().Set(message_loop_for_io);
+}
+
+FileDescriptorWatcher::~FileDescriptorWatcher() {
+ tls_message_loop_for_io.Get().Set(nullptr);
+}
+
+std::unique_ptr<FileDescriptorWatcher::Controller>
+FileDescriptorWatcher::WatchReadable(int fd, const Closure& callback) {
+ return WrapUnique(new Controller(MessageLoopForIO::WATCH_READ, fd, callback));
+}
+
+std::unique_ptr<FileDescriptorWatcher::Controller>
+FileDescriptorWatcher::WatchWritable(int fd, const Closure& callback) {
+ return WrapUnique(
+ new Controller(MessageLoopForIO::WATCH_WRITE, fd, callback));
+}
+
+} // namespace base
diff --git a/base/files/file_descriptor_watcher_posix.h b/base/files/file_descriptor_watcher_posix.h
new file mode 100644
index 0000000000..6cc011bb3e
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix.h
@@ -0,0 +1,99 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
+#define BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+// The FileDescriptorWatcher API allows callbacks to be invoked when file
+// descriptors are readable or writable without blocking.
+class BASE_EXPORT FileDescriptorWatcher {
+ public:
+ // Instantiated and returned by WatchReadable() or WatchWritable(). The
+ // constructor registers a callback to be invoked when a file descriptor is
+ // readable or writable without blocking and the destructor unregisters it.
+ class Controller {
+ public:
+ // Unregisters the callback registered by the constructor.
+ ~Controller();
+
+ private:
+ friend class FileDescriptorWatcher;
+ class Watcher;
+
+ // Registers |callback| to be invoked when |fd| is readable or writable
+ // without blocking (depending on |mode|).
+ Controller(MessageLoopForIO::Mode mode, int fd, const Closure& callback);
+
+ // Starts watching the file descriptor.
+ void StartWatching();
+
+ // Runs |callback_|.
+ void RunCallback();
+
+ // The callback to run when the watched file descriptor is readable or
+ // writable without blocking.
+ Closure callback_;
+
+ // TaskRunner associated with the MessageLoopForIO that watches the file
+ // descriptor.
+ const scoped_refptr<SingleThreadTaskRunner>
+ message_loop_for_io_task_runner_;
+
+ // Notified by the MessageLoopForIO associated with
+ // |message_loop_for_io_task_runner_| when the watched file descriptor is
+ // readable or writable without blocking. Posts a task to run RunCallback()
+ // on the sequence on which the Controller was instantiated. When the
+ // Controller is deleted, ownership of |watcher_| is transferred to a delete
+ // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
+ // deleted while it is being used by the MessageLoopForIO.
+ std::unique_ptr<Watcher> watcher_;
+
+ // Validates that the Controller is used on the sequence on which it was
+ // instantiated.
+ SequenceChecker sequence_checker_;
+
+ WeakPtrFactory<Controller> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(Controller);
+ };
+
+ // Registers |message_loop_for_io| to watch file descriptors for which
+ // callbacks are registered from the current thread via WatchReadable() or
+ // WatchWritable(). |message_loop_for_io| may run on another thread. The
+ // constructed FileDescriptorWatcher must not outlive |message_loop_for_io|.
+ FileDescriptorWatcher(MessageLoopForIO* message_loop_for_io);
+ ~FileDescriptorWatcher();
+
+ // Registers |callback| to be invoked on the current sequence when |fd| is
+ // readable or writable without blocking. |callback| is unregistered when the
+ // returned Controller is deleted (deletion must happen on the current
+ // sequence). To call these methods, a FileDescriptorWatcher must have been
+ // instantiated on the current thread and SequencedTaskRunnerHandle::IsSet()
+ // must return true.
+ static std::unique_ptr<Controller> WatchReadable(int fd,
+ const Closure& callback);
+ static std::unique_ptr<Controller> WatchWritable(int fd,
+ const Closure& callback);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
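For reference, a minimal usage sketch of the API above, modeled on the unit tests that follow; the pipe setup and the callback are illustrative assumptions:

#include <unistd.h>

#include <memory>

#include "base/bind.h"
#include "base/files/file_descriptor_watcher_posix.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"

void OnPipeReadable() {
  // Read from the file descriptor; it will not block.
}

void WatchPipeExample() {
  base::MessageLoopForIO message_loop;
  // Registers |message_loop| to service watches requested from this thread.
  base::FileDescriptorWatcher watcher(&message_loop);

  int fds[2];
  PCHECK(pipe(fds) == 0);

  // The watch stays active until |controller| is destroyed (which must
  // happen on this sequence).
  std::unique_ptr<base::FileDescriptorWatcher::Controller> controller =
      base::FileDescriptorWatcher::WatchReadable(
          fds[0], base::Bind(&OnPipeReadable));

  base::RunLoop().Run();  // Pump tasks so the callback can be invoked.
}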
diff --git a/base/files/file_descriptor_watcher_posix_unittest.cc b/base/files/file_descriptor_watcher_posix_unittest.cc
new file mode 100644
index 0000000000..7ff40c5fee
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix_unittest.cc
@@ -0,0 +1,318 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker_impl.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class Mock {
+ public:
+ Mock() = default;
+
+ MOCK_METHOD0(ReadableCallback, void());
+ MOCK_METHOD0(WritableCallback, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Mock);
+};
+
+enum class FileDescriptorWatcherTestType {
+ MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD,
+ MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD,
+};
+
+class FileDescriptorWatcherTest
+ : public testing::TestWithParam<FileDescriptorWatcherTestType> {
+ public:
+ FileDescriptorWatcherTest()
+ : message_loop_(GetParam() == FileDescriptorWatcherTestType::
+ MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD
+ ? new MessageLoopForIO
+ : new MessageLoop),
+ other_thread_("FileDescriptorWatcherTest_OtherThread") {}
+ ~FileDescriptorWatcherTest() override = default;
+
+ void SetUp() override {
+ ASSERT_EQ(0, pipe(pipe_fds_));
+
+ MessageLoop* message_loop_for_io;
+ if (GetParam() ==
+ FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD) {
+ Thread::Options options;
+ options.message_loop_type = MessageLoop::TYPE_IO;
+ ASSERT_TRUE(other_thread_.StartWithOptions(options));
+ message_loop_for_io = other_thread_.message_loop();
+ } else {
+ message_loop_for_io = message_loop_.get();
+ }
+
+ ASSERT_TRUE(message_loop_for_io->IsType(MessageLoop::TYPE_IO));
+ file_descriptor_watcher_ = MakeUnique<FileDescriptorWatcher>(
+ static_cast<MessageLoopForIO*>(message_loop_for_io));
+ }
+
+ void TearDown() override {
+ if (GetParam() ==
+ FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD &&
+ message_loop_) {
+ // Allow the delete task posted by the Controller's destructor to run.
+ base::RunLoop().RunUntilIdle();
+ }
+
+ EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[0])));
+ EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[1])));
+ }
+
+ protected:
+ int read_file_descriptor() const { return pipe_fds_[0]; }
+ int write_file_descriptor() const { return pipe_fds_[1]; }
+
+ // Waits for a short delay and runs pending tasks.
+ void WaitAndRunPendingTasks() {
+ PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+ RunLoop().RunUntilIdle();
+ }
+
+ // Registers ReadableCallback() to be called on |mock_| when
+ // read_file_descriptor() is readable without blocking.
+ std::unique_ptr<FileDescriptorWatcher::Controller> WatchReadable() {
+ std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+ FileDescriptorWatcher::WatchReadable(
+ read_file_descriptor(),
+ Bind(&Mock::ReadableCallback, Unretained(&mock_)));
+ EXPECT_TRUE(controller);
+
+ // Unless read_file_descriptor() was readable before the callback was
+ // registered, this shouldn't do anything.
+ WaitAndRunPendingTasks();
+
+ return controller;
+ }
+
+ // Registers WritableCallback() to be called on |mock_| when
+ // write_file_descriptor() is writable without blocking.
+ std::unique_ptr<FileDescriptorWatcher::Controller> WatchWritable() {
+ std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+ FileDescriptorWatcher::WatchWritable(
+ read_file_descriptor(),
+ Bind(&Mock::WritableCallback, Unretained(&mock_)));
+ EXPECT_TRUE(controller);
+ return controller;
+ }
+
+ void WriteByte() {
+ constexpr char kByte = '!';
+ ASSERT_TRUE(
+ WriteFileDescriptor(write_file_descriptor(), &kByte, sizeof(kByte)));
+ }
+
+ void ReadByte() {
+ // This is always called as part of the WatchReadable() callback, which
+ // should run on the main thread.
+ EXPECT_TRUE(thread_checker_.CalledOnValidThread());
+
+ char buffer;
+ ASSERT_TRUE(ReadFromFD(read_file_descriptor(), &buffer, sizeof(buffer)));
+ }
+
+ // Mock on which callbacks are invoked.
+ testing::StrictMock<Mock> mock_;
+
+ // MessageLoop bound to the main thread.
+ std::unique_ptr<MessageLoop> message_loop_;
+
+ // Thread running a MessageLoopForIO. Used when the test type is
+ // MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD.
+ Thread other_thread_;
+
+ private:
+ // Determines which MessageLoopForIO is used to watch file descriptors for
+ // which callbacks are registered on the main thread.
+ std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher_;
+
+ // Watched file descriptors.
+ int pipe_fds_[2];
+
+ // Used to verify that callbacks run on the thread on which they are
+ // registered.
+ ThreadCheckerImpl thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcherTest);
+};
+
+} // namespace
+
+TEST_P(FileDescriptorWatcherTest, WatchWritable) {
+ auto controller = WatchWritable();
+
+// On Mac and iOS, the write end of a newly created pipe is writable without
+// blocking.
+#if defined(OS_MACOSX)
+ RunLoop run_loop;
+ EXPECT_CALL(mock_, WritableCallback())
+ .WillOnce(testing::Invoke(&run_loop, &RunLoop::Quit));
+ run_loop.Run();
+#endif // defined(OS_MACOSX)
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableOneByte) {
+ auto controller = WatchReadable();
+
+ // Write 1 byte to the pipe, making it readable without blocking. Expect one
+ // call to ReadableCallback() which will read 1 byte from the pipe.
+ WriteByte();
+ RunLoop run_loop;
+ EXPECT_CALL(mock_, ReadableCallback())
+ .WillOnce(testing::Invoke([this, &run_loop]() {
+ ReadByte();
+ run_loop.Quit();
+ }));
+ run_loop.Run();
+ testing::Mock::VerifyAndClear(&mock_);
+
+ // No more call to ReadableCallback() is expected.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableTwoBytes) {
+ auto controller = WatchReadable();
+
+ // Write 2 bytes to the pipe. Expect two calls to ReadableCallback() which
+ // will each read 1 byte from the pipe.
+ WriteByte();
+ WriteByte();
+ RunLoop run_loop;
+ EXPECT_CALL(mock_, ReadableCallback())
+ .WillOnce(testing::Invoke([this]() { ReadByte(); }))
+ .WillOnce(testing::Invoke([this, &run_loop]() {
+ ReadByte();
+ run_loop.Quit();
+ }));
+ run_loop.Run();
+ testing::Mock::VerifyAndClear(&mock_);
+
+ // No more call to ReadableCallback() is expected.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableByteWrittenFromCallback) {
+ auto controller = WatchReadable();
+
+ // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
+ // 1 byte is read and 1 byte is written to the pipe. Then, expect another call
+ // to ReadableCallback() from which the remaining byte is read from the pipe.
+ WriteByte();
+ RunLoop run_loop;
+ EXPECT_CALL(mock_, ReadableCallback())
+ .WillOnce(testing::Invoke([this]() {
+ ReadByte();
+ WriteByte();
+ }))
+ .WillOnce(testing::Invoke([this, &run_loop]() {
+ ReadByte();
+ run_loop.Quit();
+ }));
+ run_loop.Run();
+ testing::Mock::VerifyAndClear(&mock_);
+
+ // No more call to ReadableCallback() is expected.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerFromCallback) {
+ auto controller = WatchReadable();
+
+ // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
+ // |controller| is deleted.
+ WriteByte();
+ RunLoop run_loop;
+ EXPECT_CALL(mock_, ReadableCallback())
+ .WillOnce(testing::Invoke([&run_loop, &controller]() {
+ controller = nullptr;
+ run_loop.Quit();
+ }));
+ run_loop.Run();
+ testing::Mock::VerifyAndClear(&mock_);
+
+ // Since |controller| has been deleted, no call to ReadableCallback() is
+ // expected even though the pipe is still readable without blocking.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest,
+ DeleteControllerBeforeFileDescriptorReadable) {
+ auto controller = WatchReadable();
+
+ // Cancel the watch.
+ controller = nullptr;
+
+ // Write 1 byte to the pipe to make it readable without blocking.
+ WriteByte();
+
+ // No call to ReadableCallback() is expected.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterFileDescriptorReadable) {
+ auto controller = WatchReadable();
+
+ // Write 1 byte to the pipe to make it readable without blocking.
+ WriteByte();
+
+ // Cancel the watch.
+ controller = nullptr;
+
+ // No call to ReadableCallback() is expected.
+ WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterDeleteMessageLoopForIO) {
+ auto controller = WatchReadable();
+
+ // Delete the MessageLoopForIO.
+ if (GetParam() ==
+ FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD) {
+ message_loop_ = nullptr;
+ } else {
+ other_thread_.Stop();
+ }
+
+ // Deleting |controller| shouldn't crash even though that causes a task to be
+ // posted to the MessageLoopForIO thread.
+ controller = nullptr;
+}
+
+INSTANTIATE_TEST_CASE_P(
+ MessageLoopForIOOnMainThread,
+ FileDescriptorWatcherTest,
+ ::testing::Values(
+ FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD));
+INSTANTIATE_TEST_CASE_P(
+ MessageLoopForIOOnOtherThread,
+ FileDescriptorWatcherTest,
+ ::testing::Values(
+ FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD));
+
+} // namespace base
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 29f12a80aa..9f67f9bc49 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -176,6 +176,7 @@ FilePath::FilePath() {
FilePath::FilePath(const FilePath& that) : path_(that.path_) {
}
+FilePath::FilePath(FilePath&& that) = default;
FilePath::FilePath(StringPieceType path) {
path.CopyToString(&path_);
@@ -192,6 +193,8 @@ FilePath& FilePath::operator=(const FilePath& that) {
return *this;
}
+FilePath& FilePath::operator=(FilePath&& that) = default;
+
bool FilePath::operator==(const FilePath& that) const {
#if defined(FILE_PATH_USES_DRIVE_LETTERS)
return EqualDriveLetterCaseInsensitive(this->path_, that.path_);
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 3234df7bfb..02846f6892 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -182,6 +182,13 @@ class BASE_EXPORT FilePath {
~FilePath();
FilePath& operator=(const FilePath& that);
+ // Constructs FilePath with the contents of |that|, which is left in a valid
+ // but unspecified state.
+ FilePath(FilePath&& that);
+ // Replaces the contents with those of |that|, which is left in a valid but
+ // unspecified state.
+ FilePath& operator=(FilePath&& that);
+
bool operator==(const FilePath& that) const;
bool operator!=(const FilePath& that) const;
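A brief illustration of the move operations added above; per the new comments, the moved-from object may only be assigned to or destroyed. The paths are arbitrary:

#include <utility>
#include <vector>

#include "base/files/file_path.h"

void MoveExample() {
  base::FilePath source(FILE_PATH_LITERAL("/tmp/example"));
  std::vector<base::FilePath> paths;
  // Moves the underlying string instead of copying it.
  paths.push_back(std::move(source));
  // |source| is valid but unspecified; reassigning it is safe.
  source = base::FilePath(FILE_PATH_LITERAL("/tmp/other"));
}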
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index d8c5969513..a091e62dd1 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -9,7 +9,6 @@
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/test/scoped_locale.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
index a4624ab609..245bd8efe2 100644
--- a/base/files/file_path_watcher.cc
+++ b/base/files/file_path_watcher.cc
@@ -8,22 +8,16 @@
#include "base/files/file_path_watcher.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
#include "build/build_config.h"
namespace base {
FilePathWatcher::~FilePathWatcher() {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
impl_->Cancel();
}
// static
-void FilePathWatcher::CancelWatch(
- const scoped_refptr<PlatformDelegate>& delegate) {
- delegate->CancelOnMessageLoopThread();
-}
-
-// static
bool FilePathWatcher::RecursiveWatchAvailable() {
#if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
defined(OS_LINUX) || defined(OS_ANDROID)
@@ -44,6 +38,7 @@ FilePathWatcher::PlatformDelegate::~PlatformDelegate() {
bool FilePathWatcher::Watch(const FilePath& path,
bool recursive,
const Callback& callback) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
DCHECK(path.IsAbsolute());
return impl_->Watch(path, recursive, callback);
}
diff --git a/base/files/file_path_watcher.h b/base/files/file_path_watcher.h
index d5c6db1acf..9e29d0a9d5 100644
--- a/base/files/file_path_watcher.h
+++ b/base/files/file_path_watcher.h
@@ -7,12 +7,15 @@
#ifndef BASE_FILES_FILE_PATH_WATCHER_H_
#define BASE_FILES_FILE_PATH_WATCHER_H_
+#include <memory>
+
#include "base/base_export.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
namespace base {
@@ -25,6 +28,8 @@ namespace base {
// detect the creation and deletion of files in a watched directory, but will
// not detect modifications to those files. See file_path_watcher_kqueue.cc for
// details.
+//
+// Must be destroyed on the sequence that invokes Watch().
class BASE_EXPORT FilePathWatcher {
public:
// Callback type for Watch(). |path| points to the file that was updated,
@@ -33,9 +38,10 @@ class BASE_EXPORT FilePathWatcher {
typedef base::Callback<void(const FilePath& path, bool error)> Callback;
// Used internally to encapsulate different members on different platforms.
- class PlatformDelegate : public base::RefCountedThreadSafe<PlatformDelegate> {
+ class PlatformDelegate {
public:
PlatformDelegate();
+ virtual ~PlatformDelegate();
// Start watching for the given |path| and notify |delegate| about changes.
virtual bool Watch(const FilePath& path,
@@ -44,25 +50,16 @@ class BASE_EXPORT FilePathWatcher {
// Stop watching. This is called from FilePathWatcher's dtor in order to
// allow proper shutdown while the object is still alive.
- // It can be called from any thread.
virtual void Cancel() = 0;
protected:
- friend class base::RefCountedThreadSafe<PlatformDelegate>;
friend class FilePathWatcher;
- virtual ~PlatformDelegate();
-
- // Stop watching. This is only called on the thread of the appropriate
- // message loop. Since it can also be called more than once, it should
- // check |is_cancelled()| to avoid duplicate work.
- virtual void CancelOnMessageLoopThread() = 0;
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner() const {
+ scoped_refptr<SequencedTaskRunner> task_runner() const {
return task_runner_;
}
- void set_task_runner(scoped_refptr<base::SingleThreadTaskRunner> runner) {
+ void set_task_runner(scoped_refptr<SequencedTaskRunner> runner) {
task_runner_ = std::move(runner);
}
@@ -76,32 +73,34 @@ class BASE_EXPORT FilePathWatcher {
}
private:
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<SequencedTaskRunner> task_runner_;
bool cancelled_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformDelegate);
};
FilePathWatcher();
- virtual ~FilePathWatcher();
-
- // A callback that always cleans up the PlatformDelegate, either when executed
- // or when deleted without having been executed at all, as can happen during
- // shutdown.
- static void CancelWatch(const scoped_refptr<PlatformDelegate>& delegate);
+ ~FilePathWatcher();
// Returns true if the platform and OS version support recursive watches.
static bool RecursiveWatchAvailable();
// Invokes |callback| whenever updates to |path| are detected. This should be
- // called at most once, and from a MessageLoop of TYPE_IO. Set |recursive| to
- // true, to watch |path| and its children. The callback will be invoked on
- // the same loop. Returns true on success.
+ // called at most once. Set |recursive| to true to watch |path| and its
+ // children. The callback will be invoked on the same sequence. Returns true
+ // on success.
+ //
+ // On POSIX, this must be called from a thread that supports
+ // FileDescriptorWatcher.
//
// Recursive watch is not supported on all platforms and file systems.
// Watch() will return false in the case of failure.
bool Watch(const FilePath& path, bool recursive, const Callback& callback);
private:
- scoped_refptr<PlatformDelegate> impl_;
+ std::unique_ptr<PlatformDelegate> impl_;
+
+ SequenceChecker sequence_checker_;
DISALLOW_COPY_AND_ASSIGN(FilePathWatcher);
};
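A hedged sketch of the sequence-bound usage this header now documents; the watched path and the callback body are assumptions:

#include "base/bind.h"
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/logging.h"

void OnPathChanged(const base::FilePath& path, bool error) {
  LOG(INFO) << "changed: " << path.value() << " error: " << error;
}

void WatchExample() {
  // On POSIX this must run on a thread that supports FileDescriptorWatcher
  // (see file_descriptor_watcher_posix.h); the callback runs on this same
  // sequence, and |watcher| must also be destroyed on it.
  base::FilePathWatcher watcher;
  const base::FilePath path(FILE_PATH_LITERAL("/tmp/watched"));
  const bool ok =
      watcher.Watch(path, false /* recursive */, base::Bind(&OnPathChanged));
  DCHECK(ok);
  // ... run the message loop for as long as notifications are wanted ...
}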
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index e9d25080e7..e9a87b0e05 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -13,10 +13,8 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
@@ -70,16 +68,21 @@ FilePath ResolvePath(const FilePath& path) {
FilePathWatcherFSEvents::FilePathWatcherFSEvents()
: queue_(dispatch_queue_create(
- base::StringPrintf(
- "org.chromium.base.FilePathWatcher.%p", this).c_str(),
+ base::StringPrintf("org.chromium.base.FilePathWatcher.%p", this)
+ .c_str(),
DISPATCH_QUEUE_SERIAL)),
- fsevent_stream_(nullptr) {
+ fsevent_stream_(nullptr),
+ weak_factory_(this) {}
+
+FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(callback_.is_null())
+ << "Cancel() must be called before FilePathWatcher is destroyed.";
}
bool FilePathWatcherFSEvents::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
- DCHECK(MessageLoopForIO::current());
DCHECK(!callback.is_null());
DCHECK(callback_.is_null());
@@ -88,7 +91,7 @@ bool FilePathWatcherFSEvents::Watch(const FilePath& path,
if (!recursive)
return false;
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
callback_ = callback;
FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
@@ -107,11 +110,15 @@ void FilePathWatcherFSEvents::Cancel() {
set_cancelled();
callback_.Reset();
- // Switch to the dispatch queue to tear down the event stream. As the queue
- // is owned by this object, and this method is called from the destructor,
- // execute the block synchronously.
+ // Switch to the dispatch queue to tear down the event stream. As the queue is
+ // owned by |this|, and this method is called from the destructor, execute the
+ // block synchronously.
dispatch_sync(queue_, ^{
- CancelOnMessageLoopThread();
+ if (fsevent_stream_) {
+ DestroyEventStream();
+ target_.clear();
+ resolved_target_.clear();
+ }
});
}
@@ -142,31 +149,40 @@ void FilePathWatcherFSEvents::FSEventsCallback(
// the directory to be watched gets created.
if (root_changed) {
// Resetting the event stream from within the callback fails (FSEvents spews
- // bad file descriptor errors), so post a task to do the reset.
- dispatch_async(watcher->queue_, ^{
- watcher->UpdateEventStream(root_change_at);
- });
+ // bad file descriptor errors), so do the reset asynchronously.
+ //
+ // We can't dispatch_async a call to UpdateEventStream() directly because
+ // there would be no guarantee that |watcher| still exists when it runs.
+ //
+ // Instead, bounce on task_runner() and use a WeakPtr to verify that
+ // |watcher| still exists. If it does, dispatch_async a call to
+ // UpdateEventStream(). Because the destructor of |watcher| runs on
+ // task_runner() and calls dispatch_sync, it is guaranteed that |watcher|
+ // still exists when UpdateEventStream() runs.
+ watcher->task_runner()->PostTask(
+ FROM_HERE, Bind(
+ [](WeakPtr<FilePathWatcherFSEvents> weak_watcher,
+ FSEventStreamEventId root_change_at) {
+ if (!weak_watcher)
+ return;
+ FilePathWatcherFSEvents* watcher = weak_watcher.get();
+ dispatch_async(watcher->queue_, ^{
+ watcher->UpdateEventStream(root_change_at);
+ });
+ },
+ watcher->weak_factory_.GetWeakPtr(), root_change_at));
}
watcher->OnFilePathsChanged(paths);
}
-FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
- // This method may be called on either the libdispatch or task_runner()
- // thread. Checking callback_ on the libdispatch thread here is safe because
- // it is executing in a task posted by Cancel() which first reset callback_.
- // PostTask forms a sufficient memory barrier to ensure that the value is
- // consistent on the target thread.
- DCHECK(callback_.is_null())
- << "Cancel() must be called before FilePathWatcher is destroyed.";
-}
-
void FilePathWatcherFSEvents::OnFilePathsChanged(
const std::vector<FilePath>& paths) {
DCHECK(!resolved_target_.empty());
task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
- target_, resolved_target_));
+ FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::DispatchEvents, weak_factory_.GetWeakPtr(),
+ paths, target_, resolved_target_));
}
void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
@@ -187,18 +203,6 @@ void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
}
}
-void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
- // For all other implementations, the "message loop thread" is the IO thread,
- // as returned by task_runner(). This implementation, however, needs to
- // cancel pending work on the Dispatch Queue thread.
-
- if (fsevent_stream_) {
- DestroyEventStream();
- target_.clear();
- resolved_target_.clear();
- }
-}
-
void FilePathWatcherFSEvents::UpdateEventStream(
FSEventStreamEventId start_event) {
// It can happen that the watcher gets canceled while tasks that call this
@@ -234,8 +238,9 @@ void FilePathWatcherFSEvents::UpdateEventStream(
FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
if (!FSEventStreamStart(fsevent_stream_)) {
- task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+ task_runner()->PostTask(FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::ReportError,
+ weak_factory_.GetWeakPtr(), target_));
}
}
@@ -244,8 +249,9 @@ bool FilePathWatcherFSEvents::ResolveTargetPath() {
bool changed = resolved != resolved_target_;
resolved_target_ = resolved;
if (resolved_target_.empty()) {
- task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+ task_runner()->PostTask(FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::ReportError,
+ weak_factory_.GetWeakPtr(), target_));
}
return changed;
}
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
index cfbe020b51..dcdf2fbf9d 100644
--- a/base/files/file_path_watcher_fsevents.h
+++ b/base/files/file_path_watcher_fsevents.h
@@ -14,6 +14,7 @@
#include "base/files/file_path_watcher.h"
#include "base/mac/scoped_dispatch_object.h"
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
namespace base {
@@ -26,6 +27,7 @@ namespace base {
class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherFSEvents();
+ ~FilePathWatcherFSEvents() override;
// FilePathWatcher::PlatformDelegate overrides.
bool Watch(const FilePath& path,
@@ -41,8 +43,6 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
const FSEventStreamEventFlags flags[],
const FSEventStreamEventId event_ids[]);
- ~FilePathWatcherFSEvents() override;
-
// Called from FSEventsCallback whenever there is a change to the paths.
void OnFilePathsChanged(const std::vector<FilePath>& paths);
@@ -53,9 +53,6 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
const FilePath& target,
const FilePath& resolved_target);
- // Cleans up and stops the event stream.
- void CancelOnMessageLoopThread() override;
-
// (Re-)Initialize the event stream to start reporting events from
// |start_event|.
void UpdateEventStream(FSEventStreamEventId start_event);
@@ -92,6 +89,8 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
// (Only accessed from the libdispatch queue.)
FSEventStreamRef fsevent_stream_;
+ WeakPtrFactory<FilePathWatcherFSEvents> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
};
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc
index 6d034cd9a2..a28726acb0 100644
--- a/base/files/file_path_watcher_kqueue.cc
+++ b/base/files/file_path_watcher_kqueue.cc
@@ -12,7 +12,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
// On some platforms these are not defined.
#if !defined(EV_RECEIPT)
@@ -26,7 +26,9 @@ namespace base {
FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}
-FilePathWatcherKQueue::~FilePathWatcherKQueue() {}
+FilePathWatcherKQueue::~FilePathWatcherKQueue() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+}
void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
CloseFileDescriptor(&event.ident);
@@ -36,7 +38,6 @@ void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
}
int FilePathWatcherKQueue::EventsForPath(FilePath path, EventVector* events) {
- DCHECK(MessageLoopForIO::current());
// Make sure that we are working with a clean slate.
DCHECK(events->empty());
@@ -230,9 +231,74 @@ bool FilePathWatcherKQueue::UpdateWatches(bool* target_file_affected) {
return true;
}
-void FilePathWatcherKQueue::OnFileCanReadWithoutBlocking(int fd) {
- DCHECK(MessageLoopForIO::current());
- DCHECK_EQ(fd, kqueue_);
+bool FilePathWatcherKQueue::Watch(const FilePath& path,
+ bool recursive,
+ const FilePathWatcher::Callback& callback) {
+ DCHECK(target_.value().empty()); // Can only watch one path.
+ DCHECK(!callback.is_null());
+ DCHECK_EQ(kqueue_, -1);
+ // Recursive watch is not supported using kqueue.
+ DCHECK(!recursive);
+
+ callback_ = callback;
+ target_ = path;
+
+ set_task_runner(SequencedTaskRunnerHandle::Get());
+
+ kqueue_ = kqueue();
+ if (kqueue_ == -1) {
+ DPLOG(ERROR) << "kqueue";
+ return false;
+ }
+
+ int last_entry = EventsForPath(target_, &events_);
+ DCHECK_NE(last_entry, 0);
+
+ EventVector responses(last_entry);
+
+ int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
+ &responses[0], last_entry, NULL));
+ if (!AreKeventValuesValid(&responses[0], count)) {
+ // Call Cancel() here to close any file descriptors that were opened.
+ // This would happen in the destructor anyway, but FilePathWatchers tend to
+ // be long lived, and if an error has occurred, there is no reason to waste
+ // the file descriptors.
+ Cancel();
+ return false;
+ }
+
+ // It's safe to use Unretained() because the watch is cancelled and the
+ // callback cannot be invoked after |kqueue_watch_controller_| (which is a
+ // member of |this|) has been deleted.
+ kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable(
+ kqueue_,
+ Bind(&FilePathWatcherKQueue::OnKQueueReadable, Unretained(this)));
+
+ return true;
+}
+
+void FilePathWatcherKQueue::Cancel() {
+ if (!task_runner()) {
+ set_cancelled();
+ return;
+ }
+
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ if (!is_cancelled()) {
+ set_cancelled();
+ kqueue_watch_controller_.reset();
+ if (IGNORE_EINTR(close(kqueue_)) != 0) {
+ DPLOG(ERROR) << "close kqueue";
+ }
+ kqueue_ = -1;
+ std::for_each(events_.begin(), events_.end(), ReleaseEvent);
+ events_.clear();
+ callback_.Reset();
+ }
+}
+
+void FilePathWatcherKQueue::OnKQueueReadable() {
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
DCHECK(events_.size());
// Request the file system update notifications that have occurred and return
@@ -303,89 +369,4 @@ void FilePathWatcherKQueue::OnFileCanReadWithoutBlocking(int fd) {
}
}
-void FilePathWatcherKQueue::OnFileCanWriteWithoutBlocking(int /* fd */) {
- NOTREACHED();
-}
-
-void FilePathWatcherKQueue::WillDestroyCurrentMessageLoop() {
- CancelOnMessageLoopThread();
-}
-
-bool FilePathWatcherKQueue::Watch(const FilePath& path,
- bool recursive,
- const FilePathWatcher::Callback& callback) {
- DCHECK(MessageLoopForIO::current());
- DCHECK(target_.value().empty()); // Can only watch one path.
- DCHECK(!callback.is_null());
- DCHECK_EQ(kqueue_, -1);
-
- if (recursive) {
- // Recursive watch is not supported using kqueue.
- NOTIMPLEMENTED();
- return false;
- }
-
- callback_ = callback;
- target_ = path;
-
- MessageLoop::current()->AddDestructionObserver(this);
- io_task_runner_ = ThreadTaskRunnerHandle::Get();
-
- kqueue_ = kqueue();
- if (kqueue_ == -1) {
- DPLOG(ERROR) << "kqueue";
- return false;
- }
-
- int last_entry = EventsForPath(target_, &events_);
- DCHECK_NE(last_entry, 0);
-
- EventVector responses(last_entry);
-
- int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
- &responses[0], last_entry, NULL));
- if (!AreKeventValuesValid(&responses[0], count)) {
- // Calling Cancel() here to close any file descriptors that were opened.
- // This would happen in the destructor anyways, but FilePathWatchers tend to
- // be long lived, and if an error has occurred, there is no reason to waste
- // the file descriptors.
- Cancel();
- return false;
- }
-
- return MessageLoopForIO::current()->WatchFileDescriptor(
- kqueue_, true, MessageLoopForIO::WATCH_READ, &kqueue_watcher_, this);
-}
-
-void FilePathWatcherKQueue::Cancel() {
- SingleThreadTaskRunner* task_runner = io_task_runner_.get();
- if (!task_runner) {
- set_cancelled();
- return;
- }
- if (!task_runner->BelongsToCurrentThread()) {
- task_runner->PostTask(FROM_HERE,
- base::Bind(&FilePathWatcherKQueue::Cancel, this));
- return;
- }
- CancelOnMessageLoopThread();
-}
-
-void FilePathWatcherKQueue::CancelOnMessageLoopThread() {
- DCHECK(MessageLoopForIO::current());
- if (!is_cancelled()) {
- set_cancelled();
- kqueue_watcher_.StopWatchingFileDescriptor();
- if (IGNORE_EINTR(close(kqueue_)) != 0) {
- DPLOG(ERROR) << "close kqueue";
- }
- kqueue_ = -1;
- std::for_each(events_.begin(), events_.end(), ReleaseEvent);
- events_.clear();
- io_task_runner_ = NULL;
- MessageLoop::current()->RemoveDestructionObserver(this);
- callback_.Reset();
- }
-}
-
} // namespace base
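The Unretained(this) binding in Watch() above is safe only because the Controller is owned by the watcher itself: destroying |kqueue_watch_controller_| unregisters the callback before |this| can dangle. A generic sketch of that ownership pattern, using a hypothetical class:

#include <memory>

#include "base/bind.h"
#include "base/files/file_descriptor_watcher_posix.h"

class PipeReader {
 public:
  explicit PipeReader(int fd)
      : controller_(base::FileDescriptorWatcher::WatchReadable(
            fd,
            base::Bind(&PipeReader::OnReadable, base::Unretained(this)))) {}

  // When |controller_| is destroyed here, the callback is unregistered, so
  // OnReadable() can never run with a dangling |this|.
  ~PipeReader() = default;

 private:
  void OnReadable() {
    // Drain the file descriptor without blocking.
  }

  std::unique_ptr<base::FileDescriptorWatcher::Controller> controller_;
};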
diff --git a/base/files/file_path_watcher_kqueue.h b/base/files/file_path_watcher_kqueue.h
index d9db8c2587..ef79be5596 100644
--- a/base/files/file_path_watcher_kqueue.h
+++ b/base/files/file_path_watcher_kqueue.h
@@ -6,13 +6,14 @@
#define BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
#include <sys/event.h>
+
+#include <memory>
#include <vector>
+#include "base/files/file_descriptor_watcher_posix.h"
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/single_thread_task_runner.h"
namespace base {
@@ -27,18 +28,10 @@ namespace base {
// detect the creation and deletion of files, just not the modification of
// files. It does however detect the attribute changes that the FSEvents impl
// would miss.
-class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
- public MessageLoopForIO::Watcher,
- public MessageLoop::DestructionObserver {
+class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherKQueue();
-
- // MessageLoopForIO::Watcher overrides.
- void OnFileCanReadWithoutBlocking(int fd) override;
- void OnFileCanWriteWithoutBlocking(int fd) override;
-
- // MessageLoop::DestructionObserver overrides.
- void WillDestroyCurrentMessageLoop() override;
+ ~FilePathWatcherKQueue() override;
// FilePathWatcher::PlatformDelegate overrides.
bool Watch(const FilePath& path,
@@ -46,9 +39,6 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
const FilePathWatcher::Callback& callback) override;
void Cancel() override;
- protected:
- ~FilePathWatcherKQueue() override;
-
private:
class EventData {
public:
@@ -60,8 +50,8 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
typedef std::vector<struct kevent> EventVector;
- // Can only be called on |io_task_runner_|'s thread.
- void CancelOnMessageLoopThread() override;
+ // Called when data is available in |kqueue_|.
+ void OnKQueueReadable();
// Returns true if the kevent values are error free.
bool AreKeventValuesValid(struct kevent* kevents, int count);
@@ -119,12 +109,14 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
}
EventVector events_;
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
- MessageLoopForIO::FileDescriptorWatcher kqueue_watcher_;
FilePathWatcher::Callback callback_;
FilePath target_;
int kqueue_;
+ // Throughout the lifetime of this, OnKQueueReadable() will be called when
+ // data is available in |kqueue_|.
+ std::unique_ptr<FileDescriptorWatcher::Controller> kqueue_watch_controller_;
+
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherKQueue);
};
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
index 87bddd3dea..1dc833dc88 100644
--- a/base/files/file_path_watcher_linux.cc
+++ b/base/files/file_path_watcher_linux.cc
@@ -28,12 +28,14 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/weak_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
namespace base {
@@ -61,12 +63,14 @@ class InotifyReader {
void OnInotifyEvent(const inotify_event* event);
private:
- friend struct DefaultLazyInstanceTraits<InotifyReader>;
+ friend struct LazyInstanceTraitsBase<InotifyReader>;
typedef std::set<FilePathWatcherImpl*> WatcherSet;
InotifyReader();
- ~InotifyReader();
+ // There is no destructor because |g_inotify_reader| is a
+ // base::LazyInstance::Leaky object. Having a destructor causes build
+ // issues with GCC 6 (http://crbug.com/636346).
// We keep track of which delegates want to be notified on which watches.
hash_map<Watch, WatcherSet> watchers_;
@@ -80,19 +84,16 @@ class InotifyReader {
// File descriptor returned by inotify_init.
const int inotify_fd_;
- // Use self-pipe trick to unblock select during shutdown.
- int shutdown_pipe_[2];
-
// Flag set to true when startup was successful.
bool valid_;
DISALLOW_COPY_AND_ASSIGN(InotifyReader);
};
-class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
- public MessageLoop::DestructionObserver {
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherImpl();
+ ~FilePathWatcherImpl() override;
// Called for each event coming from the watch. |fired_watch| identifies the
// watch that fired, |child| indicates what has changed, and is relative to
@@ -107,10 +108,13 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
bool deleted,
bool is_dir);
- protected:
- ~FilePathWatcherImpl() override {}
-
private:
+ void OnFilePathChangedOnOriginSequence(InotifyReader::Watch fired_watch,
+ const FilePath::StringType& child,
+ bool created,
+ bool deleted,
+ bool is_dir);
+
// Start watching |path| for changes and notify |delegate| on each change.
// Returns true if watch for |path| has been added successfully.
bool Watch(const FilePath& path,
@@ -120,14 +124,6 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
// Cancel the watch. This unregisters the instance with InotifyReader.
void Cancel() override;
- // Cleans up and stops observing the message_loop() thread.
- void CancelOnMessageLoopThread() override;
-
- // Deletion of the FilePathWatcher will call Cancel() to dispose of this
- // object in the right thread. This also observes destruction of the required
- // cleanup thread, in case it quits before Cancel() is called.
- void WillDestroyCurrentMessageLoop() override;
-
// Inotify watches are installed for all directory components of |target_|.
// A WatchEntry instance holds:
// - |watch|: the watch descriptor for a component.
@@ -191,16 +187,15 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
hash_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_;
std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_;
+ WeakPtrFactory<FilePathWatcherImpl> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
-void InotifyReaderCallback(InotifyReader* reader, int inotify_fd,
- int shutdown_fd) {
+void InotifyReaderCallback(InotifyReader* reader, int inotify_fd) {
// Make sure the file descriptors are good for use with select().
CHECK_LE(0, inotify_fd);
CHECK_GT(FD_SETSIZE, inotify_fd);
- CHECK_LE(0, shutdown_fd);
- CHECK_GT(FD_SETSIZE, shutdown_fd);
trace_event::TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
@@ -208,20 +203,15 @@ void InotifyReaderCallback(InotifyReader* reader, int inotify_fd,
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(inotify_fd, &rfds);
- FD_SET(shutdown_fd, &rfds);
// Wait until some inotify events are available.
int select_result =
- HANDLE_EINTR(select(std::max(inotify_fd, shutdown_fd) + 1,
- &rfds, NULL, NULL, NULL));
+ HANDLE_EINTR(select(inotify_fd + 1, &rfds, NULL, NULL, NULL));
if (select_result < 0) {
DPLOG(WARNING) << "select failed";
return;
}
- if (FD_ISSET(shutdown_fd, &rfds))
- return;
-
// Adjust buffer size to current event queue size.
int buffer_size;
int ioctl_result = HANDLE_EINTR(ioctl(inotify_fd, FIONREAD,
@@ -263,33 +253,14 @@ InotifyReader::InotifyReader()
if (inotify_fd_ < 0)
PLOG(ERROR) << "inotify_init() failed";
- shutdown_pipe_[0] = -1;
- shutdown_pipe_[1] = -1;
- if (inotify_fd_ >= 0 && pipe(shutdown_pipe_) == 0 && thread_.Start()) {
+ if (inotify_fd_ >= 0 && thread_.Start()) {
thread_.task_runner()->PostTask(
FROM_HERE,
- Bind(&InotifyReaderCallback, this, inotify_fd_, shutdown_pipe_[0]));
+ Bind(&InotifyReaderCallback, this, inotify_fd_));
valid_ = true;
}
}
-InotifyReader::~InotifyReader() {
- if (valid_) {
- // Write to the self-pipe so that the select call in InotifyReaderTask
- // returns.
- ssize_t ret = HANDLE_EINTR(write(shutdown_pipe_[1], "", 1));
- DPCHECK(ret > 0);
- DCHECK_EQ(ret, 1);
- thread_.Stop();
- }
- if (inotify_fd_ >= 0)
- close(inotify_fd_);
- if (shutdown_pipe_[0] >= 0)
- close(shutdown_pipe_[0]);
- if (shutdown_pipe_[1] >= 0)
- close(shutdown_pipe_[1]);
-}
-
InotifyReader::Watch InotifyReader::AddWatch(
const FilePath& path, FilePathWatcherImpl* watcher) {
if (!valid_)
@@ -343,7 +314,10 @@ void InotifyReader::OnInotifyEvent(const inotify_event* event) {
}
FilePathWatcherImpl::FilePathWatcherImpl()
- : recursive_(false) {
+ : recursive_(false), weak_factory_(this) {}
+
+FilePathWatcherImpl::~FilePathWatcherImpl() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
}
void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
@@ -351,22 +325,25 @@ void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
bool created,
bool deleted,
bool is_dir) {
- if (!task_runner()->BelongsToCurrentThread()) {
- // Switch to task_runner() to access |watches_| safely.
- task_runner()->PostTask(FROM_HERE,
- Bind(&FilePathWatcherImpl::OnFilePathChanged, this,
- fired_watch, child, created, deleted, is_dir));
- return;
- }
-
- // Check to see if CancelOnMessageLoopThread() has already been called.
- // May happen when code flow reaches here from the PostTask() above.
- if (watches_.empty()) {
- DCHECK(target_.empty());
- return;
- }
+ DCHECK(!task_runner()->RunsTasksOnCurrentThread());
+
+ // This method is invoked on the Inotify thread. Switch to task_runner() to
+ // access |watches_| safely. Use a WeakPtr to prevent the callback from
+ // running after |this| is destroyed (i.e. after the watch is cancelled).
+ task_runner()->PostTask(
+ FROM_HERE, Bind(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
+ weak_factory_.GetWeakPtr(), fired_watch, child, created,
+ deleted, is_dir));
+}
- DCHECK(MessageLoopForIO::current());
+void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
+ InotifyReader::Watch fired_watch,
+ const FilePath::StringType& child,
+ bool created,
+ bool deleted,
+ bool is_dir) {
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(!watches_.empty());
DCHECK(HasValidWatchVector());
// Used below to avoid multiple recursive updates.
@@ -451,13 +428,11 @@ bool FilePathWatcherImpl::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
DCHECK(target_.empty());
- DCHECK(MessageLoopForIO::current());
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
callback_ = callback;
target_ = path;
recursive_ = recursive;
- MessageLoop::current()->AddDestructionObserver(this);
std::vector<FilePath::StringType> comps;
target_.GetComponents(&comps);
@@ -470,47 +445,29 @@ bool FilePathWatcherImpl::Watch(const FilePath& path,
}
void FilePathWatcherImpl::Cancel() {
- if (callback_.is_null()) {
- // Watch was never called, or the message_loop() thread is already gone.
+ if (!callback_) {
+ // Watch() was never called.
set_cancelled();
return;
}
- // Switch to the message_loop() if necessary so we can access |watches_|.
- if (!task_runner()->BelongsToCurrentThread()) {
- task_runner()->PostTask(FROM_HERE, Bind(&FilePathWatcher::CancelWatch,
- make_scoped_refptr(this)));
- } else {
- CancelOnMessageLoopThread();
- }
-}
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(!is_cancelled());
-void FilePathWatcherImpl::CancelOnMessageLoopThread() {
- DCHECK(task_runner()->BelongsToCurrentThread());
set_cancelled();
-
- if (!callback_.is_null()) {
- MessageLoop::current()->RemoveDestructionObserver(this);
- callback_.Reset();
- }
+ callback_.Reset();
for (size_t i = 0; i < watches_.size(); ++i)
g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this);
watches_.clear();
target_.clear();
-
- if (recursive_)
- RemoveRecursiveWatches();
-}
-
-void FilePathWatcherImpl::WillDestroyCurrentMessageLoop() {
- CancelOnMessageLoopThread();
+ RemoveRecursiveWatches();
}
void FilePathWatcherImpl::UpdateWatches() {
- // Ensure this runs on the message_loop() exclusively in order to avoid
+ // Ensure this runs on the task_runner() exclusively in order to avoid
// concurrency issues.
- DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
DCHECK(HasValidWatchVector());
// Walk the list of watches and update them as we go.
@@ -541,6 +498,8 @@ void FilePathWatcherImpl::UpdateWatches() {
void FilePathWatcherImpl::UpdateRecursiveWatches(
InotifyReader::Watch fired_watch,
bool is_dir) {
+ DCHECK(HasValidWatchVector());
+
if (!recursive_)
return;
@@ -551,7 +510,8 @@ void FilePathWatcherImpl::UpdateRecursiveWatches(
// Check to see if this is a forced update or if some component of |target_|
// has changed. For these cases, redo the watches for |target_| and below.
- if (!ContainsKey(recursive_paths_by_watch_, fired_watch)) {
+ if (!ContainsKey(recursive_paths_by_watch_, fired_watch) &&
+ fired_watch != watches_.back().watch) {
UpdateRecursiveWatchesForPath(target_);
return;
}
@@ -560,7 +520,10 @@ void FilePathWatcherImpl::UpdateRecursiveWatches(
if (!is_dir)
return;
- const FilePath& changed_dir = recursive_paths_by_watch_[fired_watch];
+ const FilePath& changed_dir =
+ ContainsKey(recursive_paths_by_watch_, fired_watch) ?
+ recursive_paths_by_watch_[fired_watch] :
+ target_;
std::map<FilePath, InotifyReader::Watch>::iterator start_it =
recursive_watches_by_path_.lower_bound(changed_dir);
@@ -683,7 +646,8 @@ bool FilePathWatcherImpl::HasValidWatchVector() const {
} // namespace
FilePathWatcher::FilePathWatcher() {
- impl_ = new FilePathWatcherImpl();
+ sequence_checker_.DetachFromSequence();
+ impl_ = MakeUnique<FilePathWatcherImpl>();
}
} // namespace base
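
For reference, the cross-thread hand-off introduced above reduces to a small pattern: the background (inotify) thread posts to the owning SequencedTaskRunner with a WeakPtr-bound task, so a notification that arrives after destruction becomes a no-op instead of a use-after-free. A minimal sketch, assuming the base APIs shown in this diff; |Observer| and OnEvent() are hypothetical names, not the watcher's real API:

    #include <utility>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/logging.h"
    #include "base/memory/ref_counted.h"
    #include "base/memory/weak_ptr.h"
    #include "base/sequenced_task_runner.h"

    class Observer {
     public:
      explicit Observer(scoped_refptr<base::SequencedTaskRunner> task_runner)
          : task_runner_(std::move(task_runner)), weak_factory_(this) {}

      // May be called from any thread (e.g. the inotify thread).
      void OnEvent(int value) {
        // If |this| is destroyed on its owning sequence before the task runs,
        // the WeakPtr invalidates and the task is silently dropped.
        task_runner_->PostTask(
            FROM_HERE, base::Bind(&Observer::OnEventOnOriginSequence,
                                  weak_factory_.GetWeakPtr(), value));
      }

     private:
      void OnEventOnOriginSequence(int value) {
        DCHECK(task_runner_->RunsTasksOnCurrentThread());
        // Sequence-affine state is safe to touch here.
      }

      scoped_refptr<base::SequencedTaskRunner> task_runner_;
      base::WeakPtrFactory<Observer> weak_factory_;  // Must be declared last.
    };

This assumes the object is destroyed on |task_runner_|, which is exactly what the new ~FilePathWatcherImpl() DCHECK above enforces.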
diff --git a/base/files/file_path_watcher_mac.cc b/base/files/file_path_watcher_mac.cc
deleted file mode 100644
index 7338eafa44..0000000000
--- a/base/files/file_path_watcher_mac.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/files/file_path_watcher.h"
-#include "base/files/file_path_watcher_kqueue.h"
-#include "build/build_config.h"
-
-#if !defined(OS_IOS)
-#include "base/files/file_path_watcher_fsevents.h"
-#endif
-
-namespace base {
-
-namespace {
-
-class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
- public:
- bool Watch(const FilePath& path,
- bool recursive,
- const FilePathWatcher::Callback& callback) override {
- // Use kqueue for non-recursive watches and FSEvents for recursive ones.
- DCHECK(!impl_.get());
- if (recursive) {
- if (!FilePathWatcher::RecursiveWatchAvailable())
- return false;
-#if !defined(OS_IOS)
- impl_ = new FilePathWatcherFSEvents();
-#endif // OS_IOS
- } else {
- impl_ = new FilePathWatcherKQueue();
- }
- DCHECK(impl_.get());
- return impl_->Watch(path, recursive, callback);
- }
-
- void Cancel() override {
- if (impl_.get())
- impl_->Cancel();
- set_cancelled();
- }
-
- void CancelOnMessageLoopThread() override {
- if (impl_.get())
- impl_->Cancel();
- set_cancelled();
- }
-
- protected:
- ~FilePathWatcherImpl() override {}
-
- scoped_refptr<PlatformDelegate> impl_;
-};
-
-} // namespace
-
-FilePathWatcher::FilePathWatcher() {
- impl_ = new FilePathWatcherImpl();
-}
-
-} // namespace base
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index a40e4858b4..d2ec37bbec 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -28,7 +28,6 @@
#include "base/synchronization/waitable_event.h"
#include "base/test/test_file_util.h"
#include "base/test/test_timeouts.h"
-#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -37,6 +36,10 @@
#include "base/android/path_utils.h"
#endif // defined(OS_ANDROID)
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif // defined(OS_POSIX)
+
namespace base {
namespace {
@@ -131,30 +134,19 @@ class TestDelegate : public TestDelegateBase {
DISALLOW_COPY_AND_ASSIGN(TestDelegate);
};
-void SetupWatchCallback(const FilePath& target,
- FilePathWatcher* watcher,
- TestDelegateBase* delegate,
- bool recursive_watch,
- bool* result,
- base::WaitableEvent* completion) {
- *result = watcher->Watch(target, recursive_watch,
- base::Bind(&TestDelegateBase::OnFileChanged,
- delegate->AsWeakPtr()));
- completion->Signal();
-}
-
class FilePathWatcherTest : public testing::Test {
public:
FilePathWatcherTest()
- : file_thread_("FilePathWatcherTest") {}
+#if defined(OS_POSIX)
+ : file_descriptor_watcher_(&loop_)
+#endif
+ {
+ }
~FilePathWatcherTest() override {}
protected:
void SetUp() override {
- // Create a separate file thread in order to test proper thread usage.
- base::Thread::Options options(MessageLoop::TYPE_IO, 0);
- ASSERT_TRUE(file_thread_.StartWithOptions(options));
#if defined(OS_ANDROID)
// Watching files is only permitted when all parent directories are
// accessible, which is not the case for the default temp directory
@@ -171,16 +163,12 @@ class FilePathWatcherTest : public testing::Test {
void TearDown() override { RunLoop().RunUntilIdle(); }
- void DeleteDelegateOnFileThread(TestDelegate* delegate) {
- file_thread_.task_runner()->DeleteSoon(FROM_HERE, delegate);
- }
-
FilePath test_file() {
- return temp_dir_.path().AppendASCII("FilePathWatcherTest");
+ return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest");
}
FilePath test_link() {
- return temp_dir_.path().AppendASCII("FilePathWatcherTest.lnk");
+ return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest.lnk");
}
// Write |content| to |file|. Returns true on success.
@@ -196,18 +184,23 @@ class FilePathWatcherTest : public testing::Test {
bool WaitForEvents() WARN_UNUSED_RESULT {
collector_->Reset();
+
+ RunLoop run_loop;
// Make sure we timeout if we don't get notified.
- loop_.PostDelayedTask(FROM_HERE,
- MessageLoop::QuitWhenIdleClosure(),
- TestTimeouts::action_timeout());
- RunLoop().Run();
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, run_loop.QuitWhenIdleClosure(),
+ TestTimeouts::action_timeout());
+ run_loop.Run();
return collector_->Success();
}
NotificationCollector* collector() { return collector_.get(); }
- MessageLoop loop_;
- base::Thread file_thread_;
+ MessageLoopForIO loop_;
+#if defined(OS_POSIX)
+ FileDescriptorWatcher file_descriptor_watcher_;
+#endif
+
ScopedTempDir temp_dir_;
scoped_refptr<NotificationCollector> collector_;
@@ -219,14 +212,9 @@ bool FilePathWatcherTest::SetupWatch(const FilePath& target,
FilePathWatcher* watcher,
TestDelegateBase* delegate,
bool recursive_watch) {
- base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- bool result;
- file_thread_.task_runner()->PostTask(
- FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
- recursive_watch, &result, &completion));
- completion.Wait();
- return result;
+ return watcher->Watch(
+ target, recursive_watch,
+ base::Bind(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
}
// Basic test: Create the file and verify that we notice.
@@ -237,7 +225,6 @@ TEST_F(FilePathWatcherTest, NewFile) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that modifying the file is caught.
@@ -251,12 +238,11 @@ TEST_F(FilePathWatcherTest, ModifiedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(WriteFile(test_file(), "new content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that moving the file into place is caught.
TEST_F(FilePathWatcherTest, MovedFile) {
- FilePath source_file(temp_dir_.path().AppendASCII("source"));
+ FilePath source_file(temp_dir_.GetPath().AppendASCII("source"));
ASSERT_TRUE(WriteFile(source_file, "content"));
FilePathWatcher watcher;
@@ -266,7 +252,6 @@ TEST_F(FilePathWatcherTest, MovedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(base::Move(source_file, test_file()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, DeletedFile) {
@@ -279,7 +264,6 @@ TEST_F(FilePathWatcherTest, DeletedFile) {
// Now make sure we get notified if the file is deleted.
base::DeleteFile(test_file(), false);
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Used by the DeleteDuringNotify test below.
@@ -327,11 +311,9 @@ TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
// Flaky on MacOS (and ARM linux): http://crbug.com/85930
TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
- FilePathWatcher* watcher = new FilePathWatcher;
- ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
+ FilePathWatcher watcher;
+ ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
ASSERT_TRUE(WriteFile(test_file(), "content"));
- file_thread_.task_runner()->DeleteSoon(FROM_HERE, watcher);
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
@@ -343,15 +325,13 @@ TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate1.release());
- DeleteDelegateOnFileThread(delegate2.release());
}
// Verify that watching a file whose parent directory doesn't exist yet works if
// the directory and file are created eventually.
TEST_F(FilePathWatcherTest, NonExistentDirectory) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
FilePath file(dir.AppendASCII("file"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
@@ -370,13 +350,12 @@ TEST_F(FilePathWatcherTest, NonExistentDirectory) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Exercises watch reconfiguration for the case that directories on the path
// are rapidly created.
TEST_F(FilePathWatcherTest, DirectoryChain) {
- FilePath path(temp_dir_.path());
+ FilePath path(temp_dir_.GetPath());
std::vector<std::string> dir_names;
for (int i = 0; i < 20; i++) {
std::string dir(base::StringPrintf("d%d", i));
@@ -389,7 +368,7 @@ TEST_F(FilePathWatcherTest, DirectoryChain) {
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
- FilePath sub_path(temp_dir_.path());
+ FilePath sub_path(temp_dir_.GetPath());
for (std::vector<std::string>::const_iterator d(dir_names.begin());
d != dir_names.end(); ++d) {
sub_path = sub_path.AppendASCII(*d);
@@ -403,7 +382,6 @@ TEST_F(FilePathWatcherTest, DirectoryChain) {
ASSERT_TRUE(WriteFile(file, "content v2"));
VLOG(1) << "Waiting for file modification";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_MACOSX)
@@ -412,7 +390,7 @@ TEST_F(FilePathWatcherTest, DirectoryChain) {
#endif
TEST_F(FilePathWatcherTest, DisappearingDirectory) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
FilePath file(dir.AppendASCII("file"));
ASSERT_TRUE(base::CreateDirectory(dir));
ASSERT_TRUE(WriteFile(file, "content"));
@@ -421,7 +399,6 @@ TEST_F(FilePathWatcherTest, DisappearingDirectory) {
ASSERT_TRUE(base::DeleteFile(dir, true));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Tests that a file that is deleted and reappears is tracked correctly.
@@ -438,12 +415,11 @@ TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
VLOG(1) << "Waiting for file creation";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, WatchDirectory) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
FilePath file1(dir.AppendASCII("file1"));
FilePath file2(dir.AppendASCII("file2"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -471,14 +447,13 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
ASSERT_TRUE(WriteFile(file2, "content"));
VLOG(1) << "Waiting for file2 creation";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, MoveParent) {
FilePathWatcher file_watcher;
FilePathWatcher subdir_watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
- FilePath dest(temp_dir_.path().AppendASCII("dest"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+ FilePath dest(temp_dir_.GetPath().AppendASCII("dest"));
FilePath subdir(dir.AppendASCII("subdir"));
FilePath file(subdir.AppendASCII("file"));
std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
@@ -497,18 +472,15 @@ TEST_F(FilePathWatcherTest, MoveParent) {
base::Move(dir, dest);
VLOG(1) << "Waiting for directory move";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(file_delegate.release());
- DeleteDelegateOnFileThread(subdir_delegate.release());
}
TEST_F(FilePathWatcherTest, RecursiveWatch) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
if (!FilePathWatcher::RecursiveWatchAvailable()) {
ASSERT_FALSE(setup_result);
- DeleteDelegateOnFileThread(delegate.release());
return;
}
ASSERT_TRUE(setup_result);
@@ -564,7 +536,6 @@ TEST_F(FilePathWatcherTest, RecursiveWatch) {
// Delete "$dir/subdir/subdir_child_dir/child_dir_file1".
ASSERT_TRUE(base::DeleteFile(child_dir_file1, false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_POSIX)
@@ -581,14 +552,14 @@ TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
return;
FilePathWatcher watcher;
- FilePath test_dir(temp_dir_.path().AppendASCII("test_dir"));
+ FilePath test_dir(temp_dir_.GetPath().AppendASCII("test_dir"));
ASSERT_TRUE(base::CreateDirectory(test_dir));
FilePath symlink(test_dir.AppendASCII("symlink"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
// Link creation.
- FilePath target1(temp_dir_.path().AppendASCII("target1"));
+ FilePath target1(temp_dir_.GetPath().AppendASCII("target1"));
ASSERT_TRUE(base::CreateSymbolicLink(target1, symlink));
ASSERT_TRUE(WaitForEvents());
@@ -602,7 +573,7 @@ TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
ASSERT_TRUE(WaitForEvents());
// Link change.
- FilePath target2(temp_dir_.path().AppendASCII("target2"));
+ FilePath target2(temp_dir_.GetPath().AppendASCII("target2"));
ASSERT_TRUE(base::CreateDirectory(target2));
ASSERT_TRUE(base::DeleteFile(symlink, false));
ASSERT_TRUE(base::CreateSymbolicLink(target2, symlink));
@@ -612,18 +583,16 @@ TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
FilePath target2_file(target2.AppendASCII("file"));
ASSERT_TRUE(WriteFile(target2_file, "content"));
ASSERT_TRUE(WaitForEvents());
-
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_POSIX
TEST_F(FilePathWatcherTest, MoveChild) {
FilePathWatcher file_watcher;
FilePathWatcher subdir_watcher;
- FilePath source_dir(temp_dir_.path().AppendASCII("source"));
+ FilePath source_dir(temp_dir_.GetPath().AppendASCII("source"));
FilePath source_subdir(source_dir.AppendASCII("subdir"));
FilePath source_file(source_subdir.AppendASCII("file"));
- FilePath dest_dir(temp_dir_.path().AppendASCII("dest"));
+ FilePath dest_dir(temp_dir_.GetPath().AppendASCII("dest"));
FilePath dest_subdir(dest_dir.AppendASCII("subdir"));
FilePath dest_file(dest_subdir.AppendASCII("file"));
@@ -640,8 +609,6 @@ TEST_F(FilePathWatcherTest, MoveChild) {
// Move the directory into place, s.t. the watched file appears.
ASSERT_TRUE(base::Move(source_dir, dest_dir));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(file_delegate.release());
- DeleteDelegateOnFileThread(subdir_delegate.release());
}
// Verify that changing attributes on a file is caught
@@ -662,7 +629,6 @@ TEST_F(FilePathWatcherTest, FileAttributesChanged) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(base::MakeFileUnreadable(test_file()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_LINUX)
@@ -678,7 +644,6 @@ TEST_F(FilePathWatcherTest, CreateLink) {
// Note that test_file() doesn't have to exist.
ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that deleting a symlink is caught.
@@ -694,7 +659,6 @@ TEST_F(FilePathWatcherTest, DeleteLink) {
// Now make sure we get notified if the link is deleted.
ASSERT_TRUE(base::DeleteFile(test_link(), false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that modifying a target file that a link is pointing to
@@ -710,7 +674,6 @@ TEST_F(FilePathWatcherTest, ModifiedLinkedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(WriteFile(test_file(), "new content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that creating a target file that a link is pointing to
@@ -725,7 +688,6 @@ TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
// Now make sure we get notified if the target file is created.
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that deleting a target file that a link is pointing to
@@ -741,15 +703,14 @@ TEST_F(FilePathWatcherTest, DeleteTargetLinkedFile) {
// Now make sure we get notified if the target file is deleted.
ASSERT_TRUE(base::DeleteFile(test_file(), false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file whose parent directory is a link that
// doesn't exist yet works if the symlink is created eventually.
TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
- FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+ FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
FilePath file(dir.AppendASCII("file"));
FilePath linkfile(link_dir.AppendASCII("file"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -770,15 +731,14 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file whose parent directory is a
// dangling symlink works if the directory is created eventually.
TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
- FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+ FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
FilePath file(dir.AppendASCII("file"));
FilePath linkfile(link_dir.AppendASCII("file"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -800,15 +760,14 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file with a symlink on the path
// to the file works.
TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
FilePathWatcher watcher;
- FilePath dir(temp_dir_.path().AppendASCII("dir"));
- FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+ FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+ FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
FilePath file(dir.AppendASCII("file"));
FilePath linkfile(link_dir.AppendASCII("file"));
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -828,7 +787,6 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_LINUX
@@ -879,7 +837,8 @@ bool ChangeFilePermissions(const FilePath& path, Permission perm, bool allow) {
// Verify that changing attributes on a directory works.
TEST_F(FilePathWatcherTest, DirAttributesChanged) {
- FilePath test_dir1(temp_dir_.path().AppendASCII("DirAttributesChangedDir1"));
+ FilePath test_dir1(
+ temp_dir_.GetPath().AppendASCII("DirAttributesChangedDir1"));
FilePath test_dir2(test_dir1.AppendASCII("DirAttributesChangedDir2"));
FilePath test_file(test_dir2.AppendASCII("DirAttributesChangedFile"));
// Setup a directory hierarchy.
@@ -905,7 +864,6 @@ TEST_F(FilePathWatcherTest, DirAttributesChanged) {
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, false));
ASSERT_TRUE(WaitForEvents());
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, true));
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_MACOSX
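
The test changes above drop the dedicated file thread: watches now run on the test's own thread, which therefore needs a MessageLoopForIO plus, on POSIX, a FileDescriptorWatcher so the inotify/kqueue descriptors can be serviced asynchronously. A minimal fixture sketch under those assumptions (|WatcherFixture| is a hypothetical name):

    #include "base/files/file_descriptor_watcher_posix.h"
    #include "base/message_loop/message_loop.h"
    #include "build/build_config.h"
    #include "testing/gtest/include/gtest/gtest.h"

    class WatcherFixture : public testing::Test {
     protected:
      // Pumps async I/O on the main test thread.
      base::MessageLoopForIO loop_;
    #if defined(OS_POSIX)
      // Enables base::FileDescriptorWatcher::WatchReadable() for code running
      // on |loop_|, which the POSIX FilePathWatcher implementations rely on.
      base::FileDescriptorWatcher file_descriptor_watcher_{&loop_};
    #endif
    };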
diff --git a/base/files/file_posix.cc b/base/files/file_posix.cc
index 12f80c4f8f..2738d6c45c 100644
--- a/base/files/file_posix.cc
+++ b/base/files/file_posix.cc
@@ -11,7 +11,7 @@
#include <unistd.h>
#include "base/logging.h"
-#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
@@ -323,7 +323,7 @@ int64_t File::GetLength() {
stat_wrapper_t file_info;
if (CallFstat(file_.get(), &file_info))
- return false;
+ return -1;
return file_info.st_size;
}
@@ -372,7 +372,7 @@ File::Error File::Unlock() {
return CallFcntlFlock(file_.get(), false);
}
-File File::Duplicate() {
+File File::Duplicate() const {
if (!IsValid())
return File();
@@ -513,9 +513,10 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
}
#endif // !defined(OS_NACL)
-bool File::DoFlush() {
+bool File::Flush() {
ThreadRestrictions::AssertIOAllowed();
DCHECK(IsValid());
+ SCOPED_FILE_TRACE("Flush");
#if defined(OS_NACL)
NOTIMPLEMENTED(); // NaCl doesn't implement fsync.
diff --git a/base/files/file_tracing.cc b/base/files/file_tracing.cc
index 6d11cbc746..48f57412f9 100644
--- a/base/files/file_tracing.cc
+++ b/base/files/file_tracing.cc
@@ -4,47 +4,62 @@
#include "base/files/file_tracing.h"
+#include "base/atomicops.h"
#include "base/files/file.h"
+using base::subtle::AtomicWord;
+
namespace base {
namespace {
-FileTracing::Provider* g_provider = nullptr;
+AtomicWord g_provider;
+}
+
+FileTracing::Provider* GetProvider() {
+ AtomicWord provider = base::subtle::Acquire_Load(&g_provider);
+ return reinterpret_cast<FileTracing::Provider*>(provider);
}
// static
bool FileTracing::IsCategoryEnabled() {
- return g_provider && g_provider->FileTracingCategoryIsEnabled();
+ FileTracing::Provider* provider = GetProvider();
+ return provider && provider->FileTracingCategoryIsEnabled();
}
// static
void FileTracing::SetProvider(FileTracing::Provider* provider) {
- g_provider = provider;
+ base::subtle::Release_Store(&g_provider,
+ reinterpret_cast<AtomicWord>(provider));
}
FileTracing::ScopedEnabler::ScopedEnabler() {
- if (g_provider)
- g_provider->FileTracingEnable(this);
+ FileTracing::Provider* provider = GetProvider();
+ if (provider)
+ provider->FileTracingEnable(this);
}
FileTracing::ScopedEnabler::~ScopedEnabler() {
- if (g_provider)
- g_provider->FileTracingDisable(this);
+ FileTracing::Provider* provider = GetProvider();
+ if (provider)
+ provider->FileTracingDisable(this);
}
FileTracing::ScopedTrace::ScopedTrace() : id_(nullptr) {}
FileTracing::ScopedTrace::~ScopedTrace() {
- if (id_ && g_provider)
- g_provider->FileTracingEventEnd(name_, id_);
+ if (id_) {
+ FileTracing::Provider* provider = GetProvider();
+ if (provider)
+ provider->FileTracingEventEnd(name_, id_);
+ }
}
void FileTracing::ScopedTrace::Initialize(const char* name,
- File* file,
+ const File* file,
int64_t size) {
id_ = &file->trace_enabler_;
name_ = name;
- g_provider->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
+ GetProvider()->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
}
} // namespace base
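
The rewritten provider lookup is a classic acquire/release publication: the writer stores the pointer with release semantics after the provider is fully constructed, and readers load it with acquire semantics, so a non-null result is always safe to dereference. The same idea in a standalone sketch using std::atomic (the diff itself uses base::subtle atomics; |Provider| here is a stand-in type):

    #include <atomic>

    struct Provider {
      bool enabled = false;
    };

    std::atomic<Provider*> g_provider{nullptr};

    void SetProvider(Provider* provider) {
      // Release: every write that initialized *provider happens-before this
      // store, so readers cannot observe a half-constructed provider.
      g_provider.store(provider, std::memory_order_release);
    }

    bool IsCategoryEnabled() {
      // Acquire: pairs with the release store above.
      Provider* provider = g_provider.load(std::memory_order_acquire);
      return provider && provider->enabled;
    }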
diff --git a/base/files/file_tracing.h b/base/files/file_tracing.h
index bedd7be64b..1fbfcd4498 100644
--- a/base/files/file_tracing.h
+++ b/base/files/file_tracing.h
@@ -37,21 +37,21 @@ class BASE_EXPORT FileTracing {
virtual bool FileTracingCategoryIsEnabled() const = 0;
// Enables file tracing for |id|. Must be called before recording events.
- virtual void FileTracingEnable(void* id) = 0;
+ virtual void FileTracingEnable(const void* id) = 0;
// Disables file tracing for |id|.
- virtual void FileTracingDisable(void* id) = 0;
+ virtual void FileTracingDisable(const void* id) = 0;
// Begins an event for |id| with |name|. |path| tells where in the directory
// structure the event is happening (and may be blank). |size| is the number
// of bytes involved in the event.
virtual void FileTracingEventBegin(const char* name,
- void* id,
+ const void* id,
const FilePath& path,
int64_t size) = 0;
// Ends an event for |id| with |name|.
- virtual void FileTracingEventEnd(const char* name, void* id) = 0;
+ virtual void FileTracingEventEnd(const char* name, const void* id) = 0;
};
// Sets a global file tracing provider to query categories and record events.
@@ -73,12 +73,12 @@ class BASE_EXPORT FileTracing {
// event to trace (e.g. "Read", "Write") and must have an application
// lifetime (e.g. static or literal). |file| is the file being traced; must
// outlive this class. |size| is the size (in bytes) of this event.
- void Initialize(const char* name, File* file, int64_t size);
+ void Initialize(const char* name, const File* file, int64_t size);
private:
// The ID of this trace. Based on the |file| passed to |Initialize()|. Must
// outlive this class.
- void* id_;
+ const void* id_;
// The name of the event to trace (e.g. "Read", "Write"). Prefixed with
// "File".
diff --git a/base/files/file_unittest.cc b/base/files/file_unittest.cc
index 2445f7e128..66c312b60d 100644
--- a/base/files/file_unittest.cc
+++ b/base/files/file_unittest.cc
@@ -9,6 +9,7 @@
#include <utility>
#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -20,7 +21,7 @@ using base::FilePath;
TEST(FileTest, Create) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
{
// Don't create a File at all.
@@ -92,7 +93,7 @@ TEST(FileTest, Create) {
{
// Create a delete-on-close file.
- file_path = temp_dir.path().AppendASCII("create_file_2");
+ file_path = temp_dir.GetPath().AppendASCII("create_file_2");
File file(file_path,
base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
base::File::FLAG_DELETE_ON_CLOSE);
@@ -107,7 +108,7 @@ TEST(FileTest, Create) {
TEST(FileTest, Async) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("create_file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("create_file");
{
File file(file_path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_ASYNC);
@@ -125,7 +126,7 @@ TEST(FileTest, Async) {
TEST(FileTest, DeleteOpenFile) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
// Create a file.
File file(file_path,
@@ -152,7 +153,7 @@ TEST(FileTest, DeleteOpenFile) {
TEST(FileTest, ReadWrite) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("read_write_file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("read_write_file");
File file(file_path,
base::File::FLAG_CREATE | base::File::FLAG_READ |
base::File::FLAG_WRITE);
@@ -224,7 +225,7 @@ TEST(FileTest, ReadWrite) {
TEST(FileTest, Append) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("append_file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("append_file");
File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_APPEND);
ASSERT_TRUE(file.IsValid());
@@ -272,7 +273,7 @@ TEST(FileTest, Append) {
TEST(FileTest, Length) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("truncate_file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_file");
File file(file_path,
base::File::FLAG_CREATE | base::File::FLAG_READ |
base::File::FLAG_WRITE);
@@ -324,7 +325,7 @@ TEST(FileTest, DISABLED_TouchGetInfo) {
#endif
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- File file(temp_dir.path().AppendASCII("touch_get_info_file"),
+ File file(temp_dir.GetPath().AppendASCII("touch_get_info_file"),
base::File::FLAG_CREATE | base::File::FLAG_WRITE |
base::File::FLAG_WRITE_ATTRIBUTES);
ASSERT_TRUE(file.IsValid());
@@ -387,7 +388,8 @@ TEST(FileTest, DISABLED_TouchGetInfo) {
TEST(FileTest, ReadAtCurrentPosition) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("read_at_current_position");
+ FilePath file_path =
+ temp_dir.GetPath().AppendASCII("read_at_current_position");
File file(file_path,
base::File::FLAG_CREATE | base::File::FLAG_READ |
base::File::FLAG_WRITE);
@@ -411,7 +413,8 @@ TEST(FileTest, ReadAtCurrentPosition) {
TEST(FileTest, WriteAtCurrentPosition) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("write_at_current_position");
+ FilePath file_path =
+ temp_dir.GetPath().AppendASCII("write_at_current_position");
File file(file_path,
base::File::FLAG_CREATE | base::File::FLAG_READ |
base::File::FLAG_WRITE);
@@ -434,7 +437,7 @@ TEST(FileTest, WriteAtCurrentPosition) {
TEST(FileTest, Seek) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("seek_file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("seek_file");
File file(file_path,
base::File::FLAG_CREATE | base::File::FLAG_READ |
base::File::FLAG_WRITE);
@@ -451,7 +454,7 @@ TEST(FileTest, Seek) {
TEST(FileTest, Duplicate) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
File file(file_path,(base::File::FLAG_CREATE |
base::File::FLAG_READ |
base::File::FLAG_WRITE));
@@ -478,7 +481,7 @@ TEST(FileTest, Duplicate) {
TEST(FileTest, DuplicateDeleteOnClose) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("file");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
File file(file_path,(base::File::FLAG_CREATE |
base::File::FLAG_READ |
base::File::FLAG_WRITE |
@@ -495,7 +498,8 @@ TEST(FileTest, DuplicateDeleteOnClose) {
TEST(FileTest, GetInfoForDirectory) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath empty_dir = temp_dir.path().Append(FILE_PATH_LITERAL("gpfi_test"));
+ FilePath empty_dir =
+ temp_dir.GetPath().Append(FILE_PATH_LITERAL("gpfi_test"));
ASSERT_TRUE(CreateDirectory(empty_dir));
base::File dir(
@@ -514,4 +518,158 @@ TEST(FileTest, GetInfoForDirectory) {
EXPECT_FALSE(info.is_symbolic_link);
EXPECT_EQ(0, info.size);
}
+
+TEST(FileTest, DeleteNoop) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating and closing a file with DELETE perms should do nothing special.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, Delete) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating a file with DELETE and then marking for delete on close should
+ // delete it.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_TRUE(file.DeleteOnClose(true));
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteThenRevoke) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating a file with DELETE, marking it for delete, then clearing delete on
+ // close should not delete it.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_TRUE(file.DeleteOnClose(true));
+ ASSERT_TRUE(file.DeleteOnClose(false));
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnClose) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // DELETE_ON_CLOSE cannot be revoked by this opener.
+ File file(
+ file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ // https://msdn.microsoft.com/library/windows/desktop/aa364221.aspx says that
+ // setting the disposition has no effect if the handle was opened with
+ // FLAG_DELETE_ON_CLOSE. Do not make the test's success dependent on whether
+ // or not SetFileInformationByHandle indicates success or failure. (It happens
+ // to indicate success on Windows 10.)
+ file.DeleteOnClose(false);
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnCloseOther) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // DELETE_ON_CLOSE cannot be revoked by another opener.
+ File file(
+ file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+
+ File file2(
+ file_path,
+ (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file2.IsValid());
+
+ file2.DeleteOnClose(false);
+ file2.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteWithoutPermission) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // It should not be possible to mark a file for deletion when it was not
+ // created/opened with DELETE.
+ File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_FALSE(file.DeleteOnClose(true));
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, UnsharedDeleteOnClose) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Opening with DELETE_ON_CLOSE when a previous opener hasn't enabled sharing
+ // will fail.
+ File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE));
+ ASSERT_TRUE(file.IsValid());
+ File file2(
+ file_path,
+ (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+ base::File::FLAG_DELETE_ON_CLOSE | base::File::FLAG_SHARE_DELETE));
+ ASSERT_FALSE(file2.IsValid());
+
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, NoDeleteOnCloseWithMappedFile) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Mapping a file into memory blocks DeleteOnClose.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_EQ(5, file.WriteAtCurrentPos("12345", 5));
+
+ {
+ base::MemoryMappedFile mapping;
+ ASSERT_TRUE(mapping.Initialize(file.Duplicate()));
+ ASSERT_EQ(5U, mapping.length());
+
+ EXPECT_FALSE(file.DeleteOnClose(true));
+ }
+
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
#endif // defined(OS_WIN)
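
The new tests pin down the Windows delete-on-close semantics: FLAG_CAN_DELETE_ON_CLOSE opens the handle with DELETE access so the disposition can be toggled later via DeleteOnClose(), while FLAG_DELETE_ON_CLOSE makes deletion unconditional. A hedged sketch of a self-deleting scratch file built on those flags (Windows-only, per the #ifdef above; |MakeScratchFile| is a hypothetical helper):

    // Returns a file that is deleted when the last handle to it closes,
    // unless the caller later revokes that with DeleteOnClose(false).
    base::File MakeScratchFile(const base::FilePath& path) {
      base::File file(path, base::File::FLAG_CREATE | base::File::FLAG_READ |
                                base::File::FLAG_WRITE |
                                base::File::FLAG_CAN_DELETE_ON_CLOSE);
      if (file.IsValid())
        file.DeleteOnClose(true);
      return file;  // base::File is movable.
    }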
diff --git a/base/files/file_util.h b/base/files/file_util.h
index 420dcaee61..5ada35f9a4 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -294,10 +294,6 @@ BASE_EXPORT bool DevicePathToDriveLetterPath(const FilePath& device_path,
// be resolved with this function.
BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
FilePath* nt_path);
-
-// Given an existing file in |path|, returns whether this file is on a network
-// drive or not. If |path| does not exist, this function returns false.
-BASE_EXPORT bool IsOnNetworkDrive(const base::FilePath& path);
#endif
// This function will return if the given file is a symlink or not.
@@ -311,7 +307,9 @@ BASE_EXPORT bool TouchFile(const FilePath& path,
const Time& last_accessed,
const Time& last_modified);
-// Wrapper for fopen-like calls. Returns non-NULL FILE* on success.
+// Wrapper for fopen-like calls. Returns non-NULL FILE* on success. The
+// underlying file descriptor (POSIX) or handle (Windows) is unconditionally
+// configured to not be propagated to child processes.
BASE_EXPORT FILE* OpenFile(const FilePath& filename, const char* mode);
// Closes file opened by OpenFile. Returns true on success.
@@ -365,6 +363,17 @@ BASE_EXPORT int GetUniquePathNumber(const FilePath& path,
BASE_EXPORT bool SetNonBlocking(int fd);
#if defined(OS_POSIX)
+// Creates a non-blocking, close-on-exec pipe.
+// This creates a non-blocking pipe that is not intended to be shared with any
+// child process. This will be done atomically if the operating system supports
+// it. Returns true if it was able to create the pipe, otherwise false.
+BASE_EXPORT bool CreateLocalNonBlockingPipe(int fds[2]);
+
+// Sets the given |fd| to close-on-exec mode.
+// Returns true if it was able to set it in the close-on-exec mode, otherwise
+// false.
+BASE_EXPORT bool SetCloseOnExec(int fd);
+
// Test that |path| can only be changed by a given user and members of
// a given set of groups.
// Specifically, test that all parts of |path| under (and including) |base|:
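
A short usage sketch for the new pipe helper declared above; the one-byte write/drain wakeup protocol is just one common use, and error handling is elided:

    #include <unistd.h>

    #include "base/files/file_util.h"
    #include "base/posix/eintr_wrapper.h"

    void SelfPipeExample() {
      int fds[2];
      if (!base::CreateLocalNonBlockingPipe(fds))
        return;
      // Both ends are O_NONBLOCK and FD_CLOEXEC, so they are not leaked into
      // child processes. fds[0] is the read end, fds[1] the write end.
      (void)HANDLE_EINTR(write(fds[1], "", 1));   // Wake up a poller.
      char buf;
      (void)HANDLE_EINTR(read(fds[0], &buf, 1));  // Drain the wakeup byte.
      close(fds[0]);
      close(fds[1]);
    }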
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
index e9c6c65159..5a99aa0e81 100644
--- a/base/files/file_util_mac.mm
+++ b/base/files/file_util_mac.mm
@@ -4,8 +4,9 @@
#include "base/files/file_util.h"
-#include <copyfile.h>
#import <Foundation/Foundation.h>
+#include <copyfile.h>
+#include <stdlib.h>
#include "base/files/file_path.h"
#include "base/mac/foundation_util.h"
@@ -23,6 +24,15 @@ bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
}
bool GetTempDir(base::FilePath* path) {
+ // In order to facilitate hermetic runs on macOS, first check $TMPDIR.
+ // NOTE: $TMPDIR is ALMOST ALWAYS set on macOS (unless the user unset it).
+ const char* env_tmpdir = getenv("TMPDIR");
+ if (env_tmpdir) {
+ *path = base::FilePath(env_tmpdir);
+ return true;
+ }
+
+ // If we didn't find it, fall back to the native function.
NSString* tmp = NSTemporaryDirectory();
if (tmp == nil)
return false;
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index 85a1b41d46..a03ca8d8d8 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -185,6 +185,19 @@ bool DetermineDevShmExecutable() {
#endif // defined(OS_LINUX)
#endif // !defined(OS_NACL_NONSFI)
+#if !defined(OS_MACOSX)
+// Appends |mode_char| to |mode| before the optional character set encoding; see
+// https://www.gnu.org/software/libc/manual/html_node/Opening-Streams.html for
+// details.
+std::string AppendModeCharacter(StringPiece mode, char mode_char) {
+ std::string result(mode.as_string());
+ size_t comma_pos = result.find(',');
+ result.insert(comma_pos == std::string::npos ? result.length() : comma_pos, 1,
+ mode_char);
+ return result;
+}
+#endif
+
} // namespace
#if !defined(OS_NACL_NONSFI)
@@ -276,11 +289,8 @@ bool CopyDirectory(const FilePath& from_path,
FilePath real_from_path = MakeAbsoluteFilePath(from_path);
if (real_from_path.empty())
return false;
- if (real_to_path.value().size() >= real_from_path.value().size() &&
- real_to_path.value().compare(0, real_from_path.value().size(),
- real_from_path.value()) == 0) {
+ if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
return false;
- }
int traverse_type = FileEnumerator::FILES | FileEnumerator::SHOW_SYM_LINKS;
if (recursive)
@@ -351,6 +361,29 @@ bool CopyDirectory(const FilePath& from_path,
}
#endif // !defined(OS_NACL_NONSFI)
+bool CreateLocalNonBlockingPipe(int fds[2]) {
+#if defined(OS_LINUX)
+ return pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0;
+#else
+ int raw_fds[2];
+ if (pipe(raw_fds) != 0)
+ return false;
+ ScopedFD fd_out(raw_fds[0]);
+ ScopedFD fd_in(raw_fds[1]);
+ if (!SetCloseOnExec(fd_out.get()))
+ return false;
+ if (!SetCloseOnExec(fd_in.get()))
+ return false;
+ if (!SetNonBlocking(fd_out.get()))
+ return false;
+ if (!SetNonBlocking(fd_in.get()))
+ return false;
+ fds[0] = fd_out.release();
+ fds[1] = fd_in.release();
+ return true;
+#endif
+}
+
bool SetNonBlocking(int fd) {
const int flags = fcntl(fd, F_GETFL);
if (flags == -1)
@@ -362,6 +395,21 @@ bool SetNonBlocking(int fd) {
return true;
}
+bool SetCloseOnExec(int fd) {
+#if defined(OS_NACL_NONSFI)
+ const int flags = 0;
+#else
+ const int flags = fcntl(fd, F_GETFD);
+ if (flags == -1)
+ return false;
+ if (flags & FD_CLOEXEC)
+ return true;
+#endif // defined(OS_NACL_NONSFI)
+ if (HANDLE_EINTR(fcntl(fd, F_SETFD, flags | FD_CLOEXEC)) == -1)
+ return false;
+ return true;
+}
+
bool PathExists(const FilePath& path) {
ThreadRestrictions::AssertIOAllowed();
#if defined(OS_ANDROID)
@@ -677,11 +725,29 @@ bool GetFileInfo(const FilePath& file_path, File::Info* results) {
#endif // !defined(OS_NACL_NONSFI)
FILE* OpenFile(const FilePath& filename, const char* mode) {
+ // 'e' is unconditionally added below, so be sure there is not one already
+ // present before a comma in |mode|.
+ DCHECK(
+ strchr(mode, 'e') == nullptr ||
+ (strchr(mode, ',') != nullptr && strchr(mode, 'e') > strchr(mode, ',')));
ThreadRestrictions::AssertIOAllowed();
FILE* result = NULL;
+#if defined(OS_MACOSX)
+ // macOS does not provide a mode character to set O_CLOEXEC; see
+ // https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/fopen.3.html.
+ const char* the_mode = mode;
+#else
+ std::string mode_with_e(AppendModeCharacter(mode, 'e'));
+ const char* the_mode = mode_with_e.c_str();
+#endif
do {
- result = fopen(filename.value().c_str(), mode);
+ result = fopen(filename.value().c_str(), the_mode);
} while (!result && errno == EINTR);
+#if defined(OS_MACOSX)
+ // Mark the descriptor as close-on-exec.
+ if (result)
+ SetCloseOnExec(fileno(result));
+#endif
return result;
}
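
AppendModeCharacter() above inserts the new flag before any ",ccs=..." encoding suffix, so "w" becomes "we" and "w,ccs=UTF-8" becomes "we,ccs=UTF-8" (glibc's 'e' requests O_CLOEXEC). A standalone sketch of the same transformation:

    #include <string>

    // Mirrors AppendModeCharacter(): insert |mode_char| before the optional
    // ",ccs=..." suffix, or append it if there is no comma.
    std::string AppendModeChar(const std::string& mode, char mode_char) {
      std::string result = mode;
      size_t comma_pos = result.find(',');
      result.insert(comma_pos == std::string::npos ? result.size() : comma_pos,
                    1, mode_char);
      return result;
    }

    // AppendModeChar("w", 'e')           == "we"
    // AppendModeChar("w,ccs=UTF-8", 'e') == "we,ccs=UTF-8"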
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
index 28550ad52f..b46846277b 100644
--- a/base/files/important_file_writer.cc
+++ b/base/files/important_file_writer.cc
@@ -18,7 +18,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
@@ -57,9 +57,18 @@ void LogFailure(const FilePath& path, TempFileFailure failure_code,
// Helper function to call WriteFileAtomically() with a
// std::unique_ptr<std::string>.
-bool WriteScopedStringToFileAtomically(const FilePath& path,
- std::unique_ptr<std::string> data) {
- return ImportantFileWriter::WriteFileAtomically(path, *data);
+void WriteScopedStringToFileAtomically(
+ const FilePath& path,
+ std::unique_ptr<std::string> data,
+ Closure before_write_callback,
+ Callback<void(bool success)> after_write_callback) {
+ if (!before_write_callback.is_null())
+ before_write_callback.Run();
+
+ bool result = ImportantFileWriter::WriteFileAtomically(path, *data);
+
+ if (!after_write_callback.is_null())
+ after_write_callback.Run(result);
}
} // namespace
@@ -94,6 +103,7 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE);
if (!tmp_file.IsValid()) {
LogFailure(path, FAILED_OPENING, "could not open temporary file");
+ DeleteFile(tmp_file_path, false);
return false;
}
@@ -168,8 +178,11 @@ void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
if (HasPendingWrite())
timer_.Stop();
- auto task = Bind(&WriteScopedStringToFileAtomically, path_, Passed(&data));
- if (!PostWriteTask(task)) {
+ Closure task = Bind(&WriteScopedStringToFileAtomically, path_, Passed(&data),
+ Passed(&before_next_write_callback_),
+ Passed(&after_next_write_callback_));
+
+ if (!task_runner_->PostTask(FROM_HERE, MakeCriticalClosure(task))) {
// Posting the task to background message loop is not expected
// to fail, but if it does, avoid losing data and just hit the disk
// on the current thread.
@@ -203,37 +216,11 @@ void ImportantFileWriter::DoScheduledWrite() {
serializer_ = nullptr;
}
-void ImportantFileWriter::RegisterOnNextSuccessfulWriteCallback(
- const Closure& on_next_successful_write) {
- DCHECK(on_next_successful_write_.is_null());
- on_next_successful_write_ = on_next_successful_write;
-}
-
-bool ImportantFileWriter::PostWriteTask(const Callback<bool()>& task) {
- // TODO(gab): This code could always use PostTaskAndReplyWithResult and let
- // ForwardSuccessfulWrite() no-op if |on_next_successful_write_| is null, but
- // PostTaskAndReply causes memory leaks in tests (crbug.com/371974) and
- // suppressing all of those is unrealistic hence we avoid most of them by
- // using PostTask() in the typical scenario below.
- if (!on_next_successful_write_.is_null()) {
- return PostTaskAndReplyWithResult(
- task_runner_.get(),
- FROM_HERE,
- MakeCriticalClosure(task),
- Bind(&ImportantFileWriter::ForwardSuccessfulWrite,
- weak_factory_.GetWeakPtr()));
- }
- return task_runner_->PostTask(
- FROM_HERE,
- MakeCriticalClosure(Bind(IgnoreResult(task))));
-}
-
-void ImportantFileWriter::ForwardSuccessfulWrite(bool result) {
- DCHECK(CalledOnValidThread());
- if (result && !on_next_successful_write_.is_null()) {
- on_next_successful_write_.Run();
- on_next_successful_write_.Reset();
- }
+void ImportantFileWriter::RegisterOnNextWriteCallbacks(
+ const Closure& before_next_write_callback,
+ const Callback<void(bool success)>& after_next_write_callback) {
+ before_next_write_callback_ = before_next_write_callback;
+ after_next_write_callback_ = after_next_write_callback;
}
} // namespace base
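
The old single "next successful write" observer becomes a before/after pair that WriteScopedStringToFileAtomically() runs synchronously around the next write, on the writer's task runner. A hedged usage sketch (the free functions |OnBeforeWrite| and |OnAfterWrite| are illustrative):

    #include "base/bind.h"
    #include "base/files/important_file_writer.h"

    void OnBeforeWrite() { /* e.g. snapshot dependent state */ }
    void OnAfterWrite(bool success) { /* e.g. record a metric */ }

    void ArmObservers(base::ImportantFileWriter* writer) {
      // Both callbacks run on the writer's background task runner, so they
      // must be thread safe. They are consumed by the next write; observing
      // a later write requires registering again.
      writer->RegisterOnNextWriteCallbacks(base::Bind(&OnBeforeWrite),
                                           base::Bind(&OnAfterWrite));
    }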
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
index 0bd8a7fd35..f154b043b2 100644
--- a/base/files/important_file_writer.h
+++ b/base/files/important_file_writer.h
@@ -20,24 +20,21 @@
namespace base {
class SequencedTaskRunner;
-class Thread;
-// Helper to ensure that a file won't be corrupted by the write (for example on
-// application crash). Consider a naive way to save an important file F:
+// Helper for atomically writing a file to ensure that it won't be corrupted by
+// *application* crash during write (implemented as create, flush, rename).
//
-// 1. Open F for writing, truncating it.
-// 2. Write new data to F.
+// As an added benefit, ImportantFileWriter makes it less likely that the file
+// is corrupted by *system* crash, though even if the ImportantFileWriter call
+// has already returned at the time of the crash it is not specified which
+// version of the file (old or new) is preserved. And depending on system
+// configuration (hardware and software) a significant likelihood of file
+// corruption may remain, thus using ImportantFileWriter is not a valid
+// substitute for file integrity checks and recovery codepaths for malformed
+// files.
//
-// It's good when it works, but it gets very bad if step 2. doesn't complete.
-// It can be caused by a crash, a computer hang, or a weird I/O error. And you
-// end up with a broken file.
-//
-// To be safe, we don't start with writing directly to F. Instead, we write to
-// to a temporary file. Only after that write is successful, we rename the
-// temporary file to target filename.
-//
-// If you want to know more about this approach and ext3/ext4 fsync issues, see
-// http://blog.valerieaurora.org/2009/04/16/dont-panic-fsync-ext34-and-your-data/
+// Also note that ImportantFileWriter can be *really* slow (cf. File::Flush()
+// for details) and thus please don't block shutdown on ImportantFileWriter.
class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
public:
// Used by ScheduleSave to lazily provide the data to be saved. Allows us
@@ -53,8 +50,9 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
virtual ~DataSerializer() {}
};
- // Save |data| to |path| in an atomic manner (see the class comment above).
- // Blocks and writes data on the current thread.
+ // Save |data| to |path| in an atomic manner. Blocks and writes data on the
+ // current thread. Does not guarantee file integrity across system crash (see
+ // the class comment above).
static bool WriteFileAtomically(const FilePath& path, StringPiece data);
// Initialize the writer.
@@ -95,25 +93,26 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
// Serialize data pending to be saved and execute write on backend thread.
void DoScheduledWrite();
- // Registers |on_next_successful_write| to be called once, on the next
- // successful write event. Only one callback can be set at once.
- void RegisterOnNextSuccessfulWriteCallback(
- const Closure& on_next_successful_write);
+ // Registers |before_next_write_callback| and |after_next_write_callback| to
+ // be synchronously invoked from WriteFileAtomically() before its next write
+ // and after its next write, respectively. The boolean passed to
+ // |after_next_write_callback| indicates whether the write was successful.
+ // Both callbacks must be thread safe as they will be called on |task_runner_|
+ // and may be called during Chrome shutdown.
+ // If called more than once before a write is scheduled on |task_runner_|, the
+ // latest callbacks clobber the others.
+ void RegisterOnNextWriteCallbacks(
+ const Closure& before_next_write_callback,
+ const Callback<void(bool success)>& after_next_write_callback);
TimeDelta commit_interval() const {
return commit_interval_;
}
private:
- // Helper method for WriteNow().
- bool PostWriteTask(const Callback<bool()>& task);
-
- // If |result| is true and |on_next_successful_write_| is set, invokes
- // |on_successful_write_| and then resets it; no-ops otherwise.
- void ForwardSuccessfulWrite(bool result);
-
- // Invoked once and then reset on the next successful write event.
- Closure on_next_successful_write_;
+ // Invoked synchronously on the next write event.
+ Closure before_next_write_callback_;
+ Callback<void(bool success)> after_next_write_callback_;
// Path being written to.
const FilePath path_;
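
The "create, flush, rename" scheme named in the class comment above is the standard atomic-replace pattern: because rename() within one filesystem is atomic, readers only ever see the old file or the complete new one. A minimal POSIX sketch of the idea, not the class's actual implementation (which also creates a unique temp file and logs failures):

    #include <stdio.h>
    #include <unistd.h>

    #include <string>

    bool WriteAtomicallySketch(const std::string& path,
                               const std::string& data) {
      const std::string tmp = path + ".tmp";  // Same dir => same filesystem.
      FILE* file = fopen(tmp.c_str(), "w");
      if (!file)
        return false;
      bool ok = fwrite(data.data(), 1, data.size(), file) == data.size();
      ok = fflush(file) == 0 && ok;
      ok = fsync(fileno(file)) == 0 && ok;  // The potentially slow step.
      ok = fclose(file) == 0 && ok;
      if (!ok) {
        remove(tmp.c_str());
        return false;
      }
      return rename(tmp.c_str(), path.c_str()) == 0;
    }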
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index 43e051ebcf..9b8dcfd4e3 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -46,39 +46,61 @@ class DataSerializer : public ImportantFileWriter::DataSerializer {
const std::string data_;
};
-class SuccessfulWriteObserver {
+enum WriteCallbackObservationState {
+ NOT_CALLED,
+ CALLED_WITH_ERROR,
+ CALLED_WITH_SUCCESS,
+};
+
+class WriteCallbacksObserver {
public:
- SuccessfulWriteObserver() : successful_write_observed_(false) {}
+ WriteCallbacksObserver() = default;
- // Register on_successful_write() to be called on the next successful write
+ // Register OnBeforeWrite() and OnAfterWrite() to be called on the next write
// of |writer|.
- void ObserveNextSuccessfulWrite(ImportantFileWriter* writer);
+ void ObserveNextWriteCallbacks(ImportantFileWriter* writer);
- // Returns true if a successful write was observed via on_successful_write()
- // and resets the observation state to false regardless.
- bool GetAndResetObservationState();
+ // Returns the |WriteCallbackObservationState| which was observed, then resets
+ // it to |NOT_CALLED|.
+ WriteCallbackObservationState GetAndResetObservationState();
private:
- void on_successful_write() {
- EXPECT_FALSE(successful_write_observed_);
- successful_write_observed_ = true;
+ void OnBeforeWrite() {
+ EXPECT_FALSE(before_write_called_);
+ before_write_called_ = true;
+ }
+
+ void OnAfterWrite(bool success) {
+ EXPECT_EQ(NOT_CALLED, after_write_observation_state_);
+ after_write_observation_state_ =
+ success ? CALLED_WITH_SUCCESS : CALLED_WITH_ERROR;
}
- bool successful_write_observed_;
+ bool before_write_called_ = false;
+ WriteCallbackObservationState after_write_observation_state_ = NOT_CALLED;
- DISALLOW_COPY_AND_ASSIGN(SuccessfulWriteObserver);
+ DISALLOW_COPY_AND_ASSIGN(WriteCallbacksObserver);
};
-void SuccessfulWriteObserver::ObserveNextSuccessfulWrite(
+void WriteCallbacksObserver::ObserveNextWriteCallbacks(
ImportantFileWriter* writer) {
- writer->RegisterOnNextSuccessfulWriteCallback(base::Bind(
- &SuccessfulWriteObserver::on_successful_write, base::Unretained(this)));
+ writer->RegisterOnNextWriteCallbacks(
+ base::Bind(&WriteCallbacksObserver::OnBeforeWrite,
+ base::Unretained(this)),
+ base::Bind(&WriteCallbacksObserver::OnAfterWrite,
+ base::Unretained(this)));
}
-bool SuccessfulWriteObserver::GetAndResetObservationState() {
- bool was_successful_write_observed = successful_write_observed_;
- successful_write_observed_ = false;
- return was_successful_write_observed;
+WriteCallbackObservationState
+WriteCallbacksObserver::GetAndResetObservationState() {
+ EXPECT_EQ(after_write_observation_state_ != NOT_CALLED, before_write_called_)
+ << "The before-write callback should always be called before the "
+ "after-write callback";
+
+ WriteCallbackObservationState state = after_write_observation_state_;
+ before_write_called_ = false;
+ after_write_observation_state_ = NOT_CALLED;
+ return state;
}
} // namespace
@@ -88,11 +110,11 @@ class ImportantFileWriterTest : public testing::Test {
ImportantFileWriterTest() { }
void SetUp() override {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- file_ = temp_dir_.path().AppendASCII("test-file");
+ file_ = temp_dir_.GetPath().AppendASCII("test-file");
}
protected:
- SuccessfulWriteObserver successful_write_observer_;
+ WriteCallbacksObserver write_callback_observer_;
FilePath file_;
MessageLoop loop_;
@@ -103,49 +125,102 @@ class ImportantFileWriterTest : public testing::Test {
TEST_F(ImportantFileWriterTest, Basic) {
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
EXPECT_FALSE(PathExists(writer.path()));
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
- writer.WriteNow(WrapUnique(new std::string("foo")));
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+ writer.WriteNow(MakeUnique<std::string>("foo"));
RunLoop().RunUntilIdle();
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
}
-TEST_F(ImportantFileWriterTest, BasicWithSuccessfulWriteObserver) {
+TEST_F(ImportantFileWriterTest, WriteWithObserver) {
ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
EXPECT_FALSE(PathExists(writer.path()));
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
- successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
- writer.WriteNow(WrapUnique(new std::string("foo")));
- RunLoop().RunUntilIdle();
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
// Confirm that the observer is invoked.
- EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
+ write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+ writer.WriteNow(MakeUnique<std::string>("foo"));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(CALLED_WITH_SUCCESS,
+ write_callback_observer_.GetAndResetObservationState());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
// Confirm that re-installing the observer works for another write.
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
- successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
- writer.WriteNow(WrapUnique(new std::string("bar")));
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+ write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+ writer.WriteNow(MakeUnique<std::string>("bar"));
RunLoop().RunUntilIdle();
- EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
+ EXPECT_EQ(CALLED_WITH_SUCCESS,
+ write_callback_observer_.GetAndResetObservationState());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("bar", GetFileContent(writer.path()));
// Confirm that writing again without re-installing the observer doesn't
// result in a notification.
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
- writer.WriteNow(WrapUnique(new std::string("baz")));
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+ writer.WriteNow(MakeUnique<std::string>("baz"));
RunLoop().RunUntilIdle();
- EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("baz", GetFileContent(writer.path()));
}
+TEST_F(ImportantFileWriterTest, FailedWriteWithObserver) {
+ // Use an invalid file path (relative paths are invalid) to get a
+ // FILE_ERROR_ACCESS_DENIED error when trying to write the file.
+ ImportantFileWriter writer(FilePath().AppendASCII("bad/../path"),
+ ThreadTaskRunnerHandle::Get());
+ EXPECT_FALSE(PathExists(writer.path()));
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+ write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+ writer.WriteNow(MakeUnique<std::string>("foo"));
+ RunLoop().RunUntilIdle();
+
+ // Confirm that the write observer was invoked with its boolean parameter set
+ // to false.
+ EXPECT_EQ(CALLED_WITH_ERROR,
+ write_callback_observer_.GetAndResetObservationState());
+ EXPECT_FALSE(PathExists(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, CallbackRunsOnWriterThread) {
+ base::Thread file_writer_thread("ImportantFileWriter test thread");
+ file_writer_thread.Start();
+ ImportantFileWriter writer(file_, file_writer_thread.task_runner());
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+
+ // Block execution on |file_writer_thread| to verify that callbacks are
+ // executed on it.
+ base::WaitableEvent wait_helper(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ file_writer_thread.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::WaitableEvent::Wait, base::Unretained(&wait_helper)));
+
+ write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+ writer.WriteNow(MakeUnique<std::string>("foo"));
+ RunLoop().RunUntilIdle();
+
+ // Expect the callback to not have been executed before the
+ // |file_writer_thread| is unblocked.
+ EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+
+ wait_helper.Signal();
+ file_writer_thread.FlushForTesting();
+
+ EXPECT_EQ(CALLED_WITH_SUCCESS,
+ write_callback_observer_.GetAndResetObservationState());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
TEST_F(ImportantFileWriterTest, ScheduleWrite) {
ImportantFileWriter writer(file_,
ThreadTaskRunnerHandle::Get(),
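The CallbackRunsOnWriterThread test above uses a generic trick for proving where work executes: park the target thread on a WaitableEvent so that nothing posted behind the gate can run until the test allows it. A minimal sketch of that pattern, with ParkThread as a hypothetical helper:

#include "base/bind.h"
#include "base/location.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"

// Blocks |thread| until |gate| is signaled. Tasks posted after this one
// cannot run in the meantime, which is what lets the test assert that the
// write callbacks have not fired yet.
void ParkThread(base::Thread* thread, base::WaitableEvent* gate) {
  thread->task_runner()->PostTask(
      FROM_HERE,
      base::Bind(&base::WaitableEvent::Wait, base::Unretained(gate)));
}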
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
index 4899cf0cda..90ba6f49c1 100644
--- a/base/files/memory_mapped_file_posix.cc
+++ b/base/files/memory_mapped_file_posix.cc
@@ -31,7 +31,7 @@ bool MemoryMappedFile::MapFileRegionToMemory(
if (region == MemoryMappedFile::Region::kWholeFile) {
int64_t file_len = file_.GetLength();
- if (file_len == -1) {
+ if (file_len < 0) {
DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
return false;
}
@@ -78,7 +78,12 @@ bool MemoryMappedFile::MapFileRegionToMemory(
// POSIX won't auto-extend the file when it is written so it must first
// be explicitly extended to the maximum size. Zeros will fill the new
// space.
- file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
+ auto file_len = file_.GetLength();
+ if (file_len < 0) {
+ DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
+ return false;
+ }
+ file_.SetLength(std::max(file_len, region.offset + region.size));
flags |= PROT_READ | PROT_WRITE;
break;
}
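The hunk above replaces an unchecked GetLength() call with an explicit failure check before SetLength(). A small sketch of the guarded pattern, with ExtendForMapping standing in for the mapping code (the name is hypothetical):

#include <stdint.h>

#include <algorithm>

#include "base/files/file.h"
#include "base/logging.h"

// base::File::GetLength() signals failure with a negative value; never feed
// that into SetLength().
bool ExtendForMapping(base::File* file, int64_t offset, int64_t size) {
  int64_t len = file->GetLength();
  if (len < 0) {
    DPLOG(ERROR) << "GetLength failed";
    return false;
  }
  return file->SetLength(std::max(len, offset + size));
}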
diff --git a/base/files/scoped_file.cc b/base/files/scoped_file.cc
index 8ce45b8ba3..78d4ca5263 100644
--- a/base/files/scoped_file.cc
+++ b/base/files/scoped_file.cc
@@ -37,6 +37,14 @@ void ScopedFDCloseTraits::Free(int fd) {
int close_errno = errno;
base::debug::Alias(&close_errno);
+#if defined(OS_LINUX)
+ // NB: Some file descriptors can return errors from close(), e.g. network
+ // filesystems such as NFS and Linux input devices. On Linux, errors from
+ // close() other than EBADF do not indicate failure to actually close the fd.
+ if (ret != 0 && errno != EBADF)
+ ret = 0;
+#endif
+
PCHECK(0 == ret);
}
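A standalone sketch of the rule this hunk encodes (CloseFd is hypothetical): on Linux, once close() has been called the descriptor is released for every errno except EBADF, so only EBADF means the close did not take effect.

#include <errno.h>
#include <unistd.h>

// Returns true if |fd| is closed after the call.
bool CloseFd(int fd) {
  int ret = close(fd);
  if (ret != 0 && errno != EBADF)
    ret = 0;  // e.g. an NFS flush error: the descriptor is gone regardless.
  return ret == 0;
}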
diff --git a/base/files/scoped_temp_dir.cc b/base/files/scoped_temp_dir.cc
index 27b758ed90..26815217c6 100644
--- a/base/files/scoped_temp_dir.cc
+++ b/base/files/scoped_temp_dir.cc
@@ -76,6 +76,11 @@ FilePath ScopedTempDir::Take() {
return ret;
}
+const FilePath& ScopedTempDir::GetPath() const {
+ DCHECK(!path_.empty()) << "Did you call CreateUniqueTempDir* before?";
+ return path_;
+}
+
bool ScopedTempDir::IsValid() const {
return !path_.empty() && DirectoryExists(path_);
}
diff --git a/base/files/scoped_temp_dir.h b/base/files/scoped_temp_dir.h
index b1f2f5b874..a5aaf84362 100644
--- a/base/files/scoped_temp_dir.h
+++ b/base/files/scoped_temp_dir.h
@@ -47,7 +47,9 @@ class BASE_EXPORT ScopedTempDir {
// when this object goes out of scope.
FilePath Take();
- const FilePath& path() const { return path_; }
+ // Returns the path to the created directory. Call one of the
+ // CreateUniqueTempDir* methods before getting the path.
+ const FilePath& GetPath() const;
// Returns true if path_ is non-empty and exists.
bool IsValid() const;
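A short usage sketch for the renamed accessor (TouchScratchFile is hypothetical): unlike the old path(), GetPath() DCHECKs that a directory was actually created, so use-before-create bugs fail loudly in debug builds.

#include "base/files/file_path.h"
#include "base/files/scoped_temp_dir.h"

bool TouchScratchFile() {
  base::ScopedTempDir dir;
  if (!dir.CreateUniqueTempDir())
    return false;
  // Safe: CreateUniqueTempDir() succeeded, so GetPath() will not DCHECK.
  base::FilePath file = dir.GetPath().AppendASCII("scratch");
  // Use |file| while |dir| is in scope; the directory is deleted when |dir|
  // is destroyed.
  return !file.empty();
}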
diff --git a/base/files/scoped_temp_dir_unittest.cc b/base/files/scoped_temp_dir_unittest.cc
index 3b2f28e50e..024b438aa0 100644
--- a/base/files/scoped_temp_dir_unittest.cc
+++ b/base/files/scoped_temp_dir_unittest.cc
@@ -53,7 +53,7 @@ TEST(ScopedTempDir, TempDir) {
{
ScopedTempDir dir;
EXPECT_TRUE(dir.CreateUniqueTempDir());
- test_path = dir.path();
+ test_path = dir.GetPath();
EXPECT_TRUE(DirectoryExists(test_path));
FilePath tmp_dir;
EXPECT_TRUE(base::GetTempDir(&tmp_dir));
@@ -72,7 +72,7 @@ TEST(ScopedTempDir, UniqueTempDirUnderPath) {
{
ScopedTempDir dir;
EXPECT_TRUE(dir.CreateUniqueTempDirUnderPath(base_path));
- test_path = dir.path();
+ test_path = dir.GetPath();
EXPECT_TRUE(DirectoryExists(test_path));
EXPECT_TRUE(base_path.IsParent(test_path));
EXPECT_TRUE(test_path.value().find(base_path.value()) != std::string::npos);
@@ -99,12 +99,12 @@ TEST(ScopedTempDir, MultipleInvocations) {
TEST(ScopedTempDir, LockedTempDir) {
ScopedTempDir dir;
EXPECT_TRUE(dir.CreateUniqueTempDir());
- base::File file(dir.path().Append(FILE_PATH_LITERAL("temp")),
+ base::File file(dir.GetPath().Append(FILE_PATH_LITERAL("temp")),
base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
EXPECT_TRUE(file.IsValid());
EXPECT_EQ(base::File::FILE_OK, file.error_details());
EXPECT_FALSE(dir.Delete()); // We should not be able to delete.
- EXPECT_FALSE(dir.path().empty()); // We should still have a valid path.
+ EXPECT_FALSE(dir.GetPath().empty()); // We should still have a valid path.
file.Close();
// Now, we should be able to delete.
EXPECT_TRUE(dir.Delete());
diff --git a/base/id_map.h b/base/id_map.h
index ef6b1564fa..d171fb14c1 100644
--- a/base/id_map.h
+++ b/base/id_map.h
@@ -7,20 +7,16 @@
#include <stddef.h>
#include <stdint.h>
+#include <memory>
#include <set>
+#include <type_traits>
+#include <utility>
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/sequence_checker.h"
-// Ownership semantics - own pointer means the pointer is deleted in Remove()
-// & during destruction
-enum IDMapOwnershipSemantics {
- IDMapExternalPointer,
- IDMapOwnPointer
-};
-
// This object maintains a list of IDs that can be quickly converted to
// pointers to objects. It is implemented as a hash table, optimized for
// relatively small data sets (in the common case, there will be exactly one
@@ -29,25 +25,24 @@ enum IDMapOwnershipSemantics {
// Items can be inserted into the container with arbitrary ID, but the caller
// must ensure they are unique. Inserting IDs and relying on automatically
// generated ones is not allowed because they can collide.
-//
-// This class does not have a virtual destructor, do not inherit from it when
-// ownership semantics are set to own because pointers will leak.
-template <typename T,
- IDMapOwnershipSemantics OS = IDMapExternalPointer,
- typename K = int32_t>
-class IDMap {
+
+// The map's value type (the V param) can be any dereferenceable type, such as a
+// raw pointer or smart pointer.
+template <typename V, typename K = int32_t>
+class IDMap final {
public:
using KeyType = K;
private:
- typedef base::hash_map<KeyType, T*> HashTable;
+ using T = typename std::remove_reference<decltype(*V())>::type;
+ using HashTable = base::hash_map<KeyType, V>;
public:
IDMap() : iteration_depth_(0), next_id_(1), check_on_null_data_(false) {
// A number of consumers of IDMap create it on one thread but always
// access it from a different, but consistent, thread (or sequence)
- // post-construction. The first call to CalledOnValidSequencedThread()
- // will re-bind it.
+ // post-construction. The first call to CalledOnValidSequence() will re-bind
+ // it.
sequence_checker_.DetachFromSequence();
}
@@ -56,7 +51,6 @@ class IDMap {
// thread. However, all the accesses may take place on another thread (or
// sequence), such as the IO thread. Detaching again to clean this up.
sequence_checker_.DetachFromSequence();
- Releaser<OS, 0>::release_all(&data_);
}
// Sets whether Add and Replace should DCHECK if passed in NULL data.
@@ -64,29 +58,16 @@ class IDMap {
void set_check_on_null_data(bool value) { check_on_null_data_ = value; }
// Adds a view with an automatically generated unique ID. See AddWithID.
- KeyType Add(T* data) {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
- DCHECK(!check_on_null_data_ || data);
- KeyType this_id = next_id_;
- DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
- data_[this_id] = data;
- next_id_++;
- return this_id;
- }
+ KeyType Add(V data) { return AddInternal(std::move(data)); }
// Adds a new data member with the specified ID. The ID must not be in
// the list. The caller either must generate all unique IDs itself and use
// this function, or allow this object to generate IDs and call Add. These
- // two methods may not be mixed, or duplicate IDs may be generated
- void AddWithID(T* data, KeyType id) {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
- DCHECK(!check_on_null_data_ || data);
- DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
- data_[id] = data;
- }
+ // two methods may not be mixed, or duplicate IDs may be generated.
+ void AddWithID(V data, KeyType id) { AddWithIDInternal(std::move(data), id); }
void Remove(KeyType id) {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
typename HashTable::iterator i = data_.find(id);
if (i == data_.end()) {
NOTREACHED() << "Attempting to remove an item not in the list";
@@ -94,36 +75,28 @@ class IDMap {
}
if (iteration_depth_ == 0) {
- Releaser<OS, 0>::release(i->second);
data_.erase(i);
} else {
removed_ids_.insert(id);
}
}
- // Replaces the value for |id| with |new_data| and returns a pointer to the
- // existing value. If there is no entry for |id|, the map is not altered and
- // nullptr is returned. The OwnershipSemantics of the map have no effect on
- // how the existing value is treated, the IDMap does not delete the existing
- // value being replaced.
- T* Replace(KeyType id, T* new_data) {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ // Replaces the value for |id| with |new_data| and returns the existing value.
+ // Should only be called with an already added id.
+ V Replace(KeyType id, V new_data) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
DCHECK(!check_on_null_data_ || new_data);
typename HashTable::iterator i = data_.find(id);
- if (i == data_.end()) {
- NOTREACHED() << "Attempting to replace an item not in the list";
- return nullptr;
- }
+ DCHECK(i != data_.end());
- T* temp = i->second;
- i->second = new_data;
- return temp;
+ std::swap(i->second, new_data);
+ return new_data;
}
void Clear() {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
if (iteration_depth_ == 0) {
- Releaser<OS, 0>::release_all(&data_);
+ data_.clear();
} else {
for (typename HashTable::iterator i = data_.begin();
i != data_.end(); ++i)
@@ -132,20 +105,20 @@ class IDMap {
}
bool IsEmpty() const {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return size() == 0u;
}
T* Lookup(KeyType id) const {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
typename HashTable::const_iterator i = data_.find(id);
if (i == data_.end())
- return NULL;
- return i->second;
+ return nullptr;
+ return &*i->second;
}
size_t size() const {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return data_.size() - removed_ids_.size();
}
@@ -160,9 +133,7 @@ class IDMap {
template<class ReturnType>
class Iterator {
public:
- Iterator(IDMap<T, OS, K>* map)
- : map_(map),
- iter_(map_->data_.begin()) {
+ Iterator(IDMap<V, K>* map) : map_(map), iter_(map_->data_.begin()) {
Init();
}
@@ -180,7 +151,7 @@ class IDMap {
}
~Iterator() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
// We're going to decrement iteration depth. Make sure it's greater than
// zero so that it doesn't become negative.
@@ -191,29 +162,29 @@ class IDMap {
}
bool IsAtEnd() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
return iter_ == map_->data_.end();
}
KeyType GetCurrentKey() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
return iter_->first;
}
ReturnType* GetCurrentValue() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
- return iter_->second;
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ return &*iter_->second;
}
void Advance() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
++iter_;
SkipRemovedEntries();
}
private:
void Init() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+ DCHECK(map_->sequence_checker_.CalledOnValidSequence());
++map_->iteration_depth_;
SkipRemovedEntries();
}
@@ -226,7 +197,7 @@ class IDMap {
}
}
- IDMap<T, OS, K>* map_;
+ IDMap<V, K>* map_;
typename HashTable::const_iterator iter_;
};
@@ -234,24 +205,22 @@ class IDMap {
typedef Iterator<const T> const_iterator;
private:
+ KeyType AddInternal(V data) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK(!check_on_null_data_ || data);
+ KeyType this_id = next_id_;
+ DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
+ data_[this_id] = std::move(data);
+ next_id_++;
+ return this_id;
+ }
- // The dummy parameter is there because C++ standard does not allow
- // explicitly specialized templates inside classes
- template<IDMapOwnershipSemantics OI, int dummy> struct Releaser {
- static inline void release(T* ptr) {}
- static inline void release_all(HashTable* table) {}
- };
-
- template<int dummy> struct Releaser<IDMapOwnPointer, dummy> {
- static inline void release(T* ptr) { delete ptr;}
- static inline void release_all(HashTable* table) {
- for (typename HashTable::iterator i = table->begin();
- i != table->end(); ++i) {
- delete i->second;
- }
- table->clear();
- }
- };
+ void AddWithIDInternal(V data, KeyType id) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK(!check_on_null_data_ || data);
+ DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+ data_[id] = std::move(data);
+ }
void Compact() {
DCHECK_EQ(0, iteration_depth_);
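With the ownership enum gone, ownership is expressed entirely through the value type: a raw-pointer map does not own its entries, while a std::unique_ptr map deletes them on Remove(), Clear(), and destruction. A minimal sketch (Widget and Demo are hypothetical):

#include <stdint.h>

#include <memory>

#include "base/id_map.h"
#include "base/memory/ptr_util.h"

struct Widget {
  int n = 0;
};

void Demo() {
  Widget external;
  IDMap<Widget*> weak_map;  // Does not own its entries.
  int32_t id1 = weak_map.Add(&external);

  IDMap<std::unique_ptr<Widget>> owning_map;  // Deletes entries it holds.
  int32_t id2 = owning_map.Add(base::MakeUnique<Widget>());

  // Lookup() returns a raw T* in both cases.
  weak_map.Lookup(id1)->n = 1;
  owning_map.Lookup(id2)->n = 2;
}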
diff --git a/base/id_map_unittest.cc b/base/id_map_unittest.cc
index a3f0808915..42949bb5b9 100644
--- a/base/id_map_unittest.cc
+++ b/base/id_map_unittest.cc
@@ -6,6 +6,9 @@
#include <stdint.h>
+#include <memory>
+
+#include "base/memory/ptr_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -23,7 +26,7 @@ class DestructorCounter {
};
TEST(IDMapTest, Basic) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
EXPECT_TRUE(map.IsEmpty());
EXPECT_EQ(0U, map.size());
@@ -62,7 +65,7 @@ TEST(IDMapTest, Basic) {
}
TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -73,7 +76,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
map.Add(&obj3);
{
- IDMap<TestObject>::const_iterator iter(&map);
+ IDMap<TestObject*>::const_iterator iter(&map);
EXPECT_EQ(1, map.iteration_depth());
@@ -95,7 +98,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
}
TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
const int kCount = 5;
TestObject obj[kCount];
@@ -107,16 +110,16 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
int32_t ids_in_iteration_order[kCount];
const TestObject* objs_in_iteration_order[kCount];
int counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
ids_in_iteration_order[counter] = iter.GetCurrentKey();
objs_in_iteration_order[counter] = iter.GetCurrentValue();
counter++;
}
counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
EXPECT_EQ(1, map.iteration_depth());
switch (counter) {
@@ -147,7 +150,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
}
TEST(IDMapTest, CopyIterator) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -160,12 +163,12 @@ TEST(IDMapTest, CopyIterator) {
EXPECT_EQ(0, map.iteration_depth());
{
- IDMap<TestObject>::const_iterator iter1(&map);
+ IDMap<TestObject*>::const_iterator iter1(&map);
EXPECT_EQ(1, map.iteration_depth());
// Make sure that copying the iterator correctly increments
// map's iteration depth.
- IDMap<TestObject>::const_iterator iter2(iter1);
+ IDMap<TestObject*>::const_iterator iter2(iter1);
EXPECT_EQ(2, map.iteration_depth());
}
@@ -175,7 +178,7 @@ TEST(IDMapTest, CopyIterator) {
}
TEST(IDMapTest, AssignIterator) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -188,10 +191,10 @@ TEST(IDMapTest, AssignIterator) {
EXPECT_EQ(0, map.iteration_depth());
{
- IDMap<TestObject>::const_iterator iter1(&map);
+ IDMap<TestObject*>::const_iterator iter1(&map);
EXPECT_EQ(1, map.iteration_depth());
- IDMap<TestObject>::const_iterator iter2(&map);
+ IDMap<TestObject*>::const_iterator iter2(&map);
EXPECT_EQ(2, map.iteration_depth());
// Make sure that assigning the iterator correctly updates
@@ -205,7 +208,7 @@ TEST(IDMapTest, AssignIterator) {
}
TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
const int kCount = 5;
TestObject obj[kCount];
@@ -217,16 +220,16 @@ TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
int32_t ids_in_iteration_order[kCount];
const TestObject* objs_in_iteration_order[kCount];
int counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
ids_in_iteration_order[counter] = iter.GetCurrentKey();
objs_in_iteration_order[counter] = iter.GetCurrentValue();
counter++;
}
counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
switch (counter) {
case 0:
EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
@@ -258,18 +261,17 @@ TEST(IDMapTest, OwningPointersDeletesThemOnRemove) {
int map_external_ids[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
int map_owned_ids[kCount];
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external_ids[i] = map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned_ids[i] = map_owned.Add(owned_obj[i]);
+ map_owned_ids[i] =
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
for (int i = 0; i < kCount; ++i) {
@@ -295,17 +297,15 @@ TEST(IDMapTest, OwningPointersDeletesThemOnClear) {
DestructorCounter* external_obj[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned.Add(owned_obj[i]);
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
EXPECT_EQ(external_del_count, 0);
@@ -332,18 +332,16 @@ TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
DestructorCounter* external_obj[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
{
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned.Add(owned_obj[i]);
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
}
@@ -358,14 +356,14 @@ TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
}
TEST(IDMapTest, Int64KeyType) {
- IDMap<TestObject, IDMapExternalPointer, int64_t> map;
+ IDMap<TestObject*, int64_t> map;
TestObject obj1;
const int64_t kId1 = 999999999999999999;
map.AddWithID(&obj1, kId1);
EXPECT_EQ(&obj1, map.Lookup(kId1));
- IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
+ IDMap<TestObject*, int64_t>::const_iterator iter(&map);
ASSERT_FALSE(iter.IsAtEnd());
EXPECT_EQ(kId1, iter.GetCurrentKey());
EXPECT_EQ(&obj1, iter.GetCurrentValue());
diff --git a/base/json/json_file_value_serializer.cc b/base/json/json_file_value_serializer.cc
index 1a9b7a23b2..661d25d798 100644
--- a/base/json/json_file_value_serializer.cc
+++ b/base/json/json_file_value_serializer.cc
@@ -53,11 +53,9 @@ bool JSONFileValueSerializer::SerializeInternal(const base::Value& root,
}
JSONFileValueDeserializer::JSONFileValueDeserializer(
- const base::FilePath& json_file_path)
- : json_file_path_(json_file_path),
- allow_trailing_comma_(false),
- last_read_size_(0U) {
-}
+ const base::FilePath& json_file_path,
+ int options)
+ : json_file_path_(json_file_path), options_(options), last_read_size_(0U) {}
JSONFileValueDeserializer::~JSONFileValueDeserializer() {
}
@@ -114,7 +112,6 @@ std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
return NULL;
}
- JSONStringValueDeserializer deserializer(json_string);
- deserializer.set_allow_trailing_comma(allow_trailing_comma_);
+ JSONStringValueDeserializer deserializer(json_string, options_);
return deserializer.Deserialize(error_code, error_str);
}
diff --git a/base/json/json_file_value_serializer.h b/base/json/json_file_value_serializer.h
index 67d2342b4c..a93950a608 100644
--- a/base/json/json_file_value_serializer.h
+++ b/base/json/json_file_value_serializer.h
@@ -48,8 +48,9 @@ class BASE_EXPORT JSONFileValueSerializer : public base::ValueSerializer {
class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
public:
// |json_file_path_| is the path of a file that will be source of the
- // deserialization.
- explicit JSONFileValueDeserializer(const base::FilePath& json_file_path);
+ // deserialization. |options| is a bitmask of JSONParserOptions.
+ explicit JSONFileValueDeserializer(const base::FilePath& json_file_path,
+ int options = 0);
~JSONFileValueDeserializer() override;
@@ -82,10 +83,6 @@ class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
// be a JsonFileError.
static const char* GetErrorMessageForCode(int error_code);
- void set_allow_trailing_comma(bool new_value) {
- allow_trailing_comma_ = new_value;
- }
-
// Returns the size (in bytes) of JSON string read from disk in the last
// successful |Deserialize()| call.
size_t get_last_read_size() const { return last_read_size_; }
@@ -96,7 +93,7 @@ class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
int ReadFileToString(std::string* json_string);
const base::FilePath json_file_path_;
- bool allow_trailing_comma_;
+ const int options_;
size_t last_read_size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueDeserializer);
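Callers that previously used set_allow_trailing_comma() now pass JSONParserOptions bits at construction. A sketch assuming the JSON_ALLOW_TRAILING_COMMAS bit from base/json/json_reader.h (LoadConfig is hypothetical):

#include <memory>
#include <string>

#include "base/files/file_path.h"
#include "base/json/json_file_value_serializer.h"
#include "base/json/json_reader.h"
#include "base/values.h"

std::unique_ptr<base::Value> LoadConfig(const base::FilePath& path) {
  JSONFileValueDeserializer deserializer(path,
                                         base::JSON_ALLOW_TRAILING_COMMAS);
  int error_code = 0;
  std::string error_message;
  return deserializer.Deserialize(&error_code, &error_message);
}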
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index d97eccc96c..c6f6409df6 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -24,149 +24,12 @@ namespace internal {
namespace {
-const int kStackMaxDepth = 100;
+// Chosen to support 99.9% of documents found in the wild in late 2016.
+// http://crbug.com/673263
+const int kStackMaxDepth = 200;
const int32_t kExtendedASCIIStart = 0x80;
-// DictionaryHiddenRootValue and ListHiddenRootValue are used in conjunction
-// with JSONStringValue as an optimization for reducing the number of string
-// copies. When this optimization is active, the parser uses a hidden root to
-// keep the original JSON input string live and creates JSONStringValue children
-// holding StringPiece references to the input string, avoiding about 2/3rds of
-// string memory copies. The real root value is Swap()ed into the new instance.
-class DictionaryHiddenRootValue : public DictionaryValue {
- public:
- DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
- std::unique_ptr<Value> root)
- : json_(std::move(json)) {
- DCHECK(root->IsType(Value::TYPE_DICTIONARY));
- DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
- }
-
- void Swap(DictionaryValue* other) override {
- DVLOG(1) << "Swap()ing a DictionaryValue inefficiently.";
-
- // First deep copy to convert JSONStringValue to std::string and swap that
- // copy with |other|, which contains the new contents of |this|.
- std::unique_ptr<DictionaryValue> copy(CreateDeepCopy());
- copy->Swap(other);
-
- // Then erase the contents of the current dictionary and swap in the
- // new contents, originally from |other|.
- Clear();
- json_.reset();
- DictionaryValue::Swap(copy.get());
- }
-
- // Not overriding DictionaryValue::Remove because it just calls through to
- // the method below.
-
- bool RemoveWithoutPathExpansion(const std::string& key,
- std::unique_ptr<Value>* out) override {
- // If the caller won't take ownership of the removed value, just call up.
- if (!out)
- return DictionaryValue::RemoveWithoutPathExpansion(key, out);
-
- DVLOG(1) << "Remove()ing from a DictionaryValue inefficiently.";
-
- // Otherwise, remove the value while its still "owned" by this and copy it
- // to convert any JSONStringValues to std::string.
- std::unique_ptr<Value> out_owned;
- if (!DictionaryValue::RemoveWithoutPathExpansion(key, &out_owned))
- return false;
-
- *out = out_owned->CreateDeepCopy();
-
- return true;
- }
-
- private:
- std::unique_ptr<std::string> json_;
-
- DISALLOW_COPY_AND_ASSIGN(DictionaryHiddenRootValue);
-};
-
-class ListHiddenRootValue : public ListValue {
- public:
- ListHiddenRootValue(std::unique_ptr<std::string> json,
- std::unique_ptr<Value> root)
- : json_(std::move(json)) {
- DCHECK(root->IsType(Value::TYPE_LIST));
- ListValue::Swap(static_cast<ListValue*>(root.get()));
- }
-
- void Swap(ListValue* other) override {
- DVLOG(1) << "Swap()ing a ListValue inefficiently.";
-
- // First deep copy to convert JSONStringValue to std::string and swap that
- // copy with |other|, which contains the new contents of |this|.
- std::unique_ptr<ListValue> copy(CreateDeepCopy());
- copy->Swap(other);
-
- // Then erase the contents of the current list and swap in the new contents,
- // originally from |other|.
- Clear();
- json_.reset();
- ListValue::Swap(copy.get());
- }
-
- bool Remove(size_t index, std::unique_ptr<Value>* out) override {
- // If the caller won't take ownership of the removed value, just call up.
- if (!out)
- return ListValue::Remove(index, out);
-
- DVLOG(1) << "Remove()ing from a ListValue inefficiently.";
-
- // Otherwise, remove the value while its still "owned" by this and copy it
- // to convert any JSONStringValues to std::string.
- std::unique_ptr<Value> out_owned;
- if (!ListValue::Remove(index, &out_owned))
- return false;
-
- *out = out_owned->CreateDeepCopy();
-
- return true;
- }
-
- private:
- std::unique_ptr<std::string> json_;
-
- DISALLOW_COPY_AND_ASSIGN(ListHiddenRootValue);
-};
-
-// A variant on StringValue that uses StringPiece instead of copying the string
-// into the Value. This can only be stored in a child of hidden root (above),
-// otherwise the referenced string will not be guaranteed to outlive it.
-class JSONStringValue : public Value {
- public:
- explicit JSONStringValue(StringPiece piece)
- : Value(TYPE_STRING), string_piece_(piece) {}
-
- // Overridden from Value:
- bool GetAsString(std::string* out_value) const override {
- string_piece_.CopyToString(out_value);
- return true;
- }
- bool GetAsString(string16* out_value) const override {
- *out_value = UTF8ToUTF16(string_piece_);
- return true;
- }
- Value* DeepCopy() const override {
- return new StringValue(string_piece_.as_string());
- }
- bool Equals(const Value* other) const override {
- std::string other_string;
- return other->IsType(TYPE_STRING) && other->GetAsString(&other_string) &&
- StringPiece(other_string) == string_piece_;
- }
-
- private:
- // The location in the original input stream.
- StringPiece string_piece_;
-
- DISALLOW_COPY_AND_ASSIGN(JSONStringValue);
-};
-
// Simple class that checks for maximum recursion/"stack overflow."
class StackMarker {
public:
@@ -190,6 +53,9 @@ class StackMarker {
} // namespace
+// This is U+FFFD.
+const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
+
JSONParser::JSONParser(int options)
: options_(options),
start_pos_(nullptr),
@@ -208,16 +74,7 @@ JSONParser::~JSONParser() {
}
std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
- std::unique_ptr<std::string> input_copy;
- // If the children of a JSON root can be detached, then hidden roots cannot
- // be used, so do not bother copying the input because StringPiece will not
- // be used anywhere.
- if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
- input_copy = MakeUnique<std::string>(input.as_string());
- start_pos_ = input_copy->data();
- } else {
- start_pos_ = input.data();
- }
+ start_pos_ = input.data();
pos_ = start_pos_;
end_pos_ = start_pos_ + input.length();
index_ = 0;
@@ -251,26 +108,6 @@ std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
}
}
- // Dictionaries and lists can contain JSONStringValues, so wrap them in a
- // hidden root.
- if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
- if (root->IsType(Value::TYPE_DICTIONARY)) {
- return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
- std::move(root));
- }
- if (root->IsType(Value::TYPE_LIST)) {
- return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
- std::move(root));
- }
- if (root->IsType(Value::TYPE_STRING)) {
- // A string type could be a JSONStringValue, but because there's no
- // corresponding HiddenRootValue, the memory will be lost. Deep copy to
- // preserve it.
- return root->CreateDeepCopy();
- }
- }
-
- // All other values can be returned directly.
return root;
}
@@ -296,58 +133,62 @@ int JSONParser::error_column() const {
JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
JSONParser::StringBuilder::StringBuilder(const char* pos)
- : pos_(pos),
- length_(0),
- string_(nullptr) {
-}
+ : pos_(pos), length_(0), has_string_(false) {}
-void JSONParser::StringBuilder::Swap(StringBuilder* other) {
- std::swap(other->string_, string_);
- std::swap(other->pos_, pos_);
- std::swap(other->length_, length_);
+JSONParser::StringBuilder::~StringBuilder() {
+ if (has_string_)
+ string_.Destroy();
}
-JSONParser::StringBuilder::~StringBuilder() {
- delete string_;
+void JSONParser::StringBuilder::operator=(StringBuilder&& other) {
+ pos_ = other.pos_;
+ length_ = other.length_;
+ has_string_ = other.has_string_;
+ if (has_string_)
+ string_.InitFromMove(std::move(other.string_));
}
void JSONParser::StringBuilder::Append(const char& c) {
DCHECK_GE(c, 0);
DCHECK_LT(static_cast<unsigned char>(c), 128);
- if (string_)
+ if (has_string_)
string_->push_back(c);
else
++length_;
}
-void JSONParser::StringBuilder::AppendString(const std::string& str) {
- DCHECK(string_);
- string_->append(str);
+void JSONParser::StringBuilder::AppendString(const char* str, size_t len) {
+ DCHECK(has_string_);
+ string_->append(str, len);
}
void JSONParser::StringBuilder::Convert() {
- if (string_)
+ if (has_string_)
return;
- string_ = new std::string(pos_, length_);
-}
-bool JSONParser::StringBuilder::CanBeStringPiece() const {
- return !string_;
+ has_string_ = true;
+ string_.Init(pos_, length_);
}
StringPiece JSONParser::StringBuilder::AsStringPiece() {
- if (string_)
- return StringPiece();
+ if (has_string_)
+ return StringPiece(*string_);
return StringPiece(pos_, length_);
}
const std::string& JSONParser::StringBuilder::AsString() {
- if (!string_)
+ if (!has_string_)
Convert();
return *string_;
}
+std::string JSONParser::StringBuilder::DestructiveAsString() {
+ if (has_string_)
+ return std::move(*string_);
+ return std::string(pos_, length_);
+}
+
// JSONParser private //////////////////////////////////////////////////////////
inline bool JSONParser::CanConsume(int length) {
@@ -467,11 +308,11 @@ bool JSONParser::EatComment() {
return false;
}
-Value* JSONParser::ParseNextToken() {
+std::unique_ptr<Value> JSONParser::ParseNextToken() {
return ParseToken(GetNextToken());
}
-Value* JSONParser::ParseToken(Token token) {
+std::unique_ptr<Value> JSONParser::ParseToken(Token token) {
switch (token) {
case T_OBJECT_BEGIN:
return ConsumeDictionary();
@@ -491,7 +332,7 @@ Value* JSONParser::ParseToken(Token token) {
}
}
-Value* JSONParser::ConsumeDictionary() {
+std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
if (*pos_ != '{') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
return nullptr;
@@ -529,13 +370,13 @@ Value* JSONParser::ConsumeDictionary() {
// The next token is the value. Ownership transfers to |dict|.
NextChar();
- Value* value = ParseNextToken();
+ std::unique_ptr<Value> value = ParseNextToken();
if (!value) {
// ReportError from deeper level.
return nullptr;
}
- dict->SetWithoutPathExpansion(key.AsString(), value);
+ dict->SetWithoutPathExpansion(key.AsStringPiece(), std::move(value));
NextChar();
token = GetNextToken();
@@ -552,10 +393,10 @@ Value* JSONParser::ConsumeDictionary() {
}
}
- return dict.release();
+ return std::move(dict);
}
-Value* JSONParser::ConsumeList() {
+std::unique_ptr<Value> JSONParser::ConsumeList() {
if (*pos_ != '[') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
return nullptr;
@@ -572,13 +413,13 @@ Value* JSONParser::ConsumeList() {
NextChar();
Token token = GetNextToken();
while (token != T_ARRAY_END) {
- Value* item = ParseToken(token);
+ std::unique_ptr<Value> item = ParseToken(token);
if (!item) {
// ReportError from deeper level.
return nullptr;
}
- list->Append(item);
+ list->Append(std::move(item));
NextChar();
token = GetNextToken();
@@ -595,22 +436,15 @@ Value* JSONParser::ConsumeList() {
}
}
- return list.release();
+ return std::move(list);
}
-Value* JSONParser::ConsumeString() {
+std::unique_ptr<Value> JSONParser::ConsumeString() {
StringBuilder string;
if (!ConsumeStringRaw(&string))
return nullptr;
- // Create the Value representation, using a hidden root, if configured
- // to do so, and if the string can be represented by StringPiece.
- if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN))
- return new JSONStringValue(string.AsStringPiece());
-
- if (string.CanBeStringPiece())
- string.Convert();
- return new StringValue(string.AsString());
+ return base::MakeUnique<Value>(string.DestructiveAsString());
}
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -628,16 +462,24 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
int32_t next_char = 0;
while (CanConsume(1)) {
+ int start_index = index_;
pos_ = start_pos_ + index_; // CBU8_NEXT is postcrement.
CBU8_NEXT(start_pos_, index_, length, next_char);
if (next_char < 0 || !IsValidCharacter(next_char)) {
- ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
- return false;
+ if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
+ ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
+ return false;
+ }
+ CBU8_NEXT(start_pos_, start_index, length, next_char);
+ string.Convert();
+ string.AppendString(kUnicodeReplacementString,
+ arraysize(kUnicodeReplacementString) - 1);
+ continue;
}
if (next_char == '"') {
--index_; // Rewind by one because of CBU8_NEXT.
- out->Swap(&string);
+ *out = std::move(string);
return true;
}
@@ -670,7 +512,8 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
}
int hex_digit = 0;
- if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit)) {
+ if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit) ||
+ !IsValidCharacter(hex_digit)) {
ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
return false;
}
@@ -698,7 +541,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
return false;
}
- string.AppendString(utf8_units);
+ string.AppendString(utf8_units.data(), utf8_units.length());
break;
}
case '"':
@@ -823,11 +666,11 @@ void JSONParser::DecodeUTF8(const int32_t& point, StringBuilder* dest) {
dest->Convert();
// CBU8_APPEND_UNSAFE can overwrite up to 4 bytes, so utf8_units may not be
// zero terminated at this point. |offset| contains the correct length.
- dest->AppendString(std::string(utf8_units, offset));
+ dest->AppendString(utf8_units, offset);
}
}
-Value* JSONParser::ConsumeNumber() {
+std::unique_ptr<Value> JSONParser::ConsumeNumber() {
const char* num_start = pos_;
const int start_index = index_;
int end_index = start_index;
@@ -842,11 +685,7 @@ Value* JSONParser::ConsumeNumber() {
end_index = index_;
// The optional fraction part.
- if (*pos_ == '.') {
- if (!CanConsume(1)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
- }
+ if (CanConsume(1) && *pos_ == '.') {
NextChar();
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
@@ -856,10 +695,15 @@ Value* JSONParser::ConsumeNumber() {
}
// Optional exponent part.
- if (*pos_ == 'e' || *pos_ == 'E') {
+ if (CanConsume(1) && (*pos_ == 'e' || *pos_ == 'E')) {
NextChar();
- if (*pos_ == '-' || *pos_ == '+')
+ if (!CanConsume(1)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ if (*pos_ == '-' || *pos_ == '+') {
NextChar();
+ }
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
return nullptr;
@@ -892,25 +736,30 @@ Value* JSONParser::ConsumeNumber() {
int num_int;
if (StringToInt(num_string, &num_int))
- return new FundamentalValue(num_int);
+ return base::MakeUnique<Value>(num_int);
double num_double;
if (StringToDouble(num_string.as_string(), &num_double) &&
std::isfinite(num_double)) {
- return new FundamentalValue(num_double);
+ return base::MakeUnique<Value>(num_double);
}
return nullptr;
}
bool JSONParser::ReadInt(bool allow_leading_zeros) {
- char first = *pos_;
- int len = 0;
+ size_t len = 0;
+ char first = 0;
+
+ while (CanConsume(1)) {
+ if (!IsAsciiDigit(*pos_))
+ break;
+
+ if (len == 0)
+ first = *pos_;
- char c = first;
- while (CanConsume(1) && IsAsciiDigit(c)) {
- c = *NextChar();
++len;
+ NextChar();
}
if (len == 0)
@@ -922,7 +771,7 @@ bool JSONParser::ReadInt(bool allow_leading_zeros) {
return true;
}
-Value* JSONParser::ConsumeLiteral() {
+std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
switch (*pos_) {
case 't': {
const char kTrueLiteral[] = "true";
@@ -933,7 +782,7 @@ Value* JSONParser::ConsumeLiteral() {
return nullptr;
}
NextNChars(kTrueLen - 1);
- return new FundamentalValue(true);
+ return base::MakeUnique<Value>(true);
}
case 'f': {
const char kFalseLiteral[] = "false";
@@ -944,7 +793,7 @@ Value* JSONParser::ConsumeLiteral() {
return nullptr;
}
NextNChars(kFalseLen - 1);
- return new FundamentalValue(false);
+ return base::MakeUnique<Value>(false);
}
case 'n': {
const char kNullLiteral[] = "null";
@@ -955,7 +804,7 @@ Value* JSONParser::ConsumeLiteral() {
return nullptr;
}
NextNChars(kNullLen - 1);
- return Value::CreateNullValue().release();
+ return Value::CreateNullValue();
}
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
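The ConsumeNumber() changes above all enforce one rule: check CanConsume(1) before dereferencing pos_, so truncated inputs such as "2." or "6e" at the very end of the buffer fail cleanly instead of reading past it. A freestanding sketch of the check (HasOptionalFraction is hypothetical):

// Equivalent to the parser's CanConsume(1) && *pos_ == '.' guard.
bool HasOptionalFraction(const char* pos, const char* end) {
  return (end - pos) >= 1 && *pos == '.';
}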
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
index 7539fa99ca..4f26458363 100644
--- a/base/json/json_parser.h
+++ b/base/json/json_parser.h
@@ -16,6 +16,7 @@
#include "base/gtest_prod_util.h"
#include "base/json/json_reader.h"
#include "base/macros.h"
+#include "base/memory/manual_constructor.h"
#include "base/strings/string_piece.h"
namespace base {
@@ -30,7 +31,7 @@ class JSONParserTest;
// to be used directly; it encapsulates logic that need not be exposed publicly.
//
// This parser guarantees O(n) time through the input string. It also optimizes
-// base::StringValue by using StringPiece where possible when returning Value
+// base::Value by using StringPiece where possible when returning Value
// objects by using "hidden roots," discussed in the implementation.
//
// Iteration happens on the byte level, with the functions CanConsume and
@@ -93,7 +94,7 @@ class BASE_EXPORT JSONParser {
// This class centralizes that logic.
class StringBuilder {
public:
- // Empty constructor. Used for creating a builder with which to Swap().
+ // Empty constructor. Used for creating a builder to assign to.
StringBuilder();
// |pos| is the beginning of an input string, excluding the |"|.
@@ -101,8 +102,7 @@ class BASE_EXPORT JSONParser {
~StringBuilder();
- // Swaps the contents of |other| with this.
- void Swap(StringBuilder* other);
+ void operator=(StringBuilder&& other);
// Either increases the |length_| of the string or copies the character if
// the StringBuilder has been converted. |c| must be in the basic ASCII
@@ -111,23 +111,24 @@ class BASE_EXPORT JSONParser {
void Append(const char& c);
// Appends a string to the std::string. Must be Convert()ed to use.
- void AppendString(const std::string& str);
+ void AppendString(const char* str, size_t len);
// Converts the builder from its default StringPiece to a full std::string,
// performing a copy. Once a builder is converted, it cannot be made a
// StringPiece again.
void Convert();
- // Returns whether the builder can be converted to a StringPiece.
- bool CanBeStringPiece() const;
-
- // Returns the StringPiece representation. Returns an empty piece if it
- // cannot be converted.
+ // Returns the builder as a StringPiece.
StringPiece AsStringPiece();
// Returns the builder as a std::string.
const std::string& AsString();
+ // Returns the builder as a string, invalidating all state. This allows
+ // the internal string buffer representation to be destructively moved
+ // in cases where the builder will not be needed any more.
+ std::string DestructiveAsString();
+
private:
// The beginning of the input string.
const char* pos_;
@@ -135,9 +136,10 @@ class BASE_EXPORT JSONParser {
// Number of bytes in |pos_| that make up the string being built.
size_t length_;
- // The copied string representation. NULL until Convert() is called.
- // Strong. std::unique_ptr<T> has too much of an overhead here.
- std::string* string_;
+ // The copied string representation. Will be uninitialized until Convert()
+ // is called, which will set has_string_ to true.
+ bool has_string_;
+ base::ManualConstructor<std::string> string_;
};
// Quick check that the stream has capacity to consume |length| more bytes.
@@ -161,28 +163,27 @@ class BASE_EXPORT JSONParser {
// currently wound to a '/'.
bool EatComment();
- // Calls GetNextToken() and then ParseToken(). Caller owns the result.
- Value* ParseNextToken();
+ // Calls GetNextToken() and then ParseToken().
+ std::unique_ptr<Value> ParseNextToken();
// Takes a token that represents the start of a Value ("a structural token"
- // in RFC terms) and consumes it, returning the result as an object the
- // caller owns.
- Value* ParseToken(Token token);
+ // in RFC terms) and consumes it, returning the result as a Value.
+ std::unique_ptr<Value> ParseToken(Token token);
// Assuming that the parser is currently wound to '{', this parses a JSON
// object into a DictionaryValue.
- Value* ConsumeDictionary();
+ std::unique_ptr<Value> ConsumeDictionary();
// Assuming that the parser is wound to '[', this parses a JSON list into a
- // ListValue.
- Value* ConsumeList();
+ // std::unique_ptr<ListValue>.
+ std::unique_ptr<Value> ConsumeList();
// Calls through ConsumeStringRaw and wraps it in a value.
- Value* ConsumeString();
+ std::unique_ptr<Value> ConsumeString();
// Assuming that the parser is wound to a double quote, this parses a string,
// decoding any escape sequences and converts UTF-16 to UTF-8. Returns true on
- // success and Swap()s the result into |out|. Returns false on failure with
+ // success and places the result into |out|. Returns false on failure with
// error information set.
bool ConsumeStringRaw(StringBuilder* out);
// Helper function for ConsumeStringRaw() that consumes the next four or 10
@@ -198,14 +199,14 @@ class BASE_EXPORT JSONParser {
// Assuming that the parser is wound to the start of a valid JSON number,
// this parses and converts it to either an int or double value.
- Value* ConsumeNumber();
+ std::unique_ptr<Value> ConsumeNumber();
// Helper that reads characters that are ints. Returns true if a number was
// read and false on error.
bool ReadInt(bool allow_leading_zeros);
// Consumes the literal values of |true|, |false|, and |null|, assuming the
// parser is wound to the first character of any of those.
- Value* ConsumeLiteral();
+ std::unique_ptr<Value> ConsumeLiteral();
// Compares two string buffers of a given length.
static bool StringsAreEqual(const char* left, const char* right, size_t len);
@@ -258,10 +259,14 @@ class BASE_EXPORT JSONParser {
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidCharacters);
DISALLOW_COPY_AND_ASSIGN(JSONParser);
};
+// Used when an invalid UTF-8 sequence is encountered while decoding.
+BASE_EXPORT extern const char kUnicodeReplacementString[];
+
} // namespace internal
} // namespace base
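StringBuilder now keeps a (pointer, length) view of the input and lazily constructs a std::string through ManualConstructor only once Convert() is called. A simplified sketch of the copy-on-write idea, using plain members instead of ManualConstructor (LazyString is hypothetical):

#include <cstddef>
#include <string>

#include "base/strings/string_piece.h"

class LazyString {
 public:
  explicit LazyString(const char* pos) : pos_(pos) {}

  // Cheap path: extend the view over the original input.
  void Extend() { ++length_; }

  // Escape sequence seen: materialize a mutable copy, exactly once.
  void Convert() {
    if (!has_string_) {
      string_.assign(pos_, length_);
      has_string_ = true;
    }
  }

  base::StringPiece View() const {
    return has_string_ ? base::StringPiece(string_)
                       : base::StringPiece(pos_, length_);
  }

 private:
  const char* pos_;
  size_t length_ = 0;
  bool has_string_ = false;
  std::string string_;
};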
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
index 30255ca461..e3f635b76f 100644
--- a/base/json/json_parser_unittest.cc
+++ b/base/json/json_parser_unittest.cc
@@ -9,6 +9,8 @@
#include <memory>
#include "base/json/json_reader.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -17,8 +19,9 @@ namespace internal {
class JSONParserTest : public testing::Test {
public:
- JSONParser* NewTestParser(const std::string& input) {
- JSONParser* parser = new JSONParser(JSON_PARSE_RFC);
+ JSONParser* NewTestParser(const std::string& input,
+ int options = JSON_PARSE_RFC) {
+ JSONParser* parser = new JSONParser(options);
parser->start_pos_ = input.data();
parser->pos_ = parser->start_pos_;
parser->end_pos_ = parser->start_pos_ + input.length();
@@ -105,7 +108,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
// Literal |false|.
input = "false,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeLiteral());
+ value = parser->ConsumeLiteral();
EXPECT_EQ('e', *parser->pos_);
TestLastThree(parser.get());
@@ -117,13 +120,13 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
// Literal |null|.
input = "null,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeLiteral());
+ value = parser->ConsumeLiteral();
EXPECT_EQ('l', *parser->pos_);
TestLastThree(parser.get());
ASSERT_TRUE(value.get());
- EXPECT_TRUE(value->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(value->IsType(Value::Type::NONE));
}
TEST_F(JSONParserTest, ConsumeNumbers) {
@@ -143,7 +146,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
// Negative integer.
input = "-1234,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeNumber());
+ value = parser->ConsumeNumber();
EXPECT_EQ('4', *parser->pos_);
TestLastThree(parser.get());
@@ -155,7 +158,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
// Double.
input = "12.34,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeNumber());
+ value = parser->ConsumeNumber();
EXPECT_EQ('4', *parser->pos_);
TestLastThree(parser.get());
@@ -168,7 +171,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
// Scientific.
input = "42e3,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeNumber());
+ value = parser->ConsumeNumber();
EXPECT_EQ('3', *parser->pos_);
TestLastThree(parser.get());
@@ -180,7 +183,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
// Negative scientific.
input = "314159e-5,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeNumber());
+ value = parser->ConsumeNumber();
EXPECT_EQ('5', *parser->pos_);
TestLastThree(parser.get());
@@ -192,7 +195,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
// Positive scientific.
input = "0.42e+3,|";
parser.reset(NewTestParser(input));
- value.reset(parser->ConsumeNumber());
+ value = parser->ConsumeNumber();
EXPECT_EQ('3', *parser->pos_);
TestLastThree(parser.get());
@@ -243,14 +246,14 @@ TEST_F(JSONParserTest, ErrorMessages) {
EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, error_code);
std::string nested_json;
- for (int i = 0; i < 101; ++i) {
+ for (int i = 0; i < 201; ++i) {
nested_json.insert(nested_json.begin(), '[');
nested_json.append(1, ']');
}
root = JSONReader::ReadAndReturnError(nested_json, JSON_PARSE_RFC,
&error_code, &error_message);
EXPECT_FALSE(root.get());
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 100, JSONReader::kTooMuchNesting),
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 200, JSONReader::kTooMuchNesting),
error_message);
EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, error_code);
@@ -323,5 +326,69 @@ TEST_F(JSONParserTest, DecodeUnicodeNonCharacter) {
EXPECT_FALSE(JSONReader::Read("[\"\\ud83f\\udffe\"]"));
}
+TEST_F(JSONParserTest, DecodeNegativeEscapeSequence) {
+ EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]"));
+ EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]"));
+}
+
+// Verifies that invalid UTF-8 characters are replaced.
+TEST_F(JSONParserTest, ReplaceInvalidCharacters) {
+ const std::string bogus_char = "\xF3\xBF\xBF\xBF";  // U+FFFFF, an invalid code point.
+ const std::string quoted_bogus_char = "\"" + bogus_char + "\"";
+ std::unique_ptr<JSONParser> parser(
+ NewTestParser(quoted_bogus_char, JSON_REPLACE_INVALID_CHARACTERS));
+ std::unique_ptr<Value> value(parser->ConsumeString());
+ ASSERT_TRUE(value.get());
+ std::string str;
+ EXPECT_TRUE(value->GetAsString(&str));
+ EXPECT_EQ(kUnicodeReplacementString, str);
+}
+
+TEST_F(JSONParserTest, ParseNumberErrors) {
+ const struct {
+ const char* input;
+ bool parse_success;
+ double value;
+ } kCases[] = {
+ // clang-format off
+ {"1", true, 1},
+ {"2.", false, 0},
+ {"42", true, 42},
+ {"6e", false, 0},
+ {"43e2", true, 4300},
+ {"43e-", false, 0},
+ {"9e-3", true, 0.009},
+ {"2e+", false, 0},
+ {"2e+2", true, 200},
+ // clang-format on
+ };
+
+ for (unsigned int i = 0; i < arraysize(kCases); ++i) {
+ auto test_case = kCases[i];
+ SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input));
+
+ // MSan will do a better job detecting over-read errors if the input is
+ // not nul-terminated on the heap.
+ size_t str_len = strlen(test_case.input);
+ auto non_nul_terminated = MakeUnique<char[]>(str_len);
+ memcpy(non_nul_terminated.get(), test_case.input, str_len);
+
+ StringPiece string_piece(non_nul_terminated.get(), str_len);
+ std::unique_ptr<Value> result = JSONReader::Read(string_piece);
+ if (test_case.parse_success) {
+ EXPECT_TRUE(result);
+ } else {
+ EXPECT_FALSE(result);
+ }
+
+ if (!result)
+ continue;
+
+ double double_value = 0;
+ EXPECT_TRUE(result->GetAsDouble(&double_value));
+ EXPECT_EQ(test_case.value, double_value);
+ }
+}
+
} // namespace internal
} // namespace base
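The ParseNumberErrors test above copies each input into an exactly-sized heap buffer with no trailing NUL, so that under MSan any read past the end is reported as an over-read rather than silently stopping at a terminator. A sketch of the same harness (ParseUnterminated is hypothetical):

#include <cstring>
#include <memory>

#include "base/json/json_reader.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_piece.h"
#include "base/values.h"

std::unique_ptr<base::Value> ParseUnterminated(const char* input) {
  size_t len = strlen(input);
  auto buf = base::MakeUnique<char[]>(len);  // Deliberately no '\0'.
  memcpy(buf.get(), input, len);
  return base::JSONReader::Read(base::StringPiece(buf.get(), len));
}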
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index a954821a28..a39b37adeb 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -55,6 +55,11 @@ enum JSONParserOptions {
// if the child is Remove()d from root, it would result in use-after-free
// unless it is DeepCopy()ed or this option is used.
JSON_DETACHABLE_CHILDREN = 1 << 1,
+
+ // If set the parser replaces invalid characters with the Unicode replacement
+ // character (U+FFFD). If not set, invalid characters trigger a hard error and
+ // parsing fails.
+ JSON_REPLACE_INVALID_CHARACTERS = 1 << 2,
};
class BASE_EXPORT JSONReader {
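A short usage sketch for the new flag (LenientParse is hypothetical): with the bit set, input containing invalid UTF-8 still parses, and each offending sequence comes back as U+FFFD.

#include <memory>

#include "base/json/json_reader.h"
#include "base/strings/string_piece.h"
#include "base/values.h"

std::unique_ptr<base::Value> LenientParse(base::StringPiece json) {
  return base::JSONReader::Read(json, base::JSON_REPLACE_INVALID_CHARACTERS);
}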
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index 84732c4d75..1344de6391 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -29,7 +29,7 @@ TEST(JSONReaderTest, Reading) {
// some whitespace checking
std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
}
{
@@ -41,23 +41,23 @@ TEST(JSONReaderTest, Reading) {
// Simple bool
std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
}
{
// Embedded comment
std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
root = JSONReader().ReadToValue("40 /* comment */");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
root = JSONReader().ReadToValue("true // comment");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
root = JSONReader().ReadToValue("/* comment */\"sample string\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string value;
EXPECT_TRUE(root->GetAsString(&value));
EXPECT_EQ("sample string", value);
@@ -75,7 +75,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(3u, list->GetSize());
root = JSONReader().ReadToValue("/* comment **/42");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(42, int_val);
root = JSONReader().ReadToValue(
@@ -83,7 +83,7 @@ TEST(JSONReaderTest, Reading) {
"// */ 43\n"
"44");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(44, int_val);
}
@@ -92,7 +92,7 @@ TEST(JSONReaderTest, Reading) {
// Test number formats
std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
int int_val = 0;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(43, int_val);
@@ -110,7 +110,7 @@ TEST(JSONReaderTest, Reading) {
// clause).
std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
int int_val = 1;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(0, int_val);
@@ -122,13 +122,13 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
ASSERT_TRUE(root);
double double_val;
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2147483648.0, double_val);
root = JSONReader().ReadToValue("-2147483649");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
@@ -138,42 +138,42 @@ TEST(JSONReaderTest, Reading) {
// Parse a double
std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(43.1, double_val);
root = JSONReader().ReadToValue("4.3e-1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(.43, double_val);
root = JSONReader().ReadToValue("2.1e0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2.1, double_val);
root = JSONReader().ReadToValue("2.1e+0001");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(21.0, double_val);
root = JSONReader().ReadToValue("0.01");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(0.01, double_val);
root = JSONReader().ReadToValue("1.00");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(1.0, double_val);
@@ -213,7 +213,7 @@ TEST(JSONReaderTest, Reading) {
// Test string parser
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("hello world", str_val);
@@ -223,7 +223,7 @@ TEST(JSONReaderTest, Reading) {
// Empty string
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("", str_val);
@@ -234,7 +234,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
@@ -245,7 +245,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
@@ -319,7 +319,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(1U, list->GetSize());
Value* tmp_value = nullptr;
ASSERT_TRUE(list->Get(0, &tmp_value));
- EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(tmp_value->IsType(Value::Type::BOOLEAN));
bool bool_value = false;
EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
EXPECT_TRUE(bool_value);
@@ -348,7 +348,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_DOUBLE_EQ(9.87654321, double_val);
Value* null_val = nullptr;
ASSERT_TRUE(dict_val->Get("null", &null_val));
- EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(null_val->IsType(Value::Type::NONE));
std::string str_val;
EXPECT_TRUE(dict_val->GetString("S", &str_val));
EXPECT_EQ("str", str_val);
@@ -486,7 +486,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
@@ -510,7 +510,7 @@ TEST(JSONReaderTest, Reading) {
// Test utf16 encoded strings.
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(
@@ -520,7 +520,7 @@ TEST(JSONReaderTest, Reading) {
root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
str_val.clear();
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
@@ -548,7 +548,7 @@ TEST(JSONReaderTest, Reading) {
{
// Test literal root objects.
std::unique_ptr<Value> root = JSONReader::Read("null");
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
root = JSONReader::Read("true");
ASSERT_TRUE(root);
@@ -583,7 +583,7 @@ TEST(JSONReaderTest, ReadFromFile) {
JSONReader reader;
std::unique_ptr<Value> root(reader.ReadToValue(input));
ASSERT_TRUE(root) << reader.GetErrorMessage();
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+ EXPECT_TRUE(root->IsType(Value::Type::DICTIONARY));
}
#endif // !__ANDROID__ && !__ANDROID_HOST__
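Editor's note: for readers skimming the repetitive hunks above, the uprev replaces the unscoped TYPE_* enumerators with a scoped Value::Type enum, and renames the null case. A self-contained sketch:

    #include <memory>

    #include "base/json/json_reader.h"
    #include "base/values.h"

    std::unique_ptr<base::Value> root = base::JSONReader::Read("null");
    // TYPE_NULL became Type::NONE; the other enumerators kept their names
    // but moved into the scoped Value::Type enum.
    bool is_null = root->IsType(base::Value::Type::NONE);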
diff --git a/base/json/json_string_value_serializer.cc b/base/json/json_string_value_serializer.cc
index cd786db9e7..2e46ab387a 100644
--- a/base/json/json_string_value_serializer.cc
+++ b/base/json/json_string_value_serializer.cc
@@ -41,18 +41,15 @@ bool JSONStringValueSerializer::SerializeInternal(const Value& root,
}
JSONStringValueDeserializer::JSONStringValueDeserializer(
- const base::StringPiece& json_string)
- : json_string_(json_string),
- allow_trailing_comma_(false) {
-}
+ const base::StringPiece& json_string,
+ int options)
+ : json_string_(json_string), options_(options) {}
JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
int* error_code,
std::string* error_str) {
- return base::JSONReader::ReadAndReturnError(
- json_string_, allow_trailing_comma_ ? base::JSON_ALLOW_TRAILING_COMMAS
- : base::JSON_PARSE_RFC,
- error_code, error_str);
+ return base::JSONReader::ReadAndReturnError(json_string_, options_,
+ error_code, error_str);
}
diff --git a/base/json/json_string_value_serializer.h b/base/json/json_string_value_serializer.h
index a97da23920..55a53e207f 100644
--- a/base/json/json_string_value_serializer.h
+++ b/base/json/json_string_value_serializer.h
@@ -47,8 +47,10 @@ class BASE_EXPORT JSONStringValueSerializer : public base::ValueSerializer {
class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
public:
// This retains a reference to the contents of |json_string|, so the data
- // must outlive the JSONStringValueDeserializer.
- explicit JSONStringValueDeserializer(const base::StringPiece& json_string);
+ // must outlive the JSONStringValueDeserializer. |options| is a bitmask of
+ // JSONParserOptions.
+ explicit JSONStringValueDeserializer(const base::StringPiece& json_string,
+ int options = 0);
~JSONStringValueDeserializer() override;
@@ -62,15 +64,10 @@ class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
std::unique_ptr<base::Value> Deserialize(int* error_code,
std::string* error_message) override;
- void set_allow_trailing_comma(bool new_value) {
- allow_trailing_comma_ = new_value;
- }
-
private:
// Data is owned by the caller of the constructor.
base::StringPiece json_string_;
- // If true, deserialization will allow trailing commas.
- bool allow_trailing_comma_;
+ const int options_;
DISALLOW_COPY_AND_ASSIGN(JSONStringValueDeserializer);
};
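Editor's note: a sketch of the resulting call-site pattern — options are now fixed at construction instead of toggled later through set_allow_trailing_comma():

    #include <memory>
    #include <string>

    #include "base/json/json_reader.h"
    #include "base/json/json_string_value_serializer.h"
    #include "base/values.h"

    // The options bitmask is passed once, at construction.
    JSONStringValueDeserializer deserializer("{\"key\": [true,],}",
                                             base::JSON_ALLOW_TRAILING_COMMAS);
    int error_code = 0;
    std::string error_message;
    std::unique_ptr<base::Value> value =
        deserializer.Deserialize(&error_code, &error_message);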
diff --git a/base/json/json_value_converter.h b/base/json/json_value_converter.h
index 4cca034f33..68ebfa23de 100644
--- a/base/json/json_value_converter.h
+++ b/base/json/json_value_converter.h
@@ -14,8 +14,7 @@
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/scoped_vector.h"
-#include "base/stl_util.h"
+#include "base/memory/ptr_util.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
#include "base/values.h"
@@ -66,9 +65,9 @@
// }
// };
//
-// For repeated field, we just assume ScopedVector for its container
-// and you can put RegisterRepeatedInt or some other types. Use
-// RegisterRepeatedMessage for nested repeated fields.
+// For repeated fields, std::vector<std::unique_ptr<ElementType>> is assumed
+// as the container; register them with RegisterRepeatedInt or the other
+// RegisterRepeated* methods. Use RegisterRepeatedMessage for nested messages.
//
// Sometimes a JSON format uses string representations for other types such
// as enum, timestamp, or URL. You can use the RegisterCustomField method
@@ -200,7 +199,7 @@ class ValueFieldConverter : public ValueConverter<FieldType> {
public:
typedef bool(*ConvertFunc)(const base::Value* value, FieldType* field);
- ValueFieldConverter(ConvertFunc convert_func)
+ explicit ValueFieldConverter(ConvertFunc convert_func)
: convert_func_(convert_func) {}
bool Convert(const base::Value& value, FieldType* field) const override {
@@ -218,7 +217,7 @@ class CustomFieldConverter : public ValueConverter<FieldType> {
public:
typedef bool(*ConvertFunc)(const StringPiece& value, FieldType* field);
- CustomFieldConverter(ConvertFunc convert_func)
+ explicit CustomFieldConverter(ConvertFunc convert_func)
: convert_func_(convert_func) {}
bool Convert(const base::Value& value, FieldType* field) const override {
@@ -248,12 +247,13 @@ class NestedValueConverter : public ValueConverter<NestedType> {
};
template <typename Element>
-class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
+class RepeatedValueConverter
+ : public ValueConverter<std::vector<std::unique_ptr<Element>>> {
public:
RepeatedValueConverter() {}
bool Convert(const base::Value& value,
- ScopedVector<Element>* field) const override {
+ std::vector<std::unique_ptr<Element>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list)) {
// The field is not a list.
@@ -268,7 +268,7 @@ class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
std::unique_ptr<Element> e(new Element);
if (basic_converter_.Convert(*element, e.get())) {
- field->push_back(e.release());
+ field->push_back(std::move(e));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -284,12 +284,12 @@ class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
template <typename NestedType>
class RepeatedMessageConverter
- : public ValueConverter<ScopedVector<NestedType> > {
+ : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
public:
RepeatedMessageConverter() {}
bool Convert(const base::Value& value,
- ScopedVector<NestedType>* field) const override {
+ std::vector<std::unique_ptr<NestedType>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list))
return false;
@@ -302,7 +302,7 @@ class RepeatedMessageConverter
std::unique_ptr<NestedType> nested(new NestedType);
if (converter_.Convert(*element, nested.get())) {
- field->push_back(nested.release());
+ field->push_back(std::move(nested));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -318,15 +318,15 @@ class RepeatedMessageConverter
template <typename NestedType>
class RepeatedCustomValueConverter
- : public ValueConverter<ScopedVector<NestedType> > {
+ : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
public:
typedef bool(*ConvertFunc)(const base::Value* value, NestedType* field);
- RepeatedCustomValueConverter(ConvertFunc convert_func)
+ explicit RepeatedCustomValueConverter(ConvertFunc convert_func)
: convert_func_(convert_func) {}
bool Convert(const base::Value& value,
- ScopedVector<NestedType>* field) const override {
+ std::vector<std::unique_ptr<NestedType>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list))
return false;
@@ -339,7 +339,7 @@ class RepeatedCustomValueConverter
std::unique_ptr<NestedType> nested(new NestedType);
if ((*convert_func_)(element, nested.get())) {
- field->push_back(nested.release());
+ field->push_back(std::move(nested));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -365,41 +365,42 @@ class JSONValueConverter {
void RegisterIntField(const std::string& field_name,
int StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, int>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, int>>(
field_name, field, new internal::BasicValueConverter<int>));
}
void RegisterStringField(const std::string& field_name,
std::string StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, std::string>(
- field_name, field, new internal::BasicValueConverter<std::string>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, std::string>>(
+ field_name, field, new internal::BasicValueConverter<std::string>));
}
void RegisterStringField(const std::string& field_name,
string16 StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, string16>(
- field_name, field, new internal::BasicValueConverter<string16>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, string16>>(
+ field_name, field, new internal::BasicValueConverter<string16>));
}
void RegisterBoolField(const std::string& field_name,
bool StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, bool>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, bool>>(
field_name, field, new internal::BasicValueConverter<bool>));
}
void RegisterDoubleField(const std::string& field_name,
double StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, double>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, double>>(
field_name, field, new internal::BasicValueConverter<double>));
}
template <class NestedType>
void RegisterNestedField(
const std::string& field_name, NestedType StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, NestedType>(
- field_name,
- field,
- new internal::NestedValueConverter<NestedType>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, NestedType>>(
+ field_name, field, new internal::NestedValueConverter<NestedType>));
}
template <typename FieldType>
@@ -407,10 +408,10 @@ class JSONValueConverter {
const std::string& field_name,
FieldType StructType::* field,
bool (*convert_func)(const StringPiece&, FieldType*)) {
- fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
- field_name,
- field,
- new internal::CustomFieldConverter<FieldType>(convert_func)));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, FieldType>>(
+ field_name, field,
+ new internal::CustomFieldConverter<FieldType>(convert_func)));
}
template <typename FieldType>
@@ -418,71 +419,76 @@ class JSONValueConverter {
const std::string& field_name,
FieldType StructType::* field,
bool (*convert_func)(const base::Value*, FieldType*)) {
- fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
- field_name,
- field,
- new internal::ValueFieldConverter<FieldType>(convert_func)));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, FieldType>>(
+ field_name, field,
+ new internal::ValueFieldConverter<FieldType>(convert_func)));
}
- void RegisterRepeatedInt(const std::string& field_name,
- ScopedVector<int> StructType::* field) {
+ void RegisterRepeatedInt(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<int>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<int> >(
+ MakeUnique<internal::FieldConverter<StructType,
+ std::vector<std::unique_ptr<int>>>>(
field_name, field, new internal::RepeatedValueConverter<int>));
}
- void RegisterRepeatedString(const std::string& field_name,
- ScopedVector<std::string> StructType::* field) {
+ void RegisterRepeatedString(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<std::string>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<std::string> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<std::string>>>>(
+ field_name, field,
new internal::RepeatedValueConverter<std::string>));
}
- void RegisterRepeatedString(const std::string& field_name,
- ScopedVector<string16> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<string16> >(
- field_name,
- field,
- new internal::RepeatedValueConverter<string16>));
+ void RegisterRepeatedString(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<string16>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<string16>>>>(
+ field_name, field, new internal::RepeatedValueConverter<string16>));
}
- void RegisterRepeatedDouble(const std::string& field_name,
- ScopedVector<double> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<double> >(
- field_name, field, new internal::RepeatedValueConverter<double>));
+ void RegisterRepeatedDouble(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<double>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<double>>>>(
+ field_name, field, new internal::RepeatedValueConverter<double>));
}
- void RegisterRepeatedBool(const std::string& field_name,
- ScopedVector<bool> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<bool> >(
- field_name, field, new internal::RepeatedValueConverter<bool>));
+ void RegisterRepeatedBool(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<bool>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<bool>>>>(
+ field_name, field, new internal::RepeatedValueConverter<bool>));
}
template <class NestedType>
void RegisterRepeatedCustomValue(
const std::string& field_name,
- ScopedVector<NestedType> StructType::* field,
+ std::vector<std::unique_ptr<NestedType>> StructType::*field,
bool (*convert_func)(const base::Value*, NestedType*)) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<NestedType>>>>(
+ field_name, field,
new internal::RepeatedCustomValueConverter<NestedType>(
convert_func)));
}
template <class NestedType>
- void RegisterRepeatedMessage(const std::string& field_name,
- ScopedVector<NestedType> StructType::* field) {
+ void RegisterRepeatedMessage(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<NestedType>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<NestedType>>>>(
+ field_name, field,
new internal::RepeatedMessageConverter<NestedType>));
}
@@ -491,9 +497,9 @@ class JSONValueConverter {
if (!value.GetAsDictionary(&dictionary_value))
return false;
- for(size_t i = 0; i < fields_.size(); ++i) {
+ for (size_t i = 0; i < fields_.size(); ++i) {
const internal::FieldConverterBase<StructType>* field_converter =
- fields_[i];
+ fields_[i].get();
const base::Value* field = NULL;
if (dictionary_value->Get(field_converter->field_path(), &field)) {
if (!field_converter->ConvertField(*field, output)) {
@@ -506,7 +512,8 @@ class JSONValueConverter {
}
private:
- ScopedVector<internal::FieldConverterBase<StructType> > fields_;
+ std::vector<std::unique_ptr<internal::FieldConverterBase<StructType>>>
+ fields_;
DISALLOW_COPY_AND_ASSIGN(JSONValueConverter);
};
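Editor's note: a hedged sketch of what a converted message struct looks like after the ScopedVector-to-std::vector<std::unique_ptr<...>> migration. Message and its fields are illustrative names; the Register* calls are the ones rewritten in the hunks above, and RegisterJSONConverter is the conventional hook this header expects:

    #include <memory>
    #include <string>
    #include <vector>

    #include "base/json/json_value_converter.h"

    struct Message {
      int id = 0;
      // Previously ScopedVector<std::string>.
      std::vector<std::unique_ptr<std::string>> tags;

      static void RegisterJSONConverter(
          base::JSONValueConverter<Message>* converter) {
        converter->RegisterIntField("id", &Message::id);
        converter->RegisterRepeatedString("tags", &Message::tags);
      }
    };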
diff --git a/base/json/json_value_converter_unittest.cc b/base/json/json_value_converter_unittest.cc
index 56ade24ac3..6a603d3a92 100644
--- a/base/json/json_value_converter_unittest.cc
+++ b/base/json/json_value_converter_unittest.cc
@@ -9,7 +9,6 @@
#include <vector>
#include "base/json/json_reader.h"
-#include "base/memory/scoped_vector.h"
#include "base/strings/string_piece.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -27,8 +26,8 @@ struct SimpleMessage {
bool baz;
bool bstruct;
SimpleEnum simple_enum;
- ScopedVector<int> ints;
- ScopedVector<std::string> string_values;
+ std::vector<std::unique_ptr<int>> ints;
+ std::vector<std::unique_ptr<std::string>> string_values;
SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
static bool ParseSimpleEnum(const StringPiece& value, SimpleEnum* field) {
@@ -80,7 +79,7 @@ struct SimpleMessage {
struct NestedMessage {
double foo;
SimpleMessage child;
- ScopedVector<SimpleMessage> children;
+ std::vector<std::unique_ptr<SimpleMessage>> children;
NestedMessage() : foo(0) {}
@@ -163,7 +162,7 @@ TEST(JSONValueConverterTest, ParseNestedMessage) {
EXPECT_EQ("value_2", *message.child.string_values[1]);
EXPECT_EQ(2, static_cast<int>(message.children.size()));
- const SimpleMessage* first_child = message.children[0];
+ const SimpleMessage* first_child = message.children[0].get();
ASSERT_TRUE(first_child);
EXPECT_EQ(2, first_child->foo);
EXPECT_EQ("foobar", first_child->bar);
@@ -172,7 +171,7 @@ TEST(JSONValueConverterTest, ParseNestedMessage) {
ASSERT_EQ(1U, first_child->string_values.size());
EXPECT_EQ("value_1", *first_child->string_values[0]);
- const SimpleMessage* second_child = message.children[1];
+ const SimpleMessage* second_child = message.children[1].get();
ASSERT_TRUE(second_child);
EXPECT_EQ(3, second_child->foo);
EXPECT_EQ("barbaz", second_child->bar);
diff --git a/base/json/json_value_serializer_unittest.cc b/base/json/json_value_serializer_unittest.cc
index 0c079b7623..1d58c61e04 100644
--- a/base/json/json_value_serializer_unittest.cc
+++ b/base/json/json_value_serializer_unittest.cc
@@ -78,11 +78,10 @@ void CheckJSONIsStillTheSame(const Value& value) {
}
void ValidateJsonList(const std::string& json) {
- std::unique_ptr<Value> root = JSONReader::Read(json);
- ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
- ListValue* list = static_cast<ListValue*>(root.get());
+ std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read(json));
+ ASSERT_TRUE(list);
ASSERT_EQ(1U, list->GetSize());
- Value* elt = NULL;
+ Value* elt = nullptr;
ASSERT_TRUE(list->Get(0, &elt));
int value = 0;
ASSERT_TRUE(elt && elt->GetAsInteger(&value));
@@ -98,7 +97,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromString) {
std::string error_message;
std::unique_ptr<Value> value =
str_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
ASSERT_EQ(0, error_code);
ASSERT_TRUE(error_message.empty());
// Verify if the same JSON is still there.
@@ -109,7 +108,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromString) {
TEST(JSONValueDeserializerTest, ReadProperJSONFromStringPiece) {
// Create a StringPiece for the substring of kProperJSONPadded that matches
// kProperJSON.
- base::StringPiece proper_json(kProperJSONPadded);
+ StringPiece proper_json(kProperJSONPadded);
proper_json = proper_json.substr(5, proper_json.length() - 10);
JSONStringValueDeserializer str_deserializer(proper_json);
@@ -117,7 +116,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromStringPiece) {
std::string error_message;
std::unique_ptr<Value> value =
str_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
ASSERT_EQ(0, error_code);
ASSERT_TRUE(error_message.empty());
// Verify if the same JSON is still there.
@@ -134,13 +133,14 @@ TEST(JSONValueDeserializerTest, ReadJSONWithTrailingCommasFromString) {
std::string error_message;
std::unique_ptr<Value> value =
str_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_FALSE(value.get());
+ ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Now the flag is set and it must pass.
- str_deserializer.set_allow_trailing_comma(true);
- value = str_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_TRUE(value.get());
+ // Repeat with commas allowed.
+ JSONStringValueDeserializer str_deserializer2(kProperJSONWithCommas,
+ JSON_ALLOW_TRAILING_COMMAS);
+ value = str_deserializer2.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value);
ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
// Verify if the same JSON is still there.
CheckJSONIsStillTheSame(*value);
@@ -151,7 +151,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromFile) {
ScopedTempDir tempdir;
ASSERT_TRUE(tempdir.CreateUniqueTempDir());
// Write it down in the file.
- FilePath temp_file(tempdir.path().AppendASCII("test.json"));
+ FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
ASSERT_EQ(static_cast<int>(strlen(kProperJSON)),
WriteFile(temp_file, kProperJSON, strlen(kProperJSON)));
@@ -162,7 +162,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromFile) {
std::string error_message;
std::unique_ptr<Value> value =
file_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
ASSERT_EQ(0, error_code);
ASSERT_TRUE(error_message.empty());
// Verify if the same JSON is still there.
@@ -175,7 +175,7 @@ TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
ScopedTempDir tempdir;
ASSERT_TRUE(tempdir.CreateUniqueTempDir());
// Write it down in the file.
- FilePath temp_file(tempdir.path().AppendASCII("test.json"));
+ FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
ASSERT_EQ(static_cast<int>(strlen(kProperJSONWithCommas)),
WriteFile(temp_file, kProperJSONWithCommas,
strlen(kProperJSONWithCommas)));
@@ -187,31 +187,31 @@ TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
std::string error_message;
std::unique_ptr<Value> value =
file_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_FALSE(value.get());
+ ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Now the flag is set and it must pass.
- file_deserializer.set_allow_trailing_comma(true);
- value = file_deserializer.Deserialize(&error_code, &error_message);
- ASSERT_TRUE(value.get());
+ // Repeat with commas allowed.
+ JSONFileValueDeserializer file_deserializer2(temp_file,
+ JSON_ALLOW_TRAILING_COMMAS);
+ value = file_deserializer2.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value);
ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
// Verify if the same JSON is still there.
CheckJSONIsStillTheSame(*value);
}
TEST(JSONValueDeserializerTest, AllowTrailingComma) {
- std::unique_ptr<Value> root;
- std::unique_ptr<Value> root_expected;
static const char kTestWithCommas[] = "{\"key\": [true,],}";
static const char kTestNoCommas[] = "{\"key\": [true]}";
- JSONStringValueDeserializer deserializer(kTestWithCommas);
- deserializer.set_allow_trailing_comma(true);
+ JSONStringValueDeserializer deserializer(kTestWithCommas,
+ JSON_ALLOW_TRAILING_COMMAS);
JSONStringValueDeserializer deserializer_expected(kTestNoCommas);
- root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(root.get());
- root_expected = deserializer_expected.Deserialize(NULL, NULL);
- ASSERT_TRUE(root_expected.get());
+ std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(root);
+ std::unique_ptr<Value> root_expected;
+ root_expected = deserializer_expected.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(root_expected);
ASSERT_TRUE(root->Equals(root_expected.get()));
}
@@ -219,16 +219,14 @@ TEST(JSONValueSerializerTest, Roundtrip) {
static const char kOriginalSerialization[] =
"{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
JSONStringValueDeserializer deserializer(kOriginalSerialization);
- std::unique_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(root.get());
- ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-
- DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
+ std::unique_ptr<DictionaryValue> root_dict =
+ DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
+ ASSERT_TRUE(root_dict);
- Value* null_value = NULL;
+ Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -329,8 +327,9 @@ TEST(JSONValueSerializerTest, UnicodeStrings) {
// escaped ascii text -> json
JSONStringValueDeserializer deserializer(kExpected);
- std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(deserial_root.get());
+ std::unique_ptr<Value> deserial_root =
+ deserializer.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(deserial_root);
DictionaryValue* dict_root =
static_cast<DictionaryValue*>(deserial_root.get());
string16 web_value;
@@ -353,8 +352,9 @@ TEST(JSONValueSerializerTest, HexStrings) {
// escaped ascii text -> json
JSONStringValueDeserializer deserializer(kExpected);
- std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(deserial_root.get());
+ std::unique_ptr<Value> deserial_root =
+ deserializer.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(deserial_root);
DictionaryValue* dict_root =
static_cast<DictionaryValue*>(deserial_root.get());
string16 test_value;
@@ -364,8 +364,8 @@ TEST(JSONValueSerializerTest, HexStrings) {
// Test converting escaped regular chars
static const char kEscapedChars[] = "{\"test\":\"\\u0067\\u006f\"}";
JSONStringValueDeserializer deserializer2(kEscapedChars);
- deserial_root = deserializer2.Deserialize(NULL, NULL);
- ASSERT_TRUE(deserial_root.get());
+ deserial_root = deserializer2.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(deserial_root);
dict_root = static_cast<DictionaryValue*>(deserial_root.get());
ASSERT_TRUE(dict_root->GetString("test", &test_value));
ASSERT_EQ(ASCIIToUTF16("go"), test_value);
@@ -380,54 +380,48 @@ TEST(JSONValueSerializerTest, JSONReaderComments) {
ValidateJsonList("[ 1 //// ,2\r\n ]");
// It's ok to have a comment in a string.
- std::unique_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
- ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
- ListValue* list = static_cast<ListValue*>(root.get());
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader::Read("[\"// ok\\n /* foo */ \"]"));
+ ASSERT_TRUE(list);
ASSERT_EQ(1U, list->GetSize());
- Value* elt = NULL;
+ Value* elt = nullptr;
ASSERT_TRUE(list->Get(0, &elt));
std::string value;
ASSERT_TRUE(elt && elt->GetAsString(&value));
ASSERT_EQ("// ok\n /* foo */ ", value);
// You can't nest comments.
- root = JSONReader::Read("/* /* inner */ outer */ [ 1 ]");
- ASSERT_FALSE(root.get());
+ ASSERT_FALSE(JSONReader::Read("/* /* inner */ outer */ [ 1 ]"));
// Not a open comment token.
- root = JSONReader::Read("/ * * / [1]");
- ASSERT_FALSE(root.get());
+ ASSERT_FALSE(JSONReader::Read("/ * * / [1]"));
}
#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+
class JSONFileValueSerializerTest : public testing::Test {
protected:
void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }
- base::ScopedTempDir temp_dir_;
+ ScopedTempDir temp_dir_;
};
TEST_F(JSONFileValueSerializerTest, Roundtrip) {
- base::FilePath original_file_path;
+ FilePath original_file_path;
ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
- original_file_path =
- original_file_path.Append(FILE_PATH_LITERAL("serializer_test.json"));
+ original_file_path = original_file_path.AppendASCII("serializer_test.json");
ASSERT_TRUE(PathExists(original_file_path));
JSONFileValueDeserializer deserializer(original_file_path);
- std::unique_ptr<Value> root;
- root = deserializer.Deserialize(NULL, NULL);
+ std::unique_ptr<DictionaryValue> root_dict =
+ DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
+ ASSERT_TRUE(root_dict);
- ASSERT_TRUE(root.get());
- ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-
- DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
-
- Value* null_value = NULL;
+ Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -442,35 +436,34 @@ TEST_F(JSONFileValueSerializerTest, Roundtrip) {
ASSERT_EQ("hello", string_value);
// Now try writing.
- const base::FilePath written_file_path =
- temp_dir_.path().Append(FILE_PATH_LITERAL("test_output.js"));
+ const FilePath written_file_path =
+ temp_dir_.GetPath().AppendASCII("test_output.js");
ASSERT_FALSE(PathExists(written_file_path));
JSONFileValueSerializer serializer(written_file_path);
- ASSERT_TRUE(serializer.Serialize(*root));
+ ASSERT_TRUE(serializer.Serialize(*root_dict));
ASSERT_TRUE(PathExists(written_file_path));
// Now compare file contents.
EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
- EXPECT_TRUE(base::DeleteFile(written_file_path, false));
+ EXPECT_TRUE(DeleteFile(written_file_path, false));
}
TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
- base::FilePath original_file_path;
+ FilePath original_file_path;
ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
- original_file_path = original_file_path.Append(
- FILE_PATH_LITERAL("serializer_nested_test.json"));
+ original_file_path =
+ original_file_path.AppendASCII("serializer_nested_test.json");
ASSERT_TRUE(PathExists(original_file_path));
JSONFileValueDeserializer deserializer(original_file_path);
- std::unique_ptr<Value> root;
- root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(root.get());
+ std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(root);
// Now try writing.
- base::FilePath written_file_path = temp_dir_.path().Append(
- FILE_PATH_LITERAL("test_output.json"));
+ FilePath written_file_path =
+ temp_dir_.GetPath().AppendASCII("test_output.json");
ASSERT_FALSE(PathExists(written_file_path));
JSONFileValueSerializer serializer(written_file_path);
@@ -479,19 +472,18 @@ TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
// Now compare file contents.
EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
- EXPECT_TRUE(base::DeleteFile(written_file_path, false));
+ EXPECT_TRUE(DeleteFile(written_file_path, false));
}
TEST_F(JSONFileValueSerializerTest, NoWhitespace) {
- base::FilePath source_file_path;
+ FilePath source_file_path;
ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &source_file_path));
- source_file_path = source_file_path.Append(
- FILE_PATH_LITERAL("serializer_test_nowhitespace.json"));
+ source_file_path =
+ source_file_path.AppendASCII("serializer_test_nowhitespace.json");
ASSERT_TRUE(PathExists(source_file_path));
JSONFileValueDeserializer deserializer(source_file_path);
- std::unique_ptr<Value> root;
- root = deserializer.Deserialize(NULL, NULL);
- ASSERT_TRUE(root.get());
+ std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+ ASSERT_TRUE(root);
}
#endif // !__ANDROID__ && !__ANDROID_HOST__
diff --git a/base/json/json_writer.cc b/base/json/json_writer.cc
index 0b658eed59..07b9d5091c 100644
--- a/base/json/json_writer.cc
+++ b/base/json/json_writer.cc
@@ -57,12 +57,12 @@ JSONWriter::JSONWriter(int options, std::string* json)
bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
switch (node.GetType()) {
- case Value::TYPE_NULL: {
+ case Value::Type::NONE: {
json_string_->append("null");
return true;
}
- case Value::TYPE_BOOLEAN: {
+ case Value::Type::BOOLEAN: {
bool value;
bool result = node.GetAsBoolean(&value);
DCHECK(result);
@@ -70,7 +70,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_INTEGER: {
+ case Value::Type::INTEGER: {
int value;
bool result = node.GetAsInteger(&value);
DCHECK(result);
@@ -78,7 +78,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_DOUBLE: {
+ case Value::Type::DOUBLE: {
double value;
bool result = node.GetAsDouble(&value);
DCHECK(result);
@@ -110,7 +110,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_STRING: {
+ case Value::Type::STRING: {
std::string value;
bool result = node.GetAsString(&value);
DCHECK(result);
@@ -118,7 +118,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_LIST: {
+ case Value::Type::LIST: {
json_string_->push_back('[');
if (pretty_print_)
json_string_->push_back(' ');
@@ -128,7 +128,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
bool result = node.GetAsList(&list);
DCHECK(result);
for (const auto& value : *list) {
- if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
+ if (omit_binary_values_ && value->GetType() == Value::Type::BINARY)
continue;
if (first_value_has_been_output) {
@@ -149,7 +149,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_DICTIONARY: {
+ case Value::Type::DICTIONARY: {
json_string_->push_back('{');
if (pretty_print_)
json_string_->append(kPrettyPrintLineEnding);
@@ -161,7 +161,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
for (DictionaryValue::Iterator itr(*dict); !itr.IsAtEnd();
itr.Advance()) {
if (omit_binary_values_ &&
- itr.value().GetType() == Value::TYPE_BINARY) {
+ itr.value().GetType() == Value::Type::BINARY) {
continue;
}
@@ -194,7 +194,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_BINARY:
+ case Value::Type::BINARY:
// Successful only if we're allowed to omit it.
DLOG_IF(ERROR, !omit_binary_values_) << "Cannot serialize binary value.";
return omit_binary_values_;
diff --git a/base/json/json_writer.h b/base/json/json_writer.h
index ef43341409..57cb8c16a2 100644
--- a/base/json/json_writer.h
+++ b/base/json/json_writer.h
@@ -37,6 +37,8 @@ class BASE_EXPORT JSONWriter {
};
// Given a root node, generates a JSON string and puts it into |json|.
+ // The output string is overwritten, not appended to.
+ //
// TODO(tc): Should we generate json if it would be invalid json (e.g.,
// |node| is not a DictionaryValue/ListValue or if there are inf/-inf float
// values)? Return true on success and false on failure.
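Editor's note: a small sketch of the newly documented overwrite semantics (Value(42) is the post-uprev spelling used in the writer unittest hunks below):

    #include <string>

    #include "base/json/json_writer.h"
    #include "base/values.h"

    std::string json = "stale contents";
    // Write() replaces the string outright; nothing is appended to the old
    // value.
    base::JSONWriter::Write(base::Value(42), &json);
    // json is now "42".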
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
index 233ac5e867..6cb236fdc1 100644
--- a/base/json/json_writer_unittest.cc
+++ b/base/json/json_writer_unittest.cc
@@ -27,27 +27,27 @@ TEST(JSONWriterTest, BasicTypes) {
EXPECT_EQ("[]", output_js);
// Test integer values.
- EXPECT_TRUE(JSONWriter::Write(FundamentalValue(42), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(42), &output_js));
EXPECT_EQ("42", output_js);
// Test boolean values.
- EXPECT_TRUE(JSONWriter::Write(FundamentalValue(true), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(true), &output_js));
EXPECT_EQ("true", output_js);
// Test that real values always include a decimal point or an 'e'.
- EXPECT_TRUE(JSONWriter::Write(FundamentalValue(1.0), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(1.0), &output_js));
EXPECT_EQ("1.0", output_js);
// Test that real values in the range (-1, 1) have leading zeros.
- EXPECT_TRUE(JSONWriter::Write(FundamentalValue(0.2), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(0.2), &output_js));
EXPECT_EQ("0.2", output_js);
// Test that real values in the range (-1, 1) have leading zeros.
- EXPECT_TRUE(JSONWriter::Write(FundamentalValue(-0.8), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(-0.8), &output_js));
EXPECT_EQ("-0.8", output_js);
// Test String values.
- EXPECT_TRUE(JSONWriter::Write(StringValue("foo"), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value("foo"), &output_js));
EXPECT_EQ("\"foo\"", output_js);
}
@@ -61,7 +61,7 @@ TEST(JSONWriterTest, NestedTypes) {
std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
inner_dict->SetInteger("inner int", 10);
list->Append(std::move(inner_dict));
- list->Append(WrapUnique(new ListValue()));
+ list->Append(MakeUnique<ListValue>());
list->AppendBoolean(true);
root_dict.Set("list", std::move(list));
@@ -119,9 +119,9 @@ TEST(JSONWriterTest, BinaryValues) {
ListValue binary_list;
binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
- binary_list.Append(WrapUnique(new FundamentalValue(5)));
+ binary_list.Append(MakeUnique<Value>(5));
binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
- binary_list.Append(WrapUnique(new FundamentalValue(2)));
+ binary_list.Append(MakeUnique<Value>(2));
binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
@@ -144,7 +144,7 @@ TEST(JSONWriterTest, DoublesAsInts) {
std::string output_js;
// Test allowing a double with no fractional part to be written as an integer.
- FundamentalValue double_value(1e10);
+ Value double_value(1e10);
EXPECT_TRUE(JSONWriter::WriteWithOptions(
double_value, JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION,
&output_js));
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
index ac970c55c1..5481f905cc 100644
--- a/base/lazy_instance.h
+++ b/base/lazy_instance.h
@@ -24,11 +24,11 @@
// requires that Type be a complete type so we can determine the size.
//
// Example usage:
-// static LazyInstance<MyClass> my_instance = LAZY_INSTANCE_INITIALIZER;
+// static LazyInstance<MyClass>::Leaky inst = LAZY_INSTANCE_INITIALIZER;
// void SomeMethod() {
-// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
+// inst.Get().SomeMethod(); // MyClass::SomeMethod()
//
-// MyClass* ptr = my_instance.Pointer();
+// MyClass* ptr = inst.Pointer();
// ptr->DoDoDo(); // MyClass::DoDoDo
// }
@@ -57,22 +57,15 @@
namespace base {
template <typename Type>
-struct DefaultLazyInstanceTraits {
- static const bool kRegisterOnExit = true;
-#ifndef NDEBUG
- static const bool kAllowedToAccessOnNonjoinableThread = false;
-#endif
-
+struct LazyInstanceTraitsBase {
static Type* New(void* instance) {
- DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u)
- << ": Bad boy, the buffer passed to placement new is not aligned!\n"
- "This may break some stuff like SSE-based optimizations assuming the "
- "<Type> objects are word aligned.";
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u);
// Use placement new to initialize our instance in our preallocated space.
// The parenthesis is very important here to force POD type initialization.
return new (instance) Type();
}
- static void Delete(Type* instance) {
+
+ static void CallDestructor(Type* instance) {
// Explicitly call the destructor.
instance->~Type();
}
@@ -82,6 +75,25 @@ struct DefaultLazyInstanceTraits {
// can implement the more complicated pieces out of line in the .cc file.
namespace internal {
+// This traits class causes destruction of the contained Type at process exit
+// via AtExitManager. This is generally not what you want; instead, prefer
+// Leaky below.
+template <typename Type>
+struct DestructorAtExitLazyInstanceTraits {
+ static const bool kRegisterOnExit = true;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
+ static Type* New(void* instance) {
+ return LazyInstanceTraitsBase<Type>::New(instance);
+ }
+
+ static void Delete(Type* instance) {
+ LazyInstanceTraitsBase<Type>::CallDestructor(instance);
+ }
+};
+
// Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
// base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
// instead of:
@@ -93,19 +105,22 @@ namespace internal {
template <typename Type>
struct LeakyLazyInstanceTraits {
static const bool kRegisterOnExit = false;
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
static const bool kAllowedToAccessOnNonjoinableThread = true;
#endif
static Type* New(void* instance) {
ANNOTATE_SCOPED_MEMORY_LEAK;
- return DefaultLazyInstanceTraits<Type>::New(instance);
+ return LazyInstanceTraitsBase<Type>::New(instance);
}
static void Delete(Type*) {}
};
+template <typename Type>
+struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
+
// Our AtomicWord doubles as a spinlock, where a value of
-// kBeingCreatedMarker means the spinlock is being held for creation.
+// kLazyInstanceStateCreating means the spinlock is being held for creation.
static const subtle::AtomicWord kLazyInstanceStateCreating = 1;
// Check if instance needs to be created. If so return true otherwise
@@ -122,7 +137,10 @@ BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
} // namespace internal
-template <typename Type, typename Traits = DefaultLazyInstanceTraits<Type> >
+template <
+ typename Type,
+ typename Traits =
+ internal::ErrorMustSelectLazyOrDestructorAtExitForLazyInstance<Type>>
class LazyInstance {
public:
// Do not define a destructor, as doing so makes LazyInstance a
@@ -134,14 +152,16 @@ class LazyInstance {
// Convenience typedef to avoid having to repeat Type for leaky lazy
// instances.
- typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type> > Leaky;
+ typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type>> Leaky;
+ typedef LazyInstance<Type, internal::DestructorAtExitLazyInstanceTraits<Type>>
+ DestructorAtExit;
Type& Get() {
return *Pointer();
}
Type* Pointer() {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
// Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
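Editor's note: with DefaultLazyInstanceTraits gone, every declaration now has to pick a variant explicitly, as the unittest hunks below show. A sketch (MyClass is an illustrative type):

    #include "base/at_exit.h"
    #include "base/lazy_instance.h"

    class MyClass {};

    // Preferred: never destroyed, and allowed on non-joinable threads.
    static base::LazyInstance<MyClass>::Leaky g_leaky_instance =
        LAZY_INSTANCE_INITIALIZER;

    // Destroyed through AtExitManager at process exit; rarely what you want.
    static base::LazyInstance<MyClass>::DestructorAtExit g_at_exit_instance =
        LAZY_INSTANCE_INITIALIZER;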
diff --git a/base/lazy_instance_unittest.cc b/base/lazy_instance_unittest.cc
index 8947b1291f..0aa4659465 100644
--- a/base/lazy_instance_unittest.cc
+++ b/base/lazy_instance_unittest.cc
@@ -45,7 +45,8 @@ int SlowConstructor::constructed = 0;
class SlowDelegate : public base::DelegateSimpleThread::Delegate {
public:
- explicit SlowDelegate(base::LazyInstance<SlowConstructor>* lazy)
+ explicit SlowDelegate(
+ base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy)
: lazy_(lazy) {}
void Run() override {
@@ -54,13 +55,13 @@ class SlowDelegate : public base::DelegateSimpleThread::Delegate {
}
private:
- base::LazyInstance<SlowConstructor>* lazy_;
+ base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy_;
};
} // namespace
-static base::LazyInstance<ConstructAndDestructLogger> lazy_logger =
- LAZY_INSTANCE_INITIALIZER;
+static base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit
+ lazy_logger = LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, Basic) {
{
@@ -81,7 +82,7 @@ TEST(LazyInstanceTest, Basic) {
EXPECT_EQ(4, destructed_seq_.GetNext());
}
-static base::LazyInstance<SlowConstructor> lazy_slow =
+static base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, ConstructorThreadSafety) {
@@ -126,7 +127,8 @@ TEST(LazyInstanceTest, LeakyLazyInstance) {
bool deleted1 = false;
{
base::ShadowingAtExitManager shadow;
- static base::LazyInstance<DeleteLogger> test = LAZY_INSTANCE_INITIALIZER;
+ static base::LazyInstance<DeleteLogger>::DestructorAtExit test =
+ LAZY_INSTANCE_INITIALIZER;
test.Get().SetDeletedPtr(&deleted1);
}
EXPECT_TRUE(deleted1);
@@ -164,9 +166,12 @@ TEST(LazyInstanceTest, Alignment) {
// Create some static instances with increasing sizes and alignment
// requirements. By ordering this way, the linker will need to do some work to
// ensure proper alignment of the static data.
- static LazyInstance<AlignedData<4> > align4 = LAZY_INSTANCE_INITIALIZER;
- static LazyInstance<AlignedData<32> > align32 = LAZY_INSTANCE_INITIALIZER;
- static LazyInstance<AlignedData<4096> > align4096 = LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<4>>::DestructorAtExit align4 =
+ LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<32>>::DestructorAtExit align32 =
+ LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<4096>>::DestructorAtExit align4096 =
+ LAZY_INSTANCE_INITIALIZER;
EXPECT_ALIGNED(align4.Pointer(), 4);
EXPECT_ALIGNED(align32.Pointer(), 32);
diff --git a/base/location.h b/base/location.h
index 21e270c5a9..dd78515ce2 100644
--- a/base/location.h
+++ b/base/location.h
@@ -97,7 +97,7 @@ struct BASE_EXPORT LocationSnapshot {
BASE_EXPORT const void* GetProgramCounter();
// Define a macro to record the current source location.
-#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__FUNCTION__)
+#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__func__)
#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
::tracked_objects::Location(function_name, \
diff --git a/base/logging.cc b/base/logging.cc
index 381e9eea0f..a8736badd3 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -7,12 +7,12 @@
#include <limits.h>
#include <stdint.h>
+#include "base/debug/activity_tracker.h"
#include "base/macros.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <io.h>
-#include <windows.h>
typedef HANDLE FileHandle;
typedef HANDLE MutexHandle;
// Windows warns on using write(). It prefers _write().
@@ -346,6 +346,11 @@ void CloseLogFileUnlocked() {
} // namespace
+// This is never instantiated; it is only used so that EAT_STREAM_PARAMETERS
+// has an object of the correct type on the LHS of the unused arm of the
+// ternary operator.
+std::ostream* g_swallow_stream;
+
LoggingSettings::LoggingSettings()
: logging_dest(LOG_DEFAULT),
log_file(nullptr),
@@ -737,6 +742,12 @@ LogMessage::~LogMessage() {
}
if (severity_ == LOG_FATAL) {
+ // Write the log message to the global activity tracker, if running.
+ base::debug::GlobalActivityTracker* tracker =
+ base::debug::GlobalActivityTracker::Get();
+ if (tracker)
+ tracker->RecordLogMessage(str_newline);
+
// Ensure the first characters of the string are on the stack so they
// are contained in minidumps for diagnostic purposes.
char str_stack[1024];
@@ -780,18 +791,13 @@ void LogMessage::Init(const char* file, int line) {
if (g_log_thread_id)
stream_ << base::PlatformThread::CurrentId() << ':';
if (g_log_timestamp) {
- time_t t = time(nullptr);
-#if defined(__ANDROID__) || defined(ANDROID)
+#if defined(OS_POSIX)
+ timeval tv;
+ gettimeofday(&tv, nullptr);
+ time_t t = tv.tv_sec;
struct tm local_time;
- memset(&local_time, 0, sizeof(local_time));
-#else
- struct tm local_time = {0};
-#endif
-#ifdef _MSC_VER
- localtime_s(&local_time, &t);
-#else
+ memset(&local_time, 0, sizeof(local_time));
localtime_r(&t, &local_time);
-#endif
struct tm* tm_time = &local_time;
stream_ << std::setfill('0')
<< std::setw(2) << 1 + tm_time->tm_mon
@@ -800,7 +806,23 @@ void LogMessage::Init(const char* file, int line) {
<< std::setw(2) << tm_time->tm_hour
<< std::setw(2) << tm_time->tm_min
<< std::setw(2) << tm_time->tm_sec
+ << '.'
+ << std::setw(6) << tv.tv_usec
<< ':';
+#elif defined(OS_WIN)
+ SYSTEMTIME local_time;
+ GetLocalTime(&local_time);
+ stream_ << std::setfill('0')
+ << std::setw(2) << local_time.wMonth
+ << std::setw(2) << local_time.wDay
+ << '/'
+ << std::setw(2) << local_time.wHour
+ << std::setw(2) << local_time.wMinute
+ << std::setw(2) << local_time.wSecond
+ << '.'
+ << std::setw(3) << local_time.wMilliseconds
+ << ':';
+#endif
}
if (g_log_tickcount)
stream_ << TickCount() << ':';
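Editor's note: going by the formatting code above, the POSIX-side timestamp in the log prefix gains a microsecond suffix; schematically (other prefix fields are unchanged and elided here, and the concrete digits are illustrative):

    // Before: ...:0726/015721:...
    // After:  ...:0726/015721.123456:...   (MMDD/HHMMSS.microseconds)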
diff --git a/base/logging.h b/base/logging.h
index 2bfc972601..7ca018e227 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -15,6 +15,7 @@
#include <utility>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/debug/debugger.h"
#include "base/macros.h"
#include "base/template_util.h"
@@ -140,34 +141,6 @@
// There is the special severity of DFATAL, which logs FATAL in debug mode,
// ERROR in normal mode.
-// Note that "The behavior of a C++ program is undefined if it adds declarations
-// or definitions to namespace std or to a namespace within namespace std unless
-// otherwise specified." --C++11[namespace.std]
-//
-// We've checked that this particular definition has the intended behavior on
-// our implementations, but it's prone to breaking in the future, and please
-// don't imitate this in your own definitions without checking with some
-// standard library experts.
-namespace std {
-// These functions are provided as a convenience for logging, which is where we
-// use streams (it is against Google style to use streams in other places). It
-// is designed to allow you to emit non-ASCII Unicode strings to the log file,
-// which is normally ASCII. It is relatively slow, so try not to use it for
-// common cases. Non-ASCII characters will be converted to UTF-8 by these
-// operators.
-BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
-inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
- return out << wstr.c_str();
-}
-
-template<typename T>
-typename std::enable_if<std::is_enum<T>::value, std::ostream&>::type operator<<(
- std::ostream& out, T value) {
- return out << static_cast<typename std::underlying_type<T>::type>(value);
-}
-
-} // namespace std
-
namespace logging {
// TODO(avi): do we want to do a unification of character types here?
@@ -337,15 +310,16 @@ const LogSeverity LOG_DFATAL = LOG_FATAL;
// by LOG() and LOG_IF, etc. Since these are used all over our code, it's
// better to have compact code for these operations.
#define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
- logging::ClassName(__FILE__, __LINE__, logging::LOG_INFO , ##__VA_ARGS__)
-#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
- logging::ClassName(__FILE__, __LINE__, logging::LOG_WARNING , ##__VA_ARGS__)
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_INFO, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_WARNING, \
+ ##__VA_ARGS__)
#define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
- logging::ClassName(__FILE__, __LINE__, logging::LOG_ERROR , ##__VA_ARGS__)
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_ERROR, ##__VA_ARGS__)
#define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
- logging::ClassName(__FILE__, __LINE__, logging::LOG_FATAL , ##__VA_ARGS__)
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_FATAL, ##__VA_ARGS__)
#define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
- logging::ClassName(__FILE__, __LINE__, logging::LOG_DFATAL , ##__VA_ARGS__)
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DFATAL, ##__VA_ARGS__)
#define COMPACT_GOOGLE_LOG_INFO \
COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
@@ -406,7 +380,7 @@ const LogSeverity LOG_0 = LOG_ERROR;
// The VLOG macros log with negative verbosities.
#define VLOG_STREAM(verbose_level) \
- logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
+ ::logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
#define VLOG(verbose_level) \
LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
@@ -417,11 +391,11 @@ const LogSeverity LOG_0 = LOG_ERROR;
#if defined (OS_WIN)
#define VPLOG_STREAM(verbose_level) \
- logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
::logging::GetLastSystemErrorCode()).stream()
#elif defined(OS_POSIX)
#define VPLOG_STREAM(verbose_level) \
- logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
::logging::GetLastSystemErrorCode()).stream()
#endif
@@ -434,7 +408,7 @@ const LogSeverity LOG_0 = LOG_ERROR;
// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
-#define LOG_ASSERT(condition) \
+#define LOG_ASSERT(condition) \
LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
#if defined(OS_WIN)
@@ -453,9 +427,23 @@ const LogSeverity LOG_0 = LOG_ERROR;
#define PLOG_IF(severity, condition) \
LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
-// The actual stream used isn't important.
-#define EAT_STREAM_PARAMETERS \
- true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL)
+BASE_EXPORT extern std::ostream* g_swallow_stream;
+
+// Note that g_swallow_stream is used instead of an arbitrary LOG() stream to
+// avoid the creation of an object with a non-trivial destructor (LogMessage).
+// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
+// pointless instructions to be emitted even at full optimization level, even
+// though the : arm of the ternary operator is clearly never executed. Using a
+// simpler object to be &'d with Voidify() avoids these extra instructions.
+// Using a simpler POD object with a templated operator<< also works to avoid
+// these instructions. However, this causes warnings on statically defined
+// implementations of operator<<(std::ostream, ...) in some .cc files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to
+// an ostream* is also unsuitable, because some compilers warn of undefined
+// behavior.
+#define EAT_STREAM_PARAMETERS \
+ true ? (void)0 \
+ : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
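// Illustration (hedged): with DCHECKs off, DCHECK(ptr) << DescribeState()
// (DescribeState() is a hypothetical helper) expands to roughly
//   true ? (void)0
//        : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
//              << !(ptr) << DescribeState();
// Everything after the ':' is type-checked but never evaluated.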
// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
// boolean.
@@ -472,6 +460,84 @@ class CheckOpResult {
std::string* message_;
};
+// Crashes in the fastest possible way with no attempt at logging.
+// There are different constraints to satisfy here; see http://crbug.com/664209
+// for more context:
+// - The trap instructions, and hence the PC value at crash time, have to be
+// distinct and not get folded into the same opcode by the compiler.
+// On Linux/Android this is tricky because GCC still folds identical
+// asm volatile blocks. The workaround is generating distinct opcodes for
+// each CHECK using the __COUNTER__ macro.
+// - The debug info for the trap instruction has to be attributed to the source
+// line that has the CHECK(), to make crash reports actionable. This rules
+// out using an inline function, at least as long as clang doesn't support
+// attribute(artificial).
+// - Failed CHECKs should produce a signal that is distinguishable from an
+// invalid memory access, to improve the actionability of crash reports.
+// - The compiler should treat the CHECK as a no-return instruction, so that the
+// trap code can be efficiently packed in the prologue of the function and
+// doesn't interfere with the main execution flow.
+// - When debugging, developers shouldn't be able to accidentally step over a
+// CHECK. This is achieved by putting opcodes that will cause a
+// non-continuable exception after the actual trap instruction.
+// - Don't cause too much binary bloat.
+#if defined(COMPILER_GCC)
+
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
+// int 3 will generate a SIGTRAP.
+#define TRAP_SEQUENCE() \
+ asm volatile( \
+ "int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(__COUNTER__)))
+
+#elif defined(ARCH_CPU_ARMEL) && !defined(OS_NACL)
+// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
+// as a 32-bit userspace app on arm64. There doesn't seem to be any way to
+// cause a SIGTRAP from userspace without using a syscall (which would be a
+// problem for sandboxing).
+#define TRAP_SEQUENCE() \
+ asm volatile("bkpt #0; udf %0;" ::"i"(__COUNTER__ % 256))
+
+#elif defined(ARCH_CPU_ARM64) && !defined(OS_NACL)
+// This will always generate a SIGTRAP on arm64.
+#define TRAP_SEQUENCE() \
+ asm volatile("brk #0; hlt %0;" ::"i"(__COUNTER__ % 65536))
+
+#else
+// Crash report accuracy will not be guaranteed on other architectures, but at
+// least this will crash as expected.
+#define TRAP_SEQUENCE() __builtin_trap()
+#endif // ARCH_CPU_*
+
+#define IMMEDIATE_CRASH() \
+ ({ \
+ TRAP_SEQUENCE(); \
+ __builtin_unreachable(); \
+ })
+
+#elif defined(COMPILER_MSVC)
+
+// Clang is cleverer about coalescing int3s, so we need to add a unique-ish
+// instruction following the __debugbreak() to have it emit distinct locations
+// for CHECKs rather than collapsing them all together. It would be nice to use
+// a short intrinsic to do this (and perhaps have only one implementation for
+// both clang and MSVC), however clang-cl currently does not support intrinsics.
+// On the flip side, MSVC x64 doesn't support inline asm. So, we have to have
+// two implementations. Normally clang-cl's version will be 5 bytes (1 for
+// `int3`, 2 for `ud2`, 2 for `push byte imm`); however, TODO(scottmg):
+// https://crbug.com/694670 clang-cl doesn't currently support %'ing
+// __COUNTER__, so until then it will emit the dword form of push.
+// TODO(scottmg): Reinvestigate a short sequence that will work on both
+// compilers once clang supports more intrinsics. See https://crbug.com/693713.
+#if defined(__clang__)
+#define IMMEDIATE_CRASH() ({__asm int 3 __asm ud2 __asm push __COUNTER__})
+#else
+#define IMMEDIATE_CRASH() __debugbreak()
+#endif // __clang__
+
+#else
+#error Port
+#endif
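// Illustration (assuming ARCH_CPU_X86_FAMILY): successive IMMEDIATE_CRASH()
// expansions receive distinct __COUNTER__ immediates, so the asm blocks
// cannot be folded onto a single PC:
//   asm volatile("int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(0)));
//   asm volatile("int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(1)));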
+
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode.
@@ -481,20 +547,14 @@ class CheckOpResult {
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
-// Make all CHECK functions discard their log strings to reduce code
-// bloat, and improve performance, for official release builds.
-
-#if defined(COMPILER_GCC) || __clang__
-#define LOGGING_CRASH() __builtin_trap()
-#else
-#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
-#endif
-
+// Make all CHECK functions discard their log strings to reduce code bloat, and
+// improve performance, for official release builds.
+//
// This is not calling BreakDebugger since this is called frequently, and
// calling an out-of-line function instead of a noreturn inline macro prevents
// compiler optimizations.
-#define CHECK(condition) \
- !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
+#define CHECK(condition) \
+ UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS
#define PCHECK(condition) CHECK(condition)
@@ -510,26 +570,26 @@ class CheckOpResult {
// __analysis_assume gets confused on some conditions:
// http://randomascii.wordpress.com/2011/09/13/analyze-for-visual-studio-the-ugly-part-5/
-#define CHECK(condition) \
- __analysis_assume(!!(condition)), \
- LAZY_STREAM(LOG_STREAM(FATAL), false) \
- << "Check failed: " #condition ". "
+#define CHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(LOG_STREAM(FATAL), false) \
+ << "Check failed: " #condition ". "
-#define PCHECK(condition) \
- __analysis_assume(!!(condition)), \
- LAZY_STREAM(PLOG_STREAM(FATAL), false) \
- << "Check failed: " #condition ". "
+#define PCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(PLOG_STREAM(FATAL), false) \
+ << "Check failed: " #condition ". "
#else // _PREFAST_
// Do as much work as possible out of line to reduce inline code size.
-#define CHECK(condition) \
- LAZY_STREAM(logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+#define CHECK(condition) \
+ LAZY_STREAM(::logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
!(condition))
#define PCHECK(condition) \
LAZY_STREAM(PLOG_STREAM(FATAL), !(condition)) \
- << "Check failed: " #condition ". "
+ << "Check failed: " #condition ". "
#endif // _PREFAST_
@@ -541,12 +601,12 @@ class CheckOpResult {
// CHECK_EQ(2, a);
#define CHECK_OP(name, op, val1, val2) \
switch (0) case 0: default: \
- if (logging::CheckOpResult true_if_passed = \
- logging::Check##name##Impl((val1), (val2), \
- #val1 " " #op " " #val2)) \
+ if (::logging::CheckOpResult true_if_passed = \
+ ::logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2)) \
; \
else \
- logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
+ ::logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
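// Sketch of why CHECK_OP opens with "switch (0) case 0: default:": it keeps
// the macro usable, unbraced, inside an if/else without a dangling-else
// ambiguity:
//   if (a == 1)
//     CHECK_EQ(2, a);   // the macro's internal "else" stays internal
//   else
//     HandleOther();    // HandleOther() is hypothetical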
#endif // !(OFFICIAL_BUILD && NDEBUG)
@@ -554,12 +614,26 @@ class CheckOpResult {
// it uses the definition for operator<<, with a few special cases below.
template <typename T>
inline typename std::enable_if<
- base::internal::SupportsOstreamOperator<const T&>::value,
+ base::internal::SupportsOstreamOperator<const T&>::value &&
+ !std::is_function<typename std::remove_pointer<T>::type>::value,
void>::type
MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+inline typename std::enable_if<
+ std::is_function<typename std::remove_pointer<T>::type>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << reinterpret_cast<const void*>(v);
+}
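// Sketch of the problem fixed above: without this overload, a function
// pointer streams through its bool conversion and prints as "1" or "0":
//   std::ostringstream os;
//   void (*fp)() = SomeFunction;  // SomeFunction is a hypothetical function
//   os << fp;                                  // "1" -- bool conversion
//   os << reinterpret_cast<const void*>(fp);   // the actual address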
+
// We need overloads for enums that don't support operator<<.
// (i.e. scoped enums where no operator<< overload was declared).
template <typename T>
@@ -611,16 +685,20 @@ std::string* MakeCheckOpString<std::string, std::string>(
// The (int, int) specialization works around the issue that the compiler
// will not instantiate the template version of the function on values of
// unnamed enum type - see comment below.
-#define DEFINE_CHECK_OP_IMPL(name, op) \
- template <class t1, class t2> \
- inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
- const char* names) { \
- if (v1 op v2) return NULL; \
- else return MakeCheckOpString(v1, v2, names); \
- } \
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+ template <class t1, class t2> \
+ inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+ const char* names) { \
+ if (v1 op v2) \
+ return NULL; \
+ else \
+ return ::logging::MakeCheckOpString(v1, v2, names); \
+ } \
inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
- if (v1 op v2) return NULL; \
- else return MakeCheckOpString(v1, v2, names); \
+ if (v1 op v2) \
+ return NULL; \
+ else \
+ return ::logging::MakeCheckOpString(v1, v2, names); \
}
DEFINE_CHECK_OP_IMPL(EQ, ==)
DEFINE_CHECK_OP_IMPL(NE, !=)
@@ -638,12 +716,6 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
-#define ENABLE_DLOG 0
-#else
-#define ENABLE_DLOG 1
-#endif
-
-#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
#define DCHECK_IS_ON() 0
#else
#define DCHECK_IS_ON() 1
@@ -651,7 +723,7 @@ DEFINE_CHECK_OP_IMPL(GT, > )
// Definitions for DLOG et al.
-#if ENABLE_DLOG
+#if DCHECK_IS_ON()
#define DLOG_IS_ON(severity) LOG_IS_ON(severity)
#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
@@ -660,12 +732,11 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
#define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
-#else // ENABLE_DLOG
+#else // DCHECK_IS_ON()
-// If ENABLE_DLOG is off, we want to avoid emitting any references to
-// |condition| (which may reference a variable defined only if NDEBUG
-// is not defined). Contrast this with DCHECK et al., which has
-// different behavior.
+// If !DCHECK_IS_ON(), we want to avoid emitting any references to |condition|
+// (which may reference a variable defined only if DCHECK_IS_ON()).
+// Contrast this with DCHECK et al., which have different behavior.
#define DLOG_IS_ON(severity) false
#define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
@@ -674,19 +745,7 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
#define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
-#endif // ENABLE_DLOG
-
-// DEBUG_MODE is for uses like
-// if (DEBUG_MODE) foo.CheckThatFoo();
-// instead of
-// #ifndef NDEBUG
-// foo.CheckThatFoo();
-// #endif
-//
-// We tie its state to ENABLE_DLOG.
-enum { DEBUG_MODE = ENABLE_DLOG };
-
-#undef ENABLE_DLOG
+#endif // DCHECK_IS_ON()
#define DLOG(severity) \
LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
@@ -721,31 +780,63 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// whether DCHECKs are enabled; this is so that we don't get unused
// variable warnings if the only use of a variable is in a DCHECK.
// This behavior is different from DLOG_IF et al.
+//
+// Note that the definition of the DCHECK macros depends on whether or not
+// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use
+// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries.
#if defined(_PREFAST_) && defined(OS_WIN)
// See comments on the previous use of __analysis_assume.
-#define DCHECK(condition) \
- __analysis_assume(!!(condition)), \
- LAZY_STREAM(LOG_STREAM(DCHECK), false) \
- << "Check failed: " #condition ". "
+#define DCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(LOG_STREAM(DCHECK), false) \
+ << "Check failed: " #condition ". "
-#define DPCHECK(condition) \
- __analysis_assume(!!(condition)), \
- LAZY_STREAM(PLOG_STREAM(DCHECK), false) \
- << "Check failed: " #condition ". "
+#define DPCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), false) \
+ << "Check failed: " #condition ". "
-#else // _PREFAST_
+#elif defined(__clang_analyzer__)
+
+// Keeps the static analyzer from proceeding along the current codepath,
+// otherwise false positive errors may be generated by null pointer checks.
+inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
+ return false;
+}
-#define DCHECK(condition) \
- LAZY_STREAM(LOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
+#define DCHECK(condition) \
+ LAZY_STREAM( \
+ LOG_STREAM(DCHECK), \
+ DCHECK_IS_ON() ? (logging::AnalyzerNoReturn() || !(condition)) : false) \
<< "Check failed: " #condition ". "
-#define DPCHECK(condition) \
- LAZY_STREAM(PLOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
+#define DPCHECK(condition) \
+ LAZY_STREAM( \
+ PLOG_STREAM(DCHECK), \
+ DCHECK_IS_ON() ? (logging::AnalyzerNoReturn() || !(condition)) : false) \
<< "Check failed: " #condition ". "
-#endif // _PREFAST_
+#else
+
+#if DCHECK_IS_ON()
+
+#define DCHECK(condition) \
+ LAZY_STREAM(LOG_STREAM(DCHECK), !(condition)) \
+ << "Check failed: " #condition ". "
+#define DPCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), !(condition)) \
+ << "Check failed: " #condition ". "
+
+#else // DCHECK_IS_ON()
+
+#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+
+#endif // DCHECK_IS_ON()
+
+#endif
// Helper macro for binary operators.
// Don't use this macro directly in your code, use DCHECK_EQ et al below.
@@ -753,16 +844,37 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// macro is used in an 'if' clause such as:
// if (a == 1)
// DCHECK_EQ(2, a);
-#define DCHECK_OP(name, op, val1, val2) \
- switch (0) case 0: default: \
- if (logging::CheckOpResult true_if_passed = \
- DCHECK_IS_ON() ? \
- logging::Check##name##Impl((val1), (val2), \
- #val1 " " #op " " #val2) : nullptr) \
- ; \
- else \
- logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
- true_if_passed.message()).stream()
+#if DCHECK_IS_ON()
+
+#define DCHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (::logging::CheckOpResult true_if_passed = \
+ DCHECK_IS_ON() ? \
+ ::logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2) : nullptr) \
+ ; \
+ else \
+ ::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
+ true_if_passed.message()).stream()
+
+#else // DCHECK_IS_ON()
+
+// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<<
+// overloads for |val1| and |val2| to avoid potential compiler warnings about
+// unused functions. For the same reason, it also compares |val1| and |val2|
+// using |op|.
+//
+// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated
+// once. Even though |val1| and |val2| appear twice in this version of the macro
+// expansion, this is OK, since the expression is never actually evaluated.
+#define DCHECK_OP(name, op, val1, val2) \
+ EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val1), \
+ ::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val2), \
+ (val1)op(val2))
+
+#endif // DCHECK_IS_ON()
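// Hedged illustration: with DCHECKs off, DCHECK_EQ(x, y) becomes, roughly,
//   EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString(
//                                 ::logging::g_swallow_stream, x),
//                             ::logging::MakeCheckOpValueString(
//                                 ::logging::g_swallow_stream, y),
//                             (x) == (y))
// so the operator<< overloads and the comparison are type-checked, but the
// whole expression sits on the dead ":" arm and never runs.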
// Equality/Inequality checks - compare two values, and log a
// LOG_DCHECK message including the two values when the result is not
@@ -770,7 +882,7 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// defined.
//
// You may append to the error message like so:
-// DCHECK_NE(1, 2) << ": The world must be ending!";
+// DCHECK_NE(1, 2) << "The world must be ending!";
//
// We are very careful to ensure that each argument is evaluated exactly
// once, and that anything which is legal to pass as a function argument is
@@ -834,6 +946,9 @@ class BASE_EXPORT LogMessage {
std::ostream& stream() { return stream_; }
+ LogSeverity severity() { return severity_; }
+ std::string str() { return stream_.str(); }
+
private:
void Init(const char* file, int line);
@@ -941,12 +1056,14 @@ BASE_EXPORT void CloseLogFile();
// Async signal safe logging mechanism.
BASE_EXPORT void RawLog(int level, const char* message);
-#define RAW_LOG(level, message) logging::RawLog(logging::LOG_ ## level, message)
+#define RAW_LOG(level, message) \
+ ::logging::RawLog(::logging::LOG_##level, message)
-#define RAW_CHECK(condition) \
- do { \
- if (!(condition)) \
- logging::RawLog(logging::LOG_FATAL, "Check failed: " #condition "\n"); \
+#define RAW_CHECK(condition) \
+ do { \
+ if (!(condition)) \
+ ::logging::RawLog(::logging::LOG_FATAL, \
+ "Check failed: " #condition "\n"); \
} while (0)
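// Usage sketch (hedged): the RAW_* macros avoid streams and allocation, so
// they remain usable where the full LOG() machinery is unsafe, e.g. inside a
// signal handler:
//   RAW_CHECK(g_buffer != nullptr);           // g_buffer is hypothetical
//   RAW_LOG(ERROR, "handling fatal signal");  // writes the literal message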
#if defined(OS_WIN)
@@ -959,6 +1076,27 @@ BASE_EXPORT std::wstring GetLogFileFullPath();
} // namespace logging
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, so please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
+namespace std {
+// These functions are provided as a convenience for logging, which is where we
+// use streams (it is against Google style to use streams in other places).
+// They are designed to let you emit non-ASCII Unicode strings to the log
+// file, which is normally ASCII. They are relatively slow, so try not to
+// use them for common cases. Non-ASCII characters will be converted to
+// UTF-8 by these
+// operators.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
+inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
+ return out << wstr.c_str();
+}
+} // namespace std
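// Usage sketch for the operators above (conversion to UTF-8 is relatively
// slow, per the comment):
//   LOG(INFO) << L"wide literal";
//   LOG(INFO) << std::wstring(L"wide string");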
+
// The NOTIMPLEMENTED() macro annotates codepaths which have
// not been implemented yet.
//
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 8a20c54fb4..04f349cab6 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -9,6 +9,21 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_POSIX)
+#include <signal.h>
+#include <unistd.h>
+#include "base/posix/eintr_wrapper.h"
+#endif // OS_POSIX
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <ucontext.h>
+#endif
+
+#if defined(OS_WIN)
+#include <excpt.h>
+#include <windows.h>
+#endif // OS_WIN
+
namespace logging {
namespace {
@@ -54,16 +69,14 @@ class MockLogSource {
TEST_F(LoggingTest, BasicLogging) {
MockLogSource mock_log_source;
- EXPECT_CALL(mock_log_source, Log()).Times(DEBUG_MODE ? 16 : 8).
- WillRepeatedly(Return("log message"));
+ EXPECT_CALL(mock_log_source, Log())
+ .Times(DCHECK_IS_ON() ? 16 : 8)
+ .WillRepeatedly(Return("log message"));
SetMinLogLevel(LOG_INFO);
EXPECT_TRUE(LOG_IS_ON(INFO));
- // As of g++-4.5, the first argument to EXPECT_EQ cannot be a
- // constant expression.
- const bool kIsDebugMode = (DEBUG_MODE != 0);
- EXPECT_TRUE(kIsDebugMode == DLOG_IS_ON(INFO));
+ EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
EXPECT_TRUE(VLOG_IS_ON(0));
LOG(INFO) << mock_log_source.Log();
@@ -190,6 +203,154 @@ TEST_F(LoggingTest, CheckStreamsAreLazy) {
#endif
+#if defined(OFFICIAL_BUILD) && defined(OS_WIN)
+NOINLINE void CheckContainingFunc(int death_location) {
+ CHECK(death_location != 1);
+ CHECK(death_location != 2);
+ CHECK(death_location != 3);
+}
+
+int GetCheckExceptionData(EXCEPTION_POINTERS* p, DWORD* code, void** addr) {
+ *code = p->ExceptionRecord->ExceptionCode;
+ *addr = p->ExceptionRecord->ExceptionAddress;
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+ DWORD code1 = 0;
+ DWORD code2 = 0;
+ DWORD code3 = 0;
+ void* addr1 = nullptr;
+ void* addr2 = nullptr;
+ void* addr3 = nullptr;
+
+ // Record the exception code and addresses.
+ __try {
+ CheckContainingFunc(1);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code1, &addr1)) {
+ }
+
+ __try {
+ CheckContainingFunc(2);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code2, &addr2)) {
+ }
+
+ __try {
+ CheckContainingFunc(3);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code3, &addr3)) {
+ }
+
+ // Ensure that the exception codes are correct (in particular, breakpoints,
+ // not access violations).
+ EXPECT_EQ(STATUS_BREAKPOINT, code1);
+ EXPECT_EQ(STATUS_BREAKPOINT, code2);
+ EXPECT_EQ(STATUS_BREAKPOINT, code3);
+
+ // Ensure that none of the CHECKs are colocated.
+ EXPECT_NE(addr1, addr2);
+ EXPECT_NE(addr1, addr3);
+ EXPECT_NE(addr2, addr3);
+}
+
+#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS) && \
+ (defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY))
+
+int g_child_crash_pipe;
+
+void CheckCrashTestSighandler(int, siginfo_t* info, void* context_ptr) {
+ // Contrary to what is clearly stated in "man 2 sigaction", some Linux
+ // kernels do NOT populate |info->si_addr| in the case of a SIGTRAP. Hence we
+ // need the arch-specific boilerplate below, which is inspired by breakpad.
+ // At the same time, on OSX, ucontext.h is deprecated but si_addr works fine.
+ uintptr_t crash_addr = 0;
+#if defined(OS_MACOSX)
+ crash_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+#else // OS_POSIX && !OS_MACOSX
+ struct ucontext* context = reinterpret_cast<struct ucontext*>(context_ptr);
+#if defined(ARCH_CPU_X86)
+ crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_EIP]);
+#elif defined(ARCH_CPU_X86_64)
+ crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_RIP]);
+#elif defined(ARCH_CPU_ARMEL)
+ crash_addr = static_cast<uintptr_t>(context->uc_mcontext.arm_pc);
+#elif defined(ARCH_CPU_ARM64)
+ crash_addr = static_cast<uintptr_t>(context->uc_mcontext.pc);
+#endif // ARCH_*
+#endif // OS_POSIX && !OS_MACOSX
+ HANDLE_EINTR(write(g_child_crash_pipe, &crash_addr, sizeof(uintptr_t)));
+ _exit(0);
+}
+
+// CHECK causes a direct crash (without jumping to another function) only in
+// official builds. Unfortunately, continuous test coverage on official builds
+// is lower. DO_CHECK here falls back on a home-brewed implementation in
+// non-official builds, to catch regressions earlier in the CQ.
+#if defined(OFFICIAL_BUILD)
+#define DO_CHECK CHECK
+#else
+#define DO_CHECK(cond) \
+ if (!(cond)) \
+ IMMEDIATE_CRASH()
+#endif
+
+void CrashChildMain(int death_location) {
+ struct sigaction act = {};
+ act.sa_sigaction = CheckCrashTestSighandler;
+ act.sa_flags = SA_SIGINFO;
+ ASSERT_EQ(0, sigaction(SIGTRAP, &act, NULL));
+ ASSERT_EQ(0, sigaction(SIGBUS, &act, NULL));
+ ASSERT_EQ(0, sigaction(SIGILL, &act, NULL));
+ DO_CHECK(death_location != 1);
+ DO_CHECK(death_location != 2);
+ printf("\n");
+ DO_CHECK(death_location != 3);
+
+ // Should never reach this point.
+ const uintptr_t failed = 0;
+ HANDLE_EINTR(write(g_child_crash_pipe, &failed, sizeof(uintptr_t)));
+}
+
+void SpawnChildAndCrash(int death_location, uintptr_t* child_crash_addr) {
+ int pipefd[2];
+ ASSERT_EQ(0, pipe(pipefd));
+
+ int pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) { // child process.
+ close(pipefd[0]); // Close reader (parent) end.
+ g_child_crash_pipe = pipefd[1];
+ CrashChildMain(death_location);
+ FAIL() << "The child process was supposed to crash. It didn't.";
+ }
+
+ close(pipefd[1]); // Close writer (child) end.
+ DCHECK(child_crash_addr);
+ int res = HANDLE_EINTR(read(pipefd[0], child_crash_addr, sizeof(uintptr_t)));
+ ASSERT_EQ(static_cast<int>(sizeof(uintptr_t)), res);
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+ uintptr_t child_crash_addr_1 = 0;
+ uintptr_t child_crash_addr_2 = 0;
+ uintptr_t child_crash_addr_3 = 0;
+
+ SpawnChildAndCrash(1, &child_crash_addr_1);
+ SpawnChildAndCrash(2, &child_crash_addr_2);
+ SpawnChildAndCrash(3, &child_crash_addr_3);
+
+ ASSERT_NE(0u, child_crash_addr_1);
+ ASSERT_NE(0u, child_crash_addr_2);
+ ASSERT_NE(0u, child_crash_addr_3);
+ ASSERT_NE(child_crash_addr_1, child_crash_addr_2);
+ ASSERT_NE(child_crash_addr_1, child_crash_addr_3);
+ ASSERT_NE(child_crash_addr_2, child_crash_addr_3);
+}
+#endif // OS_POSIX
+
TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
int debug_only_variable = 1;
@@ -217,6 +378,14 @@ TEST_F(LoggingTest, DcheckStreamsAreLazy) {
#endif
}
+void DcheckEmptyFunction1() {
+ // Provide a body so that Release builds do not cause the compiler to
+ // fold DcheckEmptyFunction1 and DcheckEmptyFunction2 into a single
+ // function, which would break the Dcheck tests below.
+ LOG(INFO) << "DcheckEmptyFunction1";
+}
+void DcheckEmptyFunction2() {}
+
TEST_F(LoggingTest, Dcheck) {
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
// Release build.
@@ -258,6 +427,31 @@ TEST_F(LoggingTest, Dcheck) {
EXPECT_EQ(0, log_sink_call_count);
DCHECK_EQ(Animal::DOG, Animal::CAT);
EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+
+ // Test DCHECK on functions and function pointers.
+ log_sink_call_count = 0;
+ struct MemberFunctions {
+ void MemberFunction1() {
+ // See the comment in DcheckEmptyFunction1().
+ LOG(INFO) << "Do not merge with MemberFunction2.";
+ }
+ void MemberFunction2() {}
+ };
+ void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1;
+ void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2;
+ void (*fp1)() = DcheckEmptyFunction1;
+ void (*fp2)() = DcheckEmptyFunction2;
+ void (*fp3)() = DcheckEmptyFunction1;
+ DCHECK_EQ(fp1, fp3);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(fp1, fp2);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+ DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1);
+ EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
}
TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/base/mac/bind_objc_block.h b/base/mac/bind_objc_block.h
index 2434d444f5..9a481ed987 100644
--- a/base/mac/bind_objc_block.h
+++ b/base/mac/bind_objc_block.h
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
#include "base/mac/scoped_block.h"
// BindBlock builds a callback from an Objective-C block. Example usages:
@@ -27,6 +28,13 @@
// seven total arguments, and the bound block itself is used as one of these
// arguments, so functionally the templates are limited to binding blocks with
// zero through six arguments.
+//
+// For code compiled with ARC (automatic reference counting), use BindBlockArc.
+// This is because the method has a different implementation (to avoid over-
+// retaining the block) and needs a different name so as not to break the ODR
+// (one definition rule). Another subtle difference is that the implementation
+// calls a different version of the ScopedBlock constructor, so the linker must
+// not merge the two functions.
namespace base {
@@ -41,6 +49,8 @@ R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
} // namespace internal
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+
// Construct a callback from an objective-C block with up to six arguments (see
// note above).
template<typename R, typename... Args>
@@ -52,6 +62,18 @@ base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
block)));
}
+#else
+
+// Construct a callback from an objective-C block with up to six arguments (see
+// note above).
+template <typename R, typename... Args>
+base::Callback<R(Args...)> BindBlockArc(R (^block)(Args...)) {
+ return base::Bind(&base::internal::RunBlock<R, Args...>,
+ base::mac::ScopedBlock<R (^)(Args...)>(block));
+}
+
+#endif
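// Hedged usage sketch (in an ARC translation unit):
//   base::Callback<int(int)> cb =
//       base::BindBlockArc(^(int value) { return value + 1; });
//   int result = cb.Run(41);  // result == 42; the block is copied once,
//                             // not over-retained.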
+
} // namespace base
#endif // BASE_MAC_BIND_OBJC_BLOCK_H_
diff --git a/base/mac/bundle_locations.h b/base/mac/bundle_locations.h
index 276290b0e6..5cc44ba966 100644
--- a/base/mac/bundle_locations.h
+++ b/base/mac/bundle_locations.h
@@ -12,7 +12,6 @@
#import <Foundation/Foundation.h>
#else // __OBJC__
class NSBundle;
-class NSString;
#endif // __OBJC__
namespace base {
diff --git a/base/mac/foundation_util.h b/base/mac/foundation_util.h
index ee23a17fb1..69b61280c3 100644
--- a/base/mac/foundation_util.h
+++ b/base/mac/foundation_util.h
@@ -55,6 +55,12 @@ typedef unsigned int NSSearchPathDomainMask;
typedef struct OpaqueSecTrustRef* SecACLRef;
typedef struct OpaqueSecTrustedApplicationRef* SecTrustedApplicationRef;
+#if defined(OS_IOS)
+typedef struct CF_BRIDGED_TYPE(id) __SecPolicy* SecPolicyRef;
+#else
+typedef struct OpaqueSecPolicyRef* SecPolicyRef;
+#endif
+
namespace base {
class FilePath;
@@ -141,6 +147,8 @@ TYPE_NAME_FOR_CF_TYPE_DECL(CGColor);
TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
+TYPE_NAME_FOR_CF_TYPE_DECL(SecPolicy);
+
#undef TYPE_NAME_FOR_CF_TYPE_DECL
// Retain/release calls for memory management in C++.
@@ -301,6 +309,7 @@ CF_CAST_DECL(CTFontDescriptor);
CF_CAST_DECL(CTRun);
CF_CAST_DECL(SecACL);
+CF_CAST_DECL(SecPolicy);
CF_CAST_DECL(SecTrustedApplication);
#undef CF_CAST_DECL
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index 4f6fa60afd..eb8284ee58 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -213,6 +213,10 @@ TYPE_NAME_FOR_CF_TYPE_DEFN(CGColor);
TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont);
TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun);
+#if !defined(OS_IOS)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecPolicy);
+#endif
+
#undef TYPE_NAME_FOR_CF_TYPE_DEFN
void NSObjectRetain(void* obj) {
@@ -408,6 +412,7 @@ CFCastStrict<CTFontRef>(const CFTypeRef& cf_val) {
#if !defined(OS_IOS)
CF_CAST_DEFN(SecACL);
+CF_CAST_DEFN(SecPolicy);
CF_CAST_DEFN(SecTrustedApplication);
#endif
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index 84948f7ce8..67d1880849 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -5,11 +5,11 @@
#ifndef BASE_MAC_MAC_UTIL_H_
#define BASE_MAC_MAC_UTIL_H_
-#include <AvailabilityMacros.h>
-#include <Carbon/Carbon.h>
#include <stdint.h>
#include <string>
+#import <CoreGraphics/CoreGraphics.h>
+
#include "base/base_export.h"
namespace base {
@@ -31,9 +31,6 @@ enum FullScreenMode {
kFullScreenModeNormal = 10,
};
-BASE_EXPORT std::string PathFromFSRef(const FSRef& ref);
-BASE_EXPORT bool FSRefFromPath(const std::string& path, FSRef* ref);
-
// Returns an sRGB color space. The return value is a static value; do not
// release it!
BASE_EXPORT CGColorSpaceRef GetSRGBColorSpace();
@@ -66,11 +63,6 @@ BASE_EXPORT void ReleaseFullScreen(FullScreenMode mode);
BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
FullScreenMode to_mode);
-// Returns true if this process is in the foreground, meaning that it's the
-// frontmost process, the one whose menu bar is shown at the top of the main
-// display.
-BASE_EXPORT bool AmIForeground();
-
// Excludes the file given by |file_path| from being backed up by Time Machine.
BASE_EXPORT bool SetFileBackupExclusion(const FilePath& file_path);
@@ -108,85 +100,63 @@ BASE_EXPORT bool WasLaunchedAsHiddenLoginItem();
// an error, or true otherwise.
BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
-// Run-time OS version checks. Use these instead of
-// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "OrEarlier" and
-// "OrLater" variants to those that check for a specific version, unless you
-// know for sure that you need to check for a specific version.
-
-// Mavericks is OS X 10.9, Darwin 13.
-BASE_EXPORT bool IsOSMavericks();
+namespace internal {
-// Yosemite is OS X 10.10, Darwin 14.
-BASE_EXPORT bool IsOSYosemite();
-BASE_EXPORT bool IsOSYosemiteOrEarlier();
-BASE_EXPORT bool IsOSYosemiteOrLater();
-
-// El Capitan is OS X 10.11, Darwin 15.
-BASE_EXPORT bool IsOSElCapitan();
-BASE_EXPORT bool IsOSElCapitanOrEarlier();
-BASE_EXPORT bool IsOSElCapitanOrLater();
-
-// Sierra is macOS 10.12, Darwin 16.
-BASE_EXPORT bool IsOSSierra();
-BASE_EXPORT bool IsOSSierraOrLater();
-
-// This should be infrequently used. It only makes sense to use this to avoid
-// codepaths that are very likely to break on future (unreleased, untested,
-// unborn) OS releases, or to log when the OS is newer than any known version.
-BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
-
-// Inline functions that are redundant due to version ranges being mutually-
-// exclusive.
-inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
-inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
-
-// When the deployment target is set, the code produced cannot run on earlier
-// OS releases. That enables some of the IsOS* family to be implemented as
-// constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
-// contains the value of the deployment target.
-
-#if defined(MAC_OS_X_VERSION_10_9) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_9
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9
-inline bool IsOSMavericks() { return false; }
-#endif
+// Returns the system's Mac OS X minor version. This is the |y| value
+// in 10.y or 10.y.z.
+BASE_EXPORT int MacOSXMinorVersion();
-#if defined(MAC_OS_X_VERSION_10_10) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_10
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_10
-inline bool IsOSYosemiteOrLater() { return true; }
-#endif
+} // namespace internal
-#if defined(MAC_OS_X_VERSION_10_10) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_10
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_10
-inline bool IsOSYosemite() { return false; }
-#endif
+// Run-time OS version checks. Use these instead of
+// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
+// "AtMost" variants to those that check for a specific version, unless you
+// know for sure that you need to check for a specific version.
-#if defined(MAC_OS_X_VERSION_10_11) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_11
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_11
-inline bool IsOSElCapitanOrLater() { return true; }
+#define DEFINE_IS_OS_FUNCS(V, TEST_DEPLOYMENT_TARGET) \
+ inline bool IsOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>, V, false) \
+ return internal::MacOSXMinorVersion() == V; \
+ } \
+ inline bool IsAtLeastOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>=, V, true) \
+ return internal::MacOSXMinorVersion() >= V; \
+ } \
+ inline bool IsAtMostOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>, V, false) \
+ return internal::MacOSXMinorVersion() <= V; \
+ }
+
+#define TEST_DEPLOYMENT_TARGET(OP, V, RET) \
+ if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_X_VERSION_10_##V) \
+ return RET;
+#define IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
+
+DEFINE_IS_OS_FUNCS(9, TEST_DEPLOYMENT_TARGET)
+DEFINE_IS_OS_FUNCS(10, TEST_DEPLOYMENT_TARGET)
+
+#ifdef MAC_OS_X_VERSION_10_11
+DEFINE_IS_OS_FUNCS(11, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(11, IGNORE_DEPLOYMENT_TARGET)
#endif
-#if defined(MAC_OS_X_VERSION_10_11) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
-inline bool IsOSElCapitan() { return false; }
+#ifdef MAC_OS_X_VERSION_10_12
+DEFINE_IS_OS_FUNCS(12, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(12, IGNORE_DEPLOYMENT_TARGET)
#endif
-#if defined(MAC_OS_X_VERSION_10_12) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
-inline bool IsOSSierraOrLater() { return true; }
-#endif
+#undef IGNORE_DEPLOYMENT_TARGET
+#undef TEST_DEPLOYMENT_TARGET
+#undef DEFINE_IS_OS_FUNCS
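// Illustrative expansion (hedged) of DEFINE_IS_OS_FUNCS(12,
// TEST_DEPLOYMENT_TARGET) after the preprocessor runs:
//   inline bool IsOS10_12() {
//     if (MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12)
//       return false;
//     return internal::MacOSXMinorVersion() == 12;
//   }
//   inline bool IsAtLeastOS10_12() {
//     if (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12)
//       return true;
//     return internal::MacOSXMinorVersion() >= 12;
//   }
// With a new enough deployment target the runtime check folds to a constant.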
-#if defined(MAC_OS_X_VERSION_10_12) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
-inline bool IsOSSierra() { return false; }
-inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
-#endif
+// This should be infrequently used. It only makes sense to use this to avoid
+// codepaths that are very likely to break on future (unreleased, untested,
+// unborn) OS releases, or to log when the OS is newer than any known version.
+inline bool IsOSLaterThan10_12_DontCallThis() {
+ return !IsAtMostOS10_12();
+}
// Retrieve the system's model identifier string from the IOKit registry:
// for example, "MacPro4,1", "MacBookPro6,1". Returns empty string upon
diff --git a/base/mac/mach_port_broker.mm b/base/mac/mach_port_broker.mm
index bd47017f15..6d9fec5ab6 100644
--- a/base/mac/mach_port_broker.mm
+++ b/base/mac/mach_port_broker.mm
@@ -154,12 +154,7 @@ void MachPortBroker::HandleRequest() {
// Use the kernel audit information to make sure this message is from
// a task that this process spawned. The kernel audit token contains the
// unspoofable pid of the task that sent the message.
- //
- // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
- pid_t child_pid;
- audit_token_to_au32(msg.trailer.msgh_audit,
- NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
-
+ pid_t child_pid = audit_token_to_pid(msg.trailer.msgh_audit);
mach_port_t child_task_port = msg.child_task_port.name;
// Take the lock and update the broker information.
diff --git a/base/mac/scoped_authorizationref.h b/base/mac/scoped_authorizationref.h
index 03cde86140..b83f8dfb35 100644
--- a/base/mac/scoped_authorizationref.h
+++ b/base/mac/scoped_authorizationref.h
@@ -11,7 +11,7 @@
#include "base/macros.h"
// ScopedAuthorizationRef maintains ownership of an AuthorizationRef. It is
-// patterned after the scoped_ptr interface.
+// patterned after the unique_ptr interface.
namespace base {
namespace mac {
diff --git a/base/mac/scoped_block.h b/base/mac/scoped_block.h
index 8199677f15..10ab4b4e88 100644
--- a/base/mac/scoped_block.h
+++ b/base/mac/scoped_block.h
@@ -36,9 +36,33 @@ struct ScopedBlockTraits {
// ScopedBlock<> is patterned after ScopedCFTypeRef<>, but uses Block_copy() and
// Block_release() instead of CFRetain() and CFRelease().
-
template <typename B>
-using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
+class ScopedBlock : public ScopedTypeRef<B, internal::ScopedBlockTraits<B>> {
+ public:
+ using Traits = internal::ScopedBlockTraits<B>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit ScopedBlock(
+ B block = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : ScopedTypeRef<B, Traits>(block, policy) {}
+#else
+ explicit ScopedBlock(B block = Traits::InvalidValue())
+ : ScopedTypeRef<B, Traits>(block, base::scoped_policy::RETAIN) {}
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(B block = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ ScopedTypeRef<B, Traits>::reset(block, policy);
+ }
+#else
+ void reset(B block = Traits::InvalidValue()) {
+ ScopedTypeRef<B, Traits>::reset(block, base::scoped_policy::RETAIN);
+ }
+#endif
+};
} // namespace mac
} // namespace base
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
index cc54aa0ca8..ecd8e78f9d 100644
--- a/base/mac/scoped_nsobject.h
+++ b/base/mac/scoped_nsobject.h
@@ -102,7 +102,7 @@ class scoped_nsprotocol
: ScopedTypeRef<NST, Traits>(that_as_subclass) {}
scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
- : ScopedTypeRef<NST, Traits>(that) {}
+ : ScopedTypeRef<NST, Traits>(std::move(that)) {}
scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
ScopedTypeRef<NST, Traits>::operator=(that);
@@ -166,7 +166,7 @@ class scoped_nsobject : public scoped_nsprotocol<NST*> {
: scoped_nsprotocol<NST*>(that_as_subclass) {}
scoped_nsobject(scoped_nsobject<NST>&& that)
- : scoped_nsprotocol<NST*>(that) {}
+ : scoped_nsprotocol<NST*>(std::move(that)) {}
scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
scoped_nsprotocol<NST*>::operator=(that);
@@ -214,7 +214,8 @@ class scoped_nsobject<id> : public scoped_nsprotocol<id> {
explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
: scoped_nsprotocol<id>(that_as_subclass) {}
- scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+ scoped_nsobject(scoped_nsobject<id>&& that)
+ : scoped_nsprotocol<id>(std::move(that)) {}
scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
scoped_nsprotocol<id>::operator=(that);
diff --git a/base/mac/sdk_forward_declarations.h b/base/mac/sdk_forward_declarations.h
index e9a11f700c..306cd93678 100644
--- a/base/mac/sdk_forward_declarations.h
+++ b/base/mac/sdk_forward_declarations.h
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// This file contains forward declarations for items in later SDKs than the
-// default one with which Chromium is built (currently 10.6).
+// default one with which Chromium is built (currently 10.10).
// If you call any function from this header, be sure to check at runtime for
// respondsToSelector: before calling these functions (else your code will crash
// on older OS X versions that chrome still supports).
@@ -14,206 +14,18 @@
#import <AppKit/AppKit.h>
#import <CoreBluetooth/CoreBluetooth.h>
#import <CoreWLAN/CoreWLAN.h>
-#import <ImageCaptureCore/ImageCaptureCore.h>
#import <IOBluetooth/IOBluetooth.h>
+#import <ImageCaptureCore/ImageCaptureCore.h>
+#import <QuartzCore/QuartzCore.h>
#include <stdint.h>
#include "base/base_export.h"
// ----------------------------------------------------------------------------
-// Either define or forward declare classes only available in OSX 10.7+.
-// ----------------------------------------------------------------------------
-
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-@interface CWChannel : NSObject
-@end
-
-@interface CBPeripheral : NSObject
-@end
-
-@interface CBCentralManager : NSObject
-@end
-
-@interface CBUUID : NSObject
-@end
-
-#else
-
-@class CWChannel;
-@class CBPeripheral;
-@class CBCentralManager;
-@class CBUUID;
-
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
-
-@interface NSUUID : NSObject
-@end
-
-#else
-
-@class NSUUID;
-
-#endif // MAC_OS_X_VERSION_10_8
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
-
-// NSProgress is public API in 10.9, but a version of it exists and is usable
-// in 10.8.
-@interface NSProgress : NSObject
-@end
-
-@interface NSAppearance : NSObject
-@end
-
-#else
-
-@class NSProgress;
-@class NSAppearance;
-
-#endif // MAC_OS_X_VERSION_10_9
-
-// ----------------------------------------------------------------------------
// Define typedefs, enums, and protocols not available in the version of the
// OSX SDK being compiled against.
// ----------------------------------------------------------------------------
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-enum {
- NSEventPhaseNone = 0, // event not associated with a phase.
- NSEventPhaseBegan = 0x1 << 0,
- NSEventPhaseStationary = 0x1 << 1,
- NSEventPhaseChanged = 0x1 << 2,
- NSEventPhaseEnded = 0x1 << 3,
- NSEventPhaseCancelled = 0x1 << 4
-};
-typedef NSUInteger NSEventPhase;
-
-enum {
- NSFullScreenWindowMask = 1 << 14,
-};
-
-enum {
- NSApplicationPresentationFullScreen = 1 << 10,
-};
-
-enum {
- NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7,
- NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8,
-};
-
-enum {
- NSEventSwipeTrackingLockDirection = 0x1 << 0,
- NSEventSwipeTrackingClampGestureAmount = 0x1 << 1,
-};
-typedef NSUInteger NSEventSwipeTrackingOptions;
-
-enum {
- NSWindowAnimationBehaviorDefault = 0,
- NSWindowAnimationBehaviorNone = 2,
- NSWindowAnimationBehaviorDocumentWindow = 3,
- NSWindowAnimationBehaviorUtilityWindow = 4,
- NSWindowAnimationBehaviorAlertPanel = 5
-};
-typedef NSInteger NSWindowAnimationBehavior;
-
-enum {
- NSWindowDocumentVersionsButton = 6,
- NSWindowFullScreenButton,
-};
-typedef NSUInteger NSWindowButton;
-
-enum CWChannelBand {
- kCWChannelBandUnknown = 0,
- kCWChannelBand2GHz = 1,
- kCWChannelBand5GHz = 2,
-};
-
-enum {
- kCWSecurityNone = 0,
- kCWSecurityWEP = 1,
- kCWSecurityWPAPersonal = 2,
- kCWSecurityWPAPersonalMixed = 3,
- kCWSecurityWPA2Personal = 4,
- kCWSecurityPersonal = 5,
- kCWSecurityDynamicWEP = 6,
- kCWSecurityWPAEnterprise = 7,
- kCWSecurityWPAEnterpriseMixed = 8,
- kCWSecurityWPA2Enterprise = 9,
- kCWSecurityEnterprise = 10,
- kCWSecurityUnknown = NSIntegerMax,
-};
-
-typedef NSInteger CWSecurity;
-
-enum {
- kBluetoothFeatureLESupportedController = (1 << 6L),
-};
-
-@protocol IOBluetoothDeviceInquiryDelegate
-- (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
-- (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
- device:(IOBluetoothDevice*)device;
-- (void)deviceInquiryComplete:(IOBluetoothDeviceInquiry*)sender
- error:(IOReturn)error
- aborted:(BOOL)aborted;
-@end
-
-enum {
- CBPeripheralStateDisconnected = 0,
- CBPeripheralStateConnecting,
- CBPeripheralStateConnected,
-};
-typedef NSInteger CBPeripheralState;
-
-enum {
- CBCentralManagerStateUnknown = 0,
- CBCentralManagerStateResetting,
- CBCentralManagerStateUnsupported,
- CBCentralManagerStateUnauthorized,
- CBCentralManagerStatePoweredOff,
- CBCentralManagerStatePoweredOn,
-};
-typedef NSInteger CBCentralManagerState;
-
-@protocol CBCentralManagerDelegate;
-
-@protocol CBCentralManagerDelegate<NSObject>
-- (void)centralManagerDidUpdateState:(CBCentralManager*)central;
-- (void)centralManager:(CBCentralManager*)central
- didDiscoverPeripheral:(CBPeripheral*)peripheral
- advertisementData:(NSDictionary*)advertisementData
- RSSI:(NSNumber*)RSSI;
-@end
-
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
-
-enum { NSEventPhaseMayBegin = 0x1 << 5 };
-
-#endif // MAC_OS_X_VERSION_10_8
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
-
-enum {
- NSWindowOcclusionStateVisible = 1UL << 1,
-};
-typedef NSUInteger NSWindowOcclusionState;
-
-enum { NSWorkspaceLaunchWithErrorPresentation = 0x00000040 };
-
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_11) || \
MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
@@ -232,7 +44,29 @@ typedef NSInteger NSPressureBehavior;
- (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
@end
-#endif // MAC_OS_X_VERSION_10_11
+enum {
+ NSSpringLoadingHighlightNone = 0,
+ NSSpringLoadingHighlightStandard,
+ NSSpringLoadingHighlightEmphasized
+};
+typedef NSUInteger NSSpringLoadingHighlight;
+
+#endif // MAC_OS_X_VERSION_10_11
+
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
+
+// The protocol was formalized by the 10.12 SDK, but it was informally used
+// before.
+@protocol CAAnimationDelegate
+- (void)animationDidStart:(CAAnimation*)animation;
+- (void)animationDidStop:(CAAnimation*)animation finished:(BOOL)finished;
+@end
+
+@protocol CALayerDelegate
+@end
+
+#endif // MAC_OS_X_VERSION_10_12
// ----------------------------------------------------------------------------
// Define NSStrings only available in newer versions of the OSX SDK to force
@@ -240,27 +74,9 @@ typedef NSInteger NSPressureBehavior;
// ----------------------------------------------------------------------------
extern "C" {
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-BASE_EXPORT extern NSString* const NSWindowWillEnterFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowWillExitFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowDidEnterFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
-BASE_EXPORT extern NSString* const
- NSWindowDidChangeBackingPropertiesNotification;
-BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
-BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
-BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
-BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_10) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+BASE_EXPORT extern NSString* const CIDetectorTypeQRCode;
BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
@@ -268,250 +84,158 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
} // extern "C"
// ----------------------------------------------------------------------------
-// If compiling against an older version of the OSX SDK, declare functions that
-// are available in newer versions of the OSX SDK. If compiling against a newer
-// version of the OSX SDK, redeclare those same functions to suppress
-// -Wpartial-availability warnings.
+// If compiling against an older version of the OSX SDK, declare classes and
+// functions that are available in newer versions of the OSX SDK. If compiling
+// against a newer version of the OSX SDK, redeclare those same classes and
+// functions to suppress -Wpartial-availability warnings.
// ----------------------------------------------------------------------------
-// Once Chrome no longer supports OSX 10.6, everything within this preprocessor
+// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-
-@interface NSEvent (LionSDK)
-+ (BOOL)isSwipeTrackingFromScrollEventsEnabled;
-- (NSEventPhase)momentumPhase;
-- (NSEventPhase)phase;
-- (BOOL)hasPreciseScrollingDeltas;
-- (CGFloat)scrollingDeltaX;
-- (CGFloat)scrollingDeltaY;
-- (void)trackSwipeEventWithOptions:(NSEventSwipeTrackingOptions)options
- dampenAmountThresholdMin:(CGFloat)minDampenThreshold
- max:(CGFloat)maxDampenThreshold
- usingHandler:(void (^)(CGFloat gestureAmount,
- NSEventPhase phase,
- BOOL isComplete,
- BOOL* stop))trackingHandler;
-- (BOOL)isDirectionInvertedFromDevice;
-@end
-
-@interface NSApplication (LionSDK)
-- (void)disableRelaunchOnLogin;
-@end
-
-@interface CALayer (LionSDK)
-- (CGFloat)contentsScale;
-- (void)setContentsScale:(CGFloat)contentsScale;
-@end
-
-@interface NSScreen (LionSDK)
-- (CGFloat)backingScaleFactor;
-- (NSRect)convertRectToBacking:(NSRect)aRect;
-@end
-
-@interface NSWindow (LionSDK)
-- (CGFloat)backingScaleFactor;
-- (NSWindowAnimationBehavior)animationBehavior;
-- (void)setAnimationBehavior:(NSWindowAnimationBehavior)newAnimationBehavior;
-- (void)toggleFullScreen:(id)sender;
-- (void)setRestorable:(BOOL)flag;
-- (NSRect)convertRectFromScreen:(NSRect)aRect;
-- (NSRect)convertRectToScreen:(NSRect)aRect;
-@end
-
-@interface NSCursor (LionSDKDeclarations)
-+ (NSCursor*)IBeamCursorForVerticalLayout;
-@end
-
-@interface NSAnimationContext (LionSDK)
-+ (void)runAnimationGroup:(void (^)(NSAnimationContext* context))changes
- completionHandler:(void (^)(void))completionHandler;
-@property(copy) void (^completionHandler)(void);
-@end
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
-@interface NSView (LionSDK)
-- (NSSize)convertSizeFromBacking:(NSSize)size;
-- (void)setWantsBestResolutionOpenGLSurface:(BOOL)flag;
-- (NSDraggingSession*)beginDraggingSessionWithItems:(NSArray*)items
- event:(NSEvent*)event
- source:
- (id<NSDraggingSource>)source;
+@interface NSUserActivity (YosemiteSDK)
+@property(readonly, copy) NSString* activityType;
+@property(copy) NSDictionary* userInfo;
+@property(copy) NSURL* webpageURL;
+- (instancetype)initWithActivityType:(NSString*)activityType;
+- (void)becomeCurrent;
+- (void)invalidate;
@end
-@interface NSObject (ICCameraDeviceDelegateLionSDK)
-- (void)deviceDidBecomeReadyWithCompleteContentCatalog:(ICDevice*)device;
-- (void)didDownloadFile:(ICCameraFile*)file
- error:(NSError*)error
- options:(NSDictionary*)options
- contextInfo:(void*)contextInfo;
+@interface CBUUID (YosemiteSDK)
+- (NSString*)UUIDString;
@end
-@interface CWInterface (LionSDK)
-- (BOOL)associateToNetwork:(CWNetwork*)network
- password:(NSString*)password
- error:(NSError**)error;
-- (NSSet*)scanForNetworksWithName:(NSString*)networkName error:(NSError**)error;
+@interface NSViewController (YosemiteSDK)
+- (void)viewDidLoad;
@end
-@interface CWChannel (LionSDK)
-@property(readonly) CWChannelBand channelBand;
+@interface NSWindow (YosemiteSDK)
+- (void)setTitlebarAppearsTransparent:(BOOL)flag;
@end
-@interface CWNetwork (LionSDK)
-@property(readonly) CWChannel* wlanChannel;
-@property(readonly) NSInteger rssiValue;
-- (BOOL)supportsSecurity:(CWSecurity)security;
+@interface NSProcessInfo (YosemiteSDK)
+@property(readonly) NSOperatingSystemVersion operatingSystemVersion;
@end
-@interface IOBluetoothHostController (LionSDK)
-- (NSString*)nameAsString;
-- (BluetoothHCIPowerState)powerState;
+@interface NSLayoutConstraint (YosemiteSDK)
+@property(getter=isActive) BOOL active;
++ (void)activateConstraints:(NSArray*)constraints;
@end
-@interface IOBluetoothL2CAPChannel (LionSDK)
-@property(readonly) BluetoothL2CAPMTU outgoingMTU;
+@interface NSVisualEffectView (YosemiteSDK)
+- (void)setState:(NSVisualEffectState)state;
@end
-@interface IOBluetoothDevice (LionSDK)
-- (NSString*)addressString;
-- (unsigned int)classOfDevice;
-- (BluetoothConnectionHandle)connectionHandle;
-- (BluetoothHCIRSSIValue)rawRSSI;
-- (NSArray*)services;
-- (IOReturn)performSDPQuery:(id)target uuids:(NSArray*)uuids;
-@end
+@class NSVisualEffectView;
-@interface CBPeripheral (LionSDK)
-@property(readonly, nonatomic) CFUUIDRef UUID;
-@property(retain, readonly) NSString* name;
-@property(readonly) BOOL isConnected;
+@interface CIQRCodeFeature (YosemiteSDK)
+@property(readonly) CGRect bounds;
+@property(readonly) CGPoint topLeft;
+@property(readonly) CGPoint topRight;
+@property(readonly) CGPoint bottomLeft;
+@property(readonly) CGPoint bottomRight;
+@property(readonly, copy) NSString* messageString;
@end
-@interface CBCentralManager (LionSDK)
-@property(readonly) CBCentralManagerState state;
-- (id)initWithDelegate:(id<CBCentralManagerDelegate>)delegate
- queue:(dispatch_queue_t)queue;
-- (void)scanForPeripheralsWithServices:(NSArray*)serviceUUIDs
- options:(NSDictionary*)options;
-- (void)stopScan;
-@end
+@class CIQRCodeFeature;
-@interface CBUUID (LionSDK)
-@property(nonatomic, readonly) NSData* data;
-+ (CBUUID*)UUIDWithString:(NSString*)theString;
+@interface NSView (YosemiteSDK)
+- (BOOL)isAccessibilitySelectorAllowed:(SEL)selector;
@end
-BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
- id object,
- NSString* notification,
- NSDictionary* user_info);
-
-#endif // MAC_OS_X_VERSION_10_7
-
-// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8
+#endif // MAC_OS_X_VERSION_10_10
-@interface NSColor (MountainLionSDK)
-- (CGColorRef)CGColor;
-@end
+// Once Chrome no longer supports OSX 10.10.2, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10_3) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
-@interface NSUUID (MountainLionSDK)
-- (NSString*)UUIDString;
+@interface NSEvent (Yosemite_3_SDK)
+@property(readonly) NSInteger stage;
@end
-@interface NSControl (MountainLionSDK)
-@property BOOL allowsExpansionToolTips;
+@interface NSView (Yosemite_3_SDK)
+- (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration;
@end
-#endif // MAC_OS_X_VERSION_10_8
-
-// Once Chrome no longer supports OSX 10.8, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-
-@interface NSProgress (MavericksSDK)
-
-- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
- userInfo:(NSDictionary*)userInfoOrNil;
-@property(copy) NSString* kind;
-
-@property int64_t totalUnitCount;
-@property int64_t completedUnitCount;
-
-@property(getter=isCancellable) BOOL cancellable;
-@property(getter=isPausable) BOOL pausable;
-@property(readonly, getter=isCancelled) BOOL cancelled;
-@property(readonly, getter=isPaused) BOOL paused;
-@property(copy) void (^cancellationHandler)(void);
-@property(copy) void (^pausingHandler)(void);
-- (void)cancel;
-- (void)pause;
-
-- (void)setUserInfoObject:(id)objectOrNil forKey:(NSString*)key;
-- (NSDictionary*)userInfo;
+#endif // MAC_OS_X_VERSION_10_10_3
-@property(readonly, getter=isIndeterminate) BOOL indeterminate;
-@property(readonly) double fractionCompleted;
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
-- (void)publish;
-- (void)unpublish;
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+BASE_EXPORT extern NSString* const CIDetectorTypeText;
+#endif // MAC_OS_X_VERSION_10_11
+} // extern "C"
-@end
+// Once Chrome no longer supports OSX 10.10, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
-@interface NSScreen (MavericksSDK)
-+ (BOOL)screensHaveSeparateSpaces;
-@end
+@class NSLayoutDimension;
+@class NSLayoutXAxisAnchor;
+@class NSLayoutYAxisAnchor;
-@interface NSView (MavericksSDK)
-- (void)setCanDrawSubviewsIntoLayer:(BOOL)flag;
-- (void)setAppearance:(NSAppearance*)appearance;
-- (NSAppearance*)effectiveAppearance;
+@interface NSObject (ElCapitanSDK)
+- (NSLayoutConstraint*)constraintEqualToConstant:(CGFloat)c;
@end
-@interface NSWindow (MavericksSDK)
-- (NSWindowOcclusionState)occlusionState;
+@interface NSView (ElCapitanSDK)
+@property(readonly, strong) NSLayoutXAxisAnchor* leftAnchor;
+@property(readonly, strong) NSLayoutXAxisAnchor* rightAnchor;
+@property(readonly, strong) NSLayoutYAxisAnchor* bottomAnchor;
+@property(readonly, strong) NSLayoutDimension* widthAnchor;
@end
-@interface NSAppearance (MavericksSDK)
-+ (id<NSObject>)appearanceNamed:(NSString*)name;
+@interface NSWindow (ElCapitanSDK)
+- (void)performWindowDragWithEvent:(NSEvent*)event;
@end
-@interface CBPeripheral (MavericksSDK)
-@property(readonly, nonatomic) NSUUID* identifier;
+@interface CIRectangleFeature (ElCapitanSDK)
+@property(readonly) CGRect bounds;
@end
-#endif // MAC_OS_X_VERSION_10_9
+@class CIRectangleFeature;
-// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_10) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+#endif // MAC_OS_X_VERSION_10_11
-@interface CBUUID (YosemiteSDK)
-- (NSString*)UUIDString;
-@end
-
-@interface NSViewController (YosemiteSDK)
-- (void)viewDidLoad;
-@end
+// Once Chrome no longer supports OSX 10.11, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
-@interface NSWindow (YosemiteSDK)
-- (void)setTitlebarAppearsTransparent:(BOOL)flag;
+@interface NSWindow (SierraSDK)
+@property(class) BOOL allowsAutomaticWindowTabbing;
@end
-#endif // MAC_OS_X_VERSION_10_10
+#endif // MAC_OS_X_VERSION_10_12
-// Once Chrome no longer supports OSX 10.10.2, everything within this
+// Once Chrome no longer supports OSX 10.12.0, everything within this
// preprocessor block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_10_3) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
-
-@interface NSEvent (YosemiteSDK)
-@property(readonly) NSInteger stage;
+#if !defined(MAC_OS_X_VERSION_10_12_1) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12_1
+
+@interface NSButton (SierraPointOneSDK)
+@property(copy) NSColor* bezelColor;
+@property BOOL imageHugsTitle;
++ (instancetype)buttonWithTitle:(NSString*)title
+ target:(id)target
+ action:(SEL)action;
++ (instancetype)buttonWithImage:(NSImage*)image
+ target:(id)target
+ action:(SEL)action;
++ (instancetype)buttonWithTitle:(NSString*)title
+ image:(NSImage*)image
+ target:(id)target
+ action:(SEL)action;
@end
@interface NSView (YosemiteSDK)
@@ -526,8 +250,6 @@ BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
// declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
// declare the symbol.
// ----------------------------------------------------------------------------
-#if defined(MAC_OS_X_VERSION_10_9) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9
BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
-#endif
+
#endif // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/base/mac/sdk_forward_declarations.mm b/base/mac/sdk_forward_declarations.mm
index 4e1d7ec670..c624daedd8 100644
--- a/base/mac/sdk_forward_declarations.mm
+++ b/base/mac/sdk_forward_declarations.mm
@@ -4,43 +4,17 @@
#include "base/mac/sdk_forward_declarations.h"
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-NSString* const NSWindowWillEnterFullScreenNotification =
- @"NSWindowWillEnterFullScreenNotification";
-
-NSString* const NSWindowWillExitFullScreenNotification =
- @"NSWindowWillExitFullScreenNotification";
-
-NSString* const NSWindowDidEnterFullScreenNotification =
- @"NSWindowDidEnterFullScreenNotification";
-
-NSString* const NSWindowDidExitFullScreenNotification =
- @"NSWindowDidExitFullScreenNotification";
-
-NSString* const NSWindowDidChangeBackingPropertiesNotification =
- @"NSWindowDidChangeBackingPropertiesNotification";
-
-NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
-
-NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-NSString* const NSWindowDidChangeOcclusionStateNotification =
- @"NSWindowDidChangeOcclusionStateNotification";
-
-NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
- @"kCBAdvDataOverflowServiceUUIDs";
-
-NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_10) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+NSString* const CIDetectorTypeQRCode = @"CIDetectorTypeQRCode";
+
NSString* const NSUserActivityTypeBrowsingWeb =
@"NSUserActivityTypeBrowsingWeb";
NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
#endif // MAC_OS_X_VERSION_10_10
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+NSString* const CIDetectorTypeText = @"CIDetectorTypeText";
+#endif // MAC_OS_X_VERSION_10_11
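Taken together, the two halves above show the full pattern: the header redeclares a constant that only ships in newer SDKs, and the .mm file defines it so the NSString is statically linked into the binary rather than weakly imported from the framework. A condensed sketch of the pattern, using the CIDetectorTypeText constant this patch adds:

    // sdk_forward_declarations.h: declaration visible on every SDK.
    extern "C" {
    #if !defined(MAC_OS_X_VERSION_10_11) || \
        MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
    BASE_EXPORT extern NSString* const CIDetectorTypeText;
    #endif
    }  // extern "C"

    // sdk_forward_declarations.mm: the definition the binary carries.
    #if !defined(MAC_OS_X_VERSION_10_11) || \
        MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
    NSString* const CIDetectorTypeText = @"CIDetectorTypeText";
    #endif

With both pieces in place, code built against an older SDK can reference CIDetectorTypeText on any OS version without a weak-link check.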
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index abe0cf3ff5..892c50ef70 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -44,10 +44,6 @@ TEST(AlignedMemoryTest, StackAlignment) {
EXPECT_ALIGNED(raw8.void_data(), 8);
EXPECT_ALIGNED(raw16.void_data(), 16);
-
- // TODO(ios): __attribute__((aligned(X))) with X >= 128 does not works on
- // the stack when building for arm64 on iOS, http://crbug.com/349003
-#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
EXPECT_ALIGNED(raw128.void_data(), 128);
// NaCl x86-64 compiler emits non-validating instructions for >128
@@ -61,14 +57,10 @@ TEST(AlignedMemoryTest, StackAlignment) {
EXPECT_EQ(256u, ALIGNOF(raw256));
EXPECT_ALIGNED(raw256.void_data(), 256);
- // TODO(ios): This test hits an armv7 bug in clang. crbug.com/138066
-#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
AlignedMemory<8, 4096> raw4096;
EXPECT_EQ(4096u, ALIGNOF(raw4096));
EXPECT_ALIGNED(raw4096.void_data(), 4096);
-#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
#endif // !(defined(OS_NACL) && defined(ARCH_CPU_X86_64))
-#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
}
TEST(AlignedMemoryTest, DynamicAllocation) {
diff --git a/base/memory/linked_ptr.h b/base/memory/linked_ptr.h
index 649dc10db7..68512864b2 100644
--- a/base/memory/linked_ptr.h
+++ b/base/memory/linked_ptr.h
@@ -69,7 +69,7 @@ class linked_ptr_internal {
mutable linked_ptr_internal const* next_;
};
-// TODO(http://crbug.com/556939): DEPRECATED: Use scoped_ptr instead (now that
+// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
// we have support for moveable types inside STL containers).
template <typename T>
class linked_ptr {
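The reworded TODO tracks the migration happening across this patch: with move-only types supported inside STL containers, containers of linked_ptr become containers of std::unique_ptr. A minimal sketch of the replacement (Widget is a hypothetical element type; Chromium code of this era would typically spell std::make_unique as base::MakeUnique from base/memory/ptr_util.h):

    #include <memory>
    #include <utility>
    #include <vector>

    struct Widget { int id = 0; };  // hypothetical

    std::vector<std::unique_ptr<Widget>> widgets;  // was std::vector<linked_ptr<Widget>>
    widgets.push_back(std::make_unique<Widget>());
    // Ownership moves out explicitly instead of being shared:
    std::unique_ptr<Widget> taken = std::move(widgets.back());
    widgets.pop_back();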
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index f5924d0fe7..46bbd7ad85 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -10,37 +10,32 @@ namespace base {
namespace subtle {
bool RefCountedThreadSafeBase::HasOneRef() const {
- return AtomicRefCountIsOne(
- &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
+ return AtomicRefCountIsOne(&ref_count_);
}
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
-#ifndef NDEBUG
- in_dtor_ = false;
-#endif
-}
+RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
"calling Release()";
#endif
}
void RefCountedThreadSafeBase::AddRef() const {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
#endif
AtomicRefCountInc(&ref_count_);
}
bool RefCountedThreadSafeBase::Release() const {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!AtomicRefCountIsZero(&ref_count_));
#endif
if (!AtomicRefCountDec(&ref_count_)) {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
in_dtor_ = true;
#endif
return true;
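Note that the switch from #ifndef NDEBUG to #if DCHECK_IS_ON() is not a pure rename: DCHECKs can be compiled into release builds (where NDEBUG is defined) via the dcheck_always_on build flag, so the two conditions can disagree. Guarding |in_dtor_| with the same macro as the DCHECKs that read it keeps the field defined exactly when it is used. The idiom, in a stripped-down sketch:

    #include "base/logging.h"  // DCHECK(), DCHECK_IS_ON()

    class Tracker {
     public:
      void AddRef() const {
    #if DCHECK_IS_ON()
        DCHECK(!in_dtor_);  // toggles together with the field below
    #endif
      }

     private:
    #if DCHECK_IS_ON()
      mutable bool in_dtor_ = false;  // compiled out when DCHECKs are off
    #endif
    };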
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index b026d9ab03..9dd09ad346 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -14,10 +14,8 @@
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
-#include "base/macros.h"
-#ifndef NDEBUG
#include "base/logging.h"
-#endif
+#include "base/macros.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
@@ -32,16 +30,16 @@ class BASE_EXPORT RefCountedBase {
protected:
RefCountedBase()
: ref_count_(0)
- #ifndef NDEBUG
- , in_dtor_(false)
- #endif
- {
+#if DCHECK_IS_ON()
+ , in_dtor_(false)
+#endif
+ {
}
~RefCountedBase() {
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
- #endif
+#endif
}
@@ -50,9 +48,9 @@ class BASE_EXPORT RefCountedBase {
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- #endif
+#endif
++ref_count_;
}
@@ -62,21 +60,21 @@ class BASE_EXPORT RefCountedBase {
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- #endif
+#endif
if (--ref_count_ == 0) {
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
in_dtor_ = true;
- #endif
+#endif
return true;
}
return false;
}
private:
- mutable int ref_count_;
-#ifndef NDEBUG
+ mutable size_t ref_count_;
+#if DCHECK_IS_ON()
mutable bool in_dtor_;
#endif
@@ -99,9 +97,9 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool Release() const;
private:
- mutable AtomicRefCount ref_count_;
-#ifndef NDEBUG
- mutable bool in_dtor_;
+ mutable AtomicRefCount ref_count_ = 0;
+#if DCHECK_IS_ON()
+ mutable bool in_dtor_ = false;
#endif
DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
@@ -126,7 +124,7 @@ class BASE_EXPORT RefCountedThreadSafeBase {
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
- RefCounted() {}
+ RefCounted() = default;
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -139,7 +137,7 @@ class RefCounted : public subtle::RefCountedBase {
}
protected:
- ~RefCounted() {}
+ ~RefCounted() = default;
private:
DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
@@ -176,7 +174,7 @@ struct DefaultRefCountedThreadSafeTraits {
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- RefCountedThreadSafe() {}
+ RefCountedThreadSafe() = default;
void AddRef() const {
subtle::RefCountedThreadSafeBase::AddRef();
@@ -189,7 +187,7 @@ class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
}
protected:
- ~RefCountedThreadSafe() {}
+ ~RefCountedThreadSafe() = default;
private:
friend struct DefaultRefCountedThreadSafeTraits<T>;
@@ -213,7 +211,7 @@ class RefCountedData
private:
friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
- ~RefCountedData() {}
+ ~RefCountedData() = default;
};
} // namespace base
@@ -226,6 +224,9 @@ class RefCountedData
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
+// private:
+// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
+// ~MyFoo(); // Destructor must be private/protected.
// };
//
// void some_function() {
@@ -237,7 +238,7 @@ class RefCountedData
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// ...
-// foo = NULL; // explicitly releases |foo|
+// foo = nullptr; // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
@@ -252,7 +253,7 @@ class RefCountedData
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
-// // now, |b| references the MyFoo object, and |a| references NULL.
+// // now, |b| references the MyFoo object, and |a| references nullptr.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
@@ -271,8 +272,7 @@ class scoped_refptr {
public:
typedef T element_type;
- scoped_refptr() : ptr_(NULL) {
- }
+ scoped_refptr() {}
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
@@ -314,12 +314,12 @@ class scoped_refptr {
T* get() const { return ptr_; }
T& operator*() const {
- assert(ptr_ != NULL);
+ assert(ptr_ != nullptr);
return *ptr_;
}
T* operator->() const {
- assert(ptr_ != NULL);
+ assert(ptr_ != nullptr);
return ptr_;
}
@@ -382,7 +382,7 @@ class scoped_refptr {
}
protected:
- T* ptr_;
+ T* ptr_ = nullptr;
private:
// Friend required for move constructors that set r.ptr_ to null.
@@ -397,11 +397,13 @@ class scoped_refptr {
static void Release(T* ptr);
};
+// static
template <typename T>
void scoped_refptr<T>::AddRef(T* ptr) {
ptr->AddRef();
}
+// static
template <typename T>
void scoped_refptr<T>::Release(T* ptr) {
ptr->Release();
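As the "Friend required for move constructors that set r.ptr_ to null" comment above says, moving a scoped_refptr hands the pointer over without any refcount traffic, while copying costs an AddRef/Release pair. A short sketch, reusing MyFoo from the usage comment:

    scoped_refptr<MyFoo> a = new MyFoo();   // refcount: 1
    scoped_refptr<MyFoo> b = a;             // copy: AddRef(), refcount: 2
    scoped_refptr<MyFoo> c = std::move(a);  // move: refcount still 2; |a| is null
    // On scope exit |b| and |c| each Release() once and the object is deleted.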
diff --git a/base/memory/ref_counted_delete_on_message_loop.h b/base/memory/ref_counted_delete_on_message_loop.h
deleted file mode 100644
index de194e8479..0000000000
--- a/base/memory/ref_counted_delete_on_message_loop.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
-#define BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
-
-namespace base {
-
-// RefCountedDeleteOnMessageLoop is similar to RefCountedThreadSafe, and ensures
-// that the object will be deleted on a specified message loop.
-//
-// Sample usage:
-// class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
-//
-// Foo(scoped_refptr<SingleThreadTaskRunner> loop)
-// : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
-// ...
-// private:
-// friend class RefCountedDeleteOnMessageLoop<Foo>;
-// friend class DeleteHelper<Foo>;
-//
-// ~Foo();
-// };
-
-// TODO(skyostil): Rename this to RefCountedDeleteOnTaskRunner.
-template <class T>
-class RefCountedDeleteOnMessageLoop : public subtle::RefCountedThreadSafeBase {
- public:
- // This constructor will accept a MessageLoopProxy object, but new code should
- // prefer a SingleThreadTaskRunner. A SingleThreadTaskRunner for the
- // MessageLoop on the current thread can be acquired by calling
- // MessageLoop::current()->task_runner().
- RefCountedDeleteOnMessageLoop(
- scoped_refptr<SingleThreadTaskRunner> task_runner)
- : task_runner_(std::move(task_runner)) {
- DCHECK(task_runner_);
- }
-
- void AddRef() const {
- subtle::RefCountedThreadSafeBase::AddRef();
- }
-
- void Release() const {
- if (subtle::RefCountedThreadSafeBase::Release())
- DestructOnMessageLoop();
- }
-
- protected:
- friend class DeleteHelper<RefCountedDeleteOnMessageLoop>;
- ~RefCountedDeleteOnMessageLoop() {}
-
- void DestructOnMessageLoop() const {
- const T* t = static_cast<const T*>(this);
- if (task_runner_->BelongsToCurrentThread())
- delete t;
- else
- task_runner_->DeleteSoon(FROM_HERE, t);
- }
-
- scoped_refptr<SingleThreadTaskRunner> task_runner_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnMessageLoop);
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
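The deleted class's TODO was overtaken by events: rather than being renamed to RefCountedDeleteOnTaskRunner, callers migrate to base::RefCountedDeleteOnSequence (base/memory/ref_counted_delete_on_sequence.h), which keeps the delete-on-the-right-thread behavior but is keyed to a SequencedTaskRunner. A hedged sketch of the replacement, assuming that header's API at this revision:

    class Foo : public base::RefCountedDeleteOnSequence<Foo> {
     public:
      explicit Foo(scoped_refptr<base::SequencedTaskRunner> task_runner)
          : base::RefCountedDeleteOnSequence<Foo>(std::move(task_runner)) {}

     private:
      friend class base::RefCountedDeleteOnSequence<Foo>;
      friend class base::DeleteHelper<Foo>;
      ~Foo() = default;  // runs on |task_runner|, or via DeleteSoon() if
                         // Release() happens on another thread
    };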
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 7c4e07af49..65c15d26ab 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -4,6 +4,8 @@
#include "base/memory/ref_counted.h"
+#include <utility>
+
#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,13 +158,34 @@ TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
}
TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
- scoped_refptr<base::OpaqueRefCounted> p = base::MakeOpaqueRefCounted();
- base::TestOpaqueRefCounted(p);
+ scoped_refptr<base::OpaqueRefCounted> initial = base::MakeOpaqueRefCounted();
+ base::TestOpaqueRefCounted(initial);
+
+ scoped_refptr<base::OpaqueRefCounted> assigned;
+ assigned = initial;
+
+ scoped_refptr<base::OpaqueRefCounted> copied(initial);
+
+ scoped_refptr<base::OpaqueRefCounted> moved(std::move(initial));
+
+ scoped_refptr<base::OpaqueRefCounted> move_assigned;
+ move_assigned = std::move(moved);
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToOpaqueThreadSafe) {
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> initial =
+ base::MakeOpaqueRefCountedThreadSafe();
+ base::TestOpaqueRefCountedThreadSafe(initial);
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> assigned;
+ assigned = initial;
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> copied(initial);
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> moved(std::move(initial));
- scoped_refptr<base::OpaqueRefCounted> q;
- q = p;
- base::TestOpaqueRefCounted(p);
- base::TestOpaqueRefCounted(q);
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> move_assigned;
+ move_assigned = std::move(moved);
}
TEST(RefCountedUnitTest, BooleanTesting) {
diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h
index f3581eaa9b..a320b1e5d1 100644
--- a/base/memory/scoped_vector.h
+++ b/base/memory/scoped_vector.h
@@ -12,7 +12,6 @@
#include "base/logging.h"
#include "base/macros.h"
-#include "base/stl_util.h"
// ScopedVector wraps a vector deleting the elements from its
// destructor.
@@ -88,8 +87,10 @@ class ScopedVector {
// Resize, deleting elements in the disappearing range if we are shrinking.
void resize(size_t new_size) {
- if (v_.size() > new_size)
- STLDeleteContainerPointers(v_.begin() + new_size, v_.end());
+ if (v_.size() > new_size) {
+ for (auto it = v_.begin() + new_size; it != v_.end(); ++it)
+ delete *it;
+ }
v_.resize(new_size);
}
@@ -98,7 +99,11 @@ class ScopedVector {
v_.assign(begin, end);
}
- void clear() { STLDeleteElements(&v_); }
+ void clear() {
+ for (auto* item : *this)
+ delete item;
+ v_.clear();
+ }
// Like |clear()|, but doesn't delete any elements.
void weak_clear() { v_.clear(); }
@@ -124,7 +129,8 @@ class ScopedVector {
}
iterator erase(iterator first, iterator last) {
- STLDeleteContainerPointers(first, last);
+ for (auto it = first; it != last; ++it)
+ delete *it;
return v_.erase(first, last);
}
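With base/stl_util.h gone, ScopedVector spells out the delete loops it used to delegate to STLDeleteContainerPointers and STLDeleteElements. The hand-written loops reproduce exactly what a vector of unique_ptr does implicitly, which is where callers are being steered (see the push_back(unique_ptr) test below). The equivalence, sketched:

    std::vector<std::unique_ptr<int>> v;             // modern replacement
    v.push_back(std::unique_ptr<int>(new int(42)));
    v.clear();  // each unique_ptr deletes its element; no explicit loop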
diff --git a/base/memory/scoped_vector_unittest.cc b/base/memory/scoped_vector_unittest.cc
index ea3dcdc485..916dab9a15 100644
--- a/base/memory/scoped_vector_unittest.cc
+++ b/base/memory/scoped_vector_unittest.cc
@@ -322,7 +322,7 @@ TEST(ScopedVectorTest, InsertRange) {
EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
}
-// Assertions for push_back(scoped_ptr).
+// Assertions for push_back(unique_ptr).
TEST(ScopedVectorTest, PushBackScopedPtr) {
int delete_counter = 0;
std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index e1c9fa70bd..4b66cc6edd 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -34,31 +34,32 @@ class FilePath;
// Options for creating a shared memory object.
struct BASE_EXPORT SharedMemoryCreateOptions {
- SharedMemoryCreateOptions();
-
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The type of OS primitive that should back the SharedMemory object.
+ SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
+#else
// DEPRECATED (crbug.com/345734):
// If NULL, the object is anonymous. This pointer is owned by the caller
// and must live through the call to Create().
- const std::string* name_deprecated;
+ const std::string* name_deprecated = nullptr;
// DEPRECATED (crbug.com/345734):
// If true, and the shared memory already exists, Create() will open the
// existing shared memory and ignore the size parameter. If false,
// shared memory must not exist. This flag is meaningless unless
// name_deprecated is non-NULL.
- bool open_existing_deprecated;
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
+ bool open_existing_deprecated = false;
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
// Size of the shared memory object to be created.
// When opening an existing object, this has no effect.
- size_t size;
+ size_t size = 0;
// If true, mappings might need to be made executable later.
- bool executable;
+ bool executable = false;
// If true, the file can be shared read-only to a process.
- bool share_read_only;
+ bool share_read_only = false;
};
// Platform abstraction for shared memory. Provides a C++ wrapper
@@ -103,7 +104,7 @@ class BASE_EXPORT SharedMemory {
// The caller is responsible for destroying the duplicated OS primitive.
static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
-#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_POSIX)
// This method requires that the SharedMemoryHandle is backed by a POSIX fd.
static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
#endif
@@ -194,6 +195,13 @@ class BASE_EXPORT SharedMemory {
// identifier is not portable.
SharedMemoryHandle handle() const;
+ // Returns the underlying OS handle for this segment. The caller also gets
+ // ownership of the handle. This is logically equivalent to:
+ // SharedMemoryHandle dup = DuplicateHandle(handle());
+ // Close();
+ // return dup;
+ SharedMemoryHandle TakeHandle();
+
// Closes the open shared memory segment. The memory will remain mapped if
// it was previously mapped.
// It is safe to call Close repeatedly.
@@ -247,16 +255,36 @@ class BASE_EXPORT SharedMemory {
return ShareToProcessCommon(process, new_handle, true, SHARE_CURRENT_MODE);
}
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+ !defined(OS_NACL)
+ using UniqueId = std::pair<dev_t, ino_t>;
+
+ struct UniqueIdHash {
+ size_t operator()(const UniqueId& id) const {
+ return HashInts(id.first, id.second);
+ }
+ };
+
+  // Returns a unique ID for this shared memory's handle. Note that this
+  // function may access the file system and be slow.
+ bool GetUniqueId(UniqueId* id) const;
+#endif
+
private:
#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
- !(defined(OS_MACOSX) && !defined(OS_IOS))
- bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
+ (!defined(OS_MACOSX) || defined(OS_IOS))
bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
#endif
+
enum ShareMode {
SHARE_READONLY,
SHARE_CURRENT_MODE,
};
+
+#if defined(OS_MACOSX)
+ bool Share(SharedMemoryHandle* new_handle, ShareMode share_mode);
+#endif
+
bool ShareToProcessCommon(ProcessHandle process,
SharedMemoryHandle* new_handle,
bool close_self,
@@ -271,6 +299,12 @@ class BASE_EXPORT SharedMemory {
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// The OS primitive that backs the shared memory region.
SharedMemoryHandle shm_;
+
+ // The mechanism by which the memory is mapped. Only valid if |memory_| is not
+ // |nullptr|.
+ SharedMemoryHandle::Type mapped_memory_mechanism_;
+
+ int readonly_mapped_file_;
#elif defined(OS_POSIX)
int mapped_file_;
int readonly_mapped_file_;
@@ -282,6 +316,7 @@ class BASE_EXPORT SharedMemory {
DISALLOW_COPY_AND_ASSIGN(SharedMemory);
};
+
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_H_
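TakeHandle() is specified above as DuplicateHandle(handle()) followed by Close(); in other words, it converts the SharedMemory object's ownership of the segment into a caller-owned handle. A minimal usage sketch (names hypothetical):

    base::SharedMemory shm;
    // ... Create() and Map() as usual ...
    base::SharedMemoryHandle owned = shm.TakeHandle();
    // |shm| no longer owns the OS primitive; the caller must close |owned|
    // (or pass ownership onward, e.g. across IPC).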
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
index 8eff26b9dc..dc33eeafa1 100644
--- a/base/memory/shared_memory_handle.h
+++ b/base/memory/shared_memory_handle.h
@@ -15,6 +15,7 @@
#elif defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach.h>
#include "base/base_export.h"
+#include "base/file_descriptor_posix.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
#elif defined(OS_POSIX)
@@ -24,8 +25,6 @@
namespace base {
-class Pickle;
-
// SharedMemoryHandle is a platform specific type which represents
// the underlying OS handle to a shared memory segment.
#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
@@ -85,9 +84,25 @@ class BASE_EXPORT SharedMemoryHandle {
#else
class BASE_EXPORT SharedMemoryHandle {
public:
+ enum Type {
+ // The SharedMemoryHandle is backed by a POSIX fd.
+ POSIX,
+ // The SharedMemoryHandle is backed by the Mach primitive "memory object".
+ MACH,
+ };
+
// The default constructor returns an invalid SharedMemoryHandle.
SharedMemoryHandle();
+ // Constructs a SharedMemoryHandle backed by the components of a
+ // FileDescriptor. The newly created instance has the same ownership semantics
+ // as base::FileDescriptor. This typically means that the SharedMemoryHandle
+ // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
+ // common for existing code to make shallow copies of SharedMemoryHandle, and
+ // the one that is finally passed into a base::SharedMemory is the one that
+ // "consumes" the fd.
+ explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
+
// Makes a Mach-based SharedMemoryHandle of the given size. On error,
// subsequent calls to IsValid() return false.
explicit SharedMemoryHandle(mach_vm_size_t size);
@@ -122,7 +137,7 @@ class BASE_EXPORT SharedMemoryHandle {
mach_port_t GetMemoryObject() const;
// Returns false on a failure to determine the size. On success, populates the
- // output variable |size|. Returns 0 if the handle is invalid.
+ // output variable |size|.
bool GetSize(size_t* size) const;
// The SharedMemoryHandle must be valid.
@@ -138,24 +153,36 @@ class BASE_EXPORT SharedMemoryHandle {
bool OwnershipPassesToIPC() const;
private:
+ friend class SharedMemory;
+
// Shared code between copy constructor and operator=.
void CopyRelevantData(const SharedMemoryHandle& handle);
- mach_port_t memory_object_ = MACH_PORT_NULL;
+ Type type_;
- // The size of the shared memory region when |type_| is MACH. Only
- // relevant if |memory_object_| is not |MACH_PORT_NULL|.
- mach_vm_size_t size_ = 0;
+ // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+ // mach port. |type_| determines the backing member.
+ union {
+ FileDescriptor file_descriptor_;
- // The pid of the process in which |memory_object_| is usable. Only
- // relevant if |memory_object_| is not |MACH_PORT_NULL|.
- base::ProcessId pid_ = 0;
+ struct {
+ mach_port_t memory_object_;
- // Whether passing this object as a parameter to an IPC message passes
- // ownership of |memory_object_| to the IPC stack. This is meant to mimic
- // the behavior of the |auto_close| parameter of FileDescriptor.
- // Defaults to |false|.
- bool ownership_passes_to_ipc_ = false;
+ // The size of the shared memory region when |type_| is MACH. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ mach_vm_size_t size_;
+
+ // The pid of the process in which |memory_object_| is usable. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ base::ProcessId pid_;
+
+ // Whether passing this object as a parameter to an IPC message passes
+ // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+ // the behavior of the |auto_close| parameter of FileDescriptor.
+ // Defaults to |false|.
+ bool ownership_passes_to_ipc_;
+ };
+ };
};
#endif
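With the tagged union in place, a Mac SharedMemoryHandle is built in one of two ways, and |type_| records which union member is live. Both constructors from this patch, sketched (|fd| and the size are hypothetical):

    // POSIX-backed: wraps an fd; ownership follows FileDescriptor's
    // |auto_close| flag, with the shallow-copy caveat described above.
    base::SharedMemoryHandle posix_handle(
        base::FileDescriptor(fd, true /* auto_close */));

    // Mach-backed: makes a fresh memory object of the requested size;
    // IsValid() is false if mach_make_memory_entry_64() failed.
    base::SharedMemoryHandle mach_handle(static_cast<mach_vm_size_t>(4096));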
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
index ad470bea81..9dfd3c1aea 100644
--- a/base/memory/shared_memory_handle_mac.cc
+++ b/base/memory/shared_memory_handle_mac.cc
@@ -10,13 +10,20 @@
#include <unistd.h>
#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
#include "base/posix/eintr_wrapper.h"
namespace base {
-SharedMemoryHandle::SharedMemoryHandle() {}
+SharedMemoryHandle::SharedMemoryHandle()
+ : type_(MACH), memory_object_(MACH_PORT_NULL) {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+ const base::FileDescriptor& file_descriptor)
+ : type_(POSIX), file_descriptor_(file_descriptor) {}
SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
+ type_ = MACH;
mach_port_t named_right;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(),
@@ -39,7 +46,8 @@ SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
mach_vm_size_t size,
base::ProcessId pid)
- : memory_object_(memory_object),
+ : type_(MACH),
+ memory_object_(memory_object),
size_(size),
pid_(pid),
ownership_passes_to_ipc_(false) {}
@@ -53,29 +61,50 @@ SharedMemoryHandle& SharedMemoryHandle::operator=(
if (this == &handle)
return *this;
+ type_ = handle.type_;
CopyRelevantData(handle);
return *this;
}
SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- if (!IsValid())
- return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
-
- // Increment the ref count.
- kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
- MACH_PORT_RIGHT_SEND, 1);
- DCHECK_EQ(kr, KERN_SUCCESS);
- SharedMemoryHandle handle(*this);
- handle.SetOwnershipPassesToIPC(true);
- return handle;
+ switch (type_) {
+ case POSIX: {
+ if (!IsValid())
+ return SharedMemoryHandle();
+ int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
+ if (duped_fd < 0)
+ return SharedMemoryHandle();
+ return SharedMemoryHandle(FileDescriptor(duped_fd, true));
+ }
+ case MACH: {
+ if (!IsValid())
+ return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+
+ // Increment the ref count.
+ kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+ MACH_PORT_RIGHT_SEND, 1);
+ DCHECK_EQ(kr, KERN_SUCCESS);
+ SharedMemoryHandle handle(*this);
+ handle.SetOwnershipPassesToIPC(true);
+ return handle;
+ }
+ }
}
bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
if (!IsValid() && !handle.IsValid())
return true;
- return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
- pid_ == handle.pid_;
+ if (type_ != handle.type_)
+ return false;
+
+ switch (type_) {
+ case POSIX:
+ return file_descriptor_.fd == handle.file_descriptor_.fd;
+ case MACH:
+ return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+ pid_ == handle.pid_;
+ }
}
bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
@@ -83,10 +112,16 @@ bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
}
bool SharedMemoryHandle::IsValid() const {
- return memory_object_ != MACH_PORT_NULL;
+ switch (type_) {
+ case POSIX:
+ return file_descriptor_.fd >= 0;
+ case MACH:
+ return memory_object_ != MACH_PORT_NULL;
+ }
}
mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+ DCHECK_EQ(type_, MACH);
return memory_object_;
}
@@ -96,8 +131,19 @@ bool SharedMemoryHandle::GetSize(size_t* size) const {
return true;
}
- *size = size_;
- return true;
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ struct stat st;
+ if (fstat(file_descriptor_.fd, &st) != 0)
+ return false;
+ if (st.st_size < 0)
+ return false;
+ *size = st.st_size;
+ return true;
+ case SharedMemoryHandle::MACH:
+ *size = size_;
+ return true;
+ }
}
bool SharedMemoryHandle::MapAt(off_t offset,
@@ -105,42 +151,69 @@ bool SharedMemoryHandle::MapAt(off_t offset,
void** memory,
bool read_only) {
DCHECK(IsValid());
- DCHECK_EQ(pid_, GetCurrentProcId());
- kern_return_t kr = mach_vm_map(
- mach_task_self(),
- reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
- bytes,
- 0, // Alignment mask
- VM_FLAGS_ANYWHERE, memory_object_, offset,
- FALSE, // Copy
- VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
- VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
- VM_INHERIT_NONE);
- return kr == KERN_SUCCESS;
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
+ MAP_SHARED, file_descriptor_.fd, offset);
+ return *memory != MAP_FAILED;
+ case SharedMemoryHandle::MACH:
+ DCHECK_EQ(pid_, GetCurrentProcId());
+ kern_return_t kr = mach_vm_map(
+ mach_task_self(),
+ reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
+ bytes,
+ 0, // Alignment mask
+ VM_FLAGS_ANYWHERE,
+ memory_object_,
+ offset,
+ FALSE, // Copy
+ VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
+ VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
+ VM_INHERIT_NONE);
+ return kr == KERN_SUCCESS;
+ }
}
void SharedMemoryHandle::Close() const {
if (!IsValid())
return;
- kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
- if (kr != KERN_SUCCESS)
- DPLOG(ERROR) << "Error deallocating mach port: " << kr;
+ switch (type_) {
+ case POSIX:
+ if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+ DPLOG(ERROR) << "Error closing fd";
+ break;
+ case MACH:
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+ if (kr != KERN_SUCCESS)
+ MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
+ break;
+ }
}
void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+ DCHECK_EQ(type_, MACH);
ownership_passes_to_ipc_ = ownership_passes;
}
bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+ DCHECK_EQ(type_, MACH);
return ownership_passes_to_ipc_;
}
void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
- memory_object_ = handle.memory_object_;
- size_ = handle.size_;
- pid_ = handle.pid_;
- ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+ type_ = handle.type_;
+ switch (type_) {
+ case POSIX:
+ file_descriptor_ = handle.file_descriptor_;
+ break;
+ case MACH:
+ memory_object_ = handle.memory_object_;
+ size_ = handle.size_;
+ pid_ = handle.pid_;
+ ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+ break;
+ }
}
} // namespace base
diff --git a/base/memory/shared_memory_helper.cc b/base/memory/shared_memory_helper.cc
new file mode 100644
index 0000000000..7fbfb7afad
--- /dev/null
+++ b/base/memory/shared_memory_helper.cc
@@ -0,0 +1,98 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_helper.h"
+
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+struct ScopedPathUnlinkerTraits {
+ static const FilePath* InvalidValue() { return nullptr; }
+
+ static void Free(const FilePath* path) {
+ if (unlink(path->value().c_str()))
+ PLOG(WARNING) << "unlink";
+ }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+ ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+#if !defined(OS_ANDROID)
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+ ScopedFILE* fp,
+ ScopedFD* readonly_fd,
+ FilePath* path) {
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+  // It doesn't make sense to have an open-existing private piece of shmem.
+  DCHECK(!options.open_existing_deprecated);
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
+ // Q: Why not use the shm_open() etc. APIs?
+ // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
+ FilePath directory;
+ ScopedPathUnlinker path_unlinker;
+ if (!GetShmemTempDir(options.executable, &directory))
+ return false;
+
+ fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+ if (!*fp)
+ return false;
+
+ // Deleting the file prevents anyone else from mapping it in (making it
+ // private), and prevents the need for cleanup (once the last fd is
+ // closed, it is truly freed).
+ path_unlinker.reset(path);
+
+ if (options.share_read_only) {
+ // Also open as readonly so that we can ShareReadOnlyToProcess.
+ readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+ if (!readonly_fd->is_valid()) {
+ DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+ fp->reset();
+ return false;
+ }
+ }
+ return true;
+}
+
+bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
+ int* readonly_mapped_file) {
+ DCHECK_EQ(-1, *mapped_file);
+ DCHECK_EQ(-1, *readonly_mapped_file);
+ if (fp == NULL)
+ return false;
+
+ // This function theoretically can block on the disk, but realistically
+ // the temporary files we create will just go into the buffer cache
+ // and be deleted before they ever make it out to disk.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ if (readonly_fd.is_valid()) {
+ struct stat st = {};
+ if (fstat(fileno(fp.get()), &st))
+ NOTREACHED();
+
+ struct stat readonly_st = {};
+ if (fstat(readonly_fd.get(), &readonly_st))
+ NOTREACHED();
+ if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+ LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+ return false;
+ }
+ }
+
+ *mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
+ if (*mapped_file == -1) {
+ NOTREACHED() << "Call to dup failed, errno=" << errno;
+ }
+ *readonly_mapped_file = readonly_fd.release();
+
+ return true;
+}
+#endif // !defined(OS_ANDROID)
+
+} // namespace base
diff --git a/base/memory/shared_memory_helper.h b/base/memory/shared_memory_helper.h
new file mode 100644
index 0000000000..b515828c08
--- /dev/null
+++ b/base/memory/shared_memory_helper.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+
+#include "base/memory/shared_memory.h"
+
+#include <fcntl.h>
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+ ScopedFILE* fp,
+ ScopedFD* readonly_fd,
+ FilePath* path);
+
+// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
+// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
+bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
+ int* readonly_mapped_file);
+#endif
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
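These two helpers are called back to back by the POSIX and Mac implementations; the sequencing below mirrors SharedMemory::Create() in shared_memory_mac.cc later in this patch. A condensed sketch:

    ScopedFILE fp;
    ScopedFD readonly_fd;
    FilePath path;
    if (!CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path))
      return false;  // temp-file-backed segment could not be created

    int mapped_file = -1;           // both must be -1 on entry, per the
    int readonly_mapped_file = -1;  // DCHECKs in PrepareMapFile()
    return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file,
                          &readonly_mapped_file);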
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
index d15c63266d..d376daa579 100644
--- a/base/memory/shared_memory_mac.cc
+++ b/base/memory/shared_memory_mac.cc
@@ -4,22 +4,33 @@
#include "base/memory/shared_memory.h"
+#include <errno.h>
#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
-#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_mach_vm.h"
+#include "base/memory/shared_memory_helper.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
#include "base/process/process_metrics.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/scoped_generic.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
+#if defined(OS_MACOSX)
+#include "base/mac/foundation_util.h"
+#endif // OS_MACOSX
+
namespace base {
namespace {
@@ -67,18 +78,21 @@ bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
return true;
}
-} // namespace
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : size(0),
- executable(false),
- share_read_only(false) {}
+} // namespace
SharedMemory::SharedMemory()
- : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+ : mapped_memory_mechanism_(SharedMemoryHandle::MACH),
+ readonly_mapped_file_(-1),
+ mapped_size_(0),
+ memory_(NULL),
+ read_only_(false),
+ requested_size_(0) {}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: shm_(handle),
+ mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+ readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
@@ -106,8 +120,7 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
// static
size_t SharedMemory::GetHandleLimit() {
- // This should be effectively unlimited on OS X.
- return 10000;
+ return GetMaxFds();
}
// static
@@ -116,6 +129,12 @@ SharedMemoryHandle SharedMemory::DuplicateHandle(
return handle.Duplicate();
}
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle) {
+ return handle.file_descriptor_.fd;
+}
+
bool SharedMemory::CreateAndMapAnonymous(size_t size) {
return CreateAnonymous(size) && Map(size);
}
@@ -130,20 +149,53 @@ bool SharedMemory::GetSizeFromSharedMemoryHandle(
// Chromium mostly only uses the unique/private shmem as specified by
// "name == L"". The exception is in the StatsTable.
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Start"));
DCHECK(!shm_.IsValid());
if (options.size == 0) return false;
if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
return false;
- shm_ = SharedMemoryHandle(options.size);
+ if (options.type == SharedMemoryHandle::MACH) {
+ shm_ = SharedMemoryHandle(options.size);
+ requested_size_ = options.size;
+ return shm_.IsValid();
+ }
+
+ // This function theoretically can block on the disk. Both profiling of real
+ // users and local instrumentation shows that this is a real problem.
+ // https://code.google.com/p/chromium/issues/detail?id=466437
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ ScopedFILE fp;
+ ScopedFD readonly_fd;
+
+ FilePath path;
+ bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ if (!result)
+ return false;
+
+ if (!fp) {
+ PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+ return false;
+ }
+
+ // Get current size.
+ struct stat stat;
+ if (fstat(fileno(fp.get()), &stat) != 0)
+ return false;
+ const size_t current_size = stat.st_size;
+ if (current_size != options.size) {
+ if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ return false;
+ }
requested_size_ = options.size;
- return shm_.IsValid();
+
+ int mapped_file = -1;
+ result = PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file,
+ &readonly_mapped_file_);
+
+ shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false));
+ return result;
}
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
@@ -159,6 +211,7 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
mapped_size_ = bytes;
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
(SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ mapped_memory_mechanism_ = shm_.type_;
} else {
memory_ = NULL;
}
@@ -170,48 +223,105 @@ bool SharedMemory::Unmap() {
if (memory_ == NULL)
return false;
- mach_vm_deallocate(mach_task_self(),
- reinterpret_cast<mach_vm_address_t>(memory_),
- mapped_size_);
+ switch (mapped_memory_mechanism_) {
+ case SharedMemoryHandle::POSIX:
+ munmap(memory_, mapped_size_);
+ break;
+ case SharedMemoryHandle::MACH:
+ mach_vm_deallocate(mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ break;
+ }
+
memory_ = NULL;
mapped_size_ = 0;
return true;
}
SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
+ switch (shm_.type_) {
+ case SharedMemoryHandle::POSIX:
+ return SharedMemoryHandle(
+ FileDescriptor(shm_.file_descriptor_.fd, false));
+ case SharedMemoryHandle::MACH:
+ return shm_;
+ }
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ SharedMemoryHandle dup = DuplicateHandle(handle());
+ Close();
+ return dup;
}
void SharedMemory::Close() {
shm_.Close();
shm_ = SharedMemoryHandle();
+ if (shm_.type_ == SharedMemoryHandle::POSIX) {
+ if (readonly_mapped_file_ > 0) {
+ if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+ PLOG(ERROR) << "close";
+ readonly_mapped_file_ = -1;
+ }
+ }
}
-bool SharedMemory::ShareToProcessCommon(ProcessHandle /*process*/,
- SharedMemoryHandle* new_handle,
- bool close_self,
- ShareMode share_mode) {
- DCHECK(shm_.IsValid());
+bool SharedMemory::Share(SharedMemoryHandle* new_handle, ShareMode share_mode) {
+ if (shm_.type_ == SharedMemoryHandle::MACH) {
+ DCHECK(shm_.IsValid());
+
+ bool success = false;
+ switch (share_mode) {
+ case SHARE_CURRENT_MODE:
+ *new_handle = shm_.Duplicate();
+ success = true;
+ break;
+ case SHARE_READONLY:
+ success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+ break;
+ }
+
+ if (success)
+ new_handle->SetOwnershipPassesToIPC(true);
+
+ return success;
+ }
- bool success = false;
+ int handle_to_dup = -1;
switch (share_mode) {
case SHARE_CURRENT_MODE:
- *new_handle = shm_.Duplicate();
- success = true;
+ handle_to_dup = shm_.file_descriptor_.fd;
break;
case SHARE_READONLY:
- success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+ // We could imagine re-opening the file from /dev/fd, but that can't make
+ // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+ CHECK_GE(readonly_mapped_file_, 0);
+ handle_to_dup = readonly_mapped_file_;
break;
}
- if (success)
- new_handle->SetOwnershipPassesToIPC(true);
+ const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+ if (new_fd < 0) {
+ DPLOG(ERROR) << "dup() failed.";
+ return false;
+ }
+
+ new_handle->file_descriptor_.fd = new_fd;
+ new_handle->type_ = SharedMemoryHandle::POSIX;
+
+ return true;
+}
+bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
+ SharedMemoryHandle* new_handle,
+ bool close_self,
+ ShareMode share_mode) {
+ bool success = Share(new_handle, share_mode);
if (close_self) {
Unmap();
Close();
}
-
return success;
}
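The new options.type field is what routes Create() between the two paths above: MACH keeps the pre-existing memory-object path, while POSIX takes the temp-file path shared with the other POSIX platforms. Opting a segment into POSIX backing, sketched (size illustrative):

    base::SharedMemoryCreateOptions options;
    options.type = base::SharedMemoryHandle::POSIX;  // default on Mac is MACH
    options.size = 4096;

    base::SharedMemory shm;
    if (!shm.Create(options) || !shm.Map(options.size))
      return false;
    // The segment is now fd-backed; SharedMemory::GetFdFromSharedMemoryHandle()
    // can retrieve the descriptor from shm.handle().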
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
index 7e94223a5c..fb1a343906 100644
--- a/base/memory/shared_memory_posix.cc
+++ b/base/memory/shared_memory_posix.cc
@@ -14,12 +14,15 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
#include "base/posix/eintr_wrapper.h"
#include "base/posix/safe_strerror.h"
#include "base/process/process_metrics.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/scoped_generic.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
@@ -31,84 +34,6 @@
namespace base {
-namespace {
-
-struct ScopedPathUnlinkerTraits {
- static FilePath* InvalidValue() { return nullptr; }
-
- static void Free(FilePath* path) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Unlink"));
- if (unlink(path->value().c_str()))
- PLOG(WARNING) << "unlink";
- }
-};
-
-// Unlinks the FilePath when the object is destroyed.
-typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
-
-#if !defined(OS_ANDROID) && !defined(__ANDROID__)
-// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
-// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
-// options.share_read_only is true. |path| is populated with the location of
-// the file before it was unlinked.
-// Returns false if there's an unhandled failure.
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFILE* fp,
- ScopedFD* readonly_fd,
- FilePath* path) {
- // It doesn't make sense to have a open-existing private piece of shmem
- DCHECK(!options.open_existing_deprecated);
- // Q: Why not use the shm_open() etc. APIs?
- // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
- FilePath directory;
- ScopedPathUnlinker path_unlinker;
- if (GetShmemTempDir(options.executable, &directory)) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::OpenTemporaryFile"));
- fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
-
- // Deleting the file prevents anyone else from mapping it in (making it
- // private), and prevents the need for cleanup (once the last fd is
- // closed, it is truly freed).
- if (*fp)
- path_unlinker.reset(path);
- }
-
- if (*fp) {
- if (options.share_read_only) {
- // TODO(erikchen): Remove ScopedTracker below once
- // http://crbug.com/466437 is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::OpenReadonly"));
- // Also open as readonly so that we can ShareReadOnlyToProcess.
- readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
- if (!readonly_fd->is_valid()) {
- DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
- fp->reset();
- return false;
- }
- }
- }
- return true;
-}
-#endif // !defined(OS_ANDROID) && !defined(__ANDROID__)
-}
-
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : name_deprecated(nullptr),
- open_existing_deprecated(false),
- size(0),
- executable(false),
- share_read_only(false) {}
-
SharedMemory::SharedMemory()
: mapped_file_(-1),
readonly_mapped_file_(-1),
@@ -194,11 +119,6 @@ bool SharedMemory::GetSizeFromSharedMemoryHandle(
// In case we want to delete it later, it may be useful to save the value
// of mem_filename after FilePathForMemoryName().
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Start"));
DCHECK_EQ(-1, mapped_file_);
if (options.size == 0) return false;
@@ -288,7 +208,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
}
requested_size_ = options.size;
}
- if (fp == nullptr) {
+ if (fp == NULL) {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -301,7 +221,8 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
}
- return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
+ &readonly_mapped_file_);
}
// Our current implementation of shmem is with mmap()ing of files.
@@ -333,7 +254,8 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
return false;
}
- return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
+ &readonly_mapped_file_);
}
#endif // !defined(OS_ANDROID) && !defined(__ANDROID__)
@@ -365,8 +287,10 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
if (mmap_succeeded) {
mapped_size_ = bytes;
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ DCHECK_EQ(0U,
+ reinterpret_cast<uintptr_t>(memory_) &
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
} else {
memory_ = NULL;
}
@@ -379,6 +303,7 @@ bool SharedMemory::Unmap() {
return false;
munmap(memory_, mapped_size_);
+ SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
memory_ = NULL;
mapped_size_ = 0;
return true;
@@ -388,6 +313,14 @@ SharedMemoryHandle SharedMemory::handle() const {
return FileDescriptor(mapped_file_, false);
}
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ FileDescriptor handle(mapped_file_, true);
+ mapped_file_ = -1;
+ memory_ = nullptr;
+ mapped_size_ = 0;
+ return handle;
+}
+
void SharedMemory::Close() {
if (mapped_file_ > 0) {
if (IGNORE_EINTR(close(mapped_file_)) < 0)
@@ -402,44 +335,6 @@ void SharedMemory::Close() {
}
#if !defined(OS_ANDROID) && !defined(__ANDROID__)
-bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
- DCHECK_EQ(-1, mapped_file_);
- DCHECK_EQ(-1, readonly_mapped_file_);
- if (fp == nullptr)
- return false;
-
- // This function theoretically can block on the disk, but realistically
- // the temporary files we create will just go into the buffer cache
- // and be deleted before they ever make it out to disk.
- base::ThreadRestrictions::ScopedAllowIO allow_io;
-
- struct stat st = {};
- if (fstat(fileno(fp.get()), &st))
- NOTREACHED();
- if (readonly_fd.is_valid()) {
- struct stat readonly_st = {};
- if (fstat(readonly_fd.get(), &readonly_st))
- NOTREACHED();
- if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
- LOG(ERROR) << "writable and read-only inodes don't match; bailing";
- return false;
- }
- }
-
- mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
- if (mapped_file_ == -1) {
- if (errno == EMFILE) {
- LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
- return false;
- } else {
- NOTREACHED() << "Call to dup failed, errno=" << errno;
- }
- }
- readonly_mapped_file_ = readonly_fd.release();
-
- return true;
-}
-
// For the given shmem named |mem_name|, return a filename to mmap()
// (and possibly create). Modifies |filename|. Return false on
// error, or true if we are happy.
@@ -502,4 +397,22 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle,
return true;
}
+bool SharedMemory::GetUniqueId(SharedMemory::UniqueId* id) const {
+ // This function is called just after mmap. fstat is a system call that might
+ // cause I/O. It's safe to call fstat here because mmap for shared memory is
+ // called in two cases:
+ // 1) To handle file-mapped memory
+ // 2) To handle anonymous shared memory
+ // In 1), I/O is already permitted. In 2), the backend is on page cache and
+ // fstat doesn't cause I/O access to the disk. See the discussion at
+ // crbug.com/604726#c41.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ struct stat file_stat;
+ if (HANDLE_EINTR(::fstat(static_cast<int>(handle().fd), &file_stat)) != 0)
+ return false;
+ id->first = file_stat.st_dev;
+ id->second = file_stat.st_ino;
+ return true;
+}
+
} // namespace base
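
For reference: the new GetUniqueId() keys a mapping by the (st_dev, st_ino) pair of its backing file, and TakeHandle() transfers descriptor ownership out of the object. A minimal caller sketch, assuming |shm| is a hypothetical base::SharedMemory that has already been created and mapped:

  // Identify the backing file; two mappings of the same file yield equal IDs.
  base::SharedMemory::UniqueId id;
  if (shm.GetUniqueId(&id))
    LOG(INFO) << "shared_memory/" << id.first << "." << id.second;

  // Take ownership of the fd; |shm| forgets its mapping state afterwards.
  base::SharedMemoryHandle owned = shm.TakeHandle();
  // owned.fd must now be closed (or passed on) by the caller.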
diff --git a/base/memory/shared_memory_tracker.cc b/base/memory/shared_memory_tracker.cc
new file mode 100644
index 0000000000..cfd4c85c53
--- /dev/null
+++ b/base/memory/shared_memory_tracker.cc
@@ -0,0 +1,93 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_tracker.h"
+
+#include "base/memory/shared_memory.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+
+namespace base {
+
+SharedMemoryTracker::Usage::Usage() = default;
+
+SharedMemoryTracker::Usage::Usage(const Usage& rhs) = default;
+
+SharedMemoryTracker::Usage::~Usage() = default;
+
+// static
+SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
+ static SharedMemoryTracker* instance = new SharedMemoryTracker;
+ return instance;
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ Usage usage;
+ // |shared_memory|'s unique ID must be generated here; it would be too late
+ // at OnMemoryDump. The ID is derived from a SharedMemoryHandle, and the
+ // handle might already be closed by dump time. IncrementMemoryUsage is
+ // called just after mmap, so the handle is still alive here. See the
+ // discussion at crbug.com/604726#c30.
+ SharedMemory::UniqueId id;
+ if (!shared_memory.GetUniqueId(&id))
+ return;
+ usage.unique_id = id;
+ usage.size = shared_memory.mapped_size();
+ AutoLock hold(usages_lock_);
+ usages_[&shared_memory] = usage;
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ AutoLock hold(usages_lock_);
+ usages_.erase(&shared_memory);
+}
+
+bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+ trace_event::ProcessMemoryDump* pmd) {
+ ALLOW_UNUSED_PARAM(args);
+ std::unordered_map<SharedMemory::UniqueId, size_t, SharedMemory::UniqueIdHash>
+ sizes;
+ {
+ AutoLock hold(usages_lock_);
+ for (const auto& usage : usages_)
+ sizes[usage.second.unique_id] += usage.second.size;
+ }
+ for (auto& size : sizes) {
+ const SharedMemory::UniqueId& id = size.first;
+ std::string dump_name = StringPrintf("%s/%lld.%lld", "shared_memory",
+ static_cast<long long>(id.first),
+ static_cast<long long>(id.second));
+ auto guid = trace_event::MemoryAllocatorDumpGuid(dump_name);
+ trace_event::MemoryAllocatorDump* local_dump =
+ pmd->CreateAllocatorDump(dump_name);
+ // TODO(hajimehoshi): The size is not resident size but virtual size so far.
+ // Fix this to record resident size.
+ local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+ trace_event::MemoryAllocatorDump::kUnitsBytes,
+ size.second);
+ trace_event::MemoryAllocatorDump* global_dump =
+ pmd->CreateSharedGlobalAllocatorDump(guid);
+ global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+ trace_event::MemoryAllocatorDump::kUnitsBytes,
+ size.second);
+ // TODO(hajimehoshi): Detect whether the shared memory comes from the
+ // browser, renderer or GPU process.
+ // TODO(hajimehoshi): Shared memory reported by GPU and discardable is
+ // currently double-counted. Add ownership edges to avoid this.
+ pmd->AddOwnershipEdge(local_dump->guid(), global_dump->guid());
+ }
+ return true;
+}
+
+SharedMemoryTracker::SharedMemoryTracker() {
+ trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "SharedMemoryTracker", nullptr);
+}
+
+SharedMemoryTracker::~SharedMemoryTracker() = default;
+
+} // namespace base
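
For context, the tracker is driven entirely by the MapAt()/Unmap() hooks added in shared_memory_posix.cc above; a sketch of the resulting flow (the setup is illustrative only):

  base::SharedMemory shm;
  base::SharedMemoryCreateOptions options;
  options.size = 4096;
  shm.Create(options);
  shm.Map(options.size);  // IncrementMemoryUsage() records {unique_id, size}.
  // During a memory-infra dump, OnMemoryDump() sums sizes per unique ID and
  // emits one "shared_memory/<dev>.<ino>" allocator dump per backing file.
  shm.Unmap();            // DecrementMemoryUsage() erases the entry.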
diff --git a/base/memory/shared_memory_tracker.h b/base/memory/shared_memory_tracker.h
new file mode 100644
index 0000000000..fe1a3dd392
--- /dev/null
+++ b/base/memory/shared_memory_tracker.h
@@ -0,0 +1,56 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+
+#include "base/memory/shared_memory.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+namespace trace_event {
+class ProcessMemoryDump;
+}
+
+// SharedMemoryTracker tracks shared memory usage.
+class BASE_EXPORT SharedMemoryTracker
+ : public base::trace_event::MemoryDumpProvider {
+ public:
+ // Returns a singleton instance.
+ static SharedMemoryTracker* GetInstance();
+
+ // Records shared memory usage on mapping.
+ void IncrementMemoryUsage(const SharedMemory& shared_memory);
+
+ // Records shared memory usage on unmapping.
+ void DecrementMemoryUsage(const SharedMemory& shared_memory);
+
+ private:
+ struct Usage {
+ Usage();
+ Usage(const Usage& rhs);
+ ~Usage();
+ SharedMemory::UniqueId unique_id;
+ size_t size;
+ };
+
+ SharedMemoryTracker();
+ ~SharedMemoryTracker() override;
+
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
+ // Used to lock when |usages_| is modified or read.
+ Lock usages_lock_;
+ std::unordered_map<const SharedMemory*, Usage> usages_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index f29865c21a..19dedccb47 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -316,8 +316,6 @@ TEST(SharedMemoryTest, AnonymousPrivate) {
}
}
-// The Mach functionality is tested in shared_memory_mac_unittest.cc.
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
TEST(SharedMemoryTest, ShareReadOnly) {
StringPiece contents = "Hello World";
@@ -325,6 +323,10 @@ TEST(SharedMemoryTest, ShareReadOnly) {
SharedMemoryCreateOptions options;
options.size = contents.size();
options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
ASSERT_TRUE(writable_shmem.Create(options));
ASSERT_TRUE(writable_shmem.Map(options.size));
memcpy(writable_shmem.memory(), contents.data(), contents.size());
@@ -400,7 +402,6 @@ TEST(SharedMemoryTest, ShareReadOnly) {
#error Unexpected platform; write a test that tries to make 'handle' writable.
#endif // defined(OS_POSIX) || defined(OS_WIN)
}
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
TEST(SharedMemoryTest, ShareToSelf) {
StringPiece contents = "Hello World";
@@ -474,7 +475,7 @@ TEST(SharedMemoryTest, MapTwice) {
EXPECT_EQ(old_address, memory.memory());
}
-#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_POSIX)
// This test is not applicable for iOS (crbug.com/399384).
#if !defined(OS_IOS)
// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
@@ -485,6 +486,10 @@ TEST(SharedMemoryTest, AnonymousExecutable) {
SharedMemoryCreateOptions options;
options.size = kTestSize;
options.executable = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
EXPECT_TRUE(shared_memory.Create(options));
EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
@@ -518,6 +523,10 @@ TEST(SharedMemoryTest, FilePermissionsAnonymous) {
SharedMemory shared_memory;
SharedMemoryCreateOptions options;
options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
// Set a file mode creation mask that gives all permissions.
ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -540,6 +549,10 @@ TEST(SharedMemoryTest, FilePermissionsNamed) {
SharedMemory shared_memory;
SharedMemoryCreateOptions options;
options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
// Set a file mode creation mask that gives all permissions.
ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -556,7 +569,7 @@ TEST(SharedMemoryTest, FilePermissionsNamed) {
}
#endif // !defined(OS_ANDROID)
-#endif // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#endif // defined(OS_POSIX)
// Map() will return addresses which are aligned to the platform page size;
// this varies from platform to platform, though. Since we'd like to advertise a
diff --git a/base/memory/singleton.h b/base/memory/singleton.h
index 79e4441a8e..5c58d5fe29 100644
--- a/base/memory/singleton.h
+++ b/base/memory/singleton.h
@@ -22,6 +22,7 @@
#include "base/at_exit.h"
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/aligned_memory.h"
#include "base/threading/thread_restrictions.h"
@@ -63,7 +64,7 @@ struct DefaultSingletonTraits {
// exit. See below for the required call that makes this happen.
static const bool kRegisterAtExit = true;
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
// Set to false to disallow access on a non-joinable thread. This is
// different from kRegisterAtExit because StaticMemorySingletonTraits allows
// access on non-joinable threads, and gracefully handles this.
@@ -78,7 +79,7 @@ struct DefaultSingletonTraits {
template<typename Type>
struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
static const bool kRegisterAtExit = false;
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
static const bool kAllowedToAccessOnNonjoinableThread = true;
#endif
};
@@ -152,14 +153,17 @@ subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// Example usage:
//
// In your header:
-// template <typename T> struct DefaultSingletonTraits;
+// namespace base {
+// template <typename T>
+// struct DefaultSingletonTraits;
+// }
// class FooClass {
// public:
// static FooClass* GetInstance(); <-- See comment below on this.
// void Bar() { ... }
// private:
// FooClass() { ... }
-// friend struct DefaultSingletonTraits<FooClass>;
+// friend struct base::DefaultSingletonTraits<FooClass>;
//
// DISALLOW_COPY_AND_ASSIGN(FooClass);
// };
@@ -167,7 +171,14 @@ subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// In your source file:
// #include "base/memory/singleton.h"
// FooClass* FooClass::GetInstance() {
-// return Singleton<FooClass>::get();
+// return base::Singleton<FooClass>::get();
+// }
+//
+// Or for leaky singletons:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<
+// FooClass, base::LeakySingletonTraits<FooClass>>::get();
// }
//
// And to call methods on FooClass:
@@ -227,7 +238,7 @@ class Singleton {
// Return a pointer to the one true instance of the class.
static Type* get() {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
// Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
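
The updated header comment above now namespaces the traits; a self-contained version of that documented pattern (FooClass is illustrative only):

  #include "base/memory/singleton.h"

  class FooClass {
   public:
    static FooClass* GetInstance() { return base::Singleton<FooClass>::get(); }
    void Bar() {}

   private:
    FooClass() = default;
    friend struct base::DefaultSingletonTraits<FooClass>;
    DISALLOW_COPY_AND_ASSIGN(FooClass);
  };

  // Usage: FooClass::GetInstance()->Bar();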
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index 4e77b04973..c179b80097 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -17,13 +17,13 @@ WeakReference::Flag::Flag() : is_valid_(true) {
void WeakReference::Flag::Invalidate() {
// The flag being invalidated with a single ref implies that there are no
// weak pointers in existence. Allow deletion on another thread in this case.
- DCHECK(sequence_checker_.CalledOnValidSequencedThread() || HasOneRef())
+ DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
<< "WeakPtrs must be invalidated on the same sequenced thread.";
is_valid_ = false;
}
bool WeakReference::Flag::IsValid() const {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread())
+ DCHECK(sequence_checker_.CalledOnValidSequence())
<< "WeakPtrs must be checked on the same sequenced thread.";
return is_valid_;
}
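
CalledOnValidSequence() generalizes the old per-thread check to sequences; a minimal guard sketch using the same idiom (the class name is hypothetical):

  class SequenceAffine {
   public:
    void Poke() {
      // DCHECKs that we are on the sequence the checker was bound to.
      DCHECK(sequence_checker_.CalledOnValidSequence())
          << "Poke() must be called on the owning sequence.";
    }

   private:
    base::SequenceChecker sequence_checker_;
  };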
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index ebcf33c57e..1a4870eab1 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -12,6 +12,7 @@
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -563,8 +564,6 @@ TEST(WeakPtrTest, NonOwnerThreadCanDeleteWeakPtr) {
background.DeleteArrow(arrow);
}
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
-
TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
// The default style "fast" does not support multi-threaded tests
// (introduces deadlock on Linux).
@@ -588,7 +587,7 @@ TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
// Although background thread created the copy, it can not deref the copied
// WeakPtr.
- ASSERT_DEATH(background.DeRef(arrow_copy), "");
+ ASSERT_DCHECK_DEATH(background.DeRef(arrow_copy));
background.DeleteArrow(arrow_copy);
}
@@ -610,7 +609,7 @@ TEST(WeakPtrDeathTest, NonOwnerThreadDereferencesWeakPtrAfterReference) {
// Background thread tries to deref target, which violates thread ownership.
BackgroundThread background;
background.Start();
- ASSERT_DEATH(background.DeRef(&arrow), "");
+ ASSERT_DCHECK_DEATH(background.DeRef(&arrow));
}
TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
@@ -630,7 +629,7 @@ TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
background.DeRef(&arrow);
// Main thread deletes Target, violating thread binding.
- ASSERT_DEATH(target.reset(), "");
+ ASSERT_DCHECK_DEATH(target.reset());
// |target.reset()| died so |target| still holds the object, so we
// must pass it to the background thread to teardown.
@@ -653,7 +652,7 @@ TEST(WeakPtrDeathTest, NonOwnerThreadDeletesObjectAfterReference) {
// Background thread tries to delete target, violating thread binding.
BackgroundThread background;
background.Start();
- ASSERT_DEATH(background.DeleteTarget(target.release()), "");
+ ASSERT_DCHECK_DEATH(background.DeleteTarget(target.release()));
}
TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
@@ -673,9 +672,7 @@ TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
background.DeleteTarget(target.release());
// Main thread attempts to dereference the target, violating thread binding.
- ASSERT_DEATH(arrow.target.get(), "");
+ ASSERT_DCHECK_DEATH(arrow.target.get());
}
-#endif
-
} // namespace base
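
ASSERT_DCHECK_DEATH (from base/test/gtest_util.h, included above) expects death only in builds where DCHECKs are on, which is why the explicit !defined(NDEBUG)/GTEST_HAS_DEATH_TEST guards could be dropped. A hedged sketch:

  TEST(ExampleDeathTest, DcheckFires) {
    // Skipped where DCHECKs are off; asserts the DCHECK death otherwise.
    ASSERT_DCHECK_DEATH(DCHECK(false) << "boom");
  }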
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index bca1d52762..fed1494c04 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -17,7 +17,7 @@ namespace internal {
namespace {
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
// Delays larger than this are often bogus, and a warning should be emitted in
// debug builds to warn developers. http://crbug.com/450045
const int kTaskDelayWarningThresholdInSeconds =
@@ -68,8 +68,8 @@ bool IncomingTaskQueue::AddToIncomingQueue(
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
- PendingTask pending_task(
- from_here, task, CalculateDelayedRuntime(delay), nestable);
+ PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
+ nestable);
#if defined(OS_WIN)
// We consider the task needs a high resolution timer if the delay is
// more than 0 and less than 32ms. This caps the relative error to
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index aff71d20bf..157e47fa14 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -16,7 +16,6 @@
namespace base {
class MessageLoop;
-class WaitableEvent;
namespace internal {
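
DCHECK_IS_ON() is a function-like macro usable in preprocessor conditionals, replacing the old !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON) guard and matching exactly the builds where DCHECK() is active; a sketch (the constant is hypothetical):

  #include "base/logging.h"

  #if DCHECK_IS_ON()
  // Hypothetical debug-only constant, compiled out when DCHECKs are off,
  // including NDEBUG builds without DCHECK_ALWAYS_ON.
  const int kExampleDebugOnlyThreshold = 42;
  #endif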
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index 54369a9b27..eba68a72ba 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -5,26 +5,19 @@
#include "base/message_loop/message_loop.h"
#include <algorithm>
-#include <memory>
#include <utility>
#include "base/bind.h"
#include "base/compiler_specific.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
-#include "base/tracked_objects.h"
-#include "build/build_config.h"
#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
@@ -44,51 +37,11 @@ namespace base {
namespace {
// A lazily created thread local storage for quick access to a thread's message
-// loop, if one exists. This should be safe and free of static constructors.
-LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
- LAZY_INSTANCE_INITIALIZER;
-
-// Logical events for Histogram profiling. Run with --message-loop-histogrammer
-// to get an accounting of messages and actions taken on each thread.
-const int kTaskRunEvent = 0x1;
-#if !defined(OS_NACL)
-const int kTimerEvent = 0x2;
-
-// Provide range of message IDs for use in histogramming and debug display.
-const int kLeastNonZeroMessageId = 1;
-const int kMaxMessageId = 1099;
-const int kNumberOfDistinctMessagesDisplayed = 1100;
-
-// Provide a macro that takes an expression (such as a constant, or macro
-// constant) and creates a pair to initialize an array of pairs. In this case,
-// our pair consists of the expressions value, and the "stringized" version
-// of the expression (i.e., the expression put in quotes). For example, if
-// we have:
-// #define FOO 2
-// #define BAR 5
-// then the following:
-// VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
-// will expand to:
-// {7, "FOO + BAR"}
-// We use the resulting array as an argument to our histogram, which reads the
-// number as a bucket identifier, and proceeds to use the corresponding name
-// in the pair (i.e., the quoted string) when printing out a histogram.
-#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
-
-const LinearHistogram::DescriptionPair event_descriptions_[] = {
- // Provide some pretty print capability in our histogram for our internal
- // messages.
-
- // A few events we handle (kindred to messages), and used to profile actions.
- VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
- VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
-
- {-1, NULL} // The list must be null-terminated, per API to histogram.
-};
-#endif // !defined(OS_NACL)
-
-bool enable_histogrammer_ = false;
-
+// loop, if one exists.
+base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
+ static auto* lazy_tls_ptr = new base::ThreadLocalPointer<MessageLoop>();
+ return lazy_tls_ptr;
+}
MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
#if defined(OS_IOS)
@@ -171,8 +124,8 @@ MessageLoop::~MessageLoop() {
DCHECK(!did_work);
// Let interested parties have one last shot at accessing this.
- FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
- WillDestroyCurrentMessageLoop());
+ for (auto& observer : destruction_observers_)
+ observer.WillDestroyCurrentMessageLoop();
thread_task_runner_handle_.reset();
@@ -184,7 +137,7 @@ MessageLoop::~MessageLoop() {
// OK, now make it so that no one can find us.
if (current() == this)
- lazy_tls_ptr.Pointer()->Set(nullptr);
+ GetTLSMessageLoop()->Set(nullptr);
}
// static
@@ -192,12 +145,7 @@ MessageLoop* MessageLoop::current() {
// TODO(darin): sadly, we cannot enable this yet since people call us even
// when they have no intention of using us.
// DCHECK(loop) << "Ouch, did you forget to initialize me?";
- return lazy_tls_ptr.Pointer()->Get();
-}
-
-// static
-void MessageLoop::EnableHistogrammer(bool enable) {
- enable_histogrammer_ = enable;
+ return GetTLSMessageLoop()->Get();
}
// static
@@ -214,7 +162,7 @@ std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
typedef MessagePumpGlib MessagePumpForUI;
-#elif defined(OS_LINUX) && !defined(OS_NACL)
+#elif (defined(OS_LINUX) && !defined(OS_NACL)) || defined(OS_BSD)
typedef MessagePumpLibevent MessagePumpForUI;
#endif
@@ -268,39 +216,16 @@ void MessageLoop::RemoveDestructionObserver(
void MessageLoop::AddNestingObserver(NestingObserver* observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_nesting_);
nesting_observers_.AddObserver(observer);
}
void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_nesting_);
nesting_observers_.RemoveObserver(observer);
}
-void MessageLoop::PostTask(
- const tracked_objects::Location& from_here,
- const Closure& task) {
- task_runner_->PostTask(from_here, task);
-}
-
-void MessageLoop::PostDelayedTask(
- const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
- task_runner_->PostDelayedTask(from_here, task, delay);
-}
-
-void MessageLoop::Run() {
- DCHECK(pump_);
- RunLoop run_loop;
- run_loop.Run();
-}
-
-void MessageLoop::RunUntilIdle() {
- DCHECK(pump_);
- RunLoop run_loop;
- run_loop.RunUntilIdle();
-}
-
void MessageLoop::QuitWhenIdle() {
DCHECK_EQ(this, current());
if (run_loop_) {
@@ -334,6 +259,8 @@ Closure MessageLoop::QuitWhenIdleClosure() {
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
if (allowed) {
+ CHECK(allow_nesting_);
+
// Kick the native pump just in case we enter a OS-driven nested message
// loop.
pump_->ScheduleWork();
@@ -351,11 +278,13 @@ bool MessageLoop::IsNested() {
void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_task_observers_);
task_observers_.AddObserver(task_observer);
}
void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_task_observers_);
task_observers_.RemoveObserver(task_observer);
}
@@ -391,8 +320,8 @@ MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
#endif
nestable_tasks_allowed_(true),
pump_factory_(pump_factory),
- message_histogram_(NULL),
- run_loop_(NULL),
+ run_loop_(nullptr),
+ current_pending_task_(nullptr),
incoming_task_queue_(new internal::IncomingTaskQueue(this)),
unbound_task_runner_(
new internal::MessageLoopTaskRunner(incoming_task_queue_)),
@@ -410,39 +339,39 @@ void MessageLoop::BindToCurrentThread() {
pump_ = CreateMessagePumpForType(type_);
DCHECK(!current()) << "should only have one message loop per thread";
- lazy_tls_ptr.Pointer()->Set(this);
+ GetTLSMessageLoop()->Set(this);
incoming_task_queue_->StartScheduling();
unbound_task_runner_->BindToCurrentThread();
unbound_task_runner_ = nullptr;
SetThreadTaskRunnerHandle();
- {
- // Save the current thread's ID for potential use by other threads
- // later from GetThreadName().
- thread_id_ = PlatformThread::CurrentId();
- subtle::MemoryBarrier();
- }
+ thread_id_ = PlatformThread::CurrentId();
}
std::string MessageLoop::GetThreadName() const {
- if (thread_id_ == kInvalidThreadId) {
- // |thread_id_| may already have been initialized but this thread might not
- // have received the update yet.
- subtle::MemoryBarrier();
- DCHECK_NE(kInvalidThreadId, thread_id_);
- }
+ DCHECK_NE(kInvalidThreadId, thread_id_)
+ << "GetThreadName() must only be called after BindToCurrentThread()'s "
+ << "side-effects have been synchronized with this thread.";
return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
}
void MessageLoop::SetTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
DCHECK_EQ(this, current());
+ DCHECK(task_runner);
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(!unbound_task_runner_);
task_runner_ = std::move(task_runner);
SetThreadTaskRunnerHandle();
}
+void MessageLoop::ClearTaskRunnerForTesting() {
+ DCHECK_EQ(this, current());
+ DCHECK(!unbound_task_runner_);
+ task_runner_ = nullptr;
+ thread_task_runner_handle_.reset();
+}
+
void MessageLoop::SetThreadTaskRunnerHandle() {
DCHECK_EQ(this, current());
// Clear the previous thread task runner first, because only one can exist at
@@ -453,7 +382,8 @@ void MessageLoop::SetThreadTaskRunnerHandle() {
void MessageLoop::RunHandler() {
DCHECK_EQ(this, current());
- StartHistogrammer();
+ DCHECK(run_loop_);
+ CHECK(allow_nesting_ || run_loop_->run_depth_ == 1);
pump_->Run(this);
}
@@ -468,15 +398,16 @@ bool MessageLoop::ProcessNextDelayedNonNestableTask() {
std::move(deferred_non_nestable_work_queue_.front());
deferred_non_nestable_work_queue_.pop();
- RunTask(pending_task);
+ RunTask(&pending_task);
return true;
}
-void MessageLoop::RunTask(const PendingTask& pending_task) {
+void MessageLoop::RunTask(PendingTask* pending_task) {
DCHECK(nestable_tasks_allowed_);
+ current_pending_task_ = pending_task;
#if defined(OS_WIN)
- if (pending_task.is_high_res) {
+ if (pending_task->is_high_res) {
pending_high_res_tasks_--;
CHECK_GE(pending_high_res_tasks_, 0);
}
@@ -485,22 +416,22 @@ void MessageLoop::RunTask(const PendingTask& pending_task) {
// Execute the task and assume the worst: It is probably not reentrant.
nestable_tasks_allowed_ = false;
- HistogramEvent(kTaskRunEvent);
+ TRACE_TASK_EXECUTION("MessageLoop::RunTask", *pending_task);
- TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);
-
- FOR_EACH_OBSERVER(TaskObserver, task_observers_,
- WillProcessTask(pending_task));
+ for (auto& observer : task_observers_)
+ observer.WillProcessTask(*pending_task);
task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
- FOR_EACH_OBSERVER(TaskObserver, task_observers_,
- DidProcessTask(pending_task));
+ for (auto& observer : task_observers_)
+ observer.DidProcessTask(*pending_task);
nestable_tasks_allowed_ = true;
+
+ current_pending_task_ = nullptr;
}
bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
if (pending_task.nestable || run_loop_->run_depth_ == 1) {
- RunTask(pending_task);
+ RunTask(&pending_task);
// Show that we ran a task (Note: a new one might arrive as a
// consequence!).
return true;
@@ -565,40 +496,9 @@ void MessageLoop::ScheduleWork() {
pump_->ScheduleWork();
}
-#if defined(OS_WIN)
-bool MessageLoop::MessagePumpWasSignaled() {
- return pump_->WasSignaled();
-}
-#endif
-
-//------------------------------------------------------------------------------
-// Method and data for histogramming events and actions taken by each instance
-// on each thread.
-
-void MessageLoop::StartHistogrammer() {
-#if !defined(OS_NACL) // NaCl build has no metrics code.
- if (enable_histogrammer_ && !message_histogram_
- && StatisticsRecorder::IsActive()) {
- std::string thread_name = GetThreadName();
- DCHECK(!thread_name.empty());
- message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
- "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
- kNumberOfDistinctMessagesDisplayed,
- HistogramBase::kHexRangePrintingFlag, event_descriptions_);
- }
-#endif
-}
-
-void MessageLoop::HistogramEvent(int event) {
-#if !defined(OS_NACL)
- if (message_histogram_)
- message_histogram_->Add(event);
-#endif
-}
-
void MessageLoop::NotifyBeginNestedLoop() {
- FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
- OnBeginNestedMessageLoop());
+ for (auto& observer : nesting_observers_)
+ observer.OnBeginNestedMessageLoop();
}
bool MessageLoop::DoWork() {
@@ -688,19 +588,6 @@ bool MessageLoop::DoIdleWork() {
return false;
}
-void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
- void(*deleter)(const void*),
- const void* object) {
- task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
-}
-
-void MessageLoop::ReleaseSoonInternal(
- const tracked_objects::Location& from_here,
- void(*releaser)(const void*),
- const void* object) {
- task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
-}
-
#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI
@@ -713,6 +600,18 @@ void MessageLoopForUI::Start() {
// No Histogram support for UI message loop as it is managed by the Java side.
static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
+
+void MessageLoopForUI::StartForTesting(
+ base::android::JavaMessageHandlerFactory* factory,
+ WaitableEvent* test_done_event) {
+ // No Histogram support for UI message loop as it is managed by the Java side.
+ static_cast<MessagePumpForUI*>(pump_.get())
+ ->StartForUnitTest(this, factory, test_done_event);
+}
+
+void MessageLoopForUI::Abort() {
+ static_cast<MessagePumpForUI*>(pump_.get())->Abort();
+}
#endif
#if defined(OS_IOS)
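
The removed LazyInstance was replaced above by a leaky function-local static, which C++11 guarantees is initialized exactly once and thread-safely, and which is never destroyed, avoiding shutdown-order hazards; the generic pattern (Widget is hypothetical):

  class Widget;  // hypothetical payload type

  base::ThreadLocalPointer<Widget>* GetTLSWidget() {
    // Constructed on first call, intentionally leaked.
    static auto* tls = new base::ThreadLocalPointer<Widget>();
    return tls;
  }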
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index ac522cf133..bfef261c38 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -13,7 +13,6 @@
#include "base/callback_forward.h"
#include "base/debug/task_annotator.h"
#include "base/gtest_prod_util.h"
-#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/incoming_task_queue.h"
@@ -22,10 +21,8 @@
#include "base/message_loop/timer_slack.h"
#include "base/observer_list.h"
#include "base/pending_task.h"
-#include "base/sequenced_task_runner_helpers.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
-#include "base/tracking_info.h"
#include "build/build_config.h"
// TODO(sky): these includes should not be necessary. Nuke them.
@@ -37,9 +34,18 @@
#include "base/message_loop/message_pump_libevent.h"
#endif
+#if defined(OS_ANDROID)
+namespace base {
+namespace android {
+
+class JavaMessageHandlerFactory;
+
+} // namespace android
+} // namespace base
+#endif // defined(OS_ANDROID)
+
namespace base {
-class HistogramBase;
class RunLoop;
class ThreadTaskRunnerHandle;
class WaitableEvent;
@@ -47,8 +53,8 @@ class WaitableEvent;
// A MessageLoop is used to process events for a particular thread. There is
// at most one MessageLoop instance per thread.
//
-// Events include at a minimum Task instances submitted to PostTask and its
-// variants. Depending on the type of message pump used by the MessageLoop
+// Events include at a minimum Task instances submitted to the MessageLoop's
+// TaskRunner. Depending on the type of message pump used by the MessageLoop
// other events such as UI messages may be processed. On Windows APC calls (as
// time permits) and signals sent to a registered set of HANDLEs may also be
// processed.
@@ -122,8 +128,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Returns the MessageLoop object for the current thread, or null if none.
static MessageLoop* current();
- static void EnableHistogrammer(bool enable_histogrammer);
-
typedef std::unique_ptr<MessagePump>(MessagePumpFactory)();
// Uses the given base::MessagePumpForUIFactory to override the default
// MessagePump implementation for 'TYPE_UI'. Returns true if the factory
@@ -171,86 +175,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
void AddNestingObserver(NestingObserver* observer);
void RemoveNestingObserver(NestingObserver* observer);
- // NOTE: Deprecated; prefer task_runner() and the TaskRunner interfaces.
- // TODO(skyostil): Remove these functions (crbug.com/465354).
- //
- // The "PostTask" family of methods call the task's Run method asynchronously
- // from within a message loop at some point in the future.
- //
- // With the PostTask variant, tasks are invoked in FIFO order, inter-mixed
- // with normal UI or IO event processing. With the PostDelayedTask variant,
- // tasks are called after at least approximately 'delay_ms' have elapsed.
- //
- // The NonNestable variants work similarly except that they promise never to
- // dispatch the task from a nested invocation of MessageLoop::Run. Instead,
- // such tasks get deferred until the top-most MessageLoop::Run is executing.
- //
- // The MessageLoop takes ownership of the Task, and deletes it after it has
- // been Run().
- //
- // PostTask(from_here, task) is equivalent to
- // PostDelayedTask(from_here, task, 0).
- //
- // NOTE: These methods may be called on any thread. The Task will be invoked
- // on the thread that executes MessageLoop::Run().
- void PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
-
- void PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay);
-
- // A variant on PostTask that deletes the given object. This is useful
- // if the object needs to live until the next run of the MessageLoop (for
- // example, deleting a RenderProcessHost from within an IPC callback is not
- // good).
- //
- // NOTE: This method may be called on any thread. The object will be deleted
- // on the thread that executes MessageLoop::Run().
- template <class T>
- void DeleteSoon(const tracked_objects::Location& from_here, const T* object) {
- base::subtle::DeleteHelperInternal<T, void>::DeleteViaSequencedTaskRunner(
- this, from_here, object);
- }
-
- // A variant on PostTask that releases the given reference counted object
- // (by calling its Release method). This is useful if the object needs to
- // live until the next run of the MessageLoop, or if the object needs to be
- // released on a particular thread.
- //
- // A common pattern is to manually increment the object's reference count
- // (AddRef), clear the pointer, then issue a ReleaseSoon. The reference count
- // is incremented manually to ensure clearing the pointer does not trigger a
- // delete and to account for the upcoming decrement (ReleaseSoon). For
- // example:
- //
- // scoped_refptr<Foo> foo = ...
- // foo->AddRef();
- // Foo* raw_foo = foo.get();
- // foo = NULL;
- // message_loop->ReleaseSoon(raw_foo);
- //
- // NOTE: This method may be called on any thread. The object will be
- // released (and thus possibly deleted) on the thread that executes
- // MessageLoop::Run(). If this is not the same as the thread that calls
- // ReleaseSoon(FROM_HERE, ), then T MUST inherit from
- // RefCountedThreadSafe<T>!
- template <class T>
- void ReleaseSoon(const tracked_objects::Location& from_here,
- const T* object) {
- base::subtle::ReleaseHelperInternal<T, void>::ReleaseViaSequencedTaskRunner(
- this, from_here, object);
- }
-
- // Deprecated: use RunLoop instead.
- // Run the message loop.
- void Run();
-
- // Deprecated: use RunLoop instead.
- // Process all pending tasks, windows messages, etc., but don't wait/sleep.
- // Return as soon as all items that can be run are taken care of.
- void RunUntilIdle();
-
// Deprecated: use RunLoop instead.
//
// Signals the Run method to return when it becomes idle. It will continue to
@@ -291,9 +215,11 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Returns the type passed to the constructor.
Type type() const { return type_; }
- // Returns the name of the thread this message loop is bound to.
- // This function is only valid when this message loop is running and
- // BindToCurrentThread has already been called.
+ // Returns the name of the thread this message loop is bound to. This function
+ // is only valid when this message loop is running, BindToCurrentThread() has
+ // already been called, and that call has a "happens-before" relationship with
+ // this one (the relationship is obtained implicitly by the MessageLoop's task
+ // posting system unless this is called very early).
std::string GetThreadName() const;
// Gets the TaskRunner associated with this message loop.
@@ -308,6 +234,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// thread to which the message loop is bound.
void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+ // Clears task_runner() and the ThreadTaskRunnerHandle for the target thread.
+ // Must be called on the thread to which the message loop is bound.
+ void ClearTaskRunnerForTesting();
+
// Enables or disables the recursive task processing. This happens in the case
// of recursive message loops. Some unwanted message loops may occur when
// using common controls or printer functions. By default, recursive task
@@ -388,16 +318,15 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
debug::TaskAnnotator* task_annotator() { return &task_annotator_; }
// Runs the specified PendingTask.
- void RunTask(const PendingTask& pending_task);
+ void RunTask(PendingTask* pending_task);
-#if defined(OS_WIN)
- // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
- // has been investigated.
- // This should be used for diagnostic only. If message pump wake-up mechanism
- // is based on auto-reset event this call would reset the event to unset
- // state.
- bool MessagePumpWasSignaled();
-#endif
+ // Disallow nesting. After this is called, running a nested RunLoop or calling
+ // Add/RemoveNestingObserver() on this MessageLoop will crash.
+ void DisallowNesting() { allow_nesting_ = false; }
+
+ // Disallow task observers. After this is called, calling
+ // Add/RemoveTaskObserver() on this MessageLoop will crash.
+ void DisallowTaskObservers() { allow_task_observers_ = false; }
//----------------------------------------------------------------------------
protected:
@@ -417,11 +346,13 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
void BindToCurrentThread();
private:
- friend class RunLoop;
friend class internal::IncomingTaskQueue;
+ friend class RunLoop;
friend class ScheduleWorkTest;
friend class Thread;
+ friend struct PendingTask;
FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
+ friend class PendingTaskTest;
// Creates a MessageLoop without binding to a thread.
// If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
@@ -468,15 +399,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// responsible for synchronizing ScheduleWork() calls.
void ScheduleWork();
- // Start recording histogram info about events and action IF it was enabled
- // and IF the statistics recorder can accept a registration of our histogram.
- void StartHistogrammer();
-
- // Add occurrence of event to our histogram, so that we can see what is being
- // done in a specific MessageLoop instance (i.e., specific thread).
- // If message_histogram_ is NULL, this is a no-op.
- void HistogramEvent(int event);
-
// Notify observers that a nested message loop is starting.
void NotifyBeginNestedLoop();
@@ -524,15 +446,19 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// if type_ is TYPE_CUSTOM and pump_ is null.
MessagePumpFactoryCallback pump_factory_;
- // A profiling histogram showing the counts of various messages and events.
- HistogramBase* message_histogram_;
-
RunLoop* run_loop_;
ObserverList<TaskObserver> task_observers_;
debug::TaskAnnotator task_annotator_;
+ // Used to allow creating a breadcrumb of program counters in PostTask.
+ // This variable is only initialized while a task is being executed and is
+ // meant only to store context for creating a backtrace breadcrumb. Do not
+ // attach other semantics to it without thinking through the use cases
+ // thoroughly.
+ const PendingTask* current_pending_task_;
+
scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
// A task runner which we haven't bound to a thread yet.
@@ -542,18 +468,15 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
scoped_refptr<SingleThreadTaskRunner> task_runner_;
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
- // Id of the thread this message loop is bound to.
+ // Id of the thread this message loop is bound to. Initialized once when the
+ // MessageLoop is bound to its thread and constant forever after.
PlatformThreadId thread_id_;
- template <class T, class R> friend class base::subtle::DeleteHelperInternal;
- template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
+ // Whether nesting is allowed.
+ bool allow_nesting_ = true;
- void DeleteSoonInternal(const tracked_objects::Location& from_here,
- void(*deleter)(const void*),
- const void* object);
- void ReleaseSoonInternal(const tracked_objects::Location& from_here,
- void(*releaser)(const void*),
- const void* object);
+ // Whether task observers are allowed.
+ bool allow_task_observers_ = true;
DISALLOW_COPY_AND_ASSIGN(MessageLoop);
};
@@ -599,6 +522,11 @@ class BASE_EXPORT MessageLoopForUI : public MessageLoop {
// never be called. Instead use Start(), which will forward all the native UI
// events to the Java message loop.
void Start();
+ void StartForTesting(base::android::JavaMessageHandlerFactory* factory,
+ WaitableEvent* test_done_event);
+ // On Android there are cases where we want to abort immediately without
+ // calling Quit(); in those cases we call Abort().
+ void Abort();
#endif
#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
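
With MessageLoop::PostTask()/DeleteSoon()/ReleaseSoon() and Run()/RunUntilIdle() removed from the class above, callers migrate to the TaskRunner interfaces and RunLoop; a sketch (DoWork and |heap_object| are hypothetical):

  // Old: loop->PostTask(FROM_HERE, base::Bind(&DoWork));
  loop->task_runner()->PostTask(FROM_HERE, base::Bind(&DoWork));
  loop->task_runner()->DeleteSoon(FROM_HERE, heap_object);
  base::RunLoop().Run();  // replaces the removed MessageLoop::Run()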
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index cabd25013b..54551daadd 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -12,7 +12,6 @@
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_task_runner.h"
#include "base/run_loop.h"
-#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -38,7 +37,7 @@ class MessageLoopTaskRunnerTest : public testing::Test {
task_thread_.Start();
// Allow us to pause the |task_thread_|'s MessageLoop.
- task_thread_.message_loop()->task_runner()->PostTask(
+ task_thread_.task_runner()->PostTask(
FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
Unretained(this)));
}
@@ -109,23 +108,23 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
MessageLoop* reply_deleted_on = NULL;
int reply_delete_order = -1;
- scoped_refptr<LoopRecorder> task_recoder =
+ scoped_refptr<LoopRecorder> task_recorder =
new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
- scoped_refptr<LoopRecorder> reply_recoder =
+ scoped_refptr<LoopRecorder> reply_recorder =
new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
ASSERT_TRUE(task_thread_.task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recoder),
- Bind(&RecordLoopAndQuit, reply_recoder)));
+ FROM_HERE, Bind(&RecordLoop, task_recorder),
+ Bind(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recoder = NULL;
- reply_recoder = NULL;
+ task_recorder = NULL;
+ reply_recorder = NULL;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
UnblockTaskThread();
- current_loop_->Run();
+ RunLoop().Run();
EXPECT_EQ(task_thread_.message_loop(), task_run_on);
EXPECT_EQ(current_loop_.get(), task_deleted_on);
@@ -142,9 +141,9 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
MessageLoop* reply_deleted_on = NULL;
int reply_delete_order = -1;
- scoped_refptr<LoopRecorder> task_recoder =
+ scoped_refptr<LoopRecorder> task_recorder =
new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
- scoped_refptr<LoopRecorder> reply_recoder =
+ scoped_refptr<LoopRecorder> reply_recorder =
new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
// Grab a task runner to a dead MessageLoop.
@@ -154,14 +153,14 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
task_thread_.Stop();
ASSERT_FALSE(
- task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recoder),
- Bind(&RecordLoopAndQuit, reply_recoder)));
+ task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recorder),
+ Bind(&RecordLoopAndQuit, reply_recorder)));
// The relay should have properly deleted its resources leaving us as the only
// reference.
EXPECT_EQ(task_delete_order, reply_delete_order);
- ASSERT_TRUE(task_recoder->HasOneRef());
- ASSERT_TRUE(reply_recoder->HasOneRef());
+ ASSERT_TRUE(task_recorder->HasOneRef());
+ ASSERT_TRUE(reply_recorder->HasOneRef());
// Nothing should have run though.
EXPECT_FALSE(task_run_on);
@@ -176,23 +175,23 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
MessageLoop* reply_deleted_on = NULL;
int reply_delete_order = -1;
- scoped_refptr<LoopRecorder> task_recoder =
+ scoped_refptr<LoopRecorder> task_recorder =
new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
- scoped_refptr<LoopRecorder> reply_recoder =
+ scoped_refptr<LoopRecorder> reply_recorder =
new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
// Enqueue the relay.
ASSERT_TRUE(current_loop_->task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recoder),
- Bind(&RecordLoopAndQuit, reply_recoder)));
+ FROM_HERE, Bind(&RecordLoop, task_recorder),
+ Bind(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recoder = NULL;
- reply_recoder = NULL;
+ task_recorder = NULL;
+ reply_recorder = NULL;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
- current_loop_->Run();
+ RunLoop().Run();
EXPECT_EQ(current_loop_.get(), task_run_on);
EXPECT_EQ(current_loop_.get(), task_deleted_on);
@@ -211,19 +210,19 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
MessageLoop* reply_deleted_on = NULL;
int reply_delete_order = -1;
- scoped_refptr<LoopRecorder> task_recoder =
+ scoped_refptr<LoopRecorder> task_recorder =
new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
- scoped_refptr<LoopRecorder> reply_recoder =
+ scoped_refptr<LoopRecorder> reply_recorder =
new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
// Enqueue the relay.
task_thread_.task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recoder),
- Bind(&RecordLoopAndQuit, reply_recoder));
+ FROM_HERE, Bind(&RecordLoop, task_recorder),
+ Bind(&RecordLoopAndQuit, reply_recorder));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recoder = NULL;
- reply_recoder = NULL;
+ task_recorder = NULL;
+ reply_recorder = NULL;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
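
The renamed |task_recorder|/|reply_recorder| pairs exercise PostTaskAndReply(), which runs |task| on the target runner and then |reply| back on the posting thread; a usage sketch (Compute/Consume are hypothetical):

  worker_runner->PostTaskAndReply(
      FROM_HERE,
      base::Bind(&Compute),   // runs on |worker_runner|'s thread
      base::Bind(&Consume));  // runs back on the posting thread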
diff --git a/base/message_loop/message_loop_test.cc b/base/message_loop/message_loop_test.cc
index 1ab946f9e2..6ffb16d05a 100644
--- a/base/message_loop/message_loop_test.cc
+++ b/base/message_loop/message_loop_test.cc
@@ -15,6 +15,7 @@
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace base {
namespace test {
@@ -97,20 +98,19 @@ void RunTest_PostTask(MessagePumpFactory factory) {
// Add tests to message loop
scoped_refptr<Foo> foo(new Foo());
std::string a("a"), b("b"), c("c"), d("d");
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&Foo::Test0, foo.get()));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&Foo::Test0, foo));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&Foo::Test1ConstRef, foo, a));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&Foo::Test1Ptr, foo, &b));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&Foo::Test1Int, foo, 100));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&Foo::Test2Ptr, foo, &a, &c));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&Foo::Test2Mixed, foo, a, &d));
// After all tests, post a message that will shut down the message loop
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
@@ -302,8 +302,8 @@ class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
~RecordDeletionProbe() {
*was_deleted_ = true;
if (post_on_delete_.get())
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_));
}
scoped_refptr<RecordDeletionProbe> post_on_delete_;
@@ -351,8 +351,8 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
void NestingFunc(int* depth) {
if (*depth > 0) {
*depth -= 1;
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&NestingFunc, depth));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, depth));
MessageLoop::current()->SetNestableTasksAllowed(true);
RunLoop().Run();
@@ -365,8 +365,8 @@ void RunTest_Nesting(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
int depth = 100;
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&NestingFunc, &depth));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, &depth));
RunLoop().Run();
EXPECT_EQ(depth, 0);
}
@@ -403,10 +403,9 @@ void RunNestedLoop(TestNestingObserver* observer,
RunLoop nested_loop;
// Verify that by the time the first task is run the observer has seen the
// message loop begin.
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- nested_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, nested_loop.QuitClosure());
nested_loop.Run();
// Quitting message loops doesn't change the begin count.
@@ -518,7 +517,7 @@ void RecursiveFunc(TaskList* order, int cookie, int depth,
if (depth > 0) {
if (is_reentrant)
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
}
@@ -536,12 +535,12 @@ void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
RunLoop().Run();
@@ -580,13 +579,13 @@ void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
- MessageLoop::current()->task_runner()->PostDelayedTask(
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
- MessageLoop::current()->task_runner()->PostDelayedTask(
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
RunLoop().Run();
@@ -616,12 +615,12 @@ void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
TaskList order;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
RunLoop().Run();
@@ -650,14 +649,12 @@ void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
TaskList order;
- MessageLoop::current()->task_runner()->PostNonNestableTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 1));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
RunLoop().Run();
// FIFO order.
@@ -692,24 +689,18 @@ void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
TaskList order;
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE,
- Bind(&FuncThatPumps, &order, 1));
- MessageLoop::current()->task_runner()->PostNonNestableTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE,
- Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 5));
- MessageLoop::current()->task_runner()->PostNonNestableTask(
- FROM_HERE,
- Bind(&QuitFunc, &order, 6));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&FuncThatPumps, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 5));
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, Bind(&QuitFunc, &order, 6));
RunLoop().Run();
@@ -750,17 +741,15 @@ void RunTest_QuitNow(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&FuncThatQuitsNow));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&FuncThatQuitsNow));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
RunLoop().Run();
@@ -786,14 +775,14 @@ void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, nested_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -816,14 +805,14 @@ void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, nested_run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -847,16 +836,16 @@ void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
RunLoop nested_run_loop;
RunLoop bogus_run_loop;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- bogus_run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, nested_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ bogus_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -882,36 +871,36 @@ void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
RunLoop nested_loop3;
RunLoop nested_loop4;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 5));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 6));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- nested_loop1.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 7));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- nested_loop2.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 8));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- nested_loop3.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 9));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- nested_loop4.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 10));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 5));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 6));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop1.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 7));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop2.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 8));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop3.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 9));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop4.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 10));
outer_run_loop.Run();
@@ -949,9 +938,9 @@ void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
run_loop.Quit();
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -968,13 +957,12 @@ void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 1));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- run_loop.QuitClosure());
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -995,20 +983,18 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&FuncThatQuitsNow));
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, run_loop.QuitClosure()); // has no effect
- MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 4));
- MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
- Bind(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&OrderedFunc, &order, 4));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
RunLoop outer_run_loop;
outer_run_loop.Run();
@@ -1028,7 +1014,7 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
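
The hunks above mechanically replace MessageLoop::current()->task_runner() with
ThreadTaskRunnerHandle::Get(); both yield the current thread's
SingleThreadTaskRunner, but the handle does not assume a MessageLoop is the
object backing it. A minimal sketch of the resulting posting pattern, assuming
a MessageLoop bound to the calling thread:

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"

void SayHello() {}

void PostOnCurrentThread() {
  base::MessageLoop loop;  // Installs a ThreadTaskRunnerHandle for this thread.
  // Post through the handle instead of reaching into MessageLoop::current().
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                base::Bind(&SayHello));
  base::RunLoop().RunUntilIdle();  // Runs the posted task, then returns.
}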
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 52337e31a8..14fe1ee391 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -27,6 +27,11 @@
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#include "base/test/android/java_handler_thread_for_testing.h"
+#endif
+
#if defined(OS_WIN)
#include "base/message_loop/message_pump_win.h"
#include "base/process/memory.h"
@@ -76,6 +81,53 @@ class Foo : public RefCounted<Foo> {
std::string result_;
};
+#if defined(OS_ANDROID)
+void AbortMessagePump() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ jclass exception = env->FindClass(
+ "org/chromium/base/TestSystemMessageHandler$TestException");
+
+ env->ThrowNew(exception,
+ "This is a test exception that should be caught in "
+ "TestSystemMessageHandler.handleMessage");
+ static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
+}
+
+void RunTest_AbortDontRunMoreTasks(bool delayed) {
+ MessageLoop loop(MessageLoop::TYPE_JAVA);
+
+ WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
+ java_thread.reset(new android::JavaHandlerThreadForTesting(
+ "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
+ &test_done_event));
+ java_thread->Start();
+
+ if (delayed) {
+ java_thread->message_loop()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&AbortMessagePump), TimeDelta::FromMilliseconds(10));
+ } else {
+ java_thread->message_loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&AbortMessagePump));
+ }
+
+ // Wait to ensure we catch the correct exception (and don't crash)
+ test_done_event.Wait();
+
+ java_thread->Stop();
+ java_thread.reset();
+}
+
+TEST(MessageLoopTest, JavaExceptionAbort) {
+ RunTest_AbortDontRunMoreTasks(false);
+}
+TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
+ RunTest_AbortDontRunMoreTasks(true);
+}
+#endif // defined(OS_ANDROID)
+
#if defined(OS_WIN)
// This function runs slowly to simulate a large amount of work being done.
@@ -107,7 +159,7 @@ void SubPumpFunc() {
}
void RunTest_PostDelayedTask_SharedTimer_SubPump() {
- MessageLoop loop(MessageLoop::TYPE_UI);
+ MessageLoop message_loop(MessageLoop::TYPE_UI);
// Test that the interval of the timer, used to run the next delayed task, is
// set to a value corresponding to when the next delayed task should run.
@@ -117,23 +169,20 @@ void RunTest_PostDelayedTask_SharedTimer_SubPump() {
int num_tasks = 1;
Time run_time;
- loop.PostTask(FROM_HERE, Bind(&SubPumpFunc));
+ message_loop.task_runner()->PostTask(FROM_HERE, Bind(&SubPumpFunc));
// This very delayed task should never run.
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ message_loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
TimeDelta::FromSeconds(1000));
// This slightly delayed task should run from within SubPumpFunc.
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&PostQuitMessage, 0),
- TimeDelta::FromMilliseconds(10));
+ message_loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&PostQuitMessage, 0), TimeDelta::FromMilliseconds(10));
Time start_time = Time::Now();
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(1, num_tasks);
// Ensure that we ran in far less time than the slower timer.
@@ -260,7 +309,7 @@ void RecursiveFunc(TaskList* order, int cookie, int depth,
if (depth > 0) {
if (is_reentrant)
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
}
@@ -273,27 +322,25 @@ void QuitFunc(TaskList* order, int cookie) {
order->RecordEnd(QUITMESSAGELOOP, cookie);
}
-void RecursiveFuncWin(MessageLoop* target,
+void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
HANDLE event,
bool expect_window,
TaskList* order,
bool is_reentrant) {
- target->PostTask(FROM_HERE,
- Bind(&RecursiveFunc, order, 1, 2, is_reentrant));
- target->PostTask(FROM_HERE,
- Bind(&MessageBoxFunc, order, 2, is_reentrant));
- target->PostTask(FROM_HERE,
- Bind(&RecursiveFunc, order, 3, 2, is_reentrant));
+ task_runner->PostTask(FROM_HERE,
+ Bind(&RecursiveFunc, order, 1, 2, is_reentrant));
+ task_runner->PostTask(FROM_HERE,
+ Bind(&MessageBoxFunc, order, 2, is_reentrant));
+ task_runner->PostTask(FROM_HERE,
+ Bind(&RecursiveFunc, order, 3, 2, is_reentrant));
  // The trick here is that for recursive task processing, this task will be
  // run _inside_ the MessageBox message loop, dismissing the MessageBox
  // without a chance.
  // For non-recursive task processing, this will be executed _after_ the
  // MessageBox has been dismissed by the code below, where
// expect_window_ is true.
- target->PostTask(FROM_HERE,
- Bind(&EndDialogFunc, order, 4));
- target->PostTask(FROM_HERE,
- Bind(&QuitFunc, order, 5));
+ task_runner->PostTask(FROM_HERE, Bind(&EndDialogFunc, order, 4));
+ task_runner->PostTask(FROM_HERE, Bind(&QuitFunc, order, 5));
  // Enforce that all tasks are posted before starting to run the main thread
  // message loop.
@@ -331,16 +378,12 @@ void RunTest_RecursiveDenial2(MessageLoop::Type message_loop_type) {
ASSERT_EQ(true, worker.StartWithOptions(options));
TaskList order;
win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
- worker.message_loop()->PostTask(FROM_HERE,
- Bind(&RecursiveFuncWin,
- MessageLoop::current(),
- event.Get(),
- true,
- &order,
- false));
+ worker.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+ event.Get(), true, &order, false));
// Let the other thread execute.
WaitForSingleObject(event.Get(), INFINITE);
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_EQ(17u, order.Size());
EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
@@ -375,16 +418,12 @@ void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
ASSERT_EQ(true, worker.StartWithOptions(options));
TaskList order;
win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
- worker.message_loop()->PostTask(FROM_HERE,
- Bind(&RecursiveFuncWin,
- MessageLoop::current(),
- event.Get(),
- false,
- &order,
- true));
+ worker.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+ event.Get(), false, &order, true));
// Let the other thread execute.
WaitForSingleObject(event.Get(), INFINITE);
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_EQ(18u, order.Size());
EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
@@ -417,7 +456,7 @@ void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
@@ -491,12 +530,9 @@ void RunTest_IOHandler() {
options.message_loop_type = MessageLoop::TYPE_IO;
ASSERT_TRUE(thread.StartWithOptions(options));
- MessageLoop* thread_loop = thread.message_loop();
- ASSERT_TRUE(NULL != thread_loop);
-
TestIOHandler handler(kPipeName, callback_called.Get(), false);
- thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
- Unretained(&handler)));
+ thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler)));
// Make sure the thread runs and sleeps for lack of work.
PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
@@ -532,19 +568,16 @@ void RunTest_WaitForIO() {
options.message_loop_type = MessageLoop::TYPE_IO;
ASSERT_TRUE(thread.StartWithOptions(options));
- MessageLoop* thread_loop = thread.message_loop();
- ASSERT_TRUE(NULL != thread_loop);
-
TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
- thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
- Unretained(&handler1)));
+ thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler1)));
// TODO(ajwong): Do we really need such long Sleeps in this function?
// Make sure the thread runs and sleeps for lack of work.
TimeDelta delay = TimeDelta::FromMilliseconds(100);
PlatformThread::Sleep(delay);
- thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
- Unretained(&handler2)));
+ thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler2)));
PlatformThread::Sleep(delay);
// At this time handler1 is waiting to be called, and the thread is waiting
@@ -581,9 +614,6 @@ RUN_MESSAGE_LOOP_TESTS(UI, &TypeUIMessagePumpFactory);
RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
#if defined(OS_WIN)
-// Additional set of tests for GPU version of UI message loop.
-RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
-
TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
@@ -657,26 +687,26 @@ TEST(MessageLoopTest, WaitForIO) {
}
TEST(MessageLoopTest, HighResolutionTimer) {
- MessageLoop loop;
+ MessageLoop message_loop;
Time::EnableHighResolutionTimer(true);
const TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
const TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
- EXPECT_FALSE(loop.HasHighResolutionTasks());
+ EXPECT_FALSE(message_loop.HasHighResolutionTasks());
// Post a fast task to enable the high resolution timers.
- loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
- kFastTimer);
- EXPECT_TRUE(loop.HasHighResolutionTasks());
- loop.Run();
- EXPECT_FALSE(loop.HasHighResolutionTasks());
+ message_loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, 1), kFastTimer);
+ EXPECT_TRUE(message_loop.HasHighResolutionTasks());
+ RunLoop().Run();
+ EXPECT_FALSE(message_loop.HasHighResolutionTasks());
EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
// Check that a slow task does not trigger the high resolution logic.
- loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
- kSlowTimer);
- EXPECT_FALSE(loop.HasHighResolutionTasks());
- loop.Run();
- EXPECT_FALSE(loop.HasHighResolutionTasks());
+ message_loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, 1), kSlowTimer);
+ EXPECT_FALSE(message_loop.HasHighResolutionTasks());
+ RunLoop().Run();
+ EXPECT_FALSE(message_loop.HasHighResolutionTasks());
Time::EnableHighResolutionTimer(false);
}
@@ -709,7 +739,7 @@ TEST(MessageLoopTest, FileDescriptorWatcherOutlivesMessageLoop) {
int fd = pipefds[1];
{
// Arrange for controller to live longer than message loop.
- MessageLoopForIO::FileDescriptorWatcher controller;
+ MessageLoopForIO::FileDescriptorWatcher controller(FROM_HERE);
{
MessageLoopForIO message_loop;
@@ -736,7 +766,7 @@ TEST(MessageLoopTest, FileDescriptorWatcherDoubleStop) {
// Arrange for message loop to live longer than controller.
MessageLoopForIO message_loop;
{
- MessageLoopForIO::FileDescriptorWatcher controller;
+ MessageLoopForIO::FileDescriptorWatcher controller(FROM_HERE);
QuitDelegate delegate;
message_loop.WatchFileDescriptor(fd,
@@ -836,10 +866,10 @@ TEST(MessageLoopTest, ThreadMainTaskRunner) {
scoped_refptr<Foo> foo(new Foo());
std::string a("a");
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(
- &Foo::Test1ConstRef, foo.get(), a));
+ &Foo::Test1ConstRef, foo, a));
// Post quit task;
- MessageLoop::current()->task_runner()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
@@ -861,8 +891,10 @@ TEST(MessageLoopTest, IsType) {
void EmptyFunction() {}
void PostMultipleTasks() {
- MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
- MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::Bind(&EmptyFunction));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::Bind(&EmptyFunction));
}
static const int kSignalMsg = WM_USER + 2;
@@ -890,19 +922,20 @@ LRESULT CALLBACK TestWndProcThunk(HWND hwnd, UINT message,
// First, we post a task that will post multiple no-op tasks to make sure
// that the pump's incoming task queue does not become empty during the
// test.
- MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&PostMultipleTasks));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::Bind(&PostMultipleTasks));
// Next, we post a task that posts a windows message to trigger the second
// stage of the test.
- MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(&PostWindowsMessage, hwnd));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&PostWindowsMessage, hwnd));
break;
case 2:
// Since we're about to enter a modal loop, tell the message loop that we
// intend to nest tasks.
MessageLoop::current()->SetNestableTasksAllowed(true);
bool did_run = false;
- MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(&EndTest, &did_run, hwnd));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&EndTest, &did_run, hwnd));
      // Run a nested Windows-style message loop and verify that our task runs. If
// it doesn't, then we'll loop here until the test times out.
MSG msg;
@@ -939,7 +972,7 @@ TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
- loop.Run();
+ RunLoop().Run();
ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
}
@@ -962,7 +995,7 @@ TEST(MessageLoopTest, OriginalRunnerWorks) {
scoped_refptr<Foo> foo(new Foo());
original_runner->PostTask(FROM_HERE,
- Bind(&Foo::Test1ConstRef, foo.get(), "a"));
+ Bind(&Foo::Test1ConstRef, foo, "a"));
RunLoop().RunUntilIdle();
EXPECT_EQ(1, foo->test_count());
}
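
Two of the hunks above also change Bind(&Foo::Test1ConstRef, foo.get(), ...)
to bind the scoped_refptr itself. Binding the refptr takes a reference, so the
object is guaranteed to outlive the posted task, whereas a raw pointer leaves
lifetime entirely to the caller. A sketch of the safer form, with a
hypothetical Worker class and assuming a task runner is bound to this thread:

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/threading/thread_task_runner_handle.h"

class Worker : public base::RefCounted<Worker> {
 public:
  void DoWork() {}

 private:
  friend class base::RefCounted<Worker>;
  ~Worker() {}
};

void PostWork() {
  scoped_refptr<Worker> worker(new Worker());
  // Binding |worker| (not worker.get()) adds a reference, keeping the
  // Worker alive until the posted task has run.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&Worker::DoWork, worker));
}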
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
index 2f740f2423..3d85b9b564 100644
--- a/base/message_loop/message_pump.cc
+++ b/base/message_loop/message_pump.cc
@@ -15,11 +15,4 @@ MessagePump::~MessagePump() {
void MessagePump::SetTimerSlack(TimerSlack) {
}
-#if defined(OS_WIN)
-bool MessagePump::WasSignaled() {
- NOTREACHED();
- return false;
-}
-#endif
-
} // namespace base
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
index af8ed41f27..c53be80410 100644
--- a/base/message_loop/message_pump.h
+++ b/base/message_loop/message_pump.h
@@ -124,15 +124,6 @@ class BASE_EXPORT MessagePump : public NonThreadSafe {
// Sets the timer slack to the specified value.
virtual void SetTimerSlack(TimerSlack timer_slack);
-
-#if defined(OS_WIN)
- // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
- // has been investigated.
- // This should be used for diagnostic only. If message pump wake-up mechanism
- // is based on auto-reset event this call would reset the event to unset
- // state.
- virtual bool WasSignaled();
-#endif
};
} // namespace base
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
index 3449aec860..cf68270c56 100644
--- a/base/message_loop/message_pump_default.cc
+++ b/base/message_loop/message_pump_default.cc
@@ -4,8 +4,6 @@
#include "base/message_loop/message_pump_default.h"
-#include <algorithm>
-
#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -54,38 +52,11 @@ void MessagePumpDefault::Run(Delegate* delegate) {
if (delayed_work_time_.is_null()) {
event_.Wait();
} else {
- TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
- if (delay > TimeDelta()) {
-#if defined(OS_WIN)
- // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
- // logic into TimedWait implementation in waitable_event_win.cc.
-
- // crbug.com/487724: on Windows, waiting for less than 1 ms results in
- // returning from TimedWait promptly and spinning
- // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
- // run a delayed task. |min_delay| is the minimum possible wait to
- // to avoid the spinning.
- constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
- do {
- delay = std::max(delay, min_delay);
- if (event_.TimedWait(delay))
- break;
-
- // TimedWait can time out earlier than the specified |delay| on
- // Windows. It doesn't make sense to run the outer loop in that case
- // because there isn't going to be any new work. It is less overhead
- // to just go back to wait.
- // In practice this inner wait loop might have up to 3 iterations.
- delay = delayed_work_time_ - TimeTicks::Now();
- } while (delay > TimeDelta());
-#else
- event_.TimedWait(delay);
-#endif
- } else {
- // It looks like delayed_work_time_ indicates a time in the past, so we
- // need to call DoDelayedWork now.
- delayed_work_time_ = TimeTicks();
- }
+      // No need to handle an already expired |delayed_work_time_| in any
+      // special way. When |delayed_work_time_| is in the past, TimedWaitUntil
+      // returns promptly and |delayed_work_time_| will be re-initialized on
+      // the next DoDelayedWork call, which has to be called in order to get
+      // here again.
+ event_.TimedWaitUntil(delayed_work_time_);
}
// Since event_ is auto-reset, we don't need to do anything special here
// other than service each delegate method.
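
The deleted block computed a relative delay and, on Windows, looped around
TimedWait() to compensate for early wakeups; TimedWaitUntil() takes an
absolute deadline instead, so a deadline already in the past simply returns at
once. A sketch of the new wait shape (free-standing names for illustration,
not the pump's actual members):

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"

// With an absolute deadline there is no special case for an
// already-expired |delayed_work_time|.
void WaitForWork(base::WaitableEvent* event,
                 base::TimeTicks delayed_work_time) {
  if (delayed_work_time.is_null())
    event->Wait();                             // No delayed task pending.
  else
    event->TimedWaitUntil(delayed_work_time);  // Returns promptly if past.
}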
diff --git a/base/message_loop/message_pump_glib.h b/base/message_loop/message_pump_glib.h
index a2b54d8542..d79dba55a2 100644
--- a/base/message_loop/message_pump_glib.h
+++ b/base/message_loop/message_pump_glib.h
@@ -69,7 +69,7 @@ class BASE_EXPORT MessagePumpGlib : public MessagePump {
// Dispatch() will be called.
int wakeup_pipe_read_;
int wakeup_pipe_write_;
- // Use a scoped_ptr to avoid needing the definition of GPollFD in the header.
+ // Use a unique_ptr to avoid needing the definition of GPollFD in the header.
std::unique_ptr<GPollFD> wakeup_gpollfd_;
DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
diff --git a/base/message_loop/message_pump_glib_unittest.cc b/base/message_loop/message_pump_glib_unittest.cc
index 7ddd4f08a0..a89ccb9365 100644
--- a/base/message_loop/message_pump_glib_unittest.cc
+++ b/base/message_loop/message_pump_glib_unittest.cc
@@ -16,7 +16,9 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -152,7 +154,7 @@ void ExpectProcessedEvents(EventInjector* injector, int count) {
// Posts a task on the current message loop.
void PostMessageLoopTask(const tracked_objects::Location& from_here,
const Closure& task) {
- MessageLoop::current()->PostTask(from_here, task);
+ ThreadTaskRunnerHandle::Get()->PostTask(from_here, task);
}
// Test fixture.
@@ -193,7 +195,7 @@ TEST_F(MessagePumpGLibTest, TestQuit) {
injector()->Reset();
// Quit from an event
injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
- loop()->Run();
+ RunLoop().Run();
EXPECT_EQ(1, injector()->processed_events());
}
@@ -213,7 +215,7 @@ TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
injector()->AddEventAsTask(0, posted_task);
injector()->AddEventAsTask(0, Bind(&DoNothing));
injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
- loop()->Run();
+ RunLoop().Run();
EXPECT_EQ(4, injector()->processed_events());
injector()->Reset();
@@ -224,7 +226,7 @@ TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
injector()->AddEventAsTask(0, posted_task);
injector()->AddEventAsTask(10, Bind(&DoNothing));
injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
- loop()->Run();
+ RunLoop().Run();
EXPECT_EQ(4, injector()->processed_events());
}
@@ -233,15 +235,15 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
// Tests that we process tasks while waiting for new events.
// The event queue is empty at first.
for (int i = 0; i < 10; ++i) {
- loop()->PostTask(FROM_HERE, Bind(&IncrementInt, &task_count));
+ loop()->task_runner()->PostTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count));
}
// After all the previous tasks have executed, enqueue an event that will
// quit.
- loop()->PostTask(
- FROM_HERE,
- Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
- MessageLoop::QuitWhenIdleClosure()));
- loop()->Run();
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
+ MessageLoop::QuitWhenIdleClosure()));
+ RunLoop().Run();
ASSERT_EQ(10, task_count);
EXPECT_EQ(1, injector()->processed_events());
@@ -249,21 +251,19 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
injector()->Reset();
task_count = 0;
for (int i = 0; i < 10; ++i) {
- loop()->PostDelayedTask(
- FROM_HERE,
- Bind(&IncrementInt, &task_count),
- TimeDelta::FromMilliseconds(10*i));
+ loop()->task_runner()->PostDelayedTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count),
+ TimeDelta::FromMilliseconds(10 * i));
}
// After all the previous tasks have executed, enqueue an event that will
// quit.
// This relies on the fact that delayed tasks are executed in delay order.
// That is verified in message_loop_unittest.cc.
- loop()->PostDelayedTask(
- FROM_HERE,
- Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
- MessageLoop::QuitWhenIdleClosure()),
+ loop()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
+ MessageLoop::QuitWhenIdleClosure()),
TimeDelta::FromMilliseconds(150));
- loop()->Run();
+ RunLoop().Run();
ASSERT_EQ(10, task_count);
EXPECT_EQ(1, injector()->processed_events());
}
@@ -285,7 +285,7 @@ TEST_F(MessagePumpGLibTest, TestEventsWhileWaitingForWork) {
// And then quit (relies on the condition tested by TestEventTaskInterleave).
injector()->AddEvent(10, MessageLoop::QuitWhenIdleClosure());
- loop()->Run();
+ RunLoop().Run();
EXPECT_EQ(12, injector()->processed_events());
}
@@ -310,7 +310,7 @@ class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
if (task_count_ == 0 && event_count_ == 0) {
MessageLoop::current()->QuitWhenIdle();
} else {
- MessageLoop::current()->PostTask(
+ ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, Bind(&ConcurrentHelper::FromTask, this));
}
}
@@ -356,17 +356,17 @@ TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
// Add 2 events to the queue to make sure it is always full (when we remove
// the event before processing it).
injector()->AddEventAsTask(
- 0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
+ 0, Bind(&ConcurrentHelper::FromEvent, helper));
injector()->AddEventAsTask(
- 0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
+ 0, Bind(&ConcurrentHelper::FromEvent, helper));
// Similarly post 2 tasks.
- loop()->PostTask(
- FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
- loop()->PostTask(
- FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
- loop()->Run();
+ RunLoop().Run();
EXPECT_EQ(0, helper->event_count());
EXPECT_EQ(0, helper->task_count());
}
@@ -381,8 +381,8 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
injector->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
// Post a couple of dummy tasks
- MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
// Drain the events
while (g_main_context_pending(NULL)) {
@@ -394,10 +394,9 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
// Tests that draining events using GLib works.
- loop()->PostTask(
- FROM_HERE,
- Bind(&AddEventsAndDrainGLib, Unretained(injector())));
- loop()->Run();
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&AddEventsAndDrainGLib, Unretained(injector())));
+ RunLoop().Run();
EXPECT_EQ(3, injector()->processed_events());
}
@@ -447,21 +446,19 @@ void TestGLibLoopInternal(EventInjector* injector) {
injector->AddDummyEvent(0);
injector->AddDummyEvent(0);
// Post a couple of dummy tasks
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&IncrementInt, &task_count));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&IncrementInt, &task_count));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count));
// Delayed events
injector->AddDummyEvent(10);
injector->AddDummyEvent(10);
// Delayed work
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&IncrementInt, &task_count),
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, Bind(&IncrementInt, &task_count),
TimeDelta::FromMilliseconds(30));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&GLibLoopRunner::Quit, runner.get()),
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
TimeDelta::FromMilliseconds(40));
// Run a nested, straight GLib message loop.
@@ -482,21 +479,19 @@ void TestGtkLoopInternal(EventInjector* injector) {
injector->AddDummyEvent(0);
injector->AddDummyEvent(0);
// Post a couple of dummy tasks
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&IncrementInt, &task_count));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&IncrementInt, &task_count));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ Bind(&IncrementInt, &task_count));
// Delayed events
injector->AddDummyEvent(10);
injector->AddDummyEvent(10);
// Delayed work
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&IncrementInt, &task_count),
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, Bind(&IncrementInt, &task_count),
TimeDelta::FromMilliseconds(30));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&GLibLoopRunner::Quit, runner.get()),
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
TimeDelta::FromMilliseconds(40));
// Run a nested, straight Gtk message loop.
@@ -514,10 +509,9 @@ TEST_F(MessagePumpGLibTest, TestGLibLoop) {
// loop is not run by MessageLoop::Run() but by a straight GLib loop.
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
- loop()->PostTask(
- FROM_HERE,
- Bind(&TestGLibLoopInternal, Unretained(injector())));
- loop()->Run();
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&TestGLibLoopInternal, Unretained(injector())));
+ RunLoop().Run();
}
TEST_F(MessagePumpGLibTest, TestGtkLoop) {
@@ -525,10 +519,9 @@ TEST_F(MessagePumpGLibTest, TestGtkLoop) {
// loop is not run by MessageLoop::Run() but by a straight Gtk loop.
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
- loop()->PostTask(
- FROM_HERE,
- Bind(&TestGtkLoopInternal, Unretained(injector())));
- loop()->Run();
+ loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&TestGtkLoopInternal, Unretained(injector())));
+ RunLoop().Run();
}
} // namespace base
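
Several of these tests hand control to a raw GLib or Gtk loop and then drain
whatever is pending before asserting on processed-event counts. A standalone
sketch of that drain step, using only public GLib calls:

#include <glib.h>

// Process every event currently pending on the default GLib context
// without blocking, mirroring the drain loop used by the tests above.
void DrainDefaultGLibContext() {
  while (g_main_context_pending(NULL))
    g_main_context_iteration(NULL, FALSE);
}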
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
index 5aa55678d7..86f5faa056 100644
--- a/base/message_loop/message_pump_libevent.cc
+++ b/base/message_loop/message_pump_libevent.cc
@@ -44,12 +44,13 @@
namespace base {
-MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher()
+MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher(
+ const tracked_objects::Location& from_here)
: event_(NULL),
pump_(NULL),
watcher_(NULL),
- was_destroyed_(NULL) {
-}
+ was_destroyed_(NULL),
+ created_from_location_(from_here) {}
MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
if (event_) {
@@ -74,15 +75,15 @@ bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
return (rv == 0);
}
-void MessagePumpLibevent::FileDescriptorWatcher::Init(event *e) {
+void MessagePumpLibevent::FileDescriptorWatcher::Init(event* e) {
DCHECK(e);
DCHECK(!event_);
event_ = e;
}
-event *MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
- struct event *e = event_;
+event* MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
+ struct event* e = event_;
event_ = NULL;
return e;
}
@@ -112,7 +113,7 @@ MessagePumpLibevent::MessagePumpLibevent()
wakeup_pipe_in_(-1),
wakeup_pipe_out_(-1) {
if (!Init())
- NOTREACHED();
+ NOTREACHED();
}
MessagePumpLibevent::~MessagePumpLibevent() {
@@ -134,8 +135,8 @@ MessagePumpLibevent::~MessagePumpLibevent() {
bool MessagePumpLibevent::WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate) {
+ FileDescriptorWatcher* controller,
+ Watcher* delegate) {
DCHECK_GE(fd, 0);
DCHECK(controller);
DCHECK(delegate);
@@ -292,16 +293,8 @@ void MessagePumpLibevent::ScheduleDelayedWork(
bool MessagePumpLibevent::Init() {
int fds[2];
- if (pipe(fds)) {
- DLOG(ERROR) << "pipe() failed, errno: " << errno;
- return false;
- }
- if (!SetNonBlocking(fds[0])) {
- DLOG(ERROR) << "SetNonBlocking for pipe fd[0] failed, errno: " << errno;
- return false;
- }
- if (!SetNonBlocking(fds[1])) {
- DLOG(ERROR) << "SetNonBlocking for pipe fd[1] failed, errno: " << errno;
+ if (!CreateLocalNonBlockingPipe(fds)) {
+ DPLOG(ERROR) << "pipe creation failed";
return false;
}
wakeup_pipe_out_ = fds[0];
@@ -324,8 +317,11 @@ void MessagePumpLibevent::OnLibeventNotification(int fd,
FileDescriptorWatcher* controller =
static_cast<FileDescriptorWatcher*>(context);
DCHECK(controller);
- TRACE_EVENT1("toplevel", "MessagePumpLibevent::OnLibeventNotification",
- "fd", fd);
+ TRACE_EVENT2("toplevel", "MessagePumpLibevent::OnLibeventNotification",
+ "src_file", controller->created_from_location().file_name(),
+ "src_func", controller->created_from_location().function_name());
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION heap_profiler_scope(
+ controller->created_from_location().file_name());
MessagePumpLibevent* pump = controller->pump();
pump->processed_io_events_ = true;
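
FileDescriptorWatcher now records its creation site, and
OnLibeventNotification() feeds that location into TRACE_EVENT2 and the heap
profiler so I/O wakeups can be attributed to the code that registered them. A
sketch of registering a watch under the new constructor signature (the fd,
controller, and Watcher delegate are assumed to be set up by the caller; the
controller must outlive the watch, so real code keeps it as a member):

#include "base/message_loop/message_loop.h"

// Elsewhere, the caller constructs the controller with its location:
//   base::MessageLoopForIO::FileDescriptorWatcher controller(FROM_HERE);
void WatchForReads(int fd,
                   base::MessageLoopForIO::FileDescriptorWatcher* controller,
                   base::MessageLoopForIO::Watcher* delegate) {
  base::MessageLoopForIO::current()->WatchFileDescriptor(
      fd, true /* persistent */, base::MessageLoopForIO::WATCH_READ,
      controller, delegate);
}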
diff --git a/base/message_loop/message_pump_libevent.h b/base/message_loop/message_pump_libevent.h
index 76f882f680..1124560d66 100644
--- a/base/message_loop/message_pump_libevent.h
+++ b/base/message_loop/message_pump_libevent.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
#include "base/compiler_specific.h"
+#include "base/location.h"
#include "base/macros.h"
#include "base/message_loop/message_pump.h"
#include "base/threading/thread_checker.h"
@@ -37,7 +38,7 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
// Object returned by WatchFileDescriptor to manage further watching.
class FileDescriptorWatcher {
public:
- FileDescriptorWatcher();
+ explicit FileDescriptorWatcher(const tracked_objects::Location& from_here);
~FileDescriptorWatcher(); // Implicitly calls StopWatchingFileDescriptor.
// NOTE: These methods aren't called StartWatching()/StopWatching() to
@@ -47,6 +48,10 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
// to do.
bool StopWatchingFileDescriptor();
+ const tracked_objects::Location& created_from_location() {
+ return created_from_location_;
+ }
+
private:
friend class MessagePumpLibevent;
friend class MessagePumpLibeventTest;
@@ -73,6 +78,8 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
// destructor.
bool* was_destroyed_;
+ const tracked_objects::Location created_from_location_;
+
DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
};
@@ -100,8 +107,8 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
bool WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate);
+ FileDescriptorWatcher* controller,
+ Watcher* delegate);
// MessagePump methods:
void Run(Delegate* delegate) override;
@@ -112,15 +119,11 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
private:
friend class MessagePumpLibeventTest;
- void WillProcessIOEvent();
- void DidProcessIOEvent();
-
// Risky part of constructor. Returns true on success.
bool Init();
// Called by libevent to tell us a registered FD can be read/written to.
- static void OnLibeventNotification(int fd, short flags,
- void* context);
+ static void OnLibeventNotification(int fd, short flags, void* context);
// Unix pipe used to implement ScheduleWork()
// ... callback; called by libevent inside Run() when pipe is ready to read
diff --git a/base/message_loop/message_pump_mac.h b/base/message_loop/message_pump_mac.h
index 14b8377b90..f0766eb860 100644
--- a/base/message_loop/message_pump_mac.h
+++ b/base/message_loop/message_pump_mac.h
@@ -78,9 +78,11 @@ class AutoreleasePoolType;
typedef NSAutoreleasePool AutoreleasePoolType;
#endif // !defined(__OBJC__) || __has_feature(objc_arc)
-class MessagePumpCFRunLoopBase : public MessagePump {
+class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
// Needs access to CreateAutoreleasePool.
friend class MessagePumpScopedAutoreleasePool;
+ friend class TestMessagePumpCFRunLoopBase;
+
public:
MessagePumpCFRunLoopBase();
~MessagePumpCFRunLoopBase() override;
@@ -113,6 +115,21 @@ class MessagePumpCFRunLoopBase : public MessagePump {
virtual AutoreleasePoolType* CreateAutoreleasePool();
private:
+  // Marking timers as invalid at the right time helps significantly reduce
+  // power use (see the comment in RunDelayedWorkTimer()); however, there is
+  // no public API for doing so. CFRuntime.h states that CFRuntimeBase, upon
+  // which the timer invalidation functions below are based, can change from
+  // release to release and should not be accessed directly (this struct last
+  // changed at least as recently as 2008, in CF-476).
+ //
+ // This function uses private API to modify a test timer's valid state and
+ // uses public API to confirm that the private API changed the right bit.
+ static bool CanInvalidateCFRunLoopTimers();
+
+ // Sets a Core Foundation object's "invalid" bit to |valid|. Based on code
+ // from CFRunLoop.c.
+ static void ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid);
+
// Timer callback scheduled by ScheduleDelayedWork. This does not do any
// work, but it signals work_source_ so that delayed work can be performed
// within the appropriate priority constraints.
diff --git a/base/message_loop/message_pump_mac.mm b/base/message_loop/message_pump_mac.mm
index 95d1c5f1fc..a3accee049 100644
--- a/base/message_loop/message_pump_mac.mm
+++ b/base/message_loop/message_pump_mac.mm
@@ -4,16 +4,18 @@
#import "base/message_loop/message_pump_mac.h"
-#include <dlfcn.h>
#import <Foundation/Foundation.h>
#include <limits>
#include "base/logging.h"
+#include "base/mac/call_with_eh_frame.h"
#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
#include "base/message_loop/timer_slack.h"
#include "base/run_loop.h"
#include "base/time/time.h"
+#include "build/build_config.h"
#if !defined(OS_IOS)
#import <AppKit/AppKit.h>
@@ -67,34 +69,48 @@ const CFTimeInterval kCFTimeIntervalMax =
// Set to true if MessagePumpMac::Create() is called before NSApp is
// initialized. Only accessed from the main thread.
bool g_not_using_cr_app = false;
+
+// Various CoreFoundation definitions.
+typedef struct __CFRuntimeBase {
+ uintptr_t _cfisa;
+ uint8_t _cfinfo[4];
+#if __LP64__
+ uint32_t _rc;
#endif
+} CFRuntimeBase;
-// Call through to CFRunLoopTimerSetTolerance(), which is only available on
-// OS X 10.9.
-void SetTimerTolerance(CFRunLoopTimerRef timer, CFTimeInterval tolerance) {
- typedef void (*CFRunLoopTimerSetTolerancePtr)(CFRunLoopTimerRef timer,
- CFTimeInterval tolerance);
-
- static CFRunLoopTimerSetTolerancePtr settimertolerance_function_ptr;
-
- static dispatch_once_t get_timer_tolerance_function_ptr_once;
- dispatch_once(&get_timer_tolerance_function_ptr_once, ^{
- NSBundle* bundle =[NSBundle
- bundleWithPath:@"/System/Library/Frameworks/CoreFoundation.framework"];
- const char* path = [[bundle executablePath] fileSystemRepresentation];
- CHECK(path);
- void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
- CHECK(library_handle) << dlerror();
- settimertolerance_function_ptr =
- reinterpret_cast<CFRunLoopTimerSetTolerancePtr>(
- dlsym(library_handle, "CFRunLoopTimerSetTolerance"));
-
- dlclose(library_handle);
- });
+#if defined(__BIG_ENDIAN__)
+#define __CF_BIG_ENDIAN__ 1
+#define __CF_LITTLE_ENDIAN__ 0
+#endif
- if (settimertolerance_function_ptr)
- settimertolerance_function_ptr(timer, tolerance);
+#if defined(__LITTLE_ENDIAN__)
+#define __CF_LITTLE_ENDIAN__ 1
+#define __CF_BIG_ENDIAN__ 0
+#endif
+
+#define CF_INFO_BITS (!!(__CF_BIG_ENDIAN__)*3)
+
+#define __CFBitfieldMask(N1, N2) \
+ ((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - N1))
+#define __CFBitfieldSetValue(V, N1, N2, X) \
+ ((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | \
+ (((X) << (N2)) & __CFBitfieldMask(N1, N2)))
+
+// Marking timers as invalid at the right time by flipping their valid bit
+// helps significantly reduce power use (see the explanation in
+// RunDelayedWorkTimer()); however, there is no public API for doing so.
+// CFRuntime.h states that CFRuntimeBase can change from release to release
+// and should not be accessed directly. The last known change of this struct
+// occurred in 2008 in CF-476 / 10.5; unfortunately the source for 10.11 and
+// 10.12 is not available for inspection at this time.
+// CanInvalidateCFRunLoopTimers() will at least prevent us from invalidating
+// timers if this function starts flipping the wrong bit on a future OS release.
+void __ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid) {
+ __CFBitfieldSetValue(((CFRuntimeBase*)timer)->_cfinfo[CF_INFO_BITS], 3, 3,
+ valid);
}
+#endif // !defined(OS_IOS)
} // namespace
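
For the curious, the mask arithmetic above works out as follows in the timer
case, where both bit indices are 3: shifting ~0 left by (31 - 3 + 3) = 31 and
back right by (31 - 3) = 28 isolates bit 3 of the info byte. A self-contained
check, assuming a 32-bit UInt32 as CoreFoundation does:

#include <cassert>
#include <cstdint>

int main() {
  // __CFBitfieldMask(3, 3): isolate bit 3, the timer's "valid" flag.
  uint32_t mask = ((static_cast<uint32_t>(~0UL)) << (31 - 3 + 3)) >> (31 - 3);
  assert(mask == 0x8);
  return 0;
}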
@@ -119,6 +135,47 @@ class MessagePumpScopedAutoreleasePool {
DISALLOW_COPY_AND_ASSIGN(MessagePumpScopedAutoreleasePool);
};
+#if !defined(OS_IOS)
+// This function uses private API to modify a test timer's valid state and
+// uses public API to confirm that the private API changed the correct bit.
+// static
+bool MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers() {
+ CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+ timer_context.info = nullptr;
+ ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(
+ CFRunLoopTimerCreate(NULL, // allocator
+ kCFTimeIntervalMax, // fire time
+ kCFTimeIntervalMax, // interval
+ 0, // flags
+ 0, // priority
+ nullptr, &timer_context));
+ // Should be valid from the start.
+ if (!CFRunLoopTimerIsValid(test_timer)) {
+ return false;
+ }
+ // Confirm that the private API can mark the timer invalid.
+ __ChromeCFRunLoopTimerSetValid(test_timer, false);
+ if (CFRunLoopTimerIsValid(test_timer)) {
+ return false;
+ }
+ // Confirm that the private API can mark the timer valid.
+ __ChromeCFRunLoopTimerSetValid(test_timer, true);
+ return CFRunLoopTimerIsValid(test_timer);
+}
+#endif // !defined(OS_IOS)
+
+// static
+void MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(
+ CFRunLoopTimerRef timer,
+ bool valid) {
+#if !defined(OS_IOS)
+ static bool can_invalidate_timers = CanInvalidateCFRunLoopTimers();
+ if (can_invalidate_timers) {
+ __ChromeCFRunLoopTimerSetValid(timer, valid);
+ }
+#endif // !defined(OS_IOS)
+}
+
// Must be called on the run loop thread.
MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase()
: delegate_(NULL),
@@ -268,11 +325,22 @@ void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
const TimeTicks& delayed_work_time) {
TimeDelta delta = delayed_work_time - TimeTicks::Now();
delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
+
+ // Flip the timer's validation bit just before setting the new fire time. Do
+ // this now because CFRunLoopTimerSetNextFireDate() likely checks the validity
+ // of a timer before proceeding to set its fire date. Making the timer valid
+ // now won't have any side effects (such as a premature firing of the timer)
+ // because we're only flipping a bit.
+ //
+ // Please see the comment in RunDelayedWorkTimer() for more info on the whys
+ // of invalidation.
+ ChromeCFRunLoopTimerSetValid(delayed_work_timer_, true);
+
CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
- SetTimerTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
+ CFRunLoopTimerSetTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
} else {
- SetTimerTolerance(delayed_work_timer_, 0);
+ CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
}
}
@@ -290,6 +358,31 @@ void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(
// The timer won't fire again until it's reset.
self->delayed_work_fire_time_ = kCFTimeIntervalMax;
+ // The message pump's timer needs to fire at changing and unpredictable
+ // intervals. Creating a new timer for each firing time is very expensive, so
+ // the message pump instead uses a repeating timer with a very large repeat
+ // rate. After each firing of the timer, the run loop sets the timer's next
+ // firing time to the distant future, essentially pausing the timer until the
+ // pump sets the next firing time. This is the solution recommended by Apple.
+ //
+ // It turns out, however, that scheduling timers is also quite expensive, and
+ // that every one of the message pump's timer firings incurs two
+ // reschedulings. The first rescheduling occurs in ScheduleDelayedWork(),
+ // which sets the desired next firing time. The second comes after exiting
+ // this method (the timer's callback method), when the run loop sets the
+ // timer's next firing time to far in the future.
+ //
+ // The code in __CFRunLoopDoTimer() inside CFRunLoop.c calls the timer's
+ // callback, confirms that the timer is valid, and then sets its future
+ // firing time based on its repeat frequency. Flipping the valid bit here
+  // causes __CFRunLoopDoTimer() to skip setting the future firing time.
+ // Note that there's public API to invalidate a timer but it goes beyond
+ // flipping the valid bit, making the timer unusable in the future.
+ //
+ // ScheduleDelayedWork() flips the valid bit back just before setting the
+ // timer's new firing time.
+ ChromeCFRunLoopTimerSetValid(self->delayed_work_timer_, false);
+
// CFRunLoopTimers fire outside of the priority scheme for CFRunLoopSources.
// In order to establish the proper priority in which work and delayed work
// are processed one for one, the timer used to schedule delayed work must
@@ -301,7 +394,9 @@ void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(
// static
void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
- self->RunWork();
+ base::mac::CallWithEHFrame(^{
+ self->RunWork();
+ });
}
// Called by MessagePumpCFRunLoopBase::RunWorkSource.
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 600b94ed48..6b38d55bbd 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -5,15 +5,26 @@
#include "base/metrics/field_trial.h"
#include <algorithm>
+#include <utility>
+#include "base/base_switches.h"
#include "base/build_time.h"
+#include "base/command_line.h"
+#include "base/debug/activity_tracker.h"
#include "base/logging.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/process/memory.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+// On POSIX, the fd is shared using the mapping in GlobalDescriptors.
+#if defined(OS_POSIX) && !defined(OS_NACL)
+#include "base/posix/global_descriptors.h"
+#endif
+
namespace base {
namespace {
@@ -27,6 +38,62 @@ const char kPersistentStringSeparator = '/'; // Currently a slash.
// command line which forces its activation.
const char kActivationMarker = '*';
+// Use shared memory to communicate field trial (experiment) state. The flag
+// is enabled; see https://codereview.chromium.org/2365273004/ and
+// crbug.com/653874 for discussion of the data format and the single shared
+// memory segment.
+// The browser is the only process that has write access to the shared memory.
+// This is safe from race conditions because MakeIterable is a release operation
+// and GetNextOfType is an acquire operation, so memory writes before
+// MakeIterable happen before memory reads after GetNextOfType.
+const bool kUseSharedMemoryForFieldTrials = true;
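
The release/acquire claim in the comment above is the standard happens-before
argument. A toy illustration with std::atomic follows; this is not the
allocator's code, only the memory-ordering principle it relies on:

#include <atomic>

std::atomic<bool> published{false};
int payload = 0;

void BrowserSide() {  // Sole writer, like the browser process.
  payload = 42;                                      // Plain write...
  published.store(true, std::memory_order_release);  // ...made visible here.
}

void ChildSide() {  // Reader, like a child process.
  while (!published.load(std::memory_order_acquire)) {
    // Spin until the release store is observed.
  }
  // The acquire load synchronizes-with the release store, so reading
  // payload == 42 here is guaranteed, with no data race.
}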
+
+// Constants for the field trial allocator.
+const char kAllocatorName[] = "FieldTrialAllocator";
+
+// We allocate 128 KiB to hold all the field trial data. This should be enough,
+// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
+// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
+// to physical memory when they are touched. If the size of the allocated field
+// trials does get larger than 128 KiB, then we will drop some field trials in
+// child processes, leading to an inconsistent view between browser and child
+// processes and possibly causing crashes (see crbug.com/661617).
+const size_t kFieldTrialAllocationSize = 128 << 10; // 128 KiB
+
+// Writes out string1 and then string2 to pickle.
+bool WriteStringPair(Pickle* pickle,
+ const StringPiece& string1,
+ const StringPiece& string2) {
+ if (!pickle->WriteString(string1))
+ return false;
+ if (!pickle->WriteString(string2))
+ return false;
+ return true;
+}
+
+// Writes out the field trial's contents (via trial_state) to the pickle. The
+// format of the pickle looks like:
+// TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
+// If there are no parameters, then it just ends at GroupName.
+bool PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
+ if (!WriteStringPair(pickle, *trial_state.trial_name,
+ *trial_state.group_name)) {
+ return false;
+ }
+
+ // Get field trial params.
+ std::map<std::string, std::string> params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+ *trial_state.trial_name, *trial_state.group_name, &params);
+
+ // Write params to pickle.
+ for (const auto& param : params) {
+ if (!WriteStringPair(pickle, param.first, param.second))
+ return false;
+ }
+ return true;
+}
+
// Creates a Time value based on the |year|, |month| and |day_of_month| parameters.
Time CreateTimeFromParams(int year, int month, int day_of_month) {
DCHECK_GT(year, 1970);
@@ -73,11 +140,18 @@ FieldTrial::Probability GetGroupBoundaryValue(
return std::min(result, divisor - 1);
}
+// Separate type from FieldTrial::State so that it can use StringPieces.
+struct FieldTrialStringEntry {
+ StringPiece trial_name;
+ StringPiece group_name;
+ bool activated = false;
+};
+
// Parses the --force-fieldtrials string |trials_string| into |entries|.
// Returns true if the string was parsed correctly. On failure, the |entries|
// array may end up being partially filled.
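+// For example, "*Trial1/Group1/Trial2/Group2/" (illustrative names) yields
+// two entries, with the first one marked activated.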
bool ParseFieldTrialsString(const std::string& trials_string,
- std::vector<FieldTrial::State>* entries) {
+ std::vector<FieldTrialStringEntry>* entries) {
const StringPiece trials_string_piece(trials_string);
size_t next_item = 0;
@@ -92,7 +166,7 @@ bool ParseFieldTrialsString(const std::string& trials_string,
if (group_name_end == trials_string.npos)
group_name_end = trials_string.length();
- FieldTrial::State entry;
+ FieldTrialStringEntry entry;
// Verify if the trial should be activated or not.
if (trials_string[next_item] == kActivationMarker) {
// Name cannot be only the indicator.
@@ -107,11 +181,61 @@ bool ParseFieldTrialsString(const std::string& trials_string,
trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
next_item = group_name_end + 1;
- entries->push_back(entry);
+ entries->push_back(std::move(entry));
}
return true;
}
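+
+// As an illustration (hypothetical switch names and trial state), the helper
+// below might append switches such as:
+//   --test-enable-features=FeatureA,FeatureB
+//   --test-disable-features=FeatureC
+//   --force-fieldtrials=*Trial1/Group1/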
+void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
+ const char* disable_features_switch,
+ CommandLine* cmd_line) {
+ std::string enabled_features;
+ std::string disabled_features;
+ FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
+ &disabled_features);
+
+ if (!enabled_features.empty())
+ cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
+ if (!disabled_features.empty())
+ cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
+
+ std::string field_trial_states;
+ FieldTrialList::AllStatesToString(&field_trial_states);
+ if (!field_trial_states.empty()) {
+ cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
+ field_trial_states);
+ }
+}
+
+#if defined(OS_WIN)
+HANDLE CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
+ HANDLE src = allocator->shared_memory()->handle().GetHandle();
+ ProcessHandle process = GetCurrentProcess();
+ DWORD access = SECTION_MAP_READ | SECTION_QUERY;
+ HANDLE dst;
+ if (!::DuplicateHandle(process, src, process, &dst, access, true, 0))
+ return kInvalidPlatformFile;
+ return dst;
+}
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+int CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
+ SharedMemoryHandle new_handle;
+ allocator->shared_memory()->ShareReadOnlyToProcess(GetCurrentProcessHandle(),
+ &new_handle);
+ return SharedMemory::GetFdFromSharedMemoryHandle(new_handle);
+}
+#endif
+
+void OnOutOfMemory(size_t size) {
+#if defined(OS_NACL)
+ NOTREACHED();
+#else
+ TerminateBecauseOutOfMemory(size);
+#endif
+}
+
} // namespace
// statics
@@ -127,12 +251,55 @@ int FieldTrialList::kNoExpirationYear = 0;
FieldTrial::EntropyProvider::~EntropyProvider() {
}
-FieldTrial::State::State() : activated(false) {}
+FieldTrial::State::State() {}
FieldTrial::State::State(const State& other) = default;
FieldTrial::State::~State() {}
+bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
+ StringPiece* trial_name,
+ StringPiece* group_name) const {
+ PickleIterator iter = GetPickleIterator();
+ return ReadStringPair(&iter, trial_name, group_name);
+}
+
+bool FieldTrial::FieldTrialEntry::GetParams(
+ std::map<std::string, std::string>* params) const {
+ PickleIterator iter = GetPickleIterator();
+ StringPiece tmp;
+ // Skip reading trial and group name.
+ if (!ReadStringPair(&iter, &tmp, &tmp))
+ return false;
+
+ while (true) {
+ StringPiece key;
+ StringPiece value;
+ if (!ReadStringPair(&iter, &key, &value))
+ return key.empty(); // Non-empty is bad: got one of a pair.
+ (*params)[key.as_string()] = value.as_string();
+ }
+}
+
+PickleIterator FieldTrial::FieldTrialEntry::GetPickleIterator() const {
+ const char* src =
+ reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
+
+ Pickle pickle(src, pickle_size);
+ return PickleIterator(pickle);
+}
+
+bool FieldTrial::FieldTrialEntry::ReadStringPair(
+ PickleIterator* iter,
+ StringPiece* trial_name,
+ StringPiece* group_name) const {
+ if (!iter->ReadStringPiece(trial_name))
+ return false;
+ if (!iter->ReadStringPiece(group_name))
+ return false;
+ return true;
+}
+
void FieldTrial::Disable() {
DCHECK(!group_reported_);
enable_field_trial_ = false;
@@ -243,7 +410,8 @@ FieldTrial::FieldTrial(const std::string& trial_name,
enable_field_trial_(true),
forced_(false),
group_reported_(false),
- trial_registered_(false) {
+ trial_registered_(false),
+ ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
DCHECK_GT(total_probability, 0);
DCHECK(!trial_name_.empty());
DCHECK(!default_group_name_.empty());
@@ -267,6 +435,10 @@ void FieldTrial::SetGroupChoice(const std::string& group_name, int number) {
}
void FieldTrial::FinalizeGroupChoice() {
+ FinalizeGroupChoiceImpl(false);
+}
+
+void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
if (group_ != kNotFinalized)
return;
accumulated_group_probability_ = divisor_;
@@ -274,6 +446,10 @@ void FieldTrial::FinalizeGroupChoice() {
// finalized.
DCHECK(!forced_);
SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+
+ // Add the field trial to shared memory.
+ if (kUseSharedMemoryForFieldTrials && trial_registered_)
+ FieldTrialList::OnGroupFinalized(is_locked, this);
}
bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
@@ -289,8 +465,18 @@ bool FieldTrial::GetState(State* field_trial_state) {
if (!enable_field_trial_)
return false;
FinalizeGroupChoice();
- field_trial_state->trial_name = trial_name_;
- field_trial_state->group_name = group_name_;
+ field_trial_state->trial_name = &trial_name_;
+ field_trial_state->group_name = &group_name_;
+ field_trial_state->activated = group_reported_;
+ return true;
+}
+
+bool FieldTrial::GetStateWhileLocked(State* field_trial_state) {
+ if (!enable_field_trial_)
+ return false;
+ FinalizeGroupChoiceImpl(true);
+ field_trial_state->trial_name = &trial_name_;
+ field_trial_state->group_name = &group_name_;
field_trial_state->activated = group_reported_;
return true;
}
@@ -308,8 +494,8 @@ FieldTrialList::Observer::~Observer() {
}
FieldTrialList::FieldTrialList(
- const FieldTrial::EntropyProvider* entropy_provider)
- : entropy_provider_(entropy_provider),
+ std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
+ : entropy_provider_(std::move(entropy_provider)),
observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
DCHECK(!global_);
@@ -475,17 +661,17 @@ void FieldTrialList::AllStatesToString(std::string* output) {
for (const auto& registered : global_->registered_) {
FieldTrial::State trial;
- if (!registered.second->GetState(&trial))
+ if (!registered.second->GetStateWhileLocked(&trial))
continue;
DCHECK_EQ(std::string::npos,
- trial.trial_name.find(kPersistentStringSeparator));
+ trial.trial_name->find(kPersistentStringSeparator));
DCHECK_EQ(std::string::npos,
- trial.group_name.find(kPersistentStringSeparator));
+ trial.group_name->find(kPersistentStringSeparator));
if (trial.activated)
output->append(1, kActivationMarker);
- trial.trial_name.AppendToString(output);
+ output->append(*trial.trial_name);
output->append(1, kPersistentStringSeparator);
- trial.group_name.AppendToString(output);
+ output->append(*trial.group_name);
output->append(1, kPersistentStringSeparator);
}
}
@@ -510,7 +696,7 @@ void FieldTrialList::GetActiveFieldTrialGroups(
void FieldTrialList::GetActiveFieldTrialGroupsFromString(
const std::string& trials_string,
FieldTrial::ActiveGroups* active_groups) {
- std::vector<FieldTrial::State> entries;
+ std::vector<FieldTrialStringEntry> entries;
if (!ParseFieldTrialsString(trials_string, &entries))
return;
@@ -525,6 +711,36 @@ void FieldTrialList::GetActiveFieldTrialGroupsFromString(
}
// static
+void FieldTrialList::GetInitiallyActiveFieldTrials(
+ const base::CommandLine& command_line,
+ FieldTrial::ActiveGroups* active_groups) {
+ DCHECK(global_->create_trials_from_command_line_called_);
+
+ if (!global_->field_trial_allocator_) {
+ GetActiveFieldTrialGroupsFromString(
+ command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+ active_groups);
+ return;
+ }
+
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(allocator);
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (subtle::NoBarrier_Load(&entry->activated) &&
+ entry->GetTrialAndGroupName(&trial_name, &group_name)) {
+ FieldTrial::ActiveGroup group;
+ group.trial_name = trial_name.as_string();
+ group.group_name = group_name.as_string();
+ active_groups->push_back(group);
+ }
+ }
+}
+
+// static
bool FieldTrialList::CreateTrialsFromString(
const std::string& trials_string,
const std::set<std::string>& ignored_trial_names) {
@@ -532,7 +748,7 @@ bool FieldTrialList::CreateTrialsFromString(
if (trials_string.empty() || !global_)
return true;
- std::vector<FieldTrial::State> entries;
+ std::vector<FieldTrialStringEntry> entries;
if (!ParseFieldTrialsString(trials_string, &entries))
return false;
@@ -557,6 +773,145 @@ bool FieldTrialList::CreateTrialsFromString(
}
// static
+void FieldTrialList::CreateTrialsFromCommandLine(
+ const CommandLine& cmd_line,
+ const char* field_trial_handle_switch,
+ int fd_key) {
+ global_->create_trials_from_command_line_called_ = true;
+
+#if defined(OS_WIN)
+ if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+ std::string handle_switch =
+ cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
+ bool result = CreateTrialsFromHandleSwitch(handle_switch);
+ DCHECK(result);
+ }
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+ // On POSIX, we check if the handle is valid by seeing if the browser process
+ // sent over the switch (we don't care about the value). Invalid handles
+ // occur in some browser tests which don't initialize the allocator.
+ if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+ bool result = CreateTrialsFromDescriptor(fd_key);
+ DCHECK(result);
+ }
+#endif
+
+ if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
+ bool result = FieldTrialList::CreateTrialsFromString(
+ cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+ std::set<std::string>());
+ DCHECK(result);
+ }
+}
+
+// static
+void FieldTrialList::CreateFeaturesFromCommandLine(
+ const base::CommandLine& command_line,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ FeatureList* feature_list) {
+ // Fallback to command line if not using shared memory.
+ if (!kUseSharedMemoryForFieldTrials ||
+ !global_->field_trial_allocator_.get()) {
+ return feature_list->InitializeFromCommandLine(
+ command_line.GetSwitchValueASCII(enable_features_switch),
+ command_line.GetSwitchValueASCII(disable_features_switch));
+ }
+
+ feature_list->InitializeFromSharedMemory(
+ global_->field_trial_allocator_.get());
+}
+
+#if defined(OS_WIN)
+// static
+void FieldTrialList::AppendFieldTrialHandleIfNeeded(
+ HandlesToInheritVector* handles) {
+ if (!global_)
+ return;
+ if (kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+ if (global_->readonly_allocator_handle_)
+ handles->push_back(global_->readonly_allocator_handle_);
+ }
+}
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// static
+int FieldTrialList::GetFieldTrialHandle() {
+ if (global_ && kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+    // Callers are expected to check the returned handle for validity.
+ return global_->readonly_allocator_handle_;
+ }
+ return kInvalidPlatformFile;
+}
+#endif
+
+// static
+void FieldTrialList::CopyFieldTrialStateToFlags(
+ const char* field_trial_handle_switch,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ CommandLine* cmd_line) {
+ // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
+ // content browser tests currently don't create a FieldTrialList because they
+ // don't run ChromeBrowserMainParts code where it's done for Chrome.
+ // Some tests depend on the enable and disable features flag switch, though,
+ // so we can still add those even though AllStatesToString() will be a no-op.
+ if (!global_) {
+ AddFeatureAndFieldTrialFlags(enable_features_switch,
+ disable_features_switch, cmd_line);
+ return;
+ }
+
+ // Use shared memory to pass the state if the feature is enabled, otherwise
+ // fallback to passing it via the command line as a string.
+ if (kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+    // If the readonly handle didn't get duplicated properly, then fall back
+    // to the original behavior.
+ if (global_->readonly_allocator_handle_ == kInvalidPlatformFile) {
+ AddFeatureAndFieldTrialFlags(enable_features_switch,
+ disable_features_switch, cmd_line);
+ return;
+ }
+
+ global_->field_trial_allocator_->UpdateTrackingHistograms();
+
+#if defined(OS_WIN)
+    // We need to pass the shared memory handle over the command line on
+    // Windows, since the child doesn't know which of the handles it inherited
+    // it should open. PlatformFile is typedef'd to HANDLE which is typedef'd
+    // to void*. We basically cast the handle into an int (uintptr_t, to be
+    // exact), stringify the int, and pass it as a command-line flag. The
+    // child process will do the reverse conversions to retrieve the handle.
+    // See http://stackoverflow.com/a/153077
+ auto uintptr_handle =
+ reinterpret_cast<uintptr_t>(global_->readonly_allocator_handle_);
+ std::string field_trial_handle = std::to_string(uintptr_handle);
+ cmd_line->AppendSwitchASCII(field_trial_handle_switch, field_trial_handle);
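+    // The child reverses these conversions in CreateTrialsFromHandleSwitch()
+    // (std::stoi, then a reinterpret_cast back to HANDLE).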
+#elif defined(OS_POSIX)
+    // On POSIX, we dup the fd into the fixed fd kFieldTrialDescriptor, so we
+    // don't have to pass the handle over the command line (it wouldn't be the
+    // right handle in the child anyway). But some browser tests don't create
+    // the allocator, so we need to be able to distinguish valid and invalid
+    // handles. We do that by just checking that the flag is set with a dummy
+    // value.
+ cmd_line->AppendSwitchASCII(field_trial_handle_switch, "1");
+#else
+#error Unsupported OS
+#endif
+ return;
+ }
+
+ AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
+ cmd_line);
+}
+
+// static
FieldTrial* FieldTrialList::CreateFieldTrial(
const std::string& name,
const std::string& group_name) {
@@ -597,6 +952,20 @@ void FieldTrialList::RemoveObserver(Observer* observer) {
}
// static
+void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
+ if (!global_)
+ return;
+ if (is_locked) {
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
+ } else {
+ AutoLock auto_lock(global_->lock_);
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
+ }
+}
+
+// static
void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (!global_)
return;
@@ -606,10 +975,22 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (field_trial->group_reported_)
return;
field_trial->group_reported_ = true;
+
+ if (!field_trial->enable_field_trial_)
+ return;
+
+ if (kUseSharedMemoryForFieldTrials)
+ ActivateFieldTrialEntryWhileLocked(field_trial);
}
- if (!field_trial->enable_field_trial_)
- return;
+  // Recording for stability debugging has to be done inline, as a task
+  // posted to an observer may not get executed before a crash.
+ base::debug::GlobalActivityTracker* tracker =
+ base::debug::GlobalActivityTracker::Get();
+ if (tracker) {
+ tracker->RecordFieldTrial(field_trial->trial_name(),
+ field_trial->group_name_internal());
+ }
global_->observer_list_->Notify(
FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
@@ -625,6 +1006,334 @@ size_t FieldTrialList::GetFieldTrialCount() {
}
// static
+bool FieldTrialList::GetParamsFromSharedMemory(
+ FieldTrial* field_trial,
+ std::map<std::string, std::string>* params) {
+ DCHECK(global_);
+  // If the field trial allocator is not set up yet, then there are several
+  // cases:
+  // - We are in the browser process and the allocator has not been set up
+  //   yet. If we got here, then we couldn't find the params in
+  //   FieldTrialParamAssociator, so they're definitely not here. Return
+  //   false.
+  // - Using shared memory for field trials is not enabled. If we got here,
+  //   then there's nothing in shared memory. Return false.
+  // - We are in the child process and the allocator has not been set up yet.
+  //   In that case this is being called too early: the field trial allocator
+  //   gets set up very early in the lifecycle, so defer the call until after
+  //   that.
+ AutoLock auto_lock(global_->lock_);
+ if (!global_->field_trial_allocator_)
+ return false;
+
+ // If ref_ isn't set, then the field trial data can't be in shared memory.
+ if (!field_trial->ref_)
+ return false;
+
+ const FieldTrial::FieldTrialEntry* entry =
+ global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
+ field_trial->ref_);
+
+ size_t allocated_size =
+ global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
+ size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
+ if (allocated_size < actual_size)
+ return false;
+
+ return entry->GetParams(params);
+}
+
+// static
+void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
+ if (!global_)
+ return;
+
+ AutoLock auto_lock(global_->lock_);
+ if (!global_->field_trial_allocator_)
+ return;
+
+ // To clear the params, we iterate through every item in the allocator, copy
+ // just the trial and group name into a newly-allocated segment and then clear
+ // the existing item.
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(allocator);
+
+  // List of refs to eventually be made iterable. We can't make them iterable
+  // inside the loop, since the iterator would then never terminate.
+ std::vector<FieldTrial::FieldTrialRef> new_refs;
+
+ FieldTrial::FieldTrialRef prev_ref;
+ while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
+ FieldTrialAllocator::kReferenceNull) {
+ // Get the existing field trial entry in shared memory.
+ const FieldTrial::FieldTrialEntry* prev_entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
+ continue;
+
+ // Write a new entry, minus the params.
+ Pickle pickle;
+ pickle.WriteString(trial_name);
+ pickle.WriteString(group_name);
+ size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+ FieldTrial::FieldTrialEntry* new_entry =
+ allocator->New<FieldTrial::FieldTrialEntry>(total_size);
+ subtle::NoBarrier_Store(&new_entry->activated,
+ subtle::NoBarrier_Load(&prev_entry->activated));
+ new_entry->pickle_size = pickle.size();
+
+ // TODO(lawrencewu): Modify base::Pickle to be able to write over a section
+ // in memory, so we can avoid this memcpy.
+ char* dst = reinterpret_cast<char*>(new_entry) +
+ sizeof(FieldTrial::FieldTrialEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ // Update the ref on the field trial and add it to the list to be made
+ // iterable.
+ FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
+ FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
+ trial->ref_ = new_ref;
+ new_refs.push_back(new_ref);
+
+ // Mark the existing entry as unused.
+ allocator->ChangeType(prev_ref, 0,
+ FieldTrial::FieldTrialEntry::kPersistentTypeId,
+ /*clear=*/false);
+ }
+
+ for (const auto& ref : new_refs) {
+ allocator->MakeIterable(ref);
+ }
+}
+
+// static
+void FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(
+ PersistentMemoryAllocator* allocator) {
+ if (!global_)
+ return;
+ AutoLock auto_lock(global_->lock_);
+ for (const auto& registered : global_->registered_) {
+ AddToAllocatorWhileLocked(allocator, registered.second);
+ }
+}
+
+// static
+std::vector<const FieldTrial::FieldTrialEntry*>
+FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
+ PersistentMemoryAllocator const& allocator) {
+ std::vector<const FieldTrial::FieldTrialEntry*> entries;
+ FieldTrialAllocator::Iterator iter(&allocator);
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
+ entries.push_back(entry);
+ }
+ return entries;
+}
+
+#if defined(OS_WIN)
+// static
+bool FieldTrialList::CreateTrialsFromHandleSwitch(
+ const std::string& handle_switch) {
+ int field_trial_handle = std::stoi(handle_switch);
+ HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
+ SharedMemoryHandle shm_handle(handle, GetCurrentProcId());
+ return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
+}
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// static
+bool FieldTrialList::CreateTrialsFromDescriptor(int fd_key) {
+ if (!kUseSharedMemoryForFieldTrials)
+ return false;
+
+ if (fd_key == -1)
+ return false;
+
+ int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
+ if (fd == -1)
+ return false;
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ SharedMemoryHandle shm_handle(FileDescriptor(fd, true));
+#else
+ SharedMemoryHandle shm_handle(fd, true);
+#endif
+
+ bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
+ DCHECK(result);
+ return true;
+}
+#endif
+
+// static
+bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
+ SharedMemoryHandle shm_handle) {
+  // Ownership of |shm| is passed to CreateTrialsFromSharedMemory(), which
+  // hands it to the field trial allocator, so the mapping outlives this
+  // method.
+  std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
+  if (!shm->Map(kFieldTrialAllocationSize))
+ OnOutOfMemory(kFieldTrialAllocationSize);
+
+ return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+}
+
+// static
+bool FieldTrialList::CreateTrialsFromSharedMemory(
+ std::unique_ptr<SharedMemory> shm) {
+ global_->field_trial_allocator_.reset(
+ new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
+ FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(shalloc);
+
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
+ return false;
+
+ // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
+ // StringPieces.
+ FieldTrial* trial =
+ CreateFieldTrial(trial_name.as_string(), group_name.as_string());
+
+ trial->ref_ = mem_iter.GetAsReference(entry);
+ if (subtle::NoBarrier_Load(&entry->activated)) {
+ // Call |group()| to mark the trial as "used" and notify observers, if
+ // any. This is useful to ensure that field trials created in child
+ // processes are properly reported in crash reports.
+ trial->group();
+ }
+ }
+ return true;
+}
+
+// static
+void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
+ if (!global_)
+ return;
+ AutoLock auto_lock(global_->lock_);
+ // Create the allocator if not already created and add all existing trials.
+ if (global_->field_trial_allocator_ != nullptr)
+ return;
+
+ SharedMemoryCreateOptions options;
+ options.size = kFieldTrialAllocationSize;
+ options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ options.type = SharedMemoryHandle::POSIX;
+#endif
+
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ if (!shm->Create(options))
+ OnOutOfMemory(kFieldTrialAllocationSize);
+
+ if (!shm->Map(kFieldTrialAllocationSize))
+ OnOutOfMemory(kFieldTrialAllocationSize);
+
+ global_->field_trial_allocator_.reset(
+ new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
+ global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
+
+ // Add all existing field trials.
+ for (const auto& registered : global_->registered_) {
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ registered.second);
+ }
+
+ // Add all existing features.
+ FeatureList::GetInstance()->AddFeaturesToAllocator(
+ global_->field_trial_allocator_.get());
+
+#if !defined(OS_NACL)
+  // Set |readonly_allocator_handle_| so it can be inherited by child
+  // processes and passed via the command line.
+ global_->readonly_allocator_handle_ =
+ CreateReadOnlyHandle(global_->field_trial_allocator_.get());
+#endif
+}
+
+// static
+void FieldTrialList::AddToAllocatorWhileLocked(
+ PersistentMemoryAllocator* allocator,
+ FieldTrial* field_trial) {
+ // Don't do anything if the allocator hasn't been instantiated yet.
+ if (allocator == nullptr)
+ return;
+
+  // Or if the allocator is read-only, which means we are in a child process
+  // and shouldn't be writing to it.
+ if (allocator->IsReadonly())
+ return;
+
+ FieldTrial::State trial_state;
+ if (!field_trial->GetStateWhileLocked(&trial_state))
+ return;
+
+  // Or if we've already added it. We must check after GetStateWhileLocked()
+  // since it can also add to the allocator.
+ if (field_trial->ref_)
+ return;
+
+ Pickle pickle;
+ if (!PickleFieldTrial(trial_state, &pickle)) {
+ NOTREACHED();
+ return;
+ }
+
+ size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+ FieldTrial::FieldTrialRef ref = allocator->Allocate(
+ total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
+ if (ref == FieldTrialAllocator::kReferenceNull) {
+ NOTREACHED();
+ return;
+ }
+
+ FieldTrial::FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+ subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
+ entry->pickle_size = pickle.size();
+
+ // TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
+ // memory, so we can avoid this memcpy.
+ char* dst =
+ reinterpret_cast<char*>(entry) + sizeof(FieldTrial::FieldTrialEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ allocator->MakeIterable(ref);
+ field_trial->ref_ = ref;
+}
+
+// static
+void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
+ FieldTrial* field_trial) {
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+
+ // Check if we're in the child process and return early if so.
+ if (allocator && allocator->IsReadonly())
+ return;
+
+ FieldTrial::FieldTrialRef ref = field_trial->ref_;
+ if (ref == FieldTrialAllocator::kReferenceNull) {
+ // It's fine to do this even if the allocator hasn't been instantiated
+ // yet -- it'll just return early.
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
+ } else {
+ // It's also okay to do this even though the callee doesn't have a lock --
+ // the only thing that happens on a stale read here is a slight performance
+ // hit from the child re-synchronizing activation state.
+ FieldTrial::FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+ subtle::NoBarrier_Store(&entry->activated, 1);
+ }
+}
+
+// static
const FieldTrial::EntropyProvider*
FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
if (!global_) {
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 28a4606a88..60a6592ce6 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -58,15 +58,24 @@
#include <stdint.h>
#include <map>
+#include <memory>
#include <set>
#include <string>
#include <vector>
+#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/command_line.h"
+#include "base/feature_list.h"
+#include "base/files/file.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/observer_list_threadsafe.h"
+#include "base/pickle.h"
+#include "base/process/launch.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
@@ -79,6 +88,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
public:
typedef int Probability; // Probability type for being selected in a trial.
+  // TODO(665129): Make private again after the crash has been resolved.
+ typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
+
// Specifies the persistence of the field trial group choice.
enum RandomizationType {
// One time randomized trials will persist the group choice between
@@ -112,17 +124,61 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
};
// A triplet representing a FieldTrial, its selected group and whether it's
- // active.
+ // active. String members are pointers to the underlying strings owned by the
+ // FieldTrial object. Does not use StringPiece to avoid conversions back to
+ // std::string.
struct BASE_EXPORT State {
- StringPiece trial_name;
- StringPiece group_name;
- bool activated;
+ const std::string* trial_name = nullptr;
+ const std::string* group_name = nullptr;
+ bool activated = false;
State();
State(const State& other);
~State();
};
+ // We create one FieldTrialEntry per field trial in shared memory, via
+ // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
+ // base::Pickle object that we unpickle and read from.
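+  // A layout sketch (the 8-byte header matches kExpectedInstanceSize below):
+  //   [ activated (4 bytes) | pickle_size (4 bytes) | pickled strings... ]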
+ struct BASE_EXPORT FieldTrialEntry {
+ // SHA1(FieldTrialEntry): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 8;
+
+    // Whether or not this field trial is activated. This is really just a
+    // boolean but uses a 32-bit value for portability reasons. It should be
+    // accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
+    // from making unexpected optimizations on the assumption that only one
+    // thread is accessing the memory location.
+ subtle::Atomic32 activated;
+
+ // Size of the pickled structure, NOT the total size of this entry.
+ uint32_t pickle_size;
+
+ // Calling this is only valid when the entry is initialized. That is, it
+ // resides in shared memory and has a pickle containing the trial name and
+ // group name following it.
+ bool GetTrialAndGroupName(StringPiece* trial_name,
+ StringPiece* group_name) const;
+
+ // Calling this is only valid when the entry is initialized as well. Reads
+ // the parameters following the trial and group name and stores them as
+ // key-value mappings in |params|.
+ bool GetParams(std::map<std::string, std::string>* params) const;
+
+ private:
+ // Returns an iterator over the data containing names and params.
+ PickleIterator GetPickleIterator() const;
+
+ // Takes the iterator and writes out the first two items into |trial_name|
+ // and |group_name|.
+ bool ReadStringPair(PickleIterator* iter,
+ StringPiece* trial_name,
+ StringPiece* group_name) const;
+ };
+
typedef std::vector<ActiveGroup> ActiveGroups;
// A return value to indicate that a given instance has not yet had a group
@@ -213,6 +269,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+ DoNotAddSimulatedFieldTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
friend class base::FieldTrialList;
@@ -246,6 +305,10 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// status.
void FinalizeGroupChoice();
+  // Implements FinalizeGroupChoice(). Pass |is_locked| as true when the
+  // caller already holds FieldTrialList's lock, to avoid deadlocking on it.
+ void FinalizeGroupChoiceImpl(bool is_locked);
+
// Returns the trial name and selected group name for this field trial via
// the output parameter |active_group|, but only if the group has already
// been chosen and has been externally observed via |group()| and the trial
@@ -261,6 +324,10 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// untouched.
bool GetState(State* field_trial_state);
+  // Does the same thing as GetState() above, but is deadlock-free if the
+  // caller is already holding FieldTrialList's lock.
+ bool GetStateWhileLocked(State* field_trial_state);
+
// Returns the group_name. A winner need not have been chosen.
std::string group_name_internal() const { return group_name_; }
@@ -308,6 +375,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// should notify it when its group is queried.
bool trial_registered_;
+ // Reference to related field trial struct and data in shared memory.
+ FieldTrialRef ref_;
+
// When benchmarking is enabled, field trials all revert to the 'default'
// group.
static bool enable_benchmarking_;
@@ -321,6 +391,8 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// Only one instance of this class exists.
class BASE_EXPORT FieldTrialList {
public:
+ typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
+
// Year that is guaranteed to not be expired when instantiating a field trial
// via |FactoryGetFieldTrial()|. Set to two years from the build date.
static int kNoExpirationYear;
@@ -338,11 +410,12 @@ class BASE_EXPORT FieldTrialList {
// This singleton holds the global list of registered FieldTrials.
//
- // To support one-time randomized field trials, specify a non-NULL
+ // To support one-time randomized field trials, specify a non-null
// |entropy_provider| which should be a source of uniformly distributed
- // entropy values. Takes ownership of |entropy_provider|. If one time
- // randomization is not desired, pass in NULL for |entropy_provider|.
- explicit FieldTrialList(const FieldTrial::EntropyProvider* entropy_provider);
+  // entropy values. If one-time randomization is not desired, pass in null for
+ // |entropy_provider|.
+ explicit FieldTrialList(
+ std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
// Destructor Release()'s references to all registered FieldTrial instances.
~FieldTrialList();
@@ -384,7 +457,7 @@ class BASE_EXPORT FieldTrialList {
// PermutedEntropyProvider (which is used when UMA is not enabled). If
// |override_entropy_provider| is not null, then it will be used for
// randomization instead of the provider given when the FieldTrialList was
- // instanciated.
+ // instantiated.
static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
const std::string& trial_name,
FieldTrial::Probability total_probability,
@@ -449,6 +522,14 @@ class BASE_EXPORT FieldTrialList {
const std::string& trials_string,
FieldTrial::ActiveGroups* active_groups);
+  // Returns the field trials that were active when the process was created.
+  // Either parses the field trial string or reads the shared memory holding
+  // the field trial information.
+ // Must be called only after a call to CreateTrialsFromCommandLine().
+ static void GetInitiallyActiveFieldTrials(
+ const base::CommandLine& command_line,
+ FieldTrial::ActiveGroups* active_groups);
+
// Use a state string (re: StatesToString()) to augment the current list of
// field trials to include the supplied trials, and using a 100% probability
// for each trial, force them to have the same group string. This is commonly
@@ -462,6 +543,51 @@ class BASE_EXPORT FieldTrialList {
const std::string& trials_string,
const std::set<std::string>& ignored_trial_names);
+  // Achieves the same thing as CreateTrialsFromString(), except it takes the
+  // trials from the command line, either via a shared memory handle or a
+  // command-line argument. The name is a bit of a misnomer: on POSIX, when
+  // using shared memory, we simply get the trials by opening the fd looked up
+  // via |fd_key|. On Windows, we expect the |cmd_line| switch for
+  // |field_trial_handle_switch| to contain the shared memory handle that
+  // contains the field trial allocator.
+  // We need the |field_trial_handle_switch| and |fd_key| arguments to be
+  // passed in since base/ can't depend on content/.
+ static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
+ const char* field_trial_handle_switch,
+ int fd_key);
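+  //
+  // Typical flow (a sketch, not enforced by this API): the browser calls
+  // CopyFieldTrialStateToFlags() while composing a child's command line, and
+  // the child calls CreateTrialsFromCommandLine() with the same switch names
+  // early in startup, before any field trials are queried.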
+
+ // Creates base::Feature overrides from the command line by first trying to
+ // use shared memory and then falling back to the command line if it fails.
+ static void CreateFeaturesFromCommandLine(
+ const base::CommandLine& command_line,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ FeatureList* feature_list);
+
+#if defined(OS_WIN)
+ // On Windows, we need to explicitly pass down any handles to be inherited.
+ // This function adds the shared memory handle to field trial state to the
+ // list of handles to be inherited.
+ static void AppendFieldTrialHandleIfNeeded(
+ base::HandlesToInheritVector* handles);
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+ // On POSIX, we also need to explicitly pass down this file descriptor that
+ // should be shared with the child process. Returns kInvalidPlatformFile if no
+ // handle exists or was not initialized properly.
+ static PlatformFile GetFieldTrialHandle();
+#endif
+
+  // Adds a switch to the command line containing the field trial state as a
+  // string (if not using shared memory to share field trial state), or the
+  // shared memory handle (as a string).
+ // Needs the |field_trial_handle_switch| argument to be passed in since base/
+ // can't depend on content/.
+ static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ base::CommandLine* cmd_line);
+
// Create a FieldTrial with the given |name| and using 100% probability for
// the FieldTrial, force FieldTrial to have the same group string as
// |group_name|. This is commonly used in a non-browser process, to carry
@@ -479,13 +605,86 @@ class BASE_EXPORT FieldTrialList {
// Remove an observer.
static void RemoveObserver(Observer* observer);
+  // Grabs the lock if necessary and adds the field trial to the allocator.
+  // This should only be called from FinalizeGroupChoiceImpl().
+ static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
+
// Notify all observers that a group has been finalized for |field_trial|.
static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
// Return the number of active field trials.
static size_t GetFieldTrialCount();
+ // Gets the parameters for |field_trial| from shared memory and stores them in
+ // |params|. This is only exposed for use by FieldTrialParamAssociator and
+ // shouldn't be used by anything else.
+ static bool GetParamsFromSharedMemory(
+ FieldTrial* field_trial,
+ std::map<std::string, std::string>* params);
+
+ // Clears all the params in the allocator.
+ static void ClearParamsFromSharedMemoryForTesting();
+
+ // Dumps field trial state to an allocator so that it can be analyzed after a
+ // crash.
+ static void DumpAllFieldTrialsToPersistentAllocator(
+ PersistentMemoryAllocator* allocator);
+
+ // Retrieves field trial state from an allocator so that it can be analyzed
+ // after a crash. The pointers in the returned vector are into the persistent
+ // memory segment and so are only valid as long as the allocator is valid.
+ static std::vector<const FieldTrial::FieldTrialEntry*>
+ GetAllFieldTrialsFromPersistentAllocator(
+ PersistentMemoryAllocator const& allocator);
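+  //
+  // Usage sketch (|allocator| would typically be recovered from a crash
+  // dump; names are illustrative):
+  //   for (const FieldTrial::FieldTrialEntry* entry :
+  //        GetAllFieldTrialsFromPersistentAllocator(allocator)) {
+  //     StringPiece trial_name, group_name;
+  //     if (entry->GetTrialAndGroupName(&trial_name, &group_name)) {
+  //       // Record trial_name / group_name.
+  //     }
+  //   }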
+
private:
+ // Allow tests to access our innards for testing purposes.
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+ DoNotAddSimulatedFieldTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
+
+#if defined(OS_WIN)
+ // Takes in |handle_switch| from the command line which represents the shared
+ // memory handle for field trials, parses it, and creates the field trials.
+ // Returns true on success, false on failure.
+ static bool CreateTrialsFromHandleSwitch(const std::string& handle_switch);
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+  // On POSIX systems that use the zygote, we look up the fd that backs the
+  // shared memory segment containing the field trials via an fd key in
+  // GlobalDescriptors. Returns true on success, false on failure.
+ static bool CreateTrialsFromDescriptor(int fd_key);
+#endif
+
+ // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
+ // and maps it with the correct size.
+ static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
+
+ // Expects a mapped piece of shared memory |shm| that was created from the
+ // browser process's field_trial_allocator and shared via the command line.
+ // This function recreates the allocator, iterates through all the field
+ // trials in it, and creates them via CreateFieldTrial(). Returns true if
+ // successful and false otherwise.
+ static bool CreateTrialsFromSharedMemory(
+ std::unique_ptr<base::SharedMemory> shm);
+
+  // Instantiates the field trial allocator, adds all existing field trials to
+  // it, and duplicates its handle to a read-only handle, which gets stored in
+  // |readonly_allocator_handle_|.
+ static void InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Adds the field trial to the allocator. Caller must hold a lock before
+ // calling this.
+ static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
+ FieldTrial* field_trial);
+
+ // Activate the corresponding field trial entry struct in shared memory.
+ static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
+
// A map from FieldTrial names to the actual instances.
typedef std::map<std::string, FieldTrial*> RegistrationMap;
@@ -510,8 +709,8 @@ class BASE_EXPORT FieldTrialList {
// FieldTrialList is created after that.
static bool used_without_global_;
- // Lock for access to registered_.
- base::Lock lock_;
+ // Lock for access to registered_ and field_trial_allocator_.
+ Lock lock_;
RegistrationMap registered_;
std::map<std::string, std::string> seen_states_;
@@ -523,6 +722,20 @@ class BASE_EXPORT FieldTrialList {
// List of observers to be notified when a group is selected for a FieldTrial.
scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+ // Allocator in shared memory containing field trial data. Used in both
+ // browser and child processes, but readonly in the child.
+  // In the future, we may want to move this to a more generic place if we
+  // want to start passing more than just field trial data.
+ std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
+
+ // Readonly copy of the handle to the allocator. Needs to be a member variable
+ // because it's needed from both CopyFieldTrialStateToFlags() and
+ // AppendFieldTrialHandleIfNeeded().
+ PlatformFile readonly_allocator_handle_ = kInvalidPlatformFile;
+
+ // Tracks whether CreateTrialsFromCommandLine() has been called.
+ bool create_trials_from_command_line_called_ = false;
+
DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
};
diff --git a/base/metrics/field_trial_param_associator.cc b/base/metrics/field_trial_param_associator.cc
new file mode 100644
index 0000000000..3bac18d6a9
--- /dev/null
+++ b/base/metrics/field_trial_param_associator.cc
@@ -0,0 +1,80 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_param_associator.h"
+
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
+FieldTrialParamAssociator::FieldTrialParamAssociator() {}
+FieldTrialParamAssociator::~FieldTrialParamAssociator() {}
+
+// static
+FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
+ return Singleton<FieldTrialParamAssociator,
+ LeakySingletonTraits<FieldTrialParamAssociator>>::get();
+}
+
+bool FieldTrialParamAssociator::AssociateFieldTrialParams(
+ const std::string& trial_name,
+ const std::string& group_name,
+ const FieldTrialParams& params) {
+ if (FieldTrialList::IsTrialActive(trial_name))
+ return false;
+
+ AutoLock scoped_lock(lock_);
+ const FieldTrialKey key(trial_name, group_name);
+ if (ContainsKey(field_trial_params_, key))
+ return false;
+
+ field_trial_params_[key] = params;
+ return true;
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParams(
+ const std::string& trial_name,
+ FieldTrialParams* params) {
+ FieldTrial* field_trial = FieldTrialList::Find(trial_name);
+ if (!field_trial)
+ return false;
+
+ // First try the local map, falling back to getting it from shared memory.
+ if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
+ params)) {
+ return true;
+ }
+
+ // TODO(lawrencewu): add the params to field_trial_params_ for next time.
+ return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
+ const std::string& trial_name,
+ const std::string& group_name,
+ FieldTrialParams* params) {
+ AutoLock scoped_lock(lock_);
+
+ const FieldTrialKey key(trial_name, group_name);
+ if (!ContainsKey(field_trial_params_, key))
+ return false;
+
+ *params = field_trial_params_[key];
+ return true;
+}
+
+void FieldTrialParamAssociator::ClearAllParamsForTesting() {
+ {
+ AutoLock scoped_lock(lock_);
+ field_trial_params_.clear();
+ }
+ FieldTrialList::ClearParamsFromSharedMemoryForTesting();
+}
+
+void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
+ AutoLock scoped_lock(lock_);
+ field_trial_params_.clear();
+}
+
+} // namespace base
diff --git a/base/metrics/field_trial_param_associator.h b/base/metrics/field_trial_param_associator.h
new file mode 100644
index 0000000000..b19c66661c
--- /dev/null
+++ b/base/metrics/field_trial_param_associator.h
@@ -0,0 +1,71 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/memory/singleton.h"
+#include "base/metrics/field_trial.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Keeps track of the parameters of all field trials and ensures access to them
+// is thread-safe.
+class BASE_EXPORT FieldTrialParamAssociator {
+ public:
+ FieldTrialParamAssociator();
+ ~FieldTrialParamAssociator();
+
+ // Key-value mapping type for field trial parameters.
+ typedef std::map<std::string, std::string> FieldTrialParams;
+
+ // Retrieve the singleton.
+ static FieldTrialParamAssociator* GetInstance();
+
+ // Sets parameters for the given field trial name and group.
+ bool AssociateFieldTrialParams(const std::string& trial_name,
+ const std::string& group_name,
+ const FieldTrialParams& params);
+
+  // Gets the parameters for a field trial and its chosen group. If not found
+  // in field_trial_params_, then tries to look them up in shared memory.
+ bool GetFieldTrialParams(const std::string& trial_name,
+ FieldTrialParams* params);
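+  //
+  // Usage sketch (illustrative trial name and param key):
+  //   FieldTrialParamAssociator::FieldTrialParams params;
+  //   if (FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
+  //           "MyTrial", &params)) {
+  //     const std::string& value = params["my_key"];
+  //   }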
+
+  // Gets the parameters for a field trial and its chosen group. Does not fall
+  // back to looking them up in shared memory. This should only be used if you
+  // know for sure the params are in the mapping, e.g. if you're in the browser
+  // process, and even then you should probably just use GetFieldTrialParams().
+ bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
+ const std::string& group_name,
+ FieldTrialParams* params);
+
+ // Clears the internal field_trial_params_ mapping, plus removes all params in
+ // shared memory.
+ void ClearAllParamsForTesting();
+
+ // Clears the internal field_trial_params_ mapping.
+ void ClearAllCachedParamsForTesting();
+
+ private:
+ friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
+
+ // (field_trial_name, field_trial_group)
+ typedef std::pair<std::string, std::string> FieldTrialKey;
+
+ Lock lock_;
+ std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
+
+ DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 00f351fa2c..54672e63d5 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -6,13 +6,20 @@
#include <stddef.h>
+#include "base/base_switches.h"
#include "base/build_time.h"
+#include "base/feature_list.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_param_associator.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
+#include "base/test/mock_entropy_provider.h"
+#include "base/test/scoped_feature_list.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -335,12 +342,12 @@ TEST_F(FieldTrialTest, AllGroups) {
std::string winner("Winner");
trial->AppendGroup(winner, 10);
EXPECT_TRUE(trial->GetState(&field_trial_state));
- EXPECT_EQ(one_winner, field_trial_state.trial_name);
- EXPECT_EQ(winner, field_trial_state.group_name);
+ EXPECT_EQ(one_winner, *field_trial_state.trial_name);
+ EXPECT_EQ(winner, *field_trial_state.group_name);
trial->group();
EXPECT_TRUE(trial->GetState(&field_trial_state));
- EXPECT_EQ(one_winner, field_trial_state.trial_name);
- EXPECT_EQ(winner, field_trial_state.group_name);
+ EXPECT_EQ(one_winner, *field_trial_state.trial_name);
+ EXPECT_EQ(winner, *field_trial_state.group_name);
std::string multi_group("MultiGroup");
scoped_refptr<FieldTrial> multi_group_trial =
@@ -353,8 +360,8 @@ TEST_F(FieldTrialTest, AllGroups) {
// Finalize the group selection by accessing the selected group.
multi_group_trial->group();
EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
- EXPECT_EQ(multi_group, field_trial_state.trial_name);
- EXPECT_EQ(multi_group_trial->group_name(), field_trial_state.group_name);
+ EXPECT_EQ(multi_group, *field_trial_state.trial_name);
+ EXPECT_EQ(multi_group_trial->group_name(), *field_trial_state.group_name);
}
TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
@@ -1120,15 +1127,246 @@ TEST(FieldTrialTestWithoutList, StatesStringFormat) {
EXPECT_TRUE(field_trial_list.TrialExists("zzz"));
}
-#if GTEST_HAS_DEATH_TEST
TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
// Trying to instantiate a one-time randomized field trial before the
// FieldTrialList is created should crash.
- EXPECT_DEATH(FieldTrialList::FactoryGetFieldTrial(
- "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
- base::FieldTrialList::kNoExpirationYear, 1, 1,
- base::FieldTrial::ONE_TIME_RANDOMIZED, NULL), "");
+ EXPECT_DEATH_IF_SUPPORTED(
+ FieldTrialList::FactoryGetFieldTrial(
+ "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
+ base::FieldTrialList::kNoExpirationYear, 1, 1,
+ base::FieldTrial::ONE_TIME_RANDOMIZED, NULL),
+ "");
+}
+
+#if defined(OS_WIN)
+TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
+ base::FieldTrialList field_trial_list(
+ base::MakeUnique<base::MockEntropyProvider>());
+ base::FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+ base::FilePath test_file_path = base::FilePath(FILE_PATH_LITERAL("Program"));
+ base::CommandLine cmd_line = base::CommandLine(test_file_path);
+ const char field_trial_handle[] = "test-field-trial-handle";
+ const char enable_features_switch[] = "test-enable-features";
+ const char disable_features_switch[] = "test-disable-features";
+
+ base::FieldTrialList::CopyFieldTrialStateToFlags(
+ field_trial_handle, enable_features_switch, disable_features_switch,
+ &cmd_line);
+ EXPECT_TRUE(cmd_line.HasSwitch(field_trial_handle) ||
+ cmd_line.HasSwitch(switches::kForceFieldTrials));
}
#endif
+TEST(FieldTrialListTest, InstantiateAllocator) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ void* memory = field_trial_list.field_trial_allocator_->shared_memory();
+ size_t used = field_trial_list.field_trial_allocator_->used();
+
+ // Ensure that the function is idempotent.
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ void* new_memory = field_trial_list.field_trial_allocator_->shared_memory();
+ size_t new_used = field_trial_list.field_trial_allocator_->used();
+ EXPECT_EQ(memory, new_memory);
+ EXPECT_EQ(used, new_used);
+}
+
+TEST(FieldTrialListTest, AddTrialsToAllocator) {
+ std::string save_string;
+ base::SharedMemoryHandle handle;
+
+ // Scoping the first FieldTrialList, as we need another one to test that it
+ // matches.
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ FieldTrialList::AllStatesToString(&save_string);
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ // 4 KiB is enough to hold the trials only created for this test.
+  shm->Map(4 << 10);
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ EXPECT_EQ(save_string, check_string);
+}
+
+TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
+ constexpr char kTrialName[] = "trial";
+ base::SharedMemoryHandle handle;
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ // Create a simulated trial and a real trial and call group() on them, which
+ // should only add the real trial to the field trial allocator.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // This shouldn't add to the allocator.
+ scoped_refptr<FieldTrial> simulated_trial =
+ FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, "Simulated",
+ 0.95);
+ simulated_trial->group();
+
+ // This should add to the allocator.
+ FieldTrial* real_trial =
+ FieldTrialList::CreateFieldTrial(kTrialName, "Real");
+ real_trial->group();
+
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ // Check that there's only one entry in the allocator.
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ // 4 KiB is enough to hold the trials only created for this test.
+  shm->Map(4 << 10);
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
+}
+
+TEST(FieldTrialListTest, AssociateFieldTrialParams) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Clear all cached params from the associator.
+ FieldTrialParamAssociator::GetInstance()->ClearAllCachedParamsForTesting();
+ // Check that the params have been cleared from the cache.
+ std::map<std::string, std::string> cached_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+ trial_name, group_name, &cached_params);
+ EXPECT_EQ(0U, cached_params.size());
+
+ // Check that we fetch the param from shared memory properly.
+ std::map<std::string, std::string> new_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+ &new_params);
+ EXPECT_EQ("value1", new_params["key1"]);
+ EXPECT_EQ("value2", new_params["key2"]);
+ EXPECT_EQ(2U, new_params.size());
+}
+
+TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ base::SharedMemoryHandle handle;
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrial* trial =
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Clear all params from the associator AND shared memory. The allocated
+ // segments should be different.
+ FieldTrial::FieldTrialRef old_ref = trial->ref_;
+ FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
+ FieldTrial::FieldTrialRef new_ref = trial->ref_;
+ EXPECT_NE(old_ref, new_ref);
+
+ // Check that there are no params associated with the field trial anymore.
+ std::map<std::string, std::string> new_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+ &new_params);
+ EXPECT_EQ(0U, new_params.size());
+
+ // Now duplicate the handle so we can easily check that the trial is still
+ // in shared memory via AllStatesToString.
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ // Check that we have the trial.
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->Map(4 << 10);
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ EXPECT_EQ("*Trial1/Group1/", check_string);
+}
+
+TEST(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory());
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->CreateAndMapAnonymous(4 << 10);
+ // We _could_ use PersistentMemoryAllocator; this one just takes fewer params.
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+
+ // Dump and subsequently retrieve the field trial to |allocator|.
+ FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(&allocator);
+ std::vector<const FieldTrial::FieldTrialEntry*> entries =
+ FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(allocator);
+
+ // Check that we have the entry we put in.
+ EXPECT_EQ(1u, entries.size());
+ const FieldTrial::FieldTrialEntry* entry = entries[0];
+
+ // Check that the trial and group names match.
+ StringPiece shm_trial_name;
+ StringPiece shm_group_name;
+ entry->GetTrialAndGroupName(&shm_trial_name, &shm_group_name);
+ EXPECT_EQ(trial_name, shm_trial_name);
+ EXPECT_EQ(group_name, shm_group_name);
+
+ // Check that the params match.
+ std::map<std::string, std::string> shm_params;
+ entry->GetParams(&shm_params);
+ EXPECT_EQ(2u, shm_params.size());
+ EXPECT_EQ("value1", shm_params["key1"]);
+ EXPECT_EQ("value2", shm_params["key2"]);
+}
+
} // namespace base
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 0d6287c0b1..de2ac336d3 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -217,7 +217,7 @@ HistogramBase* Histogram::Factory::Build() {
ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
- DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
+ CHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
if (bucket_count_ != 0 &&
!histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
// The construction arguments do not match the existing histogram. This can
@@ -533,7 +533,8 @@ Histogram::Histogram(const std::string& name,
Histogram::~Histogram() {
}
-bool Histogram::PrintEmptyBucket(uint32_t /*index*/) const {
+bool Histogram::PrintEmptyBucket(uint32_t index) const {
+ ALLOW_UNUSED_PARAM(index);
return true;
}
@@ -674,15 +675,14 @@ void Histogram::WriteAsciiHeader(const SampleVector& samples,
"Histogram: %s recorded %d samples",
histogram_name().c_str(),
sample_count);
- if (0 == sample_count) {
+ if (sample_count == 0) {
DCHECK_EQ(samples.sum(), 0);
} else {
- double average = static_cast<float>(samples.sum()) / sample_count;
-
- StringAppendF(output, ", average = %.1f", average);
+ double mean = static_cast<float>(samples.sum()) / sample_count;
+ StringAppendF(output, ", mean = %.1f", mean);
}
- if (flags() & ~kHexRangePrintingFlag)
- StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
+ if (flags())
+ StringAppendF(output, " (flags = 0x%x)", flags());
}
void Histogram::WriteAsciiBucketContext(const int64_t past,
@@ -754,8 +754,7 @@ class LinearHistogram::Factory : public Histogram::Factory {
std::unique_ptr<HistogramBase> HeapAlloc(
const BucketRanges* ranges) override {
- return WrapUnique(
- new LinearHistogram(name_, minimum_, maximum_, ranges));
+ return WrapUnique(new LinearHistogram(name_, minimum_, maximum_, ranges));
}
void FillHistogram(HistogramBase* base_histogram) override {
@@ -1139,8 +1138,11 @@ bool CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
return true;
}
-double CustomHistogram::GetBucketSize(Count /*current*/, uint32_t /*i*/) const {
- return 1;
+double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
+ ALLOW_UNUSED_PARAM(i);
+ // If this is a histogram of enum values, normalizing the bucket count
+ // by the bucket range is not helpful, so just return the bucket count.
+ return current;
}
// static
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index 2283a4d80f..a76dd63226 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -9,13 +9,11 @@
// It supports calls to accumulate either time intervals (which are processed
// as integral number of milliseconds), or arbitrary integral units.
-// For Histogram(exponential histogram), LinearHistogram and CustomHistogram,
+// For Histogram (exponential histogram), LinearHistogram and CustomHistogram,
// the minimum for a declared range is 1 (instead of 0), while the maximum is
-// (HistogramBase::kSampleType_MAX - 1). Currently you can declare histograms
-// with ranges exceeding those limits (e.g. 0 as minimal or
-// HistogramBase::kSampleType_MAX as maximal), but those excesses will be
-// silently clamped to those limits (for backwards compatibility with existing
-// code). Best practice is to not exceed the limits.
+// (HistogramBase::kSampleType_MAX - 1). However, there will always be underflow
+// and overflow buckets added automatically, so a 0 bucket will always exist
+// even when a minimum value of 1 is specified.
// Each use of a histogram with the same name will reference the same underlying
// data, so it is safe to record to the same histogram from multiple locations
@@ -41,7 +39,7 @@
// are also counted by the constructor in the user supplied "bucket_count"
// argument.
// The above example has an exponential ratio of 2 (doubling the bucket width
-// in each consecutive bucket. The Histogram class automatically calculates
+// in each consecutive bucket). The Histogram class automatically calculates
// the smallest ratio that it can use to construct the number of buckets
// selected in the constructor. As another example, if you had 50 buckets,
// and millisecond time values from 1 to 10000, then the ratio between
@@ -81,8 +79,6 @@
#include "base/macros.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
-// TODO(asvitkine): Migrate callers to to include this directly and remove this.
-#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/time/time.h"
@@ -92,7 +88,6 @@ class BooleanHistogram;
class CustomHistogram;
class Histogram;
class LinearHistogram;
-class PersistentMemoryAllocator;
class Pickle;
class PickleIterator;
class SampleVector;
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index 8c4f1eca18..396f29739a 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -97,8 +97,7 @@ bool HistogramBase::SerializeInfo(Pickle* pickle) const {
return SerializeInfoImpl(pickle);
}
-uint32_t HistogramBase::FindCorruption(
- const HistogramSamples& /*samples*/) const {
+uint32_t HistogramBase::FindCorruption(const HistogramSamples& /* samples */) const {
// Not supported by default.
return NO_INCONSISTENCIES;
}
@@ -119,14 +118,16 @@ void HistogramBase::WriteJSON(std::string* output) const {
root.SetInteger("flags", flags());
root.Set("params", std::move(parameters));
root.Set("buckets", std::move(buckets));
- root.SetInteger("pid", GetCurrentProcId());
+ root.SetInteger("pid", GetUniqueIdForProcess());
serializer.Serialize(root);
}
// static
void HistogramBase::EnableActivityReportHistogram(
const std::string& process_type) {
- DCHECK(!report_histogram_);
+ if (report_histogram_)
+ return;
+
size_t existing = StatisticsRecorder::GetHistogramCount();
if (existing != 0) {
DVLOG(1) << existing
@@ -174,12 +175,7 @@ void HistogramBase::WriteAsciiBucketGraph(double current_size,
const std::string HistogramBase::GetSimpleAsciiBucketRange(
Sample sample) const {
- std::string result;
- if (kHexRangePrintingFlag & flags())
- StringAppendF(&result, "%#x", sample);
- else
- StringAppendF(&result, "%d", sample);
- return result;
+ return StringPrintf("%d", sample);
}
void HistogramBase::WriteAsciiBucketValue(Count current,
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index d240099110..4f5ba049bc 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -21,7 +21,6 @@
namespace base {
-class BucketRanges;
class DictionaryValue;
class HistogramBase;
class HistogramSamples;
@@ -92,7 +91,7 @@ class BASE_EXPORT HistogramBase {
static const Sample kSampleType_MAX; // INT_MAX
enum Flags {
- kNoFlags = 0,
+ kNoFlags = 0x0,
// Histogram should be UMA uploaded.
kUmaTargetedHistogramFlag = 0x1,
@@ -121,9 +120,6 @@ class BASE_EXPORT HistogramBase {
// MemoryAllocator, and that loaded into the Histogram module before this
// histogram is created.
kIsPersistent = 0x40,
-
- // Only for Histogram and its sub classes: fancy bucket-naming support.
- kHexRangePrintingFlag = 0x8000,
};
// Histogram data inconsistency types.
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index ce1811a5a7..78473761dd 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -5,294 +5,311 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
#define BASE_METRICS_HISTOGRAM_MACROS_H_
-#include "base/atomicops.h"
-#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros_internal.h"
+#include "base/metrics/histogram_macros_local.h"
#include "base/time/time.h"
-// Macros for efficient use of histograms. See documentation in histogram.h.
+
+// Macros for efficient use of histograms.
//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
-// different #include dependencies.
+// For best practices on deciding when to emit to a histogram and what form
+// the histogram should take, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
+
+// TODO(rkaplow): Link to proper documentation on metric creation once we have
+// it in a good state.
+
+// All of these macros must be called with |name| as a runtime constant - it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated, it is
+// possible the data will be written to the wrong histogram.
//------------------------------------------------------------------------------
-// Histograms are often put in areas where they are called many many times, and
-// performance is critical. As a result, they are designed to have a very low
-// recurring cost of executing (adding additional samples). Toward that end,
-// the macros declare a static pointer to the histogram in question, and only
-// take a "slow path" to construct (or find) the histogram on the first run
-// through the macro. We leak the histograms at shutdown time so that we don't
-// have to validate using the pointers at any time during the running of the
-// process.
-
-// The following code is generally what a thread-safe static pointer
-// initialization looks like for a histogram (after a macro is expanded). This
-// sample is an expansion (with comments) of the code for
-// LOCAL_HISTOGRAM_CUSTOM_COUNTS().
-
-/*
- do {
- // The pointer's presence indicates the initialization is complete.
- // Initialization is idempotent, so it can safely be atomically repeated.
- static base::subtle::AtomicWord atomic_histogram_pointer = 0;
-
- // Acquire_Load() ensures that we acquire visibility to the pointed-to data
- // in the histogram.
- base::Histogram* histogram_pointer(reinterpret_cast<base::Histogram*>(
- base::subtle::Acquire_Load(&atomic_histogram_pointer)));
-
- if (!histogram_pointer) {
- // This is the slow path, which will construct OR find the matching
- // histogram. FactoryGet includes locks on a global histogram name map
- // and is completely thread safe.
- histogram_pointer = base::Histogram::FactoryGet(
- name, min, max, bucket_count, base::HistogramBase::kNoFlags);
-
- // Use Release_Store to ensure that the histogram data is made available
- // globally before we make the pointer visible.
- // Several threads may perform this store, but the same value will be
- // stored in all cases (for a given named/spec'ed histogram).
- // We could do this without any barrier, since FactoryGet entered and
- // exited a lock after construction, but this barrier makes things clear.
- base::subtle::Release_Store(&atomic_histogram_pointer,
- reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
- }
-
- // Ensure calling contract is upheld, and the name does NOT vary.
- DCHECK(histogram_pointer->histogram_name() == constant_histogram_name);
-
- histogram_pointer->Add(sample);
- } while (0);
-*/
-
-// The above pattern is repeated in several macros. The only elements that
-// vary are the invocation of the Add(sample) vs AddTime(sample), and the choice
-// of which FactoryGet method to use. The different FactoryGet methods have
-// various argument lists, so the function with its argument list is provided as
-// a macro argument here. The name is only used in a DCHECK, to assure that
-// callers don't try to vary the name of the histogram (which would tend to be
-// ignored by the one-time initialization of the histogtram_pointer).
-
-// In some cases (integration into 3rd party code), it's useful to seperate the
-// definition of |atomic_histogram_poiner| from its use. To achieve this we
-// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
-// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
-// and forwards to HISTOGRAM_POINTER_USE.
-#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer, \
- constant_histogram_name, \
- histogram_add_method_invocation, \
- histogram_factory_get_invocation) \
- do { \
- base::HistogramBase* histogram_pointer( \
- reinterpret_cast<base::HistogramBase*>( \
- base::subtle::Acquire_Load(atomic_histogram_pointer))); \
- if (!histogram_pointer) { \
- histogram_pointer = histogram_factory_get_invocation; \
- base::subtle::Release_Store( \
- atomic_histogram_pointer, \
- reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
- } \
- if (DCHECK_IS_ON()) \
- histogram_pointer->CheckName(constant_histogram_name); \
- histogram_pointer->histogram_add_method_invocation; \
- } while (0)
+// Enumeration histograms.
+
+// These macros create histograms for enumerated data. Ideally, the data should
+// be of the form "event occurs, log the result". We recommend not putting
+// related but not directly connected data as enums within the same histogram.
+// You should define an associated Enum, and the input sample should be an
+// element of that Enum.
+// All of these macros must be called with |name| as a runtime constant.
+
+// Sample usage:
+// UMA_HISTOGRAM_ENUMERATION("My.Enumeration", VALUE, EVENT_MAX_VALUE);
+// New Enum values can be added, but existing enums must never be renumbered or
+// deleted and reused. The value in |sample| must be strictly less than
+// |enum_max|.
+
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, \
+ base::HistogramBase::kUmaTargetedHistogramFlag)
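+
+// A minimal sketch of the intended pattern (hypothetical enum and histogram
+// name, for illustration only):
+//   enum MyEvent { EVENT_OPENED, EVENT_CLOSED, EVENT_MAX };
+//   UMA_HISTOGRAM_ENUMERATION("My.Event", EVENT_OPENED, EVENT_MAX);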
-// Defines the static |atomic_histogram_pointer| and forwards to
-// HISTOGRAM_POINTER_USE.
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
- histogram_add_method_invocation, \
- histogram_factory_get_invocation) \
- do { \
- static base::subtle::AtomicWord atomic_histogram_pointer = 0; \
- HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
- histogram_add_method_invocation, \
- histogram_factory_get_invocation); \
- } while (0)
+// Histogram for boolean values.
+
+// Sample usage:
+// UMA_HISTOGRAM_BOOLEAN("Histogram.Boolean", bool);
+#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+ base::BooleanHistogram::FactoryGet(name, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
//------------------------------------------------------------------------------
-// Provide easy general purpose histogram in a macro, just like stats counters.
-// Most of these macros use 50 buckets, but check the definition for details.
-//
-// All of these macros must be called with |name| as a runtime constant --- it
-// doesn't have to literally be a constant, but it must be the same string on
-// all calls from a particular call site. If this rule is violated,
-// STATIC_HISTOGRAM_POINTER_BLOCK will DCHECK, and if DCHECKS are disabled, the
-// data will be written to the wrong histogram.
+// Linear histograms.
-#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromSeconds(10), 50)
+// All of these macros must be called with |name| as a runtime constant.
+
+// Used for capturing integer data with a linear bucketing scheme. This can be
+// used when you want the exact value of some small numeric count, with a max of
+// 100 or less. If you need to capture a range greater than 100, we recommend
+// using the COUNT histograms below.
+
+// Sample usage:
+// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
+#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
+ UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
+
+// Used for capturing basic percentages. This will be 100 buckets of size 1.
-// For folks that need real specific times, use this to select a precise range
-// of times you want plotted, and the number of buckets you want used.
-#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
- base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
- base::HistogramBase::kNoFlags))
+// Sample usage:
+// UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+ UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
-#define LOCAL_HISTOGRAM_COUNTS(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+//------------------------------------------------------------------------------
+// Count histograms. These are used for collecting numeric data. Note that we
+// have macros for more specialized use cases below (memory, time, percentages).
+
+// The number suffixes here refer to the max size of the sample, i.e. COUNT_1000
+// will be able to collect samples of counts up to 1000. The default number of
+// buckets in all default macros is 50. We recommend erring on the side of too
+// large a range versus too short a range.
+// These macros default to exponential histograms - i.e. the lengths of the
+// bucket ranges exponentially increase as the sample range increases.
+// These should *not* be used if you are interested in exact counts, i.e. a
+// bucket range of 1. In those cases, you should use the ENUMERATION macros
+// defined above. These should also not be used to capture the number of some
+// event, e.g. "button X was clicked N times". In such cases, an enum should be
+// used, ideally with an appropriate baseline enum entry included.
+// All of these macros must be called with |name| as a runtime constant.
+
+// Sample usage:
+// UMA_HISTOGRAM_COUNTS_1M("My.Histogram", sample);
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 100000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000000, 50)
-#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
- LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
- LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::Histogram::FactoryGet(name, min, max, bucket_count, \
- base::HistogramBase::kNoFlags))
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
- flag))
-
-#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
- LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
- base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
-
-// Support histograming of an enumerated value. The samples should always be
-// strictly less than |boundary_value| -- this prevents you from running into
-// problems down the line if you add additional buckets to the histogram. Note
-// also that, despite explicitly setting the minimum bucket value to |1| below,
-// it is fine for enumerated histograms to be 0-indexed -- this is because
-// enumerated histograms should never have underflow.
-#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
- boundary_value + 1, base::HistogramBase::kNoFlags))
-
-// Support histograming of an enumerated value. Samples should be one of the
-// std::vector<int> list provided via |custom_ranges|. See comments above
-// CustomRanges::FactoryGet about the requirement of |custom_ranges|.
-// You can use the helper function CustomHistogram::ArrayToCustomRanges to
-// transform a C-style array of valid sample values to a std::vector<int>.
-#define LOCAL_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::CustomHistogram::FactoryGet(name, custom_ranges, \
- base::HistogramBase::kNoFlags))
-
-#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1000, 500000, 50)
+#define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 10000000, 50)
+
+// This can be used when the default ranges are not sufficient. This macro lets
+// the metric developer customize the min and max of the sampled range, as well
+// as the number of buckets recorded.
+// Any data outside the range here will be put in underflow and overflow
+// buckets. Min values should be >=1 as emitted 0s will still go into the
+// underflow bucket.
+
+// Sample usage:
+// UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", 1, 100000000, 100);
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
+ name, sample, min, max, bucket_count, \
+ base::HistogramBase::kUmaTargetedHistogramFlag)
//------------------------------------------------------------------------------
-// The following macros provide typical usage scenarios for callers that wish
-// to record histogram data, and have the data submitted/uploaded via UMA.
-// Not all systems support such UMA, but if they do, the following macros
-// should work with the service.
+// Timing histograms. These are used for collecting timing data (generally
+// latencies).
+
+// These macros create exponentially sized histograms (lengths of the bucket
+// ranges exponentially increase as the sample range increases). The input
+// sample is a base::TimeDelta. The output data is measured in ms granularity.
+// All of these macros must be called with |name| as a runtime constant.
-#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
+// Sample usage:
+// UMA_HISTOGRAM_TIMES("My.Timing.Histogram", time_delta);
+
+// Short timings - up to 10 seconds.
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromSeconds(10), 50)
-#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(10), \
+// Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
+// but not worth changing).
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(10), \
base::TimeDelta::FromMinutes(3), 50)
-// Use this macro when times can routinely be much longer than 10 seconds.
-#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
+// Long timings - up to an hour.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 50)
-// Use this macro when times can routinely be much longer than 10 seconds and
-// you want 100 buckets.
+// Long timings with higher granularity - up to an hour with 100 buckets.
#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 100)
-#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
- base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+// This can be used when the default ranges are not sufficient. This macro lets
+// the metric developer customize the min and max of the sampled range, as well
+// as the number of buckets recorded.
+
+// Sample usage:
+// UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", duration_in_ms,
+// base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+ base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::HistogramBase::kUmaTargetedHistogramFlag))
-#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000000, 50)
+// Scoped class which logs its time on this earth as a UMA statistic. This is
+// recommended for when you want a histogram which measures the time it takes
+// for a method to execute. This measures up to 10 seconds. It uses
+// UMA_HISTOGRAM_TIMES under the hood.
-#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 100, 50)
+// Sample usage:
+// void Function() {
+// SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
+// ...
+// }
+#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
+ INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
-#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000, 50)
+// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
+// which measures up to an hour, and uses 100 buckets. This is more expensive
+// to store, so only use if this often takes >10 seconds.
+#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name) \
+ INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
-#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 10000, 50)
-#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::Histogram::FactoryGet(name, min, max, bucket_count, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
+//------------------------------------------------------------------------------
+// Memory histograms.
-#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1000, 500000, 50)
+// These macros create exponentially sized histograms (lengths of the bucket
+// ranges exponentially increase as the sample range increases). The input
+// sample must be a number measured in kilobytes.
+// All of these macros must be called with |name| as a runtime constant.
-#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000, 50)
+// Sample usage:
+// UMA_HISTOGRAM_MEMORY_KB("My.Memory.Histogram", memory_in_kb);
-#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
+// Used to measure common KB-granularity memory stats. Range is up to 500000KB -
+// approximately 500M.
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
+
+// Used to measure common MB-granularity memory stats. Range is up to ~64G.
+#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
-#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
- UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
- base::BooleanHistogram::FactoryGet(name, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
+//------------------------------------------------------------------------------
+// Stability-specific histograms.
-// The samples should always be strictly less than |boundary_value|. For more
-// details, see the comment for the |LOCAL_HISTOGRAM_ENUMERATION| macro, above.
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
- base::HistogramBase::kUmaTargetedHistogramFlag)
+// Histograms logged as stability histograms will be included in the initial
+// stability log. See the comments by the declaration of
+// MetricsService::PrepareInitialStabilityLog().
+// All of these macros must be called with |name| as a runtime constant.
+
+// For details on usage, see the documentation on the non-stability equivalents.
-// Similar to UMA_HISTOGRAM_ENUMERATION, but used for recording stability
-// histograms. Use this if recording a histogram that should be part of the
-// initial stability log.
-#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample) \
+ UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, \
+ bucket_count) \
+ INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
+ name, sample, min, max, bucket_count, \
base::HistogramBase::kUmaStabilityHistogramFlag)
-#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::CustomHistogram::FactoryGet(name, custom_ranges, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, \
+ base::HistogramBase::kUmaStabilityHistogramFlag)
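+
+// Sample usage (hypothetical names, mirroring the non-stability macros):
+//   UMA_STABILITY_HISTOGRAM_COUNTS_100("My.Stability.Count", sample);
+//   UMA_STABILITY_HISTOGRAM_ENUMERATION("My.Stability.Enum", value, ENUM_MAX);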
-// Scoped class which logs its time on this earth as a UMA statistic. This is
-// recommended for when you want a histogram which measures the time it takes
-// for a method to execute. This measures up to 10 seconds.
-#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
- SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
+//------------------------------------------------------------------------------
+// Sparse histograms.
-// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
-// which measures up to an hour, and uses 100 buckets. This is more expensive
-// to store, so only use if this often takes >10 seconds.
-#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name) \
- SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
-
-// This nested macro is necessary to expand __COUNTER__ to an actual value.
-#define SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key) \
- SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
-
-#define SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key) \
- class ScopedHistogramTimer##key { \
- public: \
- ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
- ~ScopedHistogramTimer##key() { \
- base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
- if (is_long) { \
- UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed); \
- } else { \
- UMA_HISTOGRAM_TIMES(name, elapsed); \
- } \
- } \
- private: \
- base::TimeTicks constructed_; \
- } scoped_histogram_timer_##key
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
+// infrequently recorded values since the implementation is slower
+// and takes more memory.
+//
+// For instance, Sqlite.Version.* are sparse because for any given database,
+// there's going to be exactly one version logged.
+// The |sample| can be any integer, including negative values.
+#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+ INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)
+
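+// Sample usage (hypothetical name):
+//   UMA_HISTOGRAM_SPARSE_SLOWLY("My.Sparse.Histogram", exact_sample_value);
+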
+//------------------------------------------------------------------------------
+// Histogram instantiation helpers.
+
+// Support a collection of histograms, perhaps one for each entry in an
+// enumeration. This macro manages a block of pointers, adding to a specific
+// one by its index.
+//
+// A typical instantiation looks something like this:
+// STATIC_HISTOGRAM_POINTER_GROUP(
+// GetHistogramNameForIndex(histogram_index),
+// histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
+// base::Histogram::FactoryGet(
+// GetHistogramNameForIndex(histogram_index),
+// MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
+// base::HistogramBase::kUmaTargetedHistogramFlag));
+//
+// Though it seems inefficient to generate the name twice, the first
+// instance will be used only for DCHECK builds and the second will
+// execute only during the first access to the given index, after which
+// the pointer is cached and the name never needed again.
+#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index, \
+ constant_maximum, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation) \
+ do { \
+ static base::subtle::AtomicWord atomic_histograms[constant_maximum]; \
+ DCHECK_LE(0, index); \
+ DCHECK_LT(index, constant_maximum); \
+ HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation); \
+ } while (0)
+
+//------------------------------------------------------------------------------
+// Deprecated histogram macros. Not recommended for current use.
+
+// Legacy name for UMA_HISTOGRAM_COUNTS_1M. We suggest using the explicitly
+// named macro instead of this one going forward.
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+// MB-granularity memory metric. This has a short max (1G).
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000, 50)
+
+// For an enum with customized range. In general, sparse histograms should be
+// used instead.
+// Samples should be one of the std::vector<int> list provided via
+// |custom_ranges|. See comments above CustomRanges::FactoryGet about the
+// requirement of |custom_ranges|. You can use the helper function
+// CustomHistogram::ArrayToCustomRanges to transform a C-style array of valid
+// sample values to a std::vector<int>.
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::CustomHistogram::FactoryGet(name, custom_ranges, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
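+
+// Sample usage (hypothetical values; ArrayToCustomRanges converts a C-style
+// array of valid sample values into the required std::vector<int>):
+//   const int kCustomValues[] = {1, 2, 4, 8};
+//   UMA_HISTOGRAM_CUSTOM_ENUMERATION("My.Custom.Enum", sample,
+//       base::CustomHistogram::ArrayToCustomRanges(kCustomValues, 4));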
#endif // BASE_METRICS_HISTOGRAM_MACROS_H_
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
new file mode 100644
index 0000000000..53e4f11b75
--- /dev/null
+++ b/base/metrics/histogram_macros_internal.h
@@ -0,0 +1,157 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/time/time.h"
+
+// This is for macros internal to base/metrics. They should not be used outside
+// of this directory. For writing to UMA histograms, see histogram_macros.h.
+
+// TODO(rkaplow): Improve commenting of these methods.
+
+//------------------------------------------------------------------------------
+// Histograms are often put in areas where they are called many, many times, and
+// performance is critical. As a result, they are designed to have a very low
+// recurring cost of executing (adding additional samples). Toward that end,
+// the macros declare a static pointer to the histogram in question, and only
+// take a "slow path" to construct (or find) the histogram on the first run
+// through the macro. We leak the histograms at shutdown time so that we don't
+// have to validate using the pointers at any time during the running of the
+// process.
+
+
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer, \
+ constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation) \
+ do { \
+ /* \
+ * Acquire_Load() ensures that we acquire visibility to the \
+ * pointed-to data in the histogram. \
+ */ \
+ base::HistogramBase* histogram_pointer( \
+ reinterpret_cast<base::HistogramBase*>( \
+ base::subtle::Acquire_Load(atomic_histogram_pointer))); \
+ if (!histogram_pointer) { \
+ /* \
+ * This is the slow path, which will construct OR find the \
+ * matching histogram. histogram_factory_get_invocation includes \
+ * locks on a global histogram name map and is completely thread \
+ * safe. \
+ */ \
+ histogram_pointer = histogram_factory_get_invocation; \
+ \
+ /* \
+ * Use Release_Store to ensure that the histogram data is made \
+ * available globally before we make the pointer visible. Several \
+ * threads may perform this store, but the same value will be \
+ * stored in all cases (for a given named/spec'ed histogram). \
+ * We could do this without any barrier, since FactoryGet entered \
+ * and exited a lock after construction, but this barrier makes \
+ * things clear. \
+ */ \
+ base::subtle::Release_Store( \
+ atomic_histogram_pointer, \
+ reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
+ } \
+ if (DCHECK_IS_ON()) \
+ histogram_pointer->CheckName(constant_histogram_name); \
+ histogram_pointer->histogram_add_method_invocation; \
+ } while (0)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation) \
+ do { \
+ /* \
+ * The pointer's presence indicates that the initialization is complete. \
+ * Initialization is idempotent, so it can safely be atomically repeated. \
+ */ \
+ static base::subtle::AtomicWord atomic_histogram_pointer = 0; \
+ HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation); \
+ } while (0)
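+
+// A typical invocation, mirroring the public macros in histogram_macros.h
+// (names here are illustrative):
+//   STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample),
+//       base::Histogram::FactoryGet(name, min, max, bucket_count,
+//                                   base::HistogramBase::kNoFlags));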
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(name, sample, min, max, \
+ bucket_count, flag) \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), \
+ base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// For an enumeration with N items, recording values in the range [0, N - 1],
+// this macro creates a linear histogram with N + 1 buckets:
+// [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
+// Code should never emit to the overflow bucket; only to the other N buckets.
+// This allows future versions of Chrome to safely append new entries to the
+// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
+// overflow bucket, and so the maximal value (N - 1) would be emitted to this
+// overflow bucket. But, if an additional enumerated value were later added, the
+// bucket label for the value (N - 1) would change to [N - 1, N), which would
+// result in different versions of Chrome using different bucket labels for
+// identical data.
+#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+ do { \
+ static_assert( \
+ !std::is_enum<decltype(sample)>::value || \
+ !std::is_enum<decltype(boundary)>::value || \
+ std::is_same<std::remove_const<decltype(sample)>::type, \
+ std::remove_const<decltype(boundary)>::type>::value, \
+ "|sample| and |boundary| shouldn't be of different enums"); \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), base::LinearHistogram::FactoryGet( \
+ name, 1, boundary, boundary + 1, flag)); \
+ } while (0)
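+
+// Worked example (hypothetical enum): with N = 3 values {A = 0, B = 1, C = 2}
+// and boundary = 3, the call FactoryGet(name, 1, 3, 4, flag) produces buckets
+// [0, 1), [1, 2), [2, 3) plus the overflow bucket [3, infinity), so a later
+// value D = 3 can be recorded without relabeling any existing bucket.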
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// This is necessary to expand __COUNTER__ to an actual value.
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key) \
+ INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key) \
+ class ScopedHistogramTimer##key { \
+ public: \
+ ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
+ ~ScopedHistogramTimer##key() { \
+ base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
+ if (is_long) { \
+ UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed); \
+ } else { \
+ UMA_HISTOGRAM_TIMES(name, elapsed); \
+ } \
+ } \
+ private: \
+ base::TimeTicks constructed_; \
+ } scoped_histogram_timer_##key
+
+// Macro for sparse histogram.
+// The implementation is more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However, it
+// may be more efficient in memory if the total number of sample values is
+// small compared to the range of their values.
+#define INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+ do { \
+ base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
+ name, base::HistogramBase::kUmaTargetedHistogramFlag); \
+ histogram->Add(sample); \
+ } while (0)
+
+#endif // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/base/metrics/histogram_macros_local.h b/base/metrics/histogram_macros_local.h
new file mode 100644
index 0000000000..7571a9c4ad
--- /dev/null
+++ b/base/metrics/histogram_macros_local.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros_internal.h"
+#include "base/time/time.h"
+
+// TODO(rkaplow): Migrate all LOCAL_* usage within Chromium to include this
+// file instead of the histogram_macros.h file.
+
+//------------------------------------------------------------------------------
+// Enumeration histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, \
+ base::HistogramBase::kNoFlags)
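+
+// Sample usage (hypothetical enum; same calling pattern as
+// UMA_HISTOGRAM_ENUMERATION):
+//   LOCAL_HISTOGRAM_ENUMERATION("My.Local.Enum", EVENT_OPENED, EVENT_MAX);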
+
+#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+ base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
+
+//------------------------------------------------------------------------------
+// Percentage histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h
+
+#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+//------------------------------------------------------------------------------
+// Count histograms. These are used for collecting numeric data. Note that we
+// have macros for more specialized use cases below (memory, time, percentages).
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_1000000(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
+ name, sample, min, max, bucket_count, base::HistogramBase::kNoFlags)
+
+//------------------------------------------------------------------------------
+// Timing histograms. These are used for collecting timing data (generally
+// latencies).
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+ base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::HistogramBase::kNoFlags))
+
+//------------------------------------------------------------------------------
+// Memory histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1000, 500000, 50)
+
+//------------------------------------------------------------------------------
+// Deprecated histograms. Not recommended for current use.
+
+// TODO(rkaplow): See if we can clean up this macro and usage.
+// Legacy non-explicit version. We suggest using LOCAL_HISTOGRAM_COUNTS_1000000
+// instead.
+#define LOCAL_HISTOGRAM_COUNTS(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
+
+#endif // BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index e28573fa7e..93f6d21c8a 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -27,6 +27,9 @@ class SampleCountIterator;
class BASE_EXPORT HistogramSamples {
public:
struct Metadata {
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 24;
+
// Initialized when the sample-set is first created with a value provided
// by the caller. It is generally used to identify the sample-set across
// threads and processes, though not necessarily uniquely as it is possible
@@ -55,7 +58,21 @@ class BASE_EXPORT HistogramSamples {
// might mismatch even when no memory corruption has happened.
HistogramBase::AtomicCount redundant_count;
- Metadata() : id(0), sum(0), redundant_count(0) {}
+ // 4 bytes of padding to explicitly extend this structure to a multiple of
+ // 64-bits. This is required to ensure the structure is the same size on
+ // both 32-bit and 64-bit builds.
+ char padding[4];
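+ // Layout check (assuming an 8-byte id, an 8-byte sum and a 4-byte
+ // redundant_count): 8 + 8 + 4 + 4 bytes of padding = 24, which matches
+ // kExpectedInstanceSize on both 32-bit and 64-bit builds.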
+ };
+
+ // Because structures held in persistent memory must be POD, there can be no
+ // default constructor to clear the fields. This derived class exists just
+ // to clear them when being allocated on the heap.
+ struct LocalMetadata : Metadata {
+ LocalMetadata() {
+ id = 0;
+ sum = 0;
+ redundant_count = 0;
+ }
};
explicit HistogramSamples(uint64_t id);
@@ -102,7 +119,7 @@ class BASE_EXPORT HistogramSamples {
// In order to support histograms shared through an external memory segment,
// meta values may be the local storage or external storage depending on the
// wishes of the derived class.
- Metadata local_meta_;
+ LocalMetadata local_meta_;
Metadata* meta_;
DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index 340505e519..a774ea6177 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -53,6 +53,8 @@ void HistogramSnapshotManager::PrepareSamples(
for (size_t i = 0; i < ranges->size(); ++i)
ranges_copy.push_back(ranges->range(i));
HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
+ uint32_t ranges_checksum = ranges->checksum();
+ uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
const char* histogram_name = histogram->histogram_name().c_str();
int32_t flags = histogram->flags();
// The checksum should have caught this, so crash separately if it didn't.
@@ -61,6 +63,8 @@ void HistogramSnapshotManager::PrepareSamples(
// Ensure that compiler keeps around pointers to |histogram| and its
// internal |bucket_ranges_| for any minidumps.
base::debug::Alias(&ranges_ptr);
+ base::debug::Alias(&ranges_checksum);
+ base::debug::Alias(&ranges_calc_checksum);
base::debug::Alias(&histogram_name);
base::debug::Alias(&flags);
}
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index 26fb93fd20..de4a2e195a 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -24,7 +24,7 @@ class HistogramFlattener;
// histograms for recording either to disk or for transmission (such as from
// renderer to browser, or from browser to UMA upload). Since histograms can sit
// in memory for an extended period of time, and are vulnerable to memory
-// corruption, this class also validates as much rendundancy as it can before
+// corruption, this class also validates as much redundancy as it can before
// calling for the marginal change (a.k.a., delta) in a histogram to be
// recorded.
class BASE_EXPORT HistogramSnapshotManager {
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 5c2ca6883a..02ed93ba0e 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -22,6 +22,7 @@
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -701,7 +702,6 @@ TEST_P(HistogramTest, FactoryTime) {
<< "ns each.";
}
-#if GTEST_HAS_DEATH_TEST
// For Histogram, LinearHistogram and CustomHistogram, the minimum for a
// declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
// 1). But we accept ranges exceeding those limits, and silently clamped to
@@ -735,17 +735,18 @@ TEST(HistogramDeathTest, BadRangesTest) {
// CustomHistogram does not accepts kSampleType_MAX as range.
custom_ranges.push_back(HistogramBase::kSampleType_MAX);
- EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
- HistogramBase::kNoFlags),
+ EXPECT_DEATH_IF_SUPPORTED(
+ CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
+ HistogramBase::kNoFlags),
"");
// CustomHistogram needs at least 1 valid range.
custom_ranges.clear();
custom_ranges.push_back(0);
- EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
- HistogramBase::kNoFlags),
+ EXPECT_DEATH_IF_SUPPORTED(
+ CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
+ HistogramBase::kNoFlags),
"");
}
-#endif
} // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 5af3486645..29910036c7 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
@@ -35,7 +36,6 @@ const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
- kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2
kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
};
@@ -45,8 +45,10 @@ enum : uint32_t {
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway due to the fact that they depend on data
-// managed elsewhere and which could be destructed first.
-GlobalHistogramAllocator* g_allocator = nullptr;
+// managed elsewhere and which could be destructed first. An AtomicWord is
+// used instead of std::atomic because the latter can create global ctors
+// and dtors.
+subtle::AtomicWord g_allocator = 0;
// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
@@ -117,7 +119,7 @@ PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
return found->second.get();
std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
- samples = WrapUnique(new PersistentSampleMapRecords(this, id));
+ samples = MakeUnique<PersistentSampleMapRecords>(this, id);
return samples.get();
}
@@ -224,6 +226,13 @@ PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
+ // SHA1(Histogram): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize =
+ 40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
+
int32_t histogram_type;
int32_t flags;
int32_t minimum;
@@ -238,7 +247,7 @@ struct PersistentHistogramAllocator::PersistentHistogramData {
// Space for the histogram name will be added during the actual allocation
// request. This must be the last field of the structure. A zero-size array
// or a "flexible" array would be preferred but is not (yet) valid C++.
- char name[1];
+ char name[sizeof(uint64_t)]; // Force 64-bit alignment on 32-bit builds.
};
PersistentHistogramAllocator::Iterator::Iterator(
@@ -248,7 +257,7 @@ PersistentHistogramAllocator::Iterator::Iterator(
std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
PersistentMemoryAllocator::Reference ref;
- while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
+ while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
if (ref != ignore)
return allocator_->GetHistogram(ref);
}
@@ -271,11 +280,17 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// add it to the local list of known histograms (while these may be simple
// references to histograms in other processes).
PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(
- ref, kTypeIdHistogram);
+ memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
size_t length = memory_allocator_->GetAllocSize(ref);
+
+ // Check that metadata is reasonable: name is NUL terminated and non-empty,
+ // ID fields have been loaded with a hash of the name (0 is considered
+ // unset/invalid).
if (!histogram_data ||
- reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
+ reinterpret_cast<char*>(histogram_data)[length - 1] != '\0' ||
+ histogram_data->name[0] == '\0' ||
+ histogram_data->samples_metadata.id == 0 ||
+ histogram_data->logged_metadata.id == 0) {
RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
NOTREACHED();
return nullptr;
@@ -302,14 +317,13 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// Create the metadata necessary for a persistent sparse histogram. This
// is done first because it is a small subset of what is required for
- // other histograms.
- PersistentMemoryAllocator::Reference histogram_ref =
- memory_allocator_->Allocate(
- offsetof(PersistentHistogramData, name) + name.length() + 1,
- kTypeIdHistogram);
+ // other histograms. The type is "under construction" so that a crash
+ // during the datafill doesn't leave a bad record around that could cause
+ // confusion by another process trying to read it. It will be corrected
+ // once histogram construction is complete.
PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
- kTypeIdHistogram);
+ memory_allocator_->New<PersistentHistogramData>(
+ offsetof(PersistentHistogramData, name) + name.length() + 1);
if (histogram_data) {
memcpy(histogram_data->name, name.c_str(), name.size() + 1);
histogram_data->histogram_type = histogram_type;
@@ -326,14 +340,15 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return nullptr;
}
- size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+ size_t ranges_count = bucket_count + 1;
+ size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
PersistentMemoryAllocator::Reference counts_ref =
memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
PersistentMemoryAllocator::Reference ranges_ref =
memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(
- ranges_ref, kTypeIdRangesArray);
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray, ranges_count);
// Only continue here if all allocations were successful. If they weren't,
// there is no way to free the space but that's not really a problem since
@@ -365,6 +380,11 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// correct before committing the new histogram to persistent space.
std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
DCHECK(histogram);
+ DCHECK_NE(0U, histogram_data->samples_metadata.id);
+ DCHECK_NE(0U, histogram_data->logged_metadata.id);
+
+ PersistentMemoryAllocator::Reference histogram_ref =
+ memory_allocator_->GetAsReference(histogram_data);
if (ref_ptr != nullptr)
*ref_ptr = histogram_ref;
@@ -386,22 +406,31 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
}
RecordCreateHistogramResult(result);
- NOTREACHED() << "error=" << result;
+
+ // Crash for failures caused by internal bugs but not "full" which is
+ // dependent on outside code.
+ if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
+ NOTREACHED() << memory_allocator_->Name() << ", error=" << result;
return nullptr;
}
void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
bool registered) {
- // If the created persistent histogram was registered then it needs to
- // be marked as "iterable" in order to be found by other processes.
- if (registered)
+ if (registered) {
+ // If the created persistent histogram was registered then it needs to
+ // be marked as "iterable" in order to be found by other processes. This
+ // happens only after the histogram is fully formed so it's impossible for
+ // code iterating through the allocator to read a partially created record.
memory_allocator_->MakeIterable(ref);
- // If it wasn't registered then a race condition must have caused
- // two to be created. The allocator does not support releasing the
- // acquired memory so just change the type to be empty.
- else
- memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+ } else {
+ // If it wasn't registered then a race condition must have caused two to
+ // be created. The allocator does not support releasing the acquired memory
+ // so just change the type to be empty.
+ memory_allocator_->ChangeType(ref, 0,
+ PersistentHistogramData::kPersistentTypeId,
+ /*clear=*/false);
+ }
}
void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
@@ -477,15 +506,10 @@ PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
static bool initialized = false;
if (!initialized) {
initialized = true;
- if (g_allocator) {
-// Don't log in release-with-asserts builds, otherwise the test_installer step
-// fails because this code writes to a log file before the installer code had a
-// chance to set the log file's location.
-#if !defined(DCHECK_ALWAYS_ON)
- DLOG(WARNING) << "Creating the results-histogram inside persistent"
- << " memory can cause future allocations to crash if"
- << " that memory is ever released (for testing).";
-#endif
+ if (GlobalHistogramAllocator::Get()) {
+ DVLOG(1) << "Creating the results-histogram inside persistent"
+ << " memory can cause future allocations to crash if"
+ << " that memory is ever released (for testing).";
}
histogram_pointer = LinearHistogram::FactoryGet(
@@ -527,8 +551,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData histogram_data = *histogram_data_ptr;
HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(
- histogram_data.ranges_ref, kTypeIdRangesArray);
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
+ histogram_data.ranges_ref, kTypeIdRangesArray,
+ PersistentMemoryAllocator::kSizeAny);
const uint32_t max_buckets =
std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
@@ -557,8 +582,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
created_ranges.release());
HistogramBase::AtomicCount* counts_data =
- memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
- histogram_data.counts_ref, kTypeIdCountsArray);
+ memory_allocator_->GetAsArray<HistogramBase::AtomicCount>(
+ histogram_data.counts_ref, kTypeIdCountsArray,
+ PersistentMemoryAllocator::kSizeAny);
size_t counts_bytes =
CalculateRequiredCountsBytes(histogram_data.bucket_count);
if (!counts_data || counts_bytes == 0 ||
@@ -628,7 +654,7 @@ PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
const HistogramBase* histogram) {
// This should never be called on the global histogram allocator as objects
// created there are already within the global statistics recorder.
- DCHECK_NE(g_allocator, this);
+ DCHECK_NE(GlobalHistogramAllocator::Get(), this);
DCHECK(histogram);
HistogramBase* existing =
@@ -638,7 +664,9 @@ PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
// Adding the passed histogram to the SR would cause a problem if the
// allocator that holds it eventually goes away. Instead, create a new
- // one from a serialized version.
+ // one from a serialized version. Deserialization calls the appropriate
+ // FactoryGet() which will create the histogram in the global persistent-
+ // histogram allocator if such is set.
base::Pickle pickle;
if (!histogram->SerializeInfo(&pickle))
return nullptr;
@@ -670,9 +698,9 @@ void GlobalHistogramAllocator::CreateWithPersistentMemory(
size_t page_size,
uint64_t id,
StringPiece name) {
- Set(WrapUnique(new GlobalHistogramAllocator(
- WrapUnique(new PersistentMemoryAllocator(
- base, size, page_size, id, name, false)))));
+ Set(WrapUnique(
+ new GlobalHistogramAllocator(MakeUnique<PersistentMemoryAllocator>(
+ base, size, page_size, id, name, false))));
}
// static
@@ -681,12 +709,12 @@ void GlobalHistogramAllocator::CreateWithLocalMemory(
uint64_t id,
StringPiece name) {
Set(WrapUnique(new GlobalHistogramAllocator(
- WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
+ MakeUnique<LocalPersistentMemoryAllocator>(size, id, name))));
}
#if !defined(OS_NACL)
// static
-void GlobalHistogramAllocator::CreateWithFile(
+bool GlobalHistogramAllocator::CreateWithFile(
const FilePath& file_path,
size_t size,
uint64_t id,
@@ -706,14 +734,55 @@ void GlobalHistogramAllocator::CreateWithFile(
if (!mmfile->IsValid() ||
!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
NOTREACHED();
- return;
+ return false;
}
- Set(WrapUnique(new GlobalHistogramAllocator(
- WrapUnique(new FilePersistentMemoryAllocator(
- std::move(mmfile), size, id, name, false)))));
+ Set(WrapUnique(
+ new GlobalHistogramAllocator(MakeUnique<FilePersistentMemoryAllocator>(
+ std::move(mmfile), size, id, name, false))));
+ Get()->SetPersistentLocation(file_path);
+ return true;
}
-#endif
+
+// static
+bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
+ const FilePath& active_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name) {
+ if (!base::ReplaceFile(active_path, base_path, nullptr))
+ base::DeleteFile(base_path, /*recursive=*/false);
+
+ return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
+ name);
+}
+
+// static
+bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
+ size_t size,
+ uint64_t id,
+ StringPiece name) {
+ FilePath base_path, active_path;
+ ConstructFilePaths(dir, name, &base_path, &active_path);
+ return CreateWithActiveFile(base_path, active_path, size, id, name);
+}
+
+// static
+void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
+ StringPiece name,
+ FilePath* out_base_path,
+ FilePath* out_active_path) {
+ if (out_base_path) {
+ *out_base_path = dir.AppendASCII(name).AddExtension(
+ PersistentMemoryAllocator::kFileExtension);
+ }
+ if (out_active_path) {
+ *out_active_path =
+ dir.AppendASCII(name.as_string() + std::string("-active"))
+ .AddExtension(PersistentMemoryAllocator::kFileExtension);
+ }
+}
+#endif // !defined(OS_NACL)
// static
void GlobalHistogramAllocator::CreateWithSharedMemory(
@@ -728,9 +797,9 @@ void GlobalHistogramAllocator::CreateWithSharedMemory(
}
DCHECK_LE(memory->mapped_size(), size);
- Set(WrapUnique(new GlobalHistogramAllocator(
- WrapUnique(new SharedPersistentMemoryAllocator(
- std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
+ Set(WrapUnique(
+ new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
+ std::move(memory), 0, StringPiece(), /*readonly=*/false))));
}
// static
@@ -745,9 +814,9 @@ void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
return;
}
- Set(WrapUnique(new GlobalHistogramAllocator(
- WrapUnique(new SharedPersistentMemoryAllocator(
- std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+ Set(WrapUnique(
+ new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
+ std::move(shm), 0, StringPiece(), /*readonly=*/false))));
}
// static
@@ -756,8 +825,9 @@ void GlobalHistogramAllocator::Set(
// Releasing or changing an allocator is extremely dangerous because it
// likely has histograms stored within it. If the backing memory is
// also released, future accesses to those histograms will seg-fault.
- CHECK(!g_allocator);
- g_allocator = allocator.release();
+ CHECK(!subtle::NoBarrier_Load(&g_allocator));
+ subtle::Release_Store(&g_allocator,
+ reinterpret_cast<uintptr_t>(allocator.release()));
size_t existing = StatisticsRecorder::GetHistogramCount();
DVLOG_IF(1, existing)
@@ -766,13 +836,14 @@ void GlobalHistogramAllocator::Set(
// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
- return g_allocator;
+ return reinterpret_cast<GlobalHistogramAllocator*>(
+ subtle::Acquire_Load(&g_allocator));
}
// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
- GlobalHistogramAllocator* histogram_allocator = g_allocator;
+ GlobalHistogramAllocator* histogram_allocator = Get();
if (!histogram_allocator)
return nullptr;
PersistentMemoryAllocator* memory_allocator =
@@ -782,13 +853,9 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// Recorder forget about the histograms contained therein; otherwise,
// some operations will try to access them and the released memory.
PersistentMemoryAllocator::Iterator iter(memory_allocator);
- PersistentMemoryAllocator::Reference ref;
- while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
- PersistentHistogramData* histogram_data =
- memory_allocator->GetAsObject<PersistentHistogramData>(
- ref, kTypeIdHistogram);
- DCHECK(histogram_data);
- StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+ const PersistentHistogramData* data;
+ while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
+ StatisticsRecorder::ForgetHistogramForTesting(data->name);
// If a test breaks here then a memory region containing a histogram
// actively used by this code is being released back to the test.
@@ -797,10 +864,10 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// the method GetCreateHistogramResultHistogram() *before* setting
// the (temporary) memory allocator via SetGlobalAllocator() so that
// histogram is instead allocated from the process heap.
- DCHECK_NE(kResultHistogram, histogram_data->name);
+ DCHECK_NE(kResultHistogram, data->name);
}
- g_allocator = nullptr;
+ subtle::Release_Store(&g_allocator, 0);
return WrapUnique(histogram_allocator);
}
@@ -837,10 +904,30 @@ bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#endif
}
+void GlobalHistogramAllocator::DeletePersistentLocation() {
+#if defined(OS_NACL)
+ NOTREACHED();
+#else
+ if (persistent_location_.empty())
+ return;
+
+ // Open (with delete) and then immediately close the file by going out of
+ // scope. This is the only cross-platform safe way to delete a file that may
+ // be open elsewhere. Open handles will continue to operate normally but
+ // new opens will not be possible.
+ File file(persistent_location_,
+ File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
+#endif
+}
+
GlobalHistogramAllocator::GlobalHistogramAllocator(
std::unique_ptr<PersistentMemoryAllocator> memory)
: PersistentHistogramAllocator(std::move(memory)),
- import_iterator_(this) {}
+ import_iterator_(this) {
+ // Make sure the StatisticsRecorder is initialized to prevent duplicate
+ // histograms from being created. It's safe to call this multiple times.
+ StatisticsRecorder::Initialize();
+}
void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
// Skip the import if it's the histogram that was last created. Should a
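The g_allocator rework in this file replaces a raw pointer with a base::subtle::AtomicWord so that no global constructor or destructor is emitted, while Release_Store and Acquire_Load pair up to publish the object safely. A minimal sketch of that pattern, with MyGlobal standing in for GlobalHistogramAllocator:

#include "base/atomicops.h"

class MyGlobal;  // Stand-in for GlobalHistogramAllocator.

// Plain integer word: no global ctor/dtor is emitted, unlike std::atomic.
base::subtle::AtomicWord g_instance = 0;

void SetGlobal(MyGlobal* instance) {
  // "Release" publishes all writes made while constructing |instance|
  // before the pointer itself becomes visible to other threads.
  base::subtle::Release_Store(
      &g_instance, reinterpret_cast<base::subtle::AtomicWord>(instance));
}

MyGlobal* GetGlobal() {
  // "Acquire" pairs with the release above so readers see a fully
  // constructed object.
  return reinterpret_cast<MyGlobal*>(base::subtle::Acquire_Load(&g_instance));
}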
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index ee1fba5f62..2eb28dfaf5 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -19,6 +19,7 @@
namespace base {
+class BucketRanges;
class FilePath;
class PersistentSampleMapRecords;
class PersistentSparseHistogramDataManager;
@@ -55,8 +56,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager {
// Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator.
template <typename T>
- T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
- return allocator_->GetAsObject<T>(ref, type_id);
+ T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+ return allocator_->GetAsObject<T>(ref);
}
private:
@@ -130,8 +131,8 @@ class BASE_EXPORT PersistentSampleMapRecords {
// cleanliness of the interface), a template is defined that will be
// resolved when used inside that file.
template <typename T>
- T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
- return data_manager_->GetAsObject<T>(ref, type_id);
+ T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+ return data_manager_->GetAsObject<T>(ref);
}
private:
@@ -394,11 +395,40 @@ class BASE_EXPORT GlobalHistogramAllocator
// Create a global allocator by memory-mapping a |file|. If the file does
// not exist, it will be created with the specified |size|. If the file does
// exist, the allocator will use and add to its contents, ignoring the passed
- // size in favor of the existing size.
- static void CreateWithFile(const FilePath& file_path,
+ // size in favor of the existing size. Returns whether the global allocator
+ // was set.
+ static bool CreateWithFile(const FilePath& file_path,
size_t size,
uint64_t id,
StringPiece name);
+
+ // Creates a new file at |active_path|. If it already exists, it will first be
+ // moved to |base_path|. In all cases, any old file at |base_path| will be
+ // removed. The file will be created using the given size, id, and name.
+ // Returns whether the global allocator was set.
+ static bool CreateWithActiveFile(const FilePath& base_path,
+ const FilePath& active_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name);
+
+ // Uses ConstructFilePaths() to build a pair of file names which
+ // are then used for CreateWithActiveFile(). |name| is used for both the
+ // internal name for the allocator and also for the name of the file inside
+ // |dir|.
+ static bool CreateWithActiveFileInDir(const FilePath& dir,
+ size_t size,
+ uint64_t id,
+ StringPiece name);
+
+ // Constructs a pair of names in |dir| based on |name| that can be used for a
+ // base + active persistent memory mapped location for CreateWithActiveFile().
+ // |name| will be used as the basename of the file inside |dir|.
+ // |out_base_path| or |out_active_path| may be null if not needed.
+ static void ConstructFilePaths(const FilePath& dir,
+ StringPiece name,
+ FilePath* out_base_path,
+ FilePath* out_active_path);
#endif
// Create a global allocator using a block of shared |memory| of the
@@ -449,6 +479,10 @@ class BASE_EXPORT GlobalHistogramAllocator
// indicates success.
bool WriteToPersistentLocation();
+ // If there is a global metrics file being updated on disk, mark it to be
+ // deleted when the process exits.
+ void DeletePersistentLocation();
+
private:
friend class StatisticsRecorder;
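A hedged usage sketch for the base/active file API declared above; the directory, segment size, and "BrowserMetrics" name are illustrative assumptions rather than values from this patch:

#include <stddef.h>

#include "base/files/file_path.h"
#include "base/metrics/persistent_histogram_allocator.h"

void SetUpPersistentMetrics(const base::FilePath& metrics_dir) {
  const size_t kSegmentSize = 1 << 20;  // Arbitrary 1 MiB for the sketch.
  // Moves any existing "-active" file to the base path, then creates a
  // fresh active file that becomes the global histogram backing store.
  if (!base::GlobalHistogramAllocator::CreateWithActiveFileInDir(
          metrics_dir, kSegmentSize, /*id=*/0, "BrowserMetrics")) {
    return;  // Histograms fall back to normal heap allocation.
  }
  // Histograms created from here on are backed by the "-active" file and
  // survive a crash for the next session to report.
}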
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index b680662250..df250a37b0 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -102,9 +102,8 @@ TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
// Create a second allocator and have it access the memory of the first.
std::unique_ptr<HistogramBase> recovered;
- PersistentHistogramAllocator recovery(
- WrapUnique(new PersistentMemoryAllocator(
- allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+ PersistentHistogramAllocator recovery(MakeUnique<PersistentMemoryAllocator>(
+ allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false));
PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
recovered = histogram_iter.GetNext();
@@ -131,7 +130,7 @@ TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
const char temp_name[] = "CreateWithFileTest";
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
+ FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
const size_t temp_size = 64 << 10; // 64 KiB
// Test creation of a new file.
@@ -156,54 +155,130 @@ TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
GlobalHistogramAllocator::ReleaseForTesting();
}
-TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
- size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderMergeTest) {
+ const char LinearHistogramName[] = "SRTLinearHistogram";
+ const char SparseHistogramName[] = "SRTSparseHistogram";
+ const size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
// Create a local StatisticsRecorder in which the newly created histogram
- // will be recorded.
+ // will be recorded. The global allocator must be replaced after because the
+ // act of releasing will cause the active SR to forget about all histograms
+ // in the released memory.
std::unique_ptr<StatisticsRecorder> local_sr =
StatisticsRecorder::CreateTemporaryForTesting();
EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
-
- HistogramBase* histogram = LinearHistogram::FactoryGet(
- "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
- EXPECT_TRUE(histogram);
+ std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+ ASSERT_TRUE(GlobalHistogramAllocator::Get());
+
+ // Create a linear histogram for merge testing.
+ HistogramBase* histogram1 =
+ LinearHistogram::FactoryGet(LinearHistogramName, 1, 10, 10, 0);
+ ASSERT_TRUE(histogram1);
EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
- histogram->Add(3);
- histogram->Add(1);
- histogram->Add(4);
- histogram->Add(1);
- histogram->Add(6);
-
- // Destroy the local SR and ensure that we're back to the initial state.
+ histogram1->Add(3);
+ histogram1->Add(1);
+ histogram1->Add(4);
+ histogram1->AddCount(1, 4);
+ histogram1->Add(6);
+
+ // Create a sparse histogram for merge testing.
+ HistogramBase* histogram2 =
+ SparseHistogram::FactoryGet(SparseHistogramName, 0);
+ ASSERT_TRUE(histogram2);
+ EXPECT_EQ(2U, StatisticsRecorder::GetHistogramCount());
+ histogram2->Add(3);
+ histogram2->Add(1);
+ histogram2->Add(4);
+ histogram2->AddCount(1, 4);
+ histogram2->Add(6);
+
+ // Destroy the local SR and ensure that we're back to the initial state and
+ // restore the global allocator. Histograms created in the local SR will
+ // become unmanaged.
+ std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+ GlobalHistogramAllocator::ReleaseForTesting();
local_sr.reset();
EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+ GlobalHistogramAllocator::Set(std::move(old_allocator));
- // Create a second allocator and have it access the memory of the first.
- std::unique_ptr<HistogramBase> recovered;
- PersistentHistogramAllocator recovery(
- WrapUnique(new PersistentMemoryAllocator(
- allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
- PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+ // Create a "recovery" allocator using the same memory as the local one.
+ PersistentHistogramAllocator recovery1(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(new_allocator->memory_allocator()->data()),
+ new_allocator->memory_allocator()->size(), 0, 0, "", false));
+ PersistentHistogramAllocator::Iterator histogram_iter1(&recovery1);
- recovered = histogram_iter.GetNext();
- ASSERT_TRUE(recovered);
+ // Get the histograms that were created locally (and forgotten) and merge
+ // them into the global SR. New objects will be created.
+ std::unique_ptr<HistogramBase> recovered;
+ while (true) {
+ recovered = histogram_iter1.GetNext();
+ if (!recovered)
+ break;
+
+ recovery1.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+ HistogramBase* found =
+ StatisticsRecorder::FindHistogram(recovered->histogram_name());
+ EXPECT_NE(recovered.get(), found);
+ }
+ EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+
+ // Check the merged histograms for accuracy.
+ HistogramBase* found = StatisticsRecorder::FindHistogram(LinearHistogramName);
+ ASSERT_TRUE(found);
+ std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+ EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+ EXPECT_EQ(5, snapshot->GetCount(1));
+ EXPECT_EQ(1, snapshot->GetCount(4));
+ EXPECT_EQ(1, snapshot->GetCount(6));
- // Merge the recovered histogram to the SR. It will always be a new object.
- recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
- EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
- HistogramBase* found =
- StatisticsRecorder::FindHistogram(recovered->histogram_name());
+ found = StatisticsRecorder::FindHistogram(SparseHistogramName);
ASSERT_TRUE(found);
- EXPECT_NE(recovered.get(), found);
+ snapshot = found->SnapshotSamples();
+ EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+ EXPECT_EQ(5, snapshot->GetCount(1));
+ EXPECT_EQ(1, snapshot->GetCount(4));
+ EXPECT_EQ(1, snapshot->GetCount(6));
- // Ensure that the data got merged, too.
- std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
- EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ // Perform additional histogram increments.
+ histogram1->AddCount(1, 3);
+ histogram1->Add(6);
+ histogram2->AddCount(1, 3);
+ histogram2->Add(7);
+
+ // Do another merge.
+ PersistentHistogramAllocator recovery2(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(new_allocator->memory_allocator()->data()),
+ new_allocator->memory_allocator()->size(), 0, 0, "", false));
+ PersistentHistogramAllocator::Iterator histogram_iter2(&recovery2);
+ while (true) {
+ recovered = histogram_iter2.GetNext();
+ if (!recovered)
+ break;
+ recovery2.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+ }
+ EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+
+ // And verify.
+ found = StatisticsRecorder::FindHistogram(LinearHistogramName);
+ snapshot = found->SnapshotSamples();
+ EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+ EXPECT_EQ(8, snapshot->GetCount(1));
+ EXPECT_EQ(1, snapshot->GetCount(4));
+ EXPECT_EQ(2, snapshot->GetCount(6));
+
+ found = StatisticsRecorder::FindHistogram(SparseHistogramName);
+ snapshot = found->SnapshotSamples();
+ EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
EXPECT_EQ(1, snapshot->GetCount(3));
- EXPECT_EQ(2, snapshot->GetCount(1));
+ EXPECT_EQ(8, snapshot->GetCount(1));
EXPECT_EQ(1, snapshot->GetCount(4));
EXPECT_EQ(1, snapshot->GetCount(6));
+ EXPECT_EQ(1, snapshot->GetCount(7));
}
} // namespace base
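The test above reduces to a reusable recovery-and-merge pattern. A condensed sketch, assuming |memory_base| and |size| describe a segment filled by another process (names are illustrative):

#include <memory>

#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"

void MergeForeignHistograms(void* memory_base, size_t size) {
  base::PersistentHistogramAllocator recovery(
      base::MakeUnique<base::PersistentMemoryAllocator>(
          memory_base, size, /*page_size=*/0, /*id=*/0, "",
          /*readonly=*/false));
  base::PersistentHistogramAllocator::Iterator it(&recovery);
  while (std::unique_ptr<base::HistogramBase> histogram = it.GetNext()) {
    // Only the samples added since the previous merge are folded into the
    // global StatisticsRecorder, so repeated merges never double-count.
    recovery.MergeHistogramDeltaToStatisticsRecorder(histogram.get());
  }
}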
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index dfa408f44d..f70b396917 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -17,6 +17,7 @@
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sparse_histogram.h"
namespace {
@@ -48,6 +49,11 @@ enum : int {
kFlagFull = 1 << 1
};
+// Errors that are logged in "errors" histogram.
+enum AllocatorError : int {
+ kMemoryIsCorrupt = 1,
+};
+
bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
return (loaded_flags & flag) != 0;
@@ -58,8 +64,13 @@ void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
for (;;) {
uint32_t new_flags = (loaded_flags & ~flag) | flag;
// In the failure case, the actual "flags" value is stored in loaded_flags.
- if (flags->compare_exchange_weak(loaded_flags, new_flags))
+ // These accesses are "relaxed" because they are completely independent
+ // of all other values.
+ if (flags->compare_exchange_weak(loaded_flags, new_flags,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
break;
+ }
}
}
@@ -132,7 +143,19 @@ PersistentMemoryAllocator::Iterator::Iterator(
PersistentMemoryAllocator::Iterator::Iterator(
const PersistentMemoryAllocator* allocator,
Reference starting_after)
- : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+ : allocator_(allocator), last_record_(0), record_count_(0) {
+ Reset(starting_after);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset() {
+ last_record_.store(kReferenceQueue, std::memory_order_relaxed);
+ record_count_.store(0, std::memory_order_relaxed);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
+ last_record_.store(starting_after, std::memory_order_relaxed);
+ record_count_.store(0, std::memory_order_relaxed);
+
// Ensure that the starting point is a valid, iterable block (meaning it can
// be read and has a non-zero "next" pointer).
const volatile BlockHeader* block =
@@ -144,6 +167,14 @@ PersistentMemoryAllocator::Iterator::Iterator(
}
PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetLast() {
+ Reference last = last_record_.load(std::memory_order_relaxed);
+ if (last == kReferenceQueue)
+ return kReferenceNull;
+ return last;
+}
+
+PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// Make a copy of the existing count of found-records, acquiring all changes
// made to the allocator, notably "freeptr" (see comment in loop for why
@@ -193,7 +224,8 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// is no need to do another such load when the while-loop restarts. A
// "strong" compare-exchange is used because failing unnecessarily would
// mean repeating some fairly costly validations above.
- if (last_record_.compare_exchange_strong(last, next)) {
+ if (last_record_.compare_exchange_strong(
+ last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
*type_return = block->type_id.load(std::memory_order_relaxed);
break;
}
@@ -247,20 +279,42 @@ bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
(page_size == 0 || size % page_size == 0 || readonly));
}
-PersistentMemoryAllocator::PersistentMemoryAllocator(
- void* base,
- size_t size,
- size_t page_size,
- uint64_t id,
- base::StringPiece name,
- bool readonly)
- : mem_base_(static_cast<char*>(base)),
+PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
+ size_t size,
+ size_t page_size,
+ uint64_t id,
+ base::StringPiece name,
+ bool readonly)
+ : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
+ size,
+ page_size,
+ id,
+ name,
+ readonly) {}
+
+PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
+ size_t size,
+ size_t page_size,
+ uint64_t id,
+ base::StringPiece name,
+ bool readonly)
+ : mem_base_(static_cast<char*>(memory.base)),
+ mem_type_(memory.type),
mem_size_(static_cast<uint32_t>(size)),
mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
readonly_(readonly),
corrupt_(0),
allocs_histogram_(nullptr),
- used_histogram_(nullptr) {
+ used_histogram_(nullptr),
+ errors_histogram_(nullptr) {
+ // These asserts ensure that the structures are 32/64-bit agnostic and meet
+ // all the requirements of use within the allocator. They access private
+ // definitions and so cannot be moved to the global scope.
+ static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
+ "struct is not portable across different natural word widths");
+ static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+ "struct is not portable across different natural word widths");
+
static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
"BlockHeader is not a multiple of kAllocAlignment");
static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
@@ -269,7 +323,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
"\"queue\" is not aligned properly; must be at end of struct");
// Ensure that memory segment is of acceptable size.
- CHECK(IsMemoryAcceptable(base, size, page_size, readonly));
+ CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
// These atomics operate inter-process and so must be lock-free. The local
// casts are to make sure it can be evaluated at compile time to a constant.
@@ -326,7 +380,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
if (!name.empty()) {
const size_t name_length = name.length() + 1;
shared_meta()->name = Allocate(name_length, 0);
- char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
+ char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
if (name_cstr)
memcpy(name_cstr, name.data(), name.length());
}
@@ -355,7 +409,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
*const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
// Ensure that settings are still valid after the above adjustments.
- if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
+ if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
SetCorrupt();
}
}
@@ -374,7 +428,8 @@ uint64_t PersistentMemoryAllocator::Id() const {
const char* PersistentMemoryAllocator::Name() const {
Reference name_ref = shared_meta()->name;
- const char* name_cstr = GetAsObject<char>(name_ref, 0);
+ const char* name_cstr =
+ GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
if (!name_cstr)
return "";
@@ -392,16 +447,26 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
base::StringPiece name) {
if (name.empty() || readonly_)
return;
-
std::string name_string = name.as_string();
+
+#if 0
+ // This histogram wasn't being used so has been disabled. It is left here
+ // in case development of a new use of the allocator could benefit from
+ // recording (temporarily and locally) the allocation sizes.
+ DCHECK(!allocs_histogram_);
+ allocs_histogram_ = Histogram::FactoryGet(
+ "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+ HistogramBase::kUmaTargetedHistogramFlag);
+#endif
+
DCHECK(!used_histogram_);
used_histogram_ = LinearHistogram::FactoryGet(
"UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
HistogramBase::kUmaTargetedHistogramFlag);
- DCHECK(!allocs_histogram_);
- allocs_histogram_ = Histogram::FactoryGet(
- "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+ DCHECK(!errors_histogram_);
+ errors_histogram_ = SparseHistogram::FactoryGet(
+ "UMA.PersistentAllocator." + name_string + ".Errors",
HistogramBase::kUmaTargetedHistogramFlag);
}
@@ -410,6 +475,24 @@ size_t PersistentMemoryAllocator::used() const {
mem_size_);
}
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
+ const void* memory,
+ uint32_t type_id) const {
+ uintptr_t address = reinterpret_cast<uintptr_t>(memory);
+ if (address < reinterpret_cast<uintptr_t>(mem_base_))
+ return kReferenceNull;
+
+ uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
+ if (offset >= mem_size_ || offset < sizeof(BlockHeader))
+ return kReferenceNull;
+
+ Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
+ if (!GetBlockData(ref, type_id, kSizeAny))
+ return kReferenceNull;
+
+ return ref;
+}
+
size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
@@ -433,15 +516,62 @@ uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
bool PersistentMemoryAllocator::ChangeType(Reference ref,
uint32_t to_type_id,
- uint32_t from_type_id) {
+ uint32_t from_type_id,
+ bool clear) {
DCHECK(!readonly_);
volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
return false;
- // This is a "strong" exchange because there is no loop that can retry in
- // the wake of spurious failures possible with "weak" exchanges.
- return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
+ // "Strong" exchanges are used below because there is no loop that can retry
+ // in the wake of spurious failures possible with "weak" exchanges. It is,
+ // in aggregate, an "acquire-release" operation so no memory accesses can be
+ // reordered either before or after this method (since changes based on type
+ // could happen on either side).
+
+ if (clear) {
+ // If clearing the memory, first change it to the "transitioning" type so
+ // there can be no confusion by other threads. After the memory is cleared,
+ // it can be changed to its final type.
+ if (!block->type_id.compare_exchange_strong(
+ from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
+ std::memory_order_acquire)) {
+ // Existing type wasn't what was expected: fail (with no changes)
+ return false;
+ }
+
+ // Clear the memory in an atomic manner. Using "release" stores forces
+ // every write to be done after the ones before it. This is better than
+ // using memset because (a) it supports "volatile" and (b) it creates a
+ // reliable pattern upon which other threads may rely.
+ volatile std::atomic<int>* data =
+ reinterpret_cast<volatile std::atomic<int>*>(
+ reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
+ const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
+ DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
+ for (uint32_t i = 0; i < words; ++i) {
+ data->store(0, std::memory_order_release);
+ ++data;
+ }
+
+ // If the destination type is "transitioning" then skip the final exchange.
+ if (to_type_id == kTypeIdTransitioning)
+ return true;
+
+ // Finish the change to the desired type.
+ from_type_id = kTypeIdTransitioning; // Exchange needs modifiable original.
+ bool success = block->type_id.compare_exchange_strong(
+ from_type_id, to_type_id, std::memory_order_release,
+ std::memory_order_relaxed);
+ DCHECK(success); // Should never fail.
+ return success;
+ }
+
+ // One step change to the new type. Will return false if the existing value
+ // doesn't match what is expected.
+ return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire);
}
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
@@ -520,8 +650,9 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
return kReferenceNull;
}
const uint32_t new_freeptr = freeptr + page_free;
- if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
- new_freeptr)) {
+ if (shared_meta()->freeptr.compare_exchange_strong(
+ freeptr, new_freeptr, std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
block->size = page_free;
block->cookie = kBlockCookieWasted;
}
@@ -544,8 +675,11 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
// while we were processing. A "weak" exchange would be permissible here
// because the code will just loop and try again but the above processing
// is significant so make the extra effort of a "strong" exchange.
- if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
+ if (!shared_meta()->freeptr.compare_exchange_strong(
+ freeptr, new_freeptr, std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
continue;
+ }
// Given that all memory was zeroed before ever being given to an instance
// of this class and given that we only allocate in a monotonic fashion
@@ -561,6 +695,10 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
return kReferenceNull;
}
+ // Load information into the block header. There is no "release" of the
+ // data here because this memory can, currently, be seen only by the thread
+ // performing the allocation. When it comes time to share this, the thread
+ // will call MakeIterable() which does the release operation.
block->size = size;
block->cookie = kBlockCookieAllocated;
block->type_id.store(type_id, std::memory_order_relaxed);
@@ -573,7 +711,7 @@ void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
(uint32_t)sizeof(BlockHeader));
meminfo->total = mem_size_;
- meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
+ meminfo->free = remaining - sizeof(BlockHeader);
}
void PersistentMemoryAllocator::MakeIterable(Reference ref) {
@@ -641,9 +779,15 @@ void PersistentMemoryAllocator::MakeIterable(Reference ref) {
// case, it's safe to discard the constness and modify the local flag and
// maybe even the shared flag if the underlying data isn't actually read-only.
void PersistentMemoryAllocator::SetCorrupt() const {
- LOG(ERROR) << "Corruption detected in shared-memory segment.";
- const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
- std::memory_order_relaxed);
+ if (!corrupt_.load(std::memory_order_relaxed) &&
+ !CheckFlag(
+ const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+ kFlagCorrupt)) {
+ LOG(ERROR) << "Corruption detected in shared-memory segment.";
+ RecordError(kMemoryIsCorrupt);
+ }
+
+ corrupt_.store(true, std::memory_order_relaxed);
if (!readonly_) {
SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
kFlagCorrupt);
@@ -673,10 +817,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
// Validation of parameters.
- if (ref % kAllocAlignment != 0)
- return nullptr;
if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
return nullptr;
+ if (ref % kAllocAlignment != 0)
+ return nullptr;
size += sizeof(BlockHeader);
if (ref + size > mem_size_)
return nullptr;
@@ -705,6 +849,11 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}
+void PersistentMemoryAllocator::RecordError(int error) const {
+ if (errors_histogram_)
+ errors_histogram_->Add(error);
+}
+
const volatile void* PersistentMemoryAllocator::GetBlockData(
Reference ref,
uint32_t type_id,
@@ -739,37 +888,60 @@ LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
size, 0, id, name, false) {}
LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
- DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
+ DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
}
// static
-void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+PersistentMemoryAllocator::Memory
+LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+ void* address;
+
#if defined(OS_WIN)
- void* address =
+ address =
::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- DPCHECK(address);
- return address;
+ if (address)
+ return Memory(address, MEM_VIRTUAL);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.LocalPersistentMemoryAllocator.Failures.Win",
+ ::GetLastError());
#elif defined(OS_POSIX)
// MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
// MAP_SHARED is not available on Linux <2.4 but required on Mac.
- void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_SHARED, -1, 0);
- DPCHECK(MAP_FAILED != address);
- return address;
+ address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_SHARED, -1, 0);
+ if (address != MAP_FAILED)
+ return Memory(address, MEM_VIRTUAL);
+ UMA_HISTOGRAM_SPARSE_SLOWLY(
+ "UMA.LocalPersistentMemoryAllocator.Failures.Posix", errno);
#else
#error This architecture is not (yet) supported.
#endif
+
+ // As a last resort, just allocate the memory from the heap. This will
+ // achieve the same basic result but the acquired memory has to be
+ // explicitly zeroed and thus realized immediately (i.e. all pages are
+ // added to the process now instead of only when first accessed).
+ address = malloc(size);
+ DPCHECK(address);
+ memset(address, 0, size);
+ return Memory(address, MEM_MALLOC);
}
// static
void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
- size_t size) {
+ size_t size,
+ MemoryType type) {
+ if (type == MEM_MALLOC) {
+ free(memory);
+ return;
+ }
+
+ DCHECK_EQ(MEM_VIRTUAL, type);
#if defined(OS_WIN)
BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
- DPCHECK(success);
+ DCHECK(success);
#elif defined(OS_POSIX)
int result = ::munmap(memory, size);
- DPCHECK(0 == result);
+ DCHECK_EQ(0, result);
#else
#error This architecture is not (yet) supported.
#endif
@@ -783,12 +955,13 @@ SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
uint64_t id,
base::StringPiece name,
bool read_only)
- : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
- memory->mapped_size(),
- 0,
- id,
- name,
- read_only),
+ : PersistentMemoryAllocator(
+ Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED),
+ memory->mapped_size(),
+ 0,
+ id,
+ name,
+ read_only),
shared_memory_(std::move(memory)) {}
SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
@@ -809,12 +982,13 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
uint64_t id,
base::StringPiece name,
bool read_only)
- : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
- max_size != 0 ? max_size : file->length(),
- 0,
- id,
- name,
- read_only),
+ : PersistentMemoryAllocator(
+ Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
+ max_size != 0 ? max_size : file->length(),
+ 0,
+ id,
+ name,
+ read_only),
mapped_file_(std::move(file)) {}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
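A short usage sketch for the ChangeType() semantics implemented above; both type IDs are invented for illustration:

#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

constexpr uint32_t kTypeIdOldRecord = 0x11111111;  // Hypothetical type IDs,
constexpr uint32_t kTypeIdNewRecord = 0x22222222;  // not from this patch.

bool RepurposeRecord(base::PersistentMemoryAllocator* allocator,
                     base::PersistentMemoryAllocator::Reference ref) {
  // With clear=true the block first becomes kTypeIdTransitioning, is zeroed
  // machine-word by machine-word with release stores, and only then takes
  // the new type. Returns false if the current type isn't kTypeIdOldRecord.
  return allocator->ChangeType(ref, kTypeIdNewRecord, kTypeIdOldRecord,
                               /*clear=*/true);
}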
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index 2fc0d2d0da..b38f284ff4 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -9,6 +9,7 @@
#include <atomic>
#include <memory>
+#include <type_traits>
#include "base/atomicops.h"
#include "base/base_export.h"
@@ -47,6 +48,50 @@ class SharedMemory;
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+//
+// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
+// character arrays and manipulating that memory manually, the better way is
+// generally to use the "object" methods to create and manage allocations. In
+// this way the sizing, type-checking, and construction are all automatic. For
+// this to work, however, every type of stored object must define two public
+// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
+//
+// struct MyPersistentObjectType {
+// // SHA1(MyPersistentObjectType): Increment this if structure changes!
+// static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
+//
+// // Expected size for 32/64-bit check. Update this if structure changes!
+// static constexpr size_t kExpectedInstanceSize = 20;
+//
+// ...
+// };
+//
+// kPersistentTypeId: This value is an arbitrary identifier that allows the
+// identification of these objects in the allocator, including the ability
+// to find them via iteration. The number is arbitrary but using the first
+// four bytes of the SHA1 hash of the type name means that there shouldn't
+// be any conflicts with other types that may also be stored in the memory.
+// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
+// be used to generate the hash if the type name seems common. Use a command
+// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
+// If the structure layout changes, ALWAYS increment this number so that
+// newer versions of the code don't try to interpret persistent data written
+// by older versions with a different layout.
+//
+// kExpectedInstanceSize: This value is the hard-coded number that matches
+// what sizeof(T) would return. By providing it explicitly, the allocator can
+// verify that the structure is compatible between both 32-bit and 64-bit
+// versions of the code.
+//
+// Using New manages the memory and then calls the default constructor for the
+// object. Given that objects are persistent, no destructor is ever called
+// automatically, though a caller can explicitly call Delete to destruct it and
+// change the type to something indicating it is no longer in use.
+//
+// Though persistent memory segments are transferable between programs built
+// for different natural word widths, they CANNOT be exchanged between CPUs
+// of different endianness. Attempts to do so will simply see the existing data
+// as corrupt and refuse to access any of it.
class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
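As an aside before the next hunk: a sketch of the object protocol from the OBJECTS comment above, reusing its example struct; the New<>() signature is assumed from the PersistentHistogramData call site earlier in this patch:

#include <cstddef>
#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

// Reuses the example type from the OBJECTS comment above.
struct MyPersistentObjectType {
  // SHA1(MyPersistentObjectType): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
  // Two uint32_t fields: 8 bytes on both 32-bit and 64-bit builds.
  static constexpr size_t kExpectedInstanceSize = 8;
  uint32_t value;
  uint32_t flags;
};

void CreateAndPublish(base::PersistentMemoryAllocator* allocator) {
  // New<>() sizes and type-tags the block from the constexpr members; the
  // explicit size argument mirrors the PersistentHistogramData call site.
  MyPersistentObjectType* obj =
      allocator->New<MyPersistentObjectType>(sizeof(MyPersistentObjectType));
  if (!obj)
    return;  // Allocator full or corrupt.
  obj->value = 42;
  // Make the record visible to iterators in this and other processes.
  allocator->MakeIterable(allocator->GetAsReference(
      obj, MyPersistentObjectType::kPersistentTypeId));
}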
@@ -56,6 +101,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
// That means that multiple threads can share an iterator and the same
// reference will not be returned twice.
//
+ // The order of the items returned by an iterator matches the order in which
+ // MakeIterable() was called on them. Once an allocation is made iterable,
+ // it remains so; the only possible difference between successive
+ // iterations is for more to be added to the end.
+ //
// Iteration, in general, is tolerant of corrupted memory. It will return
// what it can and stop only when corruption forces it to. Bad corruption
// could cause the same object to be returned many times but it will
@@ -76,6 +126,17 @@ class BASE_EXPORT PersistentMemoryAllocator {
Iterator(const PersistentMemoryAllocator* allocator,
Reference starting_after);
+ // Resets the iterator back to the beginning.
+ void Reset();
+
+ // Resets the iterator, resuming from the |starting_after| reference.
+ void Reset(Reference starting_after);
+
+ // Returns the previously retrieved reference, or kReferenceNull if none.
+ // If constructed or reset with a starting_after location, this will return
+ // that value.
+ Reference GetLast();
+
// Gets the next iterable, storing that type in |type_return|. The actual
// return value is a reference to the allocation inside the allocator or
// zero if there are no more. GetNext() may still be called again at a
@@ -88,6 +149,18 @@ class BASE_EXPORT PersistentMemoryAllocator {
// calls to GetNext() meaning it's possible to completely miss entries.
Reference GetNextOfType(uint32_t type_match);
+ // As above but works using object type.
+ template <typename T>
+ Reference GetNextOfType() {
+ return GetNextOfType(T::kPersistentTypeId);
+ }
+
+ // As above but works using objects and returns null if not found.
+ template <typename T>
+ const T* GetNextOfObject() {
+ return GetAsObject<T>(GetNextOfType<T>());
+ }
+
// Converts references to objects. This is a convenience method so that
// users of the iterator don't need to also have their own pointer to the
// allocator over which the iterator runs in order to retrieve objects.
@@ -96,8 +169,27 @@ class BASE_EXPORT PersistentMemoryAllocator {
// non-const (external) pointer to the same allocator (or use const_cast
// to remove the qualifier).
template <typename T>
- const T* GetAsObject(Reference ref, uint32_t type_id) const {
- return allocator_->GetAsObject<T>(ref, type_id);
+ const T* GetAsObject(Reference ref) const {
+ return allocator_->GetAsObject<T>(ref);
+ }
+
+ // Similar to GetAsObject() but converts references to arrays of things.
+ template <typename T>
+ const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+ return allocator_->GetAsArray<T>(ref, type_id, count);
+ }
+
+ // Convert a generic pointer back into a reference. A null reference will
+ // be returned if |memory| is not inside the persistent segment or does not
+ // point to an object of the specified |type_id|.
+ Reference GetAsReference(const void* memory, uint32_t type_id) const {
+ return allocator_->GetAsReference(memory, type_id);
+ }
+
+ // As above but convert an object back into a reference.
+ template <typename T>
+ Reference GetAsReference(const T* obj) const {
+ return allocator_->GetAsReference(obj);
}
private:
@@ -120,11 +212,21 @@ class BASE_EXPORT PersistentMemoryAllocator {
};
enum : Reference {
- kReferenceNull = 0 // A common "null" reference value.
+ // A common "null" reference value.
+ kReferenceNull = 0,
};
enum : uint32_t {
- kTypeIdAny = 0 // Match any type-id inside GetAsObject().
+ // A value that will match any type when doing lookups.
+ kTypeIdAny = 0x00000000,
+
+ // A value indicating that the type is in transition. Work is being done
+ // on the contents to prepare it for a new type to come.
+ kTypeIdTransitioning = 0xFFFFFFFF,
+ };
+
+ enum : size_t {
+ kSizeAny = 1 // Constant indicating that any array size is acceptable.
};
// This is the standard file extension (suitable for being passed to the
@@ -187,7 +289,7 @@ class BASE_EXPORT PersistentMemoryAllocator {
//
// IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
// with the following histograms:
- // UMA.PersistentAllocator.name.Allocs
+ // UMA.PersistentAllocator.name.Errors
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
@@ -208,6 +310,27 @@ class BASE_EXPORT PersistentMemoryAllocator {
// TIME before accessing it or risk crashing! Once dereferenced, the pointer
// is safe to reuse forever.
//
+ // It is essential that the object be of a fixed size. All fields must be of
+ // a defined type that does not change based on the compiler or the CPU
+ // natural word size. Acceptable are char, float, double, and (u)intXX_t.
+ // Unacceptable are int, bool, and wchar_t, which are implementation-defined
+ // with regard to their size.
+ //
+ // Alignment must also be consistent. A uint64_t after a uint32_t will pad
+ // differently between 32 and 64 bit architectures. Either put the bigger
+ // elements first, group smaller elements into blocks the size of larger
+ // elements, or manually insert padding fields as appropriate for the
+ // largest architecture, including at the end.
+ //
+ // To protect against mistakes, all objects must have the attribute
+ // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
+ // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
+ // instance size is not fixed, at least one build will fail.
+ //
+ // If the size of a structure changes, the type-ID used to recognize it
+ // should also change so later versions of the code don't try to read
+ // incompatible structures from earlier versions.
+ //
// NOTE: Though this method will guarantee that an object of the specified
// type can be accessed without going outside the bounds of the memory
// segment, it makes no guarantees of the validity of the data within the
@@ -220,19 +343,51 @@ class BASE_EXPORT PersistentMemoryAllocator {
// nature of that keyword to the caller. It can add it back, if necessary,
// based on knowledge of how the allocator is being used.
template <typename T>
- T* GetAsObject(Reference ref, uint32_t type_id) {
- static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
- return const_cast<T*>(
- reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
+ T* GetAsObject(Reference ref) {
+ static_assert(std::is_standard_layout<T>::value, "only standard objects");
+ static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
+ GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+ }
+ template <typename T>
+ const T* GetAsObject(Reference ref) const {
+ static_assert(std::is_standard_layout<T>::value, "only standard objects");
+ static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+ return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+ GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+ }
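
A minimal sketch of a type that satisfies these requirements (the name,
type-id, and fields are hypothetical, not part of this patch):

    struct TutorialRecord {
      // Arbitrary illustrative tag; bump it whenever the layout changes.
      static constexpr uint32_t kPersistentTypeId = 0x0DDBA11 + 1;
      // Hard-coded as 8 + 4 + 4, never sizeof(), so layout drift breaks the
      // static_assert in GetAsObject<>() on at least one build.
      static constexpr size_t kExpectedInstanceSize = 16;

      uint64_t id;    // Largest field first: identical padding on 32/64-bit.
      int32_t value;  // Fixed-size type; plain "int" would not be acceptable.
      char tag[4];    // Grouped small fields keep the total at exactly 16.
    };

    // TutorialRecord* rec = allocator->GetAsObject<TutorialRecord>(ref);
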
+
+ // Like GetAsObject but get an array of simple, fixed-size types.
+ //
+ // Use a |count| of the required number of array elements, or kSizeAny.
+ // GetAllocSize() can be used to calculate the upper bound but isn't reliable
+ // because padding can make space for extra elements that were not written.
+ //
+ // Remember that an array of char is a string but may not be NUL-terminated.
+ //
+ // There are no compile-time or run-time checks to ensure 32/64-bit size
+ // compatibility when using these accessors. Only use fixed-size types such
+ // as char, float, double, or (u)intXX_t.
+ template <typename T>
+ T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
+ static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
+ GetBlockData(ref, type_id, count * sizeof(T))));
}
template <typename T>
- const T* GetAsObject(Reference ref, uint32_t type_id) const {
- static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
- return const_cast<const T*>(
- reinterpret_cast<const volatile T*>(GetBlockData(
- ref, type_id, sizeof(T))));
+ const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+ static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+ GetBlockData(ref, type_id, count * sizeof(T))));
}
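
A short usage sketch, under the assumption that |ref| was allocated via
Allocate(4 * sizeof(int32_t), 42):

    int32_t* values = allocator->GetAsArray<int32_t>(ref, 42, 4);
    if (values)  // Null if the type mismatches or the block is too small.
      values[0] = 7;

    // With kSizeAny the element count isn't checked; bound any access with
    // GetAllocSize() instead.
    char* bytes = allocator->GetAsArray<char>(
        ref, 42, PersistentMemoryAllocator::kSizeAny);
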
+ // Get the corresponding reference for an object held in persistent memory.
+ // If the |memory| is not valid or the type does not match, a kReferenceNull
+ // result will be returned.
+ Reference GetAsReference(const void* memory, uint32_t type_id) const;
+
// Get the number of bytes allocated to a block. This is useful when storing
// arrays in order to validate the ending boundary. The returned value will
// include any padding added to achieve the required alignment and so could
@@ -244,14 +399,22 @@ class BASE_EXPORT PersistentMemoryAllocator {
// even though the memory stays valid and allocated. Changing the type is
// an atomic compare/exchange and so requires knowing the existing value.
// It will return false if the existing type is not what is expected.
+ //
+ // Changing the type doesn't mean the data is compatible with the new type.
+ // Passing true for |clear| will zero the memory after the type has been
+ // changed away from |from_type_id| but before it becomes |to_type_id|,
+ // meaning the clear itself is thread-safe. Memory is guaranteed to be
+ // zeroed atomically, one machine word at a time, in monotonically
+ // increasing order.
+ //
+ // It will likely be necessary to reconstruct the type before it can be used.
+ // Changing the type WILL NOT invalidate existing pointers to the data, either
+ // in this process or others, so changing the data structure could have
+ // unpredictable results. USE WITH CARE!
uint32_t GetType(Reference ref) const;
- bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
-
- // Reserve space in the memory segment of the desired |size| and |type_id|.
- // A return value of zero indicates the allocation failed, otherwise the
- // returned reference can be used by any process to get a real pointer via
- // the GetAsObject() call.
- Reference Allocate(size_t size, uint32_t type_id);
+ bool ChangeType(Reference ref,
+ uint32_t to_type_id,
+ uint32_t from_type_id,
+ bool clear);
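
A sketch of the compare-and-exchange contract, using arbitrary type-ids:

    // Succeeds only if the block currently has type 42. With |clear| true,
    // the memory is zeroed after leaving type 42 and before becoming 43, so
    // a reader matching on either type-id never sees a half-cleared block.
    if (allocator->ChangeType(ref, /*to_type_id=*/43, /*from_type_id=*/42,
                              /*clear=*/true)) {
      // Block is now type 43 and zero-filled.
    }
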
// Allocated objects can be added to an internal list that can then be
// iterated over by other processes. If an allocated object can be found
@@ -260,6 +423,7 @@ class BASE_EXPORT PersistentMemoryAllocator {
// succeeds unless corruption is detected; check IsCorrupted() to find out.
// Once an object is made iterable, its position in iteration can never
// change; new iterable objects will always be added after it in the series.
+ // Changing the type does not alter its "iterable" status.
void MakeIterable(Reference ref);
// Get the information about the amount of free space in the allocator. The
@@ -286,8 +450,138 @@ class BASE_EXPORT PersistentMemoryAllocator {
// called before such information is to be displayed or uploaded.
void UpdateTrackingHistograms();
+ // While the above works much like malloc & free, these next methods provide
+ // an "object" interface similar to new and delete.
+
+ // Reserve space in the memory segment of the desired |size| and |type_id|.
+ // A return value of zero indicates the allocation failed, otherwise the
+ // returned reference can be used by any process to get a real pointer via
+ // the GetAsObject() or GetAsArray() calls.
+ Reference Allocate(size_t size, uint32_t type_id);
+
+ // Allocate and construct an object in persistent memory. The type must have
+ // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
+ // static constexpr fields that are used to ensure compatibility between
+ // software versions. An optional size parameter can be specified to force
+ // the allocation to be bigger than the size of the object; this is useful
+ // when the last field is actually variable length.
+ template <typename T>
+ T* New(size_t size) {
+ if (size < sizeof(T))
+ size = sizeof(T);
+ Reference ref = Allocate(size, T::kPersistentTypeId);
+ void* mem =
+ const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
+ if (!mem)
+ return nullptr;
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
+ return new (mem) T();
+ }
+ template <typename T>
+ T* New() {
+ return New<T>(sizeof(T));
+ }
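
Usage with the hypothetical TutorialRecord sketched earlier:

    TutorialRecord* rec = allocator->New<TutorialRecord>();
    if (rec) {  // Null when the segment is full or corrupt.
      rec->id = 1;
      allocator->MakeIterable(rec);  // Publish it to other processes.
    }

    // Over-allocate when the last field is variable-length:
    TutorialRecord* big =
        allocator->New<TutorialRecord>(sizeof(TutorialRecord) + 100);
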
+
+ // Similar to New, above, but construct the object out of an existing memory
+ // block and of an expected type. If |clear| is true, memory will be zeroed
+ // before construction. Though this is not standard object behavior, it
+ // is present to match with new allocations that always come from zeroed
+ // memory. Anything previously present simply ceases to exist; no destructor
+ // is called for it, so explicitly Delete() the old object first if need be.
+ // Calling this will not invalidate existing pointers to the object, either
+ // in this process or others, so changing the object could have unpredictable
+ // results. USE WITH CARE!
+ template <typename T>
+ T* New(Reference ref, uint32_t from_type_id, bool clear) {
+ DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
+ // Make sure the memory is appropriate. This won't be used until after
+ // the type is changed but checking first avoids the possibility of having
+ // to change the type back.
+ void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
+ if (!mem)
+ return nullptr;
+ // Ensure the allocator's internal alignment is sufficient for this object.
+ // This protects against coding errors in the allocator.
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
+ // Change the type, clearing the memory if so desired. The new type is
+ // "transitioning" so that there is no race condition with the construction
+ // of the object should another thread be simultaneously iterating over
+ // data. This will "acquire" the memory so no changes get reordered before
+ // it.
+ if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
+ return nullptr;
+ // Construct an object of the desired type on this memory, just as if
+ // New() had been called to create it.
+ T* obj = new (mem) T();
+ // Finally change the type to the desired one. This will "release" all of
+ // the changes above and so provide a consistent view to other threads.
+ bool success =
+ ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
+ DCHECK(success);
+ return obj;
+ }
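
For example (with hypothetical type-ids), converting an existing block in
place:

    // Reinterpret a block currently typed 99 as a TutorialRecord, zeroing
    // the old contents first; returns null if |ref| no longer has type 99.
    TutorialRecord* rec = allocator->New<TutorialRecord>(
        ref, /*from_type_id=*/99, /*clear=*/true);
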
+
+ // Deletes an object by destructing it and then changing the type to a
+ // different value (default 0).
+ template <typename T>
+ void Delete(T* obj, uint32_t new_type) {
+ // Get the reference for the object.
+ Reference ref = GetAsReference<T>(obj);
+ // First change the type to "transitioning" so there is no race condition
+ // where another thread could find the object through iteration while it
+ // is being destructed. This will "acquire" the memory so no changes get
+ // reordered before it. It will fail if |ref| is invalid.
+ if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
+ return;
+ // Destruct the object.
+ obj->~T();
+ // Finally change the type to the desired value. This will "release" all
+ // the changes above.
+ bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
+ DCHECK(success);
+ }
+ template <typename T>
+ void Delete(T* obj) {
+ Delete<T>(obj, 0);
+ }
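
Together with New(), this gives persistent blocks a construct/destruct
lifecycle; a sketch:

    TutorialRecord* rec = allocator->New<TutorialRecord>();
    // ... use rec ...
    allocator->Delete(rec);  // Destructs; the block is left with type 0.
    // Or: allocator->Delete(rec, 86);  // Same, but leaves a caller-chosen id.
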
+
+ // As above but works with objects allocated from persistent memory.
+ template <typename T>
+ Reference GetAsReference(const T* obj) const {
+ return GetAsReference(obj, T::kPersistentTypeId);
+ }
+
+ // As above but works with an object allocated from persistent memory.
+ template <typename T>
+ void MakeIterable(const T* obj) {
+ MakeIterable(GetAsReference<T>(obj));
+ }
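
Since iterability survives type changes, other processes typically discover
such objects with the typed iterator accessors (sketch; TutorialRecord as
above):

    PersistentMemoryAllocator::Iterator iter(allocator);
    while (const TutorialRecord* rec =
               iter.GetNextOfObject<TutorialRecord>()) {
      // |rec| points into the shared segment; treat its contents as
      // untrusted input from another process.
    }
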
+
protected:
+ enum MemoryType {
+ MEM_EXTERNAL,
+ MEM_MALLOC,
+ MEM_VIRTUAL,
+ MEM_SHARED,
+ MEM_FILE,
+ };
+
+ struct Memory {
+ Memory(void* b, MemoryType t) : base(b), type(t) {}
+
+ void* base;
+ MemoryType type;
+ };
+
+ // Constructs the allocator. Everything is the same as the public allocator
+ // except |memory| which is a structure with additional information besides
+ // the base address.
+ PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
+ uint64_t id, base::StringPiece name,
+ bool readonly);
+
volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
+ const MemoryType mem_type_; // Type of memory allocation.
const uint32_t mem_size_; // Size of entire memory segment.
const uint32_t mem_page_; // Page size allocations shouldn't cross.
@@ -332,11 +626,15 @@ class BASE_EXPORT PersistentMemoryAllocator {
ref, type_id, size));
}
- const bool readonly_; // Indicates access to read-only memory.
- std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
+ // Record an error in the internal histogram.
+ void RecordError(int error) const;
+
+ const bool readonly_; // Indicates access to read-only memory.
+ mutable std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
HistogramBase* allocs_histogram_; // Histogram recording allocs.
HistogramBase* used_histogram_; // Histogram recording used space.
+ HistogramBase* errors_histogram_; // Histogram recording errors.
friend class PersistentMemoryAllocatorTest;
FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
@@ -359,10 +657,10 @@ class BASE_EXPORT LocalPersistentMemoryAllocator
// Allocates a block of local memory of the specified |size|, ensuring that
// the memory will not be physically allocated until accessed and will read
// as zero when that happens.
- static void* AllocateLocalMemory(size_t size);
+ static Memory AllocateLocalMemory(size_t size);
// Deallocates a block of local |memory| of the specified |size|.
- static void DeallocateLocalMemory(void* memory, size_t size);
+ static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
@@ -382,7 +680,7 @@ class BASE_EXPORT SharedPersistentMemoryAllocator
SharedMemory* shared_memory() { return shared_memory_.get(); }
- // Ensure that the memory isn't so invalid that it won't crash when passing it
+ // Ensure that the memory isn't so invalid that it would crash when passing it
// to the allocator. This doesn't guarantee the data is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
@@ -411,7 +709,7 @@ class BASE_EXPORT FilePersistentMemoryAllocator
bool read_only);
~FilePersistentMemoryAllocator() override;
- // Ensure that the file isn't so invalid that it won't crash when passing it
+ // Ensure that the file isn't so invalid that it would crash when passing it
// to the allocator. This doesn't guarantee the file is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index a3d90c2612..d12e00f6d6 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -40,16 +40,20 @@ class PersistentMemoryAllocatorTest : public testing::Test {
uint32_t kAllocAlignment;
struct TestObject1 {
- int onething;
+ static constexpr uint32_t kPersistentTypeId = 1;
+ static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
+ int32_t onething;
char oranother;
};
struct TestObject2 {
- int thiis;
- long that;
+ static constexpr uint32_t kPersistentTypeId = 2;
+ static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
+ int64_t thiis;
+ int32_t that;
float andthe;
- char other;
- double thing;
+ double other;
+ char thing[8];
};
PersistentMemoryAllocatorTest() {
@@ -63,7 +67,6 @@ class PersistentMemoryAllocatorTest : public testing::Test {
allocator_.reset(new PersistentMemoryAllocator(
mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
TEST_ID, TEST_NAME, false));
- allocator_->CreateTrackingHistograms(allocator_->Name());
}
void TearDown() override {
@@ -90,14 +93,13 @@ class PersistentMemoryAllocatorTest : public testing::Test {
};
TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+ allocator_->CreateTrackingHistograms(allocator_->Name());
+
std::string base_name(TEST_NAME);
EXPECT_EQ(TEST_ID, allocator_->Id());
EXPECT_TRUE(allocator_->used_histogram_);
EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
allocator_->used_histogram_->histogram_name());
- EXPECT_TRUE(allocator_->allocs_histogram_);
- EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
- allocator_->allocs_histogram_->histogram_name());
// Get base memory info for later comparison.
PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -107,10 +109,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Validate allocation of test object and make sure it can be referenced
// and all metadata looks correct.
- Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
- EXPECT_NE(0U, block1);
- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
- EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
+ TestObject1* obj1 = allocator_->New<TestObject1>();
+ ASSERT_TRUE(obj1);
+ Reference block1 = allocator_->GetAsReference(obj1);
+ ASSERT_NE(0U, block1);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
allocator_->GetAllocSize(block1));
@@ -119,21 +123,38 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(meminfo0.total, meminfo1.total);
EXPECT_GT(meminfo0.free, meminfo1.free);
+ // Verify that pointers can be turned back into references and that invalid
+ // addresses return null.
+ char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
+ ASSERT_TRUE(memory1);
+ EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
+ EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));
+
// Ensure that the test-object can be made iterable.
PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
+ EXPECT_EQ(0U, iter1a.GetLast());
uint32_t type;
EXPECT_EQ(0U, iter1a.GetNext(&type));
allocator_->MakeIterable(block1);
EXPECT_EQ(block1, iter1a.GetNext(&type));
EXPECT_EQ(1U, type);
+ EXPECT_EQ(block1, iter1a.GetLast());
EXPECT_EQ(0U, iter1a.GetNext(&type));
+ EXPECT_EQ(block1, iter1a.GetLast());
// Create second test-object and ensure everything is good and it cannot
// be confused with test-object of another type.
- Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
- EXPECT_NE(0U, block2);
- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
- EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
+ TestObject2* obj2 = allocator_->New<TestObject2>();
+ ASSERT_TRUE(obj2);
+ Reference block2 = allocator_->GetAsReference(obj2);
+ ASSERT_NE(0U, block2);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
allocator_->GetAllocSize(block2));
@@ -143,9 +164,27 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_GT(meminfo1.free, meminfo2.free);
// Ensure that second test-object can also be made iterable.
- allocator_->MakeIterable(block2);
+ allocator_->MakeIterable(obj2);
EXPECT_EQ(block2, iter1a.GetNext(&type));
EXPECT_EQ(2U, type);
+ EXPECT_EQ(block2, iter1a.GetLast());
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
+
+ // Check that the iterator can be reset to the beginning.
+ iter1a.Reset();
+ EXPECT_EQ(0U, iter1a.GetLast());
+ EXPECT_EQ(block1, iter1a.GetNext(&type));
+ EXPECT_EQ(block1, iter1a.GetLast());
+ EXPECT_EQ(block2, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+ // Check that the iterator can be reset to an arbitrary location.
+ iter1a.Reset(block1);
+ EXPECT_EQ(block1, iter1a.GetLast());
+ EXPECT_EQ(block2, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
EXPECT_EQ(0U, iter1a.GetNext(&type));
// Check that iteration can begin after an arbitrary location.
@@ -164,26 +203,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_TRUE(used_samples);
EXPECT_EQ(1, used_samples->TotalCount());
- // Check the internal histogram record of allocation requests.
- std::unique_ptr<HistogramSamples> allocs_samples(
- allocator_->allocs_histogram_->SnapshotSamples());
- EXPECT_TRUE(allocs_samples);
- EXPECT_EQ(2, allocs_samples->TotalCount());
- EXPECT_EQ(0, allocs_samples->GetCount(0));
- EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
- EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
-#if !DCHECK_IS_ON() // DCHECK builds will die at a NOTREACHED().
- EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
- allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
- EXPECT_EQ(3, allocs_samples->TotalCount());
- EXPECT_EQ(1, allocs_samples->GetCount(0));
-#endif
-
- // Check that an objcet's type can be changed.
+ // Check that an object's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2));
- allocator_->ChangeType(block2, 3, 2);
+ allocator_->ChangeType(block2, 3, 2, false);
EXPECT_EQ(3U, allocator_->GetType(block2));
- allocator_->ChangeType(block2, 2, 3);
+ allocator_->New<TestObject2>(block2, 3, false);
EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment.
@@ -192,16 +216,14 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
TEST_MEMORY_PAGE, 0, "", false));
EXPECT_EQ(TEST_ID, allocator2->Id());
EXPECT_FALSE(allocator2->used_histogram_);
- EXPECT_FALSE(allocator2->allocs_histogram_);
- EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
// Ensure that iteration and access through second allocator works.
PersistentMemoryAllocator::Iterator iter2(allocator2.get());
EXPECT_EQ(block1, iter2.GetNext(&type));
EXPECT_EQ(block2, iter2.GetNext(&type));
EXPECT_EQ(0U, iter2.GetNext(&type));
- EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
- EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
// Create a third allocator (read-only) using the same memory segment.
std::unique_ptr<const PersistentMemoryAllocator> allocator3(
@@ -209,20 +231,29 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
TEST_MEMORY_PAGE, 0, "", true));
EXPECT_EQ(TEST_ID, allocator3->Id());
EXPECT_FALSE(allocator3->used_histogram_);
- EXPECT_FALSE(allocator3->allocs_histogram_);
// Ensure that iteration and access through third allocator works.
PersistentMemoryAllocator::Iterator iter3(allocator3.get());
EXPECT_EQ(block1, iter3.GetNext(&type));
EXPECT_EQ(block2, iter3.GetNext(&type));
EXPECT_EQ(0U, iter3.GetNext(&type));
- EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
- EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
// Ensure that GetNextOfType works.
PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
- EXPECT_EQ(block2, iter1c.GetNextOfType(2));
+ EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
EXPECT_EQ(0U, iter1c.GetNextOfType(2));
+
+ // Ensure that GetNextOfObject works.
+ PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
+ EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
+ EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
+
+ // Ensure that deleting an object works.
+ allocator_->Delete(obj2);
+ PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
+ EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -535,7 +566,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.ChangeType(r456, 654, 456);
+ local.ChangeType(r456, 654, 456, false);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
@@ -604,6 +635,25 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
shalloc3.MakeIterable(obj);
EXPECT_EQ(obj, iter2.GetNext(&type));
EXPECT_EQ(42U, type);
+
+ // Clear-on-change test.
+ Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
+ int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
+ ASSERT_TRUE(data);
+ data[0] = 0;
+ data[1] = 1;
+ data[2] = 2;
+ data[3] = 3;
+ ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
+ EXPECT_EQ(0, data[0]);
+ EXPECT_EQ(1, data[1]);
+ EXPECT_EQ(2, data[2]);
+ EXPECT_EQ(3, data[3]);
+ ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
+ EXPECT_EQ(0, data[0]);
+ EXPECT_EQ(0, data[1]);
+ EXPECT_EQ(0, data[2]);
+ EXPECT_EQ(0, data[3]);
}
@@ -613,7 +663,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");
PersistentMemoryAllocator::MemoryInfo meminfo1;
Reference r123, r456, r789;
@@ -624,7 +674,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.ChangeType(r456, 654, 456);
+ local.ChangeType(r456, 654, 456, false);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
@@ -668,7 +718,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path = temp_dir.path().AppendASCII("extend_test");
+ FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
MemoryMappedFile::Region region = {0, 16 << 10}; // 16KiB maximum size.
// Start with a small but valid file of persistent data.
@@ -734,7 +784,7 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
char filename[100];
for (size_t filesize = minsize; filesize > 0; --filesize) {
strings::SafeSPrintf(filename, "memory_%d_A", filesize);
- FilePath file_path = temp_dir.path().AppendASCII(filename);
+ FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
ASSERT_FALSE(PathExists(file_path));
{
File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
@@ -765,7 +815,8 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
uint32_t type_id;
Reference ref;
while ((ref = iter.GetNext(&type_id)) != 0) {
- const char* data = allocator.GetAsObject<char>(ref, 0);
+ const char* data = allocator.GetAsArray<char>(
+ ref, 0, PersistentMemoryAllocator::kSizeAny);
uint32_t type = allocator.GetType(ref);
size_t size = allocator.GetAllocSize(ref);
// Ensure compiler can't optimize-out above variables.
@@ -784,7 +835,7 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
}
strings::SafeSPrintf(filename, "memory_%d_B", filesize);
- file_path = temp_dir.path().AppendASCII(filename);
+ file_path = temp_dir.GetPath().AppendASCII(filename);
ASSERT_FALSE(PathExists(file_path));
{
File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index 15f83cdb33..51cc0c709d 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/stl_util.h"
@@ -16,6 +17,12 @@ typedef HistogramBase::Sample Sample;
namespace {
+enum NegativeSampleReason {
+ PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE,
+ PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED,
+ MAX_NEGATIVE_SAMPLE_REASONS
+};
+
// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of SampleMapIterator but with different data structures.
// Changes here likely need to be duplicated there.
@@ -82,14 +89,17 @@ void PersistentSampleMapIterator::SkipEmptyBuckets() {
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
+ // SHA1(SampleRecord): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 16;
+
uint64_t id; // Unique identifier of owner.
Sample value; // The value for which this record holds a count.
Count count; // The count associated with the above value.
};
-// The type-id used to identify sample records inside an allocator.
-const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1
-
} // namespace
PersistentSampleMap::PersistentSampleMap(
@@ -141,15 +151,12 @@ PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id) {
- PersistentMemoryAllocator::Reference ref =
- iterator.GetNextOfType(kTypeIdSampleRecord);
- const SampleRecord* record =
- iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
if (!record)
return 0;
*sample_map_id = record->id;
- return ref;
+ return iterator.GetAsReference(record);
}
// static
@@ -158,11 +165,7 @@ PersistentSampleMap::CreatePersistentRecord(
PersistentMemoryAllocator* allocator,
uint64_t sample_map_id,
Sample value) {
- PersistentMemoryAllocator::Reference ref =
- allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
- SampleRecord* record =
- allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
-
+ SampleRecord* record = allocator->New<SampleRecord>();
if (!record) {
NOTREACHED() << "full=" << allocator->IsFull()
<< ", corrupt=" << allocator->IsCorrupt();
@@ -172,6 +175,8 @@ PersistentSampleMap::CreatePersistentRecord(
record->id = sample_map_id;
record->value = value;
record->count = 0;
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
allocator->MakeIterable(ref);
return ref;
}
@@ -183,11 +188,39 @@ bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
Count count;
for (; !iter->Done(); iter->Next()) {
iter->Get(&min, &max, &count);
+ if (count == 0)
+ continue;
if (min + 1 != max)
return false; // SparseHistogram only supports buckets of size 1.
+#if 0 // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
*GetOrCreateSampleCountStorage(min) +=
(op == HistogramSamples::ADD) ? count : -count;
+#else
+ if (op == HistogramSamples::ADD) {
+ *GetOrCreateSampleCountStorage(min) += count;
+ } else {
+ // Subtract is used only for determining deltas when reporting, which
+ // means that it's in the "logged" iterator. It should have an active
+ // sample record and thus there is no need to try to create one.
+ NegativeSampleReason reason = MAX_NEGATIVE_SAMPLE_REASONS;
+ Count* bucket = GetSampleCountStorage(min);
+ if (bucket == nullptr) {
+ reason = PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE;
+ } else {
+ if (*bucket < count) {
+ reason = PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED;
+ *bucket = 0;
+ } else {
+ *bucket -= count;
+ }
+ }
+ if (reason != MAX_NEGATIVE_SAMPLE_REASONS) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
+ MAX_NEGATIVE_SAMPLE_REASONS);
+ }
+ }
+#endif
}
return true;
}
@@ -253,8 +286,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value,
PersistentMemoryAllocator::Reference ref;
PersistentSampleMapRecords* records = GetRecords();
while ((ref = records->GetNext()) != 0) {
- SampleRecord* record =
- records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
if (!record)
continue;
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
index 3c175db542..853f862182 100644
--- a/base/metrics/persistent_sample_map.h
+++ b/base/metrics/persistent_sample_map.h
@@ -24,7 +24,6 @@ namespace base {
class PersistentHistogramAllocator;
class PersistentSampleMapRecords;
-class PersistentSparseHistogramDataManager;
// The logic here is similar to that of SampleMap but with different data
// structures. Changes here likely need to be duplicated there.
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
index beb72e5f20..d50ab997b2 100644
--- a/base/metrics/persistent_sample_map_unittest.cc
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -15,22 +16,22 @@ namespace {
std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
size_t bytes) {
- return WrapUnique(new PersistentHistogramAllocator(
- WrapUnique(new LocalPersistentMemoryAllocator(bytes, 0, ""))));
+ return MakeUnique<PersistentHistogramAllocator>(
+ MakeUnique<LocalPersistentMemoryAllocator>(bytes, 0, ""));
}
std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
PersistentHistogramAllocator* original) {
- return WrapUnique(
- new PersistentHistogramAllocator(WrapUnique(new PersistentMemoryAllocator(
+ return MakeUnique<PersistentHistogramAllocator>(
+ MakeUnique<PersistentMemoryAllocator>(
const_cast<void*>(original->data()), original->length(), 0,
- original->Id(), original->Name(), false))));
+ original->Id(), original->Name(), false));
}
TEST(PersistentSampleMapTest, AccumulateTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(1, 100);
@@ -47,7 +48,7 @@ TEST(PersistentSampleMapTest, AccumulateTest) {
TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(250000000, 100);
@@ -64,7 +65,7 @@ TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
TEST(PersistentSampleMapTest, AddSubtractTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta1;
+ HistogramSamples::LocalMetadata meta1;
PersistentSampleMap samples1(1, allocator1.get(), &meta1);
samples1.Accumulate(1, 100);
samples1.Accumulate(2, 100);
@@ -72,7 +73,7 @@ TEST(PersistentSampleMapTest, AddSubtractTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator2 =
DuplicateHistogramAllocator(allocator1.get());
- HistogramSamples::Metadata meta2;
+ HistogramSamples::LocalMetadata meta2;
PersistentSampleMap samples2(2, allocator2.get(), &meta2);
samples2.Accumulate(1, 200);
samples2.Accumulate(2, 200);
@@ -100,7 +101,7 @@ TEST(PersistentSampleMapTest, AddSubtractTest) {
TEST(PersistentSampleMapTest, PersistenceTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta12;
+ HistogramSamples::LocalMetadata meta12;
PersistentSampleMap samples1(12, allocator1.get(), &meta12);
samples1.Accumulate(1, 100);
samples1.Accumulate(2, 200);
@@ -153,7 +154,7 @@ TEST(PersistentSampleMapTest, PersistenceTest) {
TEST(PersistentSampleMapIteratorTest, IterateTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(1, 100);
samples.Accumulate(2, 200);
@@ -191,7 +192,7 @@ TEST(PersistentSampleMapIteratorTest, IterateTest) {
TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta1;
+ HistogramSamples::LocalMetadata meta1;
PersistentSampleMap samples1(1, allocator1.get(), &meta1);
samples1.Accumulate(5, 1);
samples1.Accumulate(10, 2);
@@ -201,7 +202,7 @@ TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
std::unique_ptr<PersistentHistogramAllocator> allocator2 =
DuplicateHistogramAllocator(allocator1.get());
- HistogramSamples::Metadata meta2;
+ HistogramSamples::LocalMetadata meta2;
PersistentSampleMap samples2(2, allocator2.get(), &meta2);
samples2.Accumulate(5, 1);
samples2.Accumulate(20, 4);
@@ -233,12 +234,10 @@ TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
EXPECT_TRUE(it->Done());
}
-// Only run this test on builds that support catching a DCHECK crash.
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
std::unique_ptr<SampleCountIterator> it = samples.Iterator();
@@ -248,16 +247,14 @@ TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
HistogramBase::Sample min;
HistogramBase::Sample max;
HistogramBase::Count count;
- EXPECT_DEATH(it->Get(&min, &max, &count), "");
+ EXPECT_DCHECK_DEATH(it->Get(&min, &max, &count));
- EXPECT_DEATH(it->Next(), "");
+ EXPECT_DCHECK_DEATH(it->Next());
samples.Accumulate(1, 100);
it = samples.Iterator();
EXPECT_FALSE(it->Done());
}
-#endif
-// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
} // namespace
} // namespace base
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index 8f577109cd..9d7e818bb7 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 02e48aac17..897ceed520 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -12,6 +12,7 @@
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram.h"
+#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 3c1222d2ae..bee48d4c17 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -68,7 +68,7 @@ HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
- DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
+ CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
return histogram;
}
@@ -282,8 +282,8 @@ void SparseHistogram::WriteAsciiHeader(const Count total_count,
"Histogram: %s recorded %d samples",
histogram_name().c_str(),
total_count);
- if (flags() & ~kHexRangePrintingFlag)
- StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
+ if (flags())
+ StringAppendF(output, " (flags = 0x%x)", flags());
}
} // namespace base
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index 3b302d6f22..97709ba18f 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -13,45 +13,17 @@
#include <string>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
-#include "base/metrics/sample_map.h"
+#include "base/metrics/histogram_samples.h"
#include "base/synchronization/lock.h"
namespace base {
-// Sparse histograms are well suited for recording counts of exact sample values
-// that are sparsely distributed over a large range.
-//
-// The implementation uses a lock and a map, whereas other histogram types use a
-// vector and no lock. It is thus more costly to add values to, and each value
-// stored has more overhead, compared to the other histogram types. However it
-// may be more efficient in memory if the total number of sample values is small
-// compared to the range of their values.
-//
-// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
-// enumerations that are (nearly) contiguous. Also for code that is expected to
-// run often or in a tight loop.
-//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and or
-// infrequently recorded values.
-//
-// For instance, Sqlite.Version.* are SPARSE because for any given database,
-// there's going to be exactly one version logged, meaning no gain to having a
-// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
-// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
-// errors and there are large gaps in the set of possible errors.
-#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
- do { \
- base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
- name, base::HistogramBase::kUmaTargetedHistogramFlag); \
- histogram->Add(sample); \
- } while (0)
-
class HistogramSamples;
class PersistentHistogramAllocator;
+class Pickle;
+class PickleIterator;
class BASE_EXPORT SparseHistogram : public HistogramBase {
public:
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index eab7790276..f4a7c9495e 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -8,6 +8,7 @@
#include <string>
#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index 42ed5a9545..74c964a3fb 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -16,7 +16,6 @@
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
#include "base/values.h"
namespace {
@@ -59,10 +58,10 @@ StatisticsRecorder::HistogramIterator::~HistogramIterator() {}
StatisticsRecorder::HistogramIterator&
StatisticsRecorder::HistogramIterator::operator++() {
const HistogramMap::iterator histograms_end = histograms_->end();
- if (iter_ == histograms_end || lock_ == NULL)
+ if (iter_ == histograms_end)
return *this;
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
for (;;) {
++iter_;
@@ -79,51 +78,63 @@ StatisticsRecorder::HistogramIterator::operator++() {
}
StatisticsRecorder::~StatisticsRecorder() {
- DCHECK(lock_);
DCHECK(histograms_);
DCHECK(ranges_);
// Clean out what this object created and then restore what existed before.
Reset();
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
histograms_ = existing_histograms_.release();
callbacks_ = existing_callbacks_.release();
ranges_ = existing_ranges_.release();
+ providers_ = existing_providers_.release();
}
// static
void StatisticsRecorder::Initialize() {
+ // Tests sometimes create local StatisticsRecorders in order to provide a
+ // contained environment of histograms that can be later discarded. If a
+ // true global instance gets created in this environment, it will
+ // eventually get disconnected when the local instance destructs and
+ // restores the previous state, resulting in no StatisticsRecorder at all.
+ // The global lazy instance, however, will remain valid, thus ensuring that
+ // another never gets installed via this method. If a |histograms_| map
+ // exists, assume the StatisticsRecorder is already "initialized".
+ if (histograms_)
+ return;
+
// Ensure that an instance of the StatisticsRecorder object is created.
g_statistics_recorder_.Get();
}
// static
bool StatisticsRecorder::IsActive() {
- if (lock_ == NULL)
- return false;
- base::AutoLock auto_lock(*lock_);
- return NULL != histograms_;
+ base::AutoLock auto_lock(lock_.Get());
+ return histograms_ != nullptr;
+}
+
+// static
+void StatisticsRecorder::RegisterHistogramProvider(
+ const WeakPtr<HistogramProvider>& provider) {
+ providers_->push_back(provider);
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
HistogramBase* histogram) {
- // As per crbug.com/79322 the histograms are intentionally leaked, so we need
- // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
- // for an object, the duplicates should not be annotated.
- // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
- // twice if (lock_ == NULL) || (!histograms_).
- if (lock_ == NULL) {
- ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
- return histogram;
- }
-
- HistogramBase* histogram_to_delete = NULL;
- HistogramBase* histogram_to_return = NULL;
+ HistogramBase* histogram_to_delete = nullptr;
+ HistogramBase* histogram_to_return = nullptr;
{
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL) {
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_) {
histogram_to_return = histogram;
+
+ // As per crbug.com/79322 the histograms are intentionally leaked, so we
+ // need to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used
+ // only once for an object, the duplicates should not be annotated.
+ // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+ // twice |if (!histograms_)|.
+ ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
} else {
const std::string& name = histogram->histogram_name();
HistogramMap::iterator it = histograms_->find(name);
@@ -164,13 +175,8 @@ const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
DCHECK(ranges->HasValidChecksum());
std::unique_ptr<const BucketRanges> ranges_deleter;
- if (lock_ == NULL) {
- ANNOTATE_LEAKING_OBJECT_PTR(ranges);
- return ranges;
- }
-
- base::AutoLock auto_lock(*lock_);
- if (ranges_ == NULL) {
+ base::AutoLock auto_lock(lock_.Get());
+ if (!ranges_) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
return ranges;
}
@@ -267,10 +273,8 @@ std::string StatisticsRecorder::ToJSON(const std::string& query) {
// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
for (const auto& entry : *histograms_) {
@@ -281,10 +285,8 @@ void StatisticsRecorder::GetHistograms(Histograms* output) {
// static
void StatisticsRecorder::GetBucketRanges(
std::vector<const BucketRanges*>* output) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (ranges_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!ranges_)
return;
for (const auto& entry : *ranges_) {
@@ -301,19 +303,31 @@ HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
// will acquire the lock at that time.
ImportGlobalPersistentHistograms();
- if (lock_ == NULL)
- return NULL;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
- return NULL;
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
+ return nullptr;
HistogramMap::iterator it = histograms_->find(name);
if (histograms_->end() == it)
- return NULL;
+ return nullptr;
return it->second;
}
// static
+void StatisticsRecorder::ImportProvidedHistograms() {
+ if (!providers_)
+ return;
+
+ // Merge histogram data from each provider in turn.
+ for (const WeakPtr<HistogramProvider>& provider : *providers_) {
+ // The weak pointer may be invalid if the provider was destructed, though
+ // providers generally never are.
+ if (provider)
+ provider->MergeHistogramDeltas();
+ }
+}
+
+// static
StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
bool include_persistent) {
DCHECK(histograms_);
@@ -321,7 +335,7 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
HistogramMap::iterator iter_begin;
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
iter_begin = histograms_->begin();
}
return HistogramIterator(iter_begin, include_persistent);
@@ -331,7 +345,7 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
HistogramMap::iterator iter_end;
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
iter_end = histograms_->end();
}
return HistogramIterator(iter_end, true);
@@ -339,21 +353,22 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
// static
void StatisticsRecorder::InitLogOnShutdown() {
- if (lock_ == nullptr)
+ if (!histograms_)
return;
- base::AutoLock auto_lock(*lock_);
+
+ base::AutoLock auto_lock(lock_.Get());
g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
}
// static
void StatisticsRecorder::GetSnapshot(const std::string& query,
Histograms* snapshot) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
+ ImportGlobalPersistentHistograms();
+
for (const auto& entry : *histograms_) {
if (entry.second->histogram_name().find(query) != std::string::npos)
snapshot->push_back(entry.second);
@@ -365,10 +380,8 @@ bool StatisticsRecorder::SetCallback(
const std::string& name,
const StatisticsRecorder::OnSampleCallback& cb) {
DCHECK(!cb.is_null());
- if (lock_ == NULL)
- return false;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return false;
if (ContainsKey(*callbacks_, name))
@@ -384,10 +397,8 @@ bool StatisticsRecorder::SetCallback(
// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
callbacks_->erase(name);
@@ -401,10 +412,8 @@ void StatisticsRecorder::ClearCallback(const std::string& name) {
// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
const std::string& name) {
- if (lock_ == NULL)
- return OnSampleCallback();
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return OnSampleCallback();
auto callback_iterator = callbacks_->find(name);
@@ -414,10 +423,7 @@ StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
// static
size_t StatisticsRecorder::GetHistogramCount() {
- if (!lock_)
- return 0;
-
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
if (!histograms_)
return 0;
return histograms_->size();
@@ -438,7 +444,7 @@ StatisticsRecorder::CreateTemporaryForTesting() {
// static
void StatisticsRecorder::UninitializeForTesting() {
// Stop now if it's never been initialized.
- if (lock_ == NULL || histograms_ == NULL)
+ if (!histograms_)
return;
// Get the global instance and destruct it. It's held in static memory so
@@ -454,7 +460,7 @@ void StatisticsRecorder::UninitializeForTesting() {
// static
void StatisticsRecorder::ImportGlobalPersistentHistograms() {
- if (lock_ == NULL)
+ if (!histograms_)
return;
// Import histograms from known persistent storage. Histograms could have
@@ -470,25 +476,17 @@ void StatisticsRecorder::ImportGlobalPersistentHistograms() {
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
- if (lock_ == NULL) {
- // This will leak on purpose. It's the only way to make sure we won't race
- // against the static uninitialization of the module while one of our
- // static methods relying on the lock get called at an inappropriate time
- // during the termination phase. Since it's a static data member, we will
- // leak one per process, which would be similar to the instance allocated
- // during static initialization and released only on process termination.
- lock_ = new base::Lock;
- }
-
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
existing_histograms_.reset(histograms_);
existing_callbacks_.reset(callbacks_);
existing_ranges_.reset(ranges_);
+ existing_providers_.reset(providers_);
histograms_ = new HistogramMap;
callbacks_ = new CallbackMap;
ranges_ = new RangesMap;
+ providers_ = new HistogramProviders;
InitLogOnShutdownWithoutLock();
}
@@ -502,23 +500,21 @@ void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
// static
void StatisticsRecorder::Reset() {
- // If there's no lock then there is nothing to reset.
- if (!lock_)
- return;
std::unique_ptr<HistogramMap> histograms_deleter;
std::unique_ptr<CallbackMap> callbacks_deleter;
std::unique_ptr<RangesMap> ranges_deleter;
- // We don't delete lock_ on purpose to avoid having to properly protect
- // against it going away after we checked for NULL in the static methods.
+ std::unique_ptr<HistogramProviders> providers_deleter;
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
histograms_deleter.reset(histograms_);
callbacks_deleter.reset(callbacks_);
ranges_deleter.reset(ranges_);
- histograms_ = NULL;
- callbacks_ = NULL;
- ranges_ = NULL;
+ providers_deleter.reset(providers_);
+ histograms_ = nullptr;
+ callbacks_ = nullptr;
+ ranges_ = nullptr;
+ providers_ = nullptr;
}
// We are going to leak the histograms and the ranges.
}
@@ -532,12 +528,15 @@ void StatisticsRecorder::DumpHistogramsToVlog(void* /*instance*/) {
// static
-StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = nullptr;
// static
-StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
+StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = nullptr;
// static
-StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
+StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = nullptr;
// static
-base::Lock* StatisticsRecorder::lock_ = NULL;
+StatisticsRecorder::HistogramProviders* StatisticsRecorder::providers_;
+// static
+base::LazyInstance<base::Lock>::Leaky StatisticsRecorder::lock_ =
+ LAZY_INSTANCE_INITIALIZER;
} // namespace base
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index c3c6aceffd..55be86a85b 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -23,15 +23,14 @@
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
#include "base/metrics/histogram_base.h"
#include "base/strings/string_piece.h"
-
-class SubprocessMetricsProviderTest;
+#include "base/synchronization/lock.h"
namespace base {
class BucketRanges;
-class Lock;
class BASE_EXPORT StatisticsRecorder {
public:
@@ -64,8 +63,18 @@ class BASE_EXPORT StatisticsRecorder {
}
};
+ // An interface class that allows the StatisticsRecorder to forcibly merge
+ // histograms from providers when necessary.
+ class HistogramProvider {
+ public:
+ virtual ~HistogramProvider() {}
+ // Merges all histogram information into the global versions.
+ virtual void MergeHistogramDeltas() = 0;
+ };
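
A provider is typically a long-lived object that registers a weak pointer to
itself; a minimal sketch (TestHistogramProvider in the unit-test diff below
follows the same pattern):

    class MyProvider : public base::StatisticsRecorder::HistogramProvider {
     public:
      MyProvider() : weak_factory_(this) {
        base::StatisticsRecorder::RegisterHistogramProvider(
            weak_factory_.GetWeakPtr());
      }
      void MergeHistogramDeltas() override {
        // Merge this provider's histogram deltas into the global recorder.
      }

     private:
      base::WeakPtrFactory<MyProvider> weak_factory_;
    };
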
+
typedef std::map<StringKey, HistogramBase*> HistogramMap;
typedef std::vector<HistogramBase*> Histograms;
+ typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
// A class for iterating over the histograms held within this global resource.
class BASE_EXPORT HistogramIterator {
@@ -103,6 +112,12 @@ class BASE_EXPORT StatisticsRecorder {
// Find out if histograms can now be registered into our list.
static bool IsActive();
+ // Register a provider of histograms that can be called to merge those into
+ // the global StatisticsRecorder. Calls to ImportProvidedHistograms() will
+ // fetch from registered providers.
+ static void RegisterHistogramProvider(
+ const WeakPtr<HistogramProvider>& provider);
+
// Register, or add a new histogram to the collection of statistics. If an
// identically named histogram is already registered, then the argument
// |histogram| will be deleted. The returned value is always the registered
@@ -136,6 +151,9 @@ class BASE_EXPORT StatisticsRecorder {
// safe. It returns NULL if a matching histogram is not found.
static HistogramBase* FindHistogram(base::StringPiece name);
+ // Imports histograms from providers. This must be called on the UI thread.
+ static void ImportProvidedHistograms();
+
// Support for iterating over known histograms.
static HistogramIterator begin(bool include_persistent);
static HistogramIterator end();
@@ -200,7 +218,7 @@ class BASE_EXPORT StatisticsRecorder {
// |bucket_ranges_|.
typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
- friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
+ friend struct LazyInstanceTraitsBase<StatisticsRecorder>;
friend class StatisticsRecorderTest;
// Imports histograms from global persistent memory. The global lock must
@@ -222,6 +240,7 @@ class BASE_EXPORT StatisticsRecorder {
std::unique_ptr<HistogramMap> existing_histograms_;
std::unique_ptr<CallbackMap> existing_callbacks_;
std::unique_ptr<RangesMap> existing_ranges_;
+ std::unique_ptr<HistogramProviders> existing_providers_;
bool vlog_initialized_ = false;
@@ -231,9 +250,13 @@ class BASE_EXPORT StatisticsRecorder {
static HistogramMap* histograms_;
static CallbackMap* callbacks_;
static RangesMap* ranges_;
+ static HistogramProviders* providers_;
- // Lock protects access to above maps.
- static base::Lock* lock_;
+ // Lock protects access to above maps. This is a LazyInstance to avoid races
+ // when the above methods are used before Initialize(). Previously each method
+ // would do |if (!lock_) return;| which would race with
+ // |lock_ = new Lock;| in StatisticsRecorder(). http://crbug.com/672852.
+ static base::LazyInstance<base::Lock>::Leaky lock_;
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
};
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 65e2c98f52..48b6df3068 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -12,6 +12,7 @@
#include "base/bind.h"
#include "base/json/json_reader.h"
#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/sparse_histogram.h"
@@ -656,4 +657,74 @@ TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
EXPECT_TRUE(VLogInitialized());
}
+class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
+ public:
+ TestHistogramProvider(std::unique_ptr<PersistentHistogramAllocator> allocator)
+ : allocator_(std::move(allocator)), weak_factory_(this) {
+ StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
+ }
+
+ void MergeHistogramDeltas() override {
+ PersistentHistogramAllocator::Iterator hist_iter(allocator_.get());
+ while (true) {
+ std::unique_ptr<base::HistogramBase> histogram = hist_iter.GetNext();
+ if (!histogram)
+ break;
+ allocator_->MergeHistogramDeltaToStatisticsRecorder(histogram.get());
+ }
+ }
+
+ private:
+ std::unique_ptr<PersistentHistogramAllocator> allocator_;
+ WeakPtrFactory<TestHistogramProvider> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestHistogramProvider);
+};
+
+TEST_P(StatisticsRecorderTest, ImportHistogramsTest) {
+ // Create a second SR to create some histograms for later import.
+ std::unique_ptr<StatisticsRecorder> temp_sr =
+ StatisticsRecorder::CreateTemporaryForTesting();
+
+ // Extract any existing global allocator so a new one can be created.
+ std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+ GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Create a histogram inside a new allocator for testing.
+ GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+ HistogramBase* histogram = LinearHistogram::FactoryGet("Foo", 1, 10, 11, 0);
+ histogram->Add(3);
+
+ // Undo back to the starting point.
+ std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::Set(std::move(old_allocator));
+ temp_sr.reset();
+
+ // Create a provider that can supply histograms to the current SR.
+ TestHistogramProvider provider(std::move(new_allocator));
+
+ // Verify that the created histogram is no longer known.
+ ASSERT_FALSE(StatisticsRecorder::FindHistogram(histogram->histogram_name()));
+
+ // Now test that it merges.
+ StatisticsRecorder::ImportProvidedHistograms();
+ HistogramBase* found =
+ StatisticsRecorder::FindHistogram(histogram->histogram_name());
+ ASSERT_TRUE(found);
+ EXPECT_NE(histogram, found);
+ std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+ EXPECT_EQ(1, snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+
+ // Finally, verify that updates can also be merged.
+ histogram->Add(3);
+ histogram->Add(5);
+ StatisticsRecorder::ImportProvidedHistograms();
+ snapshot = found->SnapshotSamples();
+ EXPECT_EQ(3, snapshot->TotalCount());
+ EXPECT_EQ(2, snapshot->GetCount(3));
+ EXPECT_EQ(1, snapshot->GetCount(5));
+}
+
} // namespace base
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
index 169a0634e4..65ac918817 100644
--- a/base/metrics/user_metrics.cc
+++ b/base/metrics/user_metrics.cc
@@ -17,10 +17,10 @@
namespace base {
namespace {
-LazyInstance<std::vector<ActionCallback>> g_callbacks =
- LAZY_INSTANCE_INITIALIZER;
-LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
+ g_task_runner = LAZY_INSTANCE_INITIALIZER;
} // namespace
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
index 93701e8fd2..87fbd9cac0 100644
--- a/base/metrics/user_metrics.h
+++ b/base/metrics/user_metrics.h
@@ -17,6 +17,9 @@ namespace base {
// This module provides some helper functions for logging actions tracked by
// the user metrics system.
+// For best practices on deciding when to emit a user action, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/actions/README.md
+
// Record that the user performed an action.
// This function must be called after the task runner has been set with
// SetRecordActionTaskRunner().
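// A hedged usage sketch; "MyFeature_Clicked" is a hypothetical action name
// that would need to be registered under tools/metrics/actions:
#include "base/metrics/user_metrics.h"
#include "base/threading/thread_task_runner_handle.h"

void ExampleRecordAction() {
  // Install the task runner once, e.g. during browser main-thread startup.
  base::SetRecordActionTaskRunner(base::ThreadTaskRunnerHandle::Get());
  base::RecordAction(base::UserMetricsAction("MyFeature_Clicked"));
}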
diff --git a/base/native_library.h b/base/native_library.h
index b4f3a3cd1b..02eae1d508 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -65,12 +65,32 @@ struct BASE_EXPORT NativeLibraryLoadError {
#endif // OS_WIN
};
+struct BASE_EXPORT NativeLibraryOptions {
+ NativeLibraryOptions() = default;
+ NativeLibraryOptions(const NativeLibraryOptions& options) = default;
+
+ // If |true|, a loaded library is required to prefer local symbol resolution
+ // before considering global symbols. Note that this is already the default
+ // behavior on most systems. Setting this to |false| does not guarantee the
+ // inverse, i.e., it does not force a preference for global symbols over local
+ // ones.
+ bool prefer_own_symbols = false;
+};
+
// Loads a native library from disk. Release it with UnloadNativeLibrary when
// you're done. Returns NULL on failure.
// If |error| is not NULL, it may be filled in on load error.
BASE_EXPORT NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error);
+// Loads a native library from disk. Release it with UnloadNativeLibrary when
+// you're done. Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
+ const FilePath& library_path,
+ const NativeLibraryOptions& options,
+ NativeLibraryLoadError* error);
+
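// A hedged usage sketch of the new entry point; the library path is
// illustrative only:
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/native_library.h"

base::NativeLibrary ExampleLoadWithOwnSymbols() {
  base::NativeLibraryOptions options;
  options.prefer_own_symbols = true;  // Requests RTLD_DEEPBIND where available.
  base::NativeLibraryLoadError error;
  base::NativeLibrary lib = base::LoadNativeLibraryWithOptions(
      base::FilePath(FILE_PATH_LITERAL("libexample.so")), options, &error);
  if (!lib)
    LOG(ERROR) << "Load failed: " << error.ToString();
  return lib;
}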
#if defined(OS_WIN)
// Loads a native library from disk. Release it with UnloadNativeLibrary when
// you're done.
diff --git a/base/native_library_posix.cc b/base/native_library_posix.cc
index 2dc434b7be..3459716af1 100644
--- a/base/native_library_posix.cc
+++ b/base/native_library_posix.cc
@@ -19,16 +19,27 @@ std::string NativeLibraryLoadError::ToString() const {
}
// static
-NativeLibrary LoadNativeLibrary(const FilePath& library_path,
- NativeLibraryLoadError* error) {
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+ const NativeLibraryOptions& options,
+ NativeLibraryLoadError* error) {
// dlopen() opens the file off disk.
ThreadRestrictions::AssertIOAllowed();
- // We deliberately do not use RTLD_DEEPBIND. For the history why, please
- // refer to the bug tracker. Some useful bug reports to read include:
+ // We deliberately do not use RTLD_DEEPBIND by default. For the history why,
+ // please refer to the bug tracker. Some useful bug reports to read include:
// http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
// and http://crbug.com/40794.
- void* dl = dlopen(library_path.value().c_str(), RTLD_LAZY);
+ int flags = RTLD_LAZY;
+#if defined(OS_ANDROID) || !defined(RTLD_DEEPBIND)
+ // Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
+ // further investigation, as it might vary across versions. Crash here to
+ // warn developers that they're trying to rely on uncertain behavior.
+ CHECK(!options.prefer_own_symbols);
+#else
+ if (options.prefer_own_symbols)
+ flags |= RTLD_DEEPBIND;
+#endif
+ void* dl = dlopen(library_path.value().c_str(), flags);
if (!dl && error)
error->message = dlerror();
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index 6b558afde4..b0ec279eb5 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -8,95 +8,130 @@
#include <stddef.h>
#include <limits>
+#include <ostream>
#include <type_traits>
-#include "base/logging.h"
#include "base/numerics/safe_conversions_impl.h"
namespace base {
+// The following are helper constexpr template functions and classes for safely
+// performing a range of conversions, assignments, and tests:
+//
+// checked_cast<> - Analogous to static_cast<> for numeric types, except
+// that it CHECKs that the specified numeric conversion will not overflow
+// or underflow. NaN source will always trigger a CHECK.
+// The default CHECK triggers a crash, but the handler can be overridden.
+// saturated_cast<> - Analogous to static_cast<> for numeric types, except
+// that it returns a saturated result when the specified numeric conversion
+// would otherwise overflow or underflow. A NaN source returns 0 by
+// default, but can be overridden to return a different result.
+// strict_cast<> - Analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large
+// enough to contain any value in the source type. It performs no runtime
+// checking and thus introduces no runtime overhead.
+// IsValueInRangeForNumericType<>() - A convenience function that returns true
+// if the type supplied to the template parameter can represent the value
+// passed as an argument to the function.
+// IsValueNegative<>() - A convenience function that will accept any arithmetic
+// type as an argument and will return whether the value is less than zero.
+// Unsigned types always return false.
+// SafeUnsignedAbs() - Returns the absolute value of the supplied integer
+// parameter as an unsigned result (thus avoiding an overflow if the value
+// is the signed, two's complement minimum).
+// StrictNumeric<> - A wrapper type that performs assignments and copies via
+// the strict_cast<> template, and can perform valid arithmetic comparisons
+// across any range of arithmetic types. StrictNumeric is the return type
+// for values extracted from a CheckedNumeric class instance. The raw
+// arithmetic value is extracted via static_cast to the underlying type.
+// MakeStrictNum() - Creates a new StrictNumeric from the underlying type of
+// the supplied arithmetic or StrictNumeric type.
+
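// A few hedged examples of the behavior described above (values illustrative):
#include <stdint.h>
#include "base/numerics/safe_conversions.h"

void ExampleCasts() {
  uint8_t a = base::saturated_cast<uint8_t>(-1);   // Saturates to 0.
  uint8_t b = base::saturated_cast<uint8_t>(300);  // Saturates to 255.
  int64_t c = base::strict_cast<int64_t>(int32_t{1});  // Compiles; no check.
  // int8_t d = base::strict_cast<int8_t>(int32_t{1});  // Would not compile.
  int8_t e = base::checked_cast<int8_t>(300);  // Crashes: out of range.
}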
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
constexpr bool IsValueInRangeForNumericType(Src value) {
- return internal::DstRangeRelationToSrcRange<Dst>(value) ==
- internal::RANGE_VALID;
-}
-
-// Convenience function for determining if a numeric value is negative without
-// throwing compiler warnings on: unsigned(value) < 0.
-template <typename T>
-constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
-IsValueNegative(T value) {
- static_assert(std::numeric_limits<T>::is_specialized,
- "Argument must be numeric.");
- return value < 0;
+ return internal::DstRangeRelationToSrcRange<Dst>(value).IsValid();
}
-template <typename T>
-constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
- bool>::type IsValueNegative(T) {
- static_assert(std::numeric_limits<T>::is_specialized,
- "Argument must be numeric.");
- return false;
-}
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+struct CheckOnFailure {
+ template <typename T>
+ static T HandleFailure() {
+#if defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char*)0 = 0));
+#endif
+ return T();
+ }
+};
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst, typename Src>
-inline Dst checked_cast(Src value) {
- CHECK(IsValueInRangeForNumericType<Dst>(value));
- return static_cast<Dst>(value);
+template <typename Dst,
+ class CheckHandler = CheckOnFailure,
+ typename Src>
+constexpr Dst checked_cast(Src value) {
+ // This throws a compile-time error on evaluating the constexpr if it can be
+ // determined at compile-time as failing, otherwise it will CHECK at runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return IsValueInRangeForNumericType<Dst, SrcType>(value)
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
}
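// A hedged sketch of overriding the failure handler; the handler name is
// hypothetical, and only a static HandleFailure<T>() member is required:
struct ReturnZeroOnFailure {
  template <typename T>
  static constexpr T HandleFailure() { return T(); }
};

int ExampleCheckedCastFallback() {
  // Yields 0 instead of trapping, because -5 is out of range for uint8_t.
  return base::checked_cast<uint8_t, ReturnZeroOnFailure>(-5);
}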
-// HandleNaN will cause this class to CHECK(false).
-struct SaturatedCastNaNBehaviorCheck {
- template <typename T>
- static T HandleNaN() {
- CHECK(false);
- return T();
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+template <typename T>
+struct SaturationDefaultHandler {
+ static constexpr T NaN() {
+ return std::numeric_limits<T>::has_quiet_NaN
+ ? std::numeric_limits<T>::quiet_NaN()
+ : T();
}
-};
-
-// HandleNaN will return 0 in this case.
-struct SaturatedCastNaNBehaviorReturnZero {
- template <typename T>
- static constexpr T HandleNaN() {
- return T();
+ static constexpr T max() { return std::numeric_limits<T>::max(); }
+ static constexpr T Overflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ static constexpr T lowest() { return std::numeric_limits<T>::lowest(); }
+ static constexpr T Underflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
}
};
namespace internal {
-// This wrapper is used for C++11 constexpr support by avoiding the declaration
-// of local variables in the saturated_cast template function.
-template <typename Dst, class NaNHandler, typename Src>
-constexpr Dst saturated_cast_impl(const Src value,
- const RangeConstraint constraint) {
- return constraint == RANGE_VALID
- ? static_cast<Dst>(value)
- : (constraint == RANGE_UNDERFLOW
- ? std::numeric_limits<Dst>::min()
- : (constraint == RANGE_OVERFLOW
- ? std::numeric_limits<Dst>::max()
- : (constraint == RANGE_INVALID
- ? NaNHandler::template HandleNaN<Dst>()
- : (NOTREACHED(), static_cast<Dst>(value)))));
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+ : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
}
-} // namespace internal
// saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate rather than overflow or
-// underflow. NaN assignment to an integral will defer the behavior to a
-// specified class. By default, it will return 0.
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
template <typename Dst,
- class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
+ template <typename>
+ class SaturationHandler = SaturationDefaultHandler,
typename Src>
constexpr Dst saturated_cast(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? static_cast<Dst>(value) // Floating point optimization.
- : internal::saturated_cast_impl<Dst, NaNHandler>(
- value, internal::DstRangeRelationToSrcRange<Dst>(value));
+ using SrcType = typename UnderlyingType<Src>::type;
+ return saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ value,
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(value));
}
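// A hedged sketch of a custom saturation handler (the name is hypothetical).
// The handler template supplies the boundary values used both for the range
// check and for the saturated results:
template <typename T>
struct PercentBounds {
  static constexpr T NaN() { return T(0); }
  static constexpr T max() { return T(100); }
  static constexpr T Overflow() { return max(); }
  static constexpr T lowest() { return T(0); }
  static constexpr T Underflow() { return lowest(); }
};

int ExamplePercent() {
  return base::saturated_cast<int, PercentBounds>(250);  // Yields 100.
}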
// strict_cast<> is analogous to static_cast<> for numeric types, except that
@@ -104,22 +139,40 @@ constexpr Dst saturated_cast(Src value) {
// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
constexpr Dst strict_cast(Src value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
- static_assert(std::numeric_limits<Dst>::is_specialized,
- "Result must be numeric.");
- static_assert((internal::StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
- internal::NUMERIC_RANGE_CONTAINED),
- "The numeric conversion is out of range for this type. You "
- "should probably use one of the following conversion "
- "mechanisms on the value you want to pass:\n"
- "- base::checked_cast\n"
- "- base::saturated_cast\n"
- "- base::CheckedNumeric");
-
- return static_cast<Dst>(value);
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
}
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+ static const bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst,
+ Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+ static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
// StrictNumeric implements compile time range checking between numeric types by
// wrapping assignment operations in a strict_cast. This class is intended to be
// used for function arguments and return types, to ensure the destination type
@@ -133,7 +186,7 @@ constexpr Dst strict_cast(Src value) {
template <typename T>
class StrictNumeric {
public:
- typedef T type;
+ using type = T;
constexpr StrictNumeric() : value_(0) {}
@@ -145,21 +198,74 @@ class StrictNumeric {
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to StrictNumerics to make them easier to use.
template <typename Src>
- constexpr StrictNumeric(Src value)
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
: value_(strict_cast<T>(value)) {}
- // The numeric cast operator basically handles all the magic.
- template <typename Dst>
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+ // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst,
+ typename std::enable_if<
+ IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
constexpr operator Dst() const {
- return strict_cast<Dst>(value_);
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
}
private:
const T value_;
};
-// Explicitly make a shorter size_t typedef for convenience.
-typedef StrictNumeric<size_t> SizeT;
+// Convenience wrapper returns a StrictNumeric from the provided arithmetic
+// type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+ const T value) {
+ return value;
+}
+
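// A hedged example of MakeStrictNum() enforcing ranges at compile time:
int64_t ExampleWiden() {
  return base::MakeStrictNum(42);  // int fits in int64_t, so this compiles.
  // int32_t n = base::MakeStrictNum(int64_t{42});  // Would not compile.
}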
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+
+#define STRICT_COMPARISON_OP(NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if< \
+ internal::IsStrictOp<L, R>::value>::type* = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+STRICT_COMPARISON_OP(IsLess, <);
+STRICT_COMPARISON_OP(IsLessOrEqual, <=);
+STRICT_COMPARISON_OP(IsGreater, >);
+STRICT_COMPARISON_OP(IsGreaterOrEqual, >=);
+STRICT_COMPARISON_OP(IsEqual, ==);
+STRICT_COMPARISON_OP(IsNotEqual, !=);
+
+#undef STRICT_COMPARISON_OP
+}  // namespace internal
+
+using internal::strict_cast;
+using internal::saturated_cast;
+using internal::SafeUnsignedAbs;
+using internal::StrictNumeric;
+using internal::MakeStrictNum;
+using internal::IsValueNegative;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
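// A hedged example of the strict comparison overloads: operands are promoted
// to a single common type internally, so mixed-sign comparisons are
// mathematically correct:
bool ExampleCompare() {
  return base::MakeStrictNum(-1) < base::MakeStrictNum(1u);  // true, whereas
  // the raw expression (-1 < 1u) is false after integer promotion.
}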
} // namespace base
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index 0f0aebcab7..24357fd6a5 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -5,28 +5,77 @@
#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
-#include <limits.h>
#include <stdint.h>
-#include <climits>
#include <limits>
+#include <type_traits>
namespace base {
namespace internal {
// The std library doesn't provide a binary max_exponent for integers, however
-// we can compute one by adding one to the number of non-sign bits. This allows
-// for accurate range comparisons between floating point and integer types.
+// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
- static_assert(std::is_arithmetic<NumericType>::value,
- "Argument must be numeric.");
- static const int value = std::numeric_limits<NumericType>::is_iec559
+ static const int value = std::is_floating_point<NumericType>::value
? std::numeric_limits<NumericType>::max_exponent
- : (sizeof(NumericType) * CHAR_BIT + 1 -
- std::numeric_limits<NumericType>::is_signed);
+ : std::numeric_limits<NumericType>::digits + 1;
};
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+ static const int value = std::numeric_limits<NumericType>::digits +
+ std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+ typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+ T x,
+ bool is_negative) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>(
+ (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value) ? 0 - static_cast<UnsignedT>(value)
+ : static_cast<UnsignedT>(value);
+}
+
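// Hedged examples of the two helpers above:
#include <stdint.h>
#include <limits>

uint32_t ExampleAbs() {
  // Well-defined even for the minimum value, where std::abs() would overflow.
  return base::internal::SafeUnsignedAbs(
      std::numeric_limits<int32_t>::lowest());  // 2147483648u
}

int32_t ExampleNegate() {
  return base::internal::ConditionalNegate(uint32_t{5}, true);  // -5
}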
enum IntegerRepresentation {
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED
@@ -34,7 +83,7 @@ enum IntegerRepresentation {
// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
@@ -45,16 +94,14 @@ enum NumericRangeRepresentation {
// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign =
- std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED >
+template <typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
// Same sign: Dst is guaranteed to contain Src only if its range is equal or
@@ -89,29 +136,33 @@ struct StaticDstRangeRelationToSrcRange<Dst,
static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};
-enum RangeConstraint {
- RANGE_VALID = 0x0, // Value can be represented by the destination type.
- RANGE_UNDERFLOW = 0x1, // Value would underflow.
- RANGE_OVERFLOW = 0x2, // Value would overflow.
- RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
-};
-
-// Helper function for coercing an int back to a RangeConstraint.
-constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
- // TODO(jschuh): Once we get full C++14 support we want this
- // assert(integer_range_constraint >= RANGE_VALID &&
- // integer_range_constraint <= RANGE_INVALID)
- return static_cast<RangeConstraint>(integer_range_constraint);
-}
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const {
+ return is_underflow_ == rhs.is_underflow_ &&
+ is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const {
+ return !(*this == rhs);
+ }
-// This function creates a RangeConstraint from an upper and lower bound
-// check by taking advantage of the fact that only NaN can be out of range in
-// both directions at once.
-constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
- bool is_in_lower_bound) {
- return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
- (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
-}
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
+};
// The following helper template addresses a corner case in range checks for
// conversion from a floating-point type to an integral type of smaller range
@@ -134,131 +185,547 @@ constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
// To fix this bug we manually truncate the maximum value when the destination
// type is an integral of larger precision than the source floating-point type,
// such that the resulting maximum is represented exactly as a floating point.
-template <typename Dst, typename Src>
+template <typename Dst, typename Src, template <typename> class Bounds>
struct NarrowingRange {
- typedef typename std::numeric_limits<Src> SrcLimits;
- typedef typename std::numeric_limits<Dst> DstLimits;
- // The following logic avoids warnings where the max function is
- // instantiated with invalid values for a bit shift (even though
- // such a function can never be called).
- static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
- SrcLimits::digits < DstLimits::digits &&
- SrcLimits::is_iec559 &&
- DstLimits::is_integer)
- ? (DstLimits::digits - SrcLimits::digits)
- : 0;
-
- static constexpr Dst max() {
- // We use UINTMAX_C below to avoid compiler warnings about shifting floating
- // points. Since it's a compile time calculation, it shouldn't have any
- // performance impact.
- return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
+
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+
+ // Masks out the integer bits that are beyond the precision of the
+ // intermediate type used for comparison.
+ template <
+ typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(
+ ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
}
- static constexpr Dst min() {
- return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
- : DstLimits::min();
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* =
+ nullptr>
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
}
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
};
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- NumericRangeRepresentation DstRange =
- StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+template <typename Dst,
+ typename Src,
+ template <typename> class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;
// The following templates are for ranges that must be verified at runtime. We
// split it into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
-// Dst range is statically determined to contain Src: Nothing to check.
+// Same sign narrowing: The range is contained for normal limits.
template <typename Dst,
typename Src,
+ template <typename> class Bounds,
IntegerRepresentation DstSign,
IntegerRepresentation SrcSign>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
DstSign,
SrcSign,
NUMERIC_RANGE_CONTAINED> {
- static constexpr RangeConstraint Check(Src /*value*/) { return RANGE_VALID; }
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
};
// Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded.
-template <typename Dst, typename Src>
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
- (value >= NarrowingRange<Dst, Src>::min()));
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
}
};
-// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
-template <typename Dst, typename Src>
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
}
};
-// Unsigned to signed: The upper boundary may be exceeded.
-template <typename Dst, typename Src>
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return sizeof(Dst) > sizeof(Src)
- ? RANGE_VALID
- : GetRangeConstraint(
- value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
- true);
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >=
+ static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary.
-template <typename Dst, typename Src>
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
- ? GetRangeConstraint(true, value >= static_cast<Src>(0))
- : GetRangeConstraint(
- value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
- value >= static_cast<Src>(0));
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(
+ value >= Src(0) && (DstLimits::lowest() == 0 ||
+ static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <=
+ static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
-template <typename Dst, typename Src>
-constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
- static_assert(std::numeric_limits<Dst>::is_specialized,
- "Result must be numeric.");
- return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+template <typename Dst,
+ template <typename> class Bounds = std::numeric_limits,
+ typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+ std::is_signed<I>::value> { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the TwiceWiderInteger template below will need
+// to be updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+ IsSigned>::type;
+};
+
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+ typename Lhs,
+ typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
+ IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+ value == IntegerBitsPlusSign<intmax_t>::value,
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value =
+ !std::is_floating_point<T>::value &&
+ !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+ typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value,
+ intmax_t,
+ uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// This hacks around libstdc++ 4.6 missing stuff in type_traits.
+#if defined(__GLIBCXX__)
+#define PRIV_GLIBCXX_4_7_0 20120322
+#define PRIV_GLIBCXX_4_5_4 20120702
+#define PRIV_GLIBCXX_4_6_4 20121127
+#if (__GLIBCXX__ < PRIV_GLIBCXX_4_7_0 || __GLIBCXX__ == PRIV_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == PRIV_GLIBCXX_4_6_4)
+#define PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#undef PRIV_GLIBCXX_4_7_0
+#undef PRIV_GLIBCXX_4_5_4
+#undef PRIV_GLIBCXX_4_6_4
+#endif
+#endif
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+ using type = __underlying_type(T);
+#else
+ using type = typename std::underlying_type<T>::type;
+#endif
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+#undef PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict);
+};
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) ==
+ DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) ==
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) !=
+ DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) !=
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// Promotes both arguments to a single type wide enough to hold either when
+// such a type exists, then performs the requested comparison template C.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(
+ static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+}
+
} // namespace internal
} // namespace base
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
index d0003b79db..f5007db39c 100644
--- a/base/numerics/safe_math.h
+++ b/base/numerics/safe_math.h
@@ -10,155 +10,259 @@
#include <limits>
#include <type_traits>
-#include "base/logging.h"
#include "base/numerics/safe_math_impl.h"
namespace base {
-
namespace internal {
-// CheckedNumeric implements all the logic and operators for detecting integer
+// CheckedNumeric<> implements all the logic and operators for detecting integer
// boundary conditions such as overflow, underflow, and invalid conversions.
// The CheckedNumeric type implicitly converts from floating point and integer
// data types, and contains overloads for basic arithmetic operations (i.e.: +,
-// -, *, /, %).
+// -, *, / for all types and %, <<, >>, &, |, ^ for integers). Type promotions
+// are a slightly modified version of the standard C arithmetic rules with the
+// two differences being that there is no default promotion to int and bitwise
+// logical operations always return an unsigned of the wider type.
+//
+// You may also use one of the variadic convenience functions, which accept
+// standard arithmetic or CheckedNumeric types, perform arithmetic operations,
+// and return a CheckedNumeric result. The supported functions are:
+// CheckAdd() - Addition.
+// CheckSub() - Subtraction.
+// CheckMul() - Multiplication.
+// CheckDiv() - Division.
+// CheckMod() - Modulus (integer only).
+// CheckLsh() - Left integer shift (integer only).
+// CheckRsh() - Right integer shift (integer only).
+// CheckAnd() - Bitwise AND (integer only with unsigned result).
+// CheckOr() - Bitwise OR (integer only with unsigned result).
+// CheckXor() - Bitwise XOR (integer only with unsigned result).
+// CheckMax() - Maximum of supplied arguments.
+// CheckMin() - Minimum of supplied arguments.
+//
+// The unary negation, increment, and decrement operators are supported, along
+// with the following unary arithmetic methods, which return a new
+// CheckedNumeric as a result of the operation:
+// Abs() - Absolute value.
+// UnsignedAbs() - Absolute value as an equal-width unsigned underlying type
+// (valid for only integral types).
+// Max() - Returns whichever is greater of the current instance or argument.
+// The underlying return type is whichever has the greatest magnitude.
+// Min() - Returns whichever is lowest of the current instance or argument.
+// The underlying return type is whichever can represent the lowest
+// number in the smallest width (e.g. int8_t over unsigned, int over
+// int8_t, and float over int).
//
// The following methods convert from CheckedNumeric to standard numeric values:
-// IsValid() - Returns true if the underlying numeric value is valid (i.e.
-// has not wrapped and is not the result of an invalid conversion).
-// ValueOrDie() - Returns the underlying value. If the state is not valid this
-// call will crash on a CHECK.
-// ValueOrDefault() - Returns the current value, or the supplied default if the
-// state is not valid.
-// ValueFloating() - Returns the underlying floating point value (valid
-// only for floating point CheckedNumeric types).
+// AssignIfValid() - Assigns the underlying value to the supplied destination
+// pointer if the value is currently valid and within the range
+// supported by the destination type. Returns true on success.
+// ****************************************************************************
+// * WARNING: All of the following functions return a StrictNumeric, which *
+// * is valid for comparison and assignment operations, but will trigger a *
+// * compile failure on attempts to assign to a type of insufficient range. *
+// ****************************************************************************
+// IsValid() - Returns true if the underlying numeric value is valid (i.e.
+// has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+// call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+// state is not valid (will not trigger a CHECK).
//
-// Bitwise operations are explicitly not supported, because correct
-// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
-// operations are explicitly not supported because they could result in a crash
-// on a CHECK condition. You should use patterns like the following for these
-// operations:
-// Bitwise operation:
-// CheckedNumeric<int> checked_int = untrusted_input_value;
-// int x = checked_int.ValueOrDefault(0) | kFlagValues;
-// Comparison:
+// The following wrapper functions can be used to avoid the template
+// disambiguator syntax when converting a destination type.
+// IsValidForType<>() in place of: a.template IsValid<Dst>()
+// ValueOrDieForType<>() in place of: a.template ValueOrDie<Dst>()
+// ValueOrDefaultForType<>() in place of: a.template ValueOrDefault<Dst>(default)
+//
+// The following are general utility methods that are useful for converting
+// between arithmetic types and CheckedNumeric types:
+// CheckedNumeric::Cast<Dst>() - Instance method returning a CheckedNumeric
+// derived from casting the current instance to a CheckedNumeric of
+// the supplied destination type.
+// MakeCheckedNum() - Creates a new CheckedNumeric from the underlying type of
+// the supplied arithmetic, CheckedNumeric, or StrictNumeric type.
+//
+// Comparison operations are explicitly not supported because they could result
+// in a crash on an unexpected CHECK condition. You should use patterns like the
+// following for comparisons:
// CheckedNumeric<size_t> checked_size = untrusted_input_value;
// checked_size += HEADER_LENGTH;
// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
// Do stuff...
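+//
+// Alternatively (an illustrative sketch, not from the original comments), the
+// same check can be written without ValueOrDie() via AssignIfValid():
+//   size_t size = 0;
+//   if (CheckAdd(untrusted_input_value, HEADER_LENGTH).AssignIfValid(&size) &&
+//       size < buffer_size)
+//     Do stuff...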
+
template <typename T>
class CheckedNumeric {
static_assert(std::is_arithmetic<T>::value,
"CheckedNumeric<T>: T must be a numeric type.");
public:
- typedef T type;
+ using type = T;
- CheckedNumeric() {}
+ constexpr CheckedNumeric() {}
// Copy constructor.
template <typename Src>
- CheckedNumeric(const CheckedNumeric<Src>& rhs)
- : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+ constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+ : state_(rhs.state_.value(), rhs.IsValid()) {}
template <typename Src>
- CheckedNumeric(Src value, RangeConstraint validity)
- : state_(value, validity) {}
+ friend class CheckedNumeric;
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to CheckedNumerics to make them easier to use.
template <typename Src>
- CheckedNumeric(Src value) // NOLINT(runtime/explicit)
+ constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit)
: state_(value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
}
// This is not an explicit constructor because we want a seamless conversion
// from StrictNumeric types.
template <typename Src>
- CheckedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
- : state_(static_cast<Src>(value)) {
+ constexpr CheckedNumeric(
+ StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : state_(static_cast<Src>(value)) {}
+
+ // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter.
+ template <typename Dst = T>
+ constexpr bool IsValid() const {
+ return state_.is_valid() &&
+ IsValueInRangeForNumericType<Dst>(state_.value());
}
- // IsValid() is the public API to test if a CheckedNumeric is currently valid.
- bool IsValid() const { return validity() == RANGE_VALID; }
+ // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+ // and is within the range supported by the destination type. Returns true if
+ // successful and false otherwise.
+ template <typename Dst>
+ constexpr bool AssignIfValid(Dst* result) const {
+ return IsValid<Dst>() ? ((*result = static_cast<Dst>(state_.value())), true)
+ : false;
+ }
- // ValueOrDie() The primary accessor for the underlying value. If the current
- // state is not valid it will CHECK and crash.
- T ValueOrDie() const {
- CHECK(IsValid());
- return state_.value();
+ // ValueOrDie() - The primary accessor for the underlying value. If the
+ // current state is not valid it will CHECK and crash.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter, which will trigger a CHECK if the value is not in bounds for
+ // the destination.
+ // The CHECK behavior can be overridden by supplying a handler as a
+ // template parameter, for test code, etc. However, the handler cannot access
+ // the underlying value, and it is not available through other means.
+ template <typename Dst = T, class CheckHandler = CheckOnFailure>
+ constexpr StrictNumeric<Dst> ValueOrDie() const {
+ return IsValid<Dst>() ? static_cast<Dst>(state_.value())
+ : CheckHandler::template HandleFailure<Dst>();
}
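+
+  // An illustrative sketch of a custom handler (hypothetical, e.g. for tests):
+  //   struct ReturnZeroOnFailure {
+  //     template <typename T>
+  //     static T HandleFailure() { return T(); }
+  //   };
+  //   int v = checked_value.ValueOrDie<int, ReturnZeroOnFailure>();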
- // ValueOrDefault(T default_value) A convenience method that returns the
+ // ValueOrDefault(T default_value) - A convenience method that returns the
// current value if the state is valid, and the supplied default_value for
// any other state.
- T ValueOrDefault(T default_value) const {
- return IsValid() ? state_.value() : default_value;
+ // A range checked destination type can be supplied using the Dst template
+ // parameter. WARNING: This function may fail to compile or CHECK at runtime
+ // if the supplied default_value is not within range of the destination type.
+ template <typename Dst = T, typename Src>
+ constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+ return IsValid<Dst>() ? static_cast<Dst>(state_.value())
+ : checked_cast<Dst>(default_value);
}
- // ValueFloating() - Since floating point values include their validity state,
- // we provide an easy method for extracting them directly, without a risk of
- // crashing on a CHECK.
- T ValueFloating() const {
- static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float.");
- return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+ // Returns a checked numeric of the specified type, cast from the current
+ // CheckedNumeric. If the current state is invalid or the destination cannot
+ // represent the result then the returned CheckedNumeric will be invalid.
+ template <typename Dst>
+ constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+ return *this;
}
- // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
- // tests and to avoid a big matrix of friend operator overloads. But the
- // values it returns are likely to change in the future.
- // Returns: current validity state (i.e. valid, overflow, underflow, nan).
- // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
- // saturation/wrapping so we can expose this state consistently and implement
- // saturated arithmetic.
- RangeConstraint validity() const { return state_.validity(); }
-
- // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
- // for tests and to avoid a big matrix of friend operator overloads. But the
- // values it returns are likely to change in the future.
- // Returns: the raw numeric value, regardless of the current state.
- // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
- // saturation/wrapping so we can expose this state consistently and implement
- // saturated arithmetic.
- T ValueUnsafe() const { return state_.value(); }
+ // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+ // underlying values may change at any time.
+ template <typename U>
+ friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
// Prototypes for the supported arithmetic operator overloads.
- template <typename Src> CheckedNumeric& operator+=(Src rhs);
- template <typename Src> CheckedNumeric& operator-=(Src rhs);
- template <typename Src> CheckedNumeric& operator*=(Src rhs);
- template <typename Src> CheckedNumeric& operator/=(Src rhs);
- template <typename Src> CheckedNumeric& operator%=(Src rhs);
-
- CheckedNumeric operator-() const {
- RangeConstraint validity;
- T value = CheckedNeg(state_.value(), &validity);
- // Negation is always valid for floating point.
- if (std::numeric_limits<T>::is_iec559)
- return CheckedNumeric<T>(value);
-
- validity = GetRangeConstraint(state_.validity() | validity);
- return CheckedNumeric<T>(value, validity);
+ template <typename Src>
+ CheckedNumeric& operator+=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator-=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator*=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator/=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator%=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator<<=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator>>=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator&=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator|=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator^=(const Src rhs);
+
+ constexpr CheckedNumeric operator-() const {
+ return CheckedNumeric<T>(
+ NegateWrapper(state_.value()),
+ IsValid() &&
+ (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ NegateWrapper(state_.value()) !=
+ std::numeric_limits<T>::lowest()));
}
- CheckedNumeric Abs() const {
- RangeConstraint validity;
- T value = CheckedAbs(state_.value(), &validity);
- // Absolute value is always valid for floating point.
- if (std::numeric_limits<T>::is_iec559)
- return CheckedNumeric<T>(value);
+ constexpr CheckedNumeric operator~() const {
+ return CheckedNumeric<decltype(InvertWrapper(T()))>(
+ InvertWrapper(state_.value()), IsValid());
+ }
+
+ constexpr CheckedNumeric Abs() const {
+ return CheckedNumeric<T>(
+ AbsWrapper(state_.value()),
+ IsValid() &&
+ (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ AbsWrapper(state_.value()) != std::numeric_limits<T>::lowest()));
+ }
- validity = GetRangeConstraint(state_.validity() | validity);
- return CheckedNumeric<T>(value, validity);
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+ const U rhs) const {
+ using R = typename UnderlyingType<U>::type;
+ using result_type = typename MathWrapper<CheckedMaxOp, T, U>::type;
+ // TODO(jschuh): This can be converted to the MathOp version and remain
+ // constexpr once we have C++14 support.
+ return CheckedNumeric<result_type>(
+ static_cast<result_type>(
+ IsGreater<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+ ? state_.value()
+ : Wrapper<U>::value(rhs)),
+ state_.is_valid() && Wrapper<U>::is_valid(rhs));
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+ const U rhs) const {
+ using R = typename UnderlyingType<U>::type;
+ using result_type = typename MathWrapper<CheckedMinOp, T, U>::type;
+ // TODO(jschuh): This can be converted to the MathOp version and remain
+ // constexpr once we have C++14 support.
+ return CheckedNumeric<result_type>(
+ static_cast<result_type>(
+ IsLess<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+ ? state_.value()
+ : Wrapper<U>::value(rhs)),
+ state_.is_valid() && Wrapper<U>::is_valid(rhs));
}
// This function is available only for integral types. It returns an unsigned
// integer of the same width as the source type, containing the absolute value
// of the source, and properly handling signed min.
- CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
+ constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+ UnsignedAbs() const {
return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
- CheckedUnsignedAbs(state_.value()), state_.validity());
+ SafeUnsignedAbs(state_.value()), state_.is_valid());
}
CheckedNumeric& operator++() {
@@ -183,126 +287,221 @@ class CheckedNumeric {
return value;
}
- // These static methods behave like a convenience cast operator targeting
- // the desired CheckedNumeric type. As an optimization, a reference is
- // returned when Src is the same type as T.
- template <typename Src>
- static CheckedNumeric<T> cast(
- Src u,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0) {
- return u;
- }
+ // These perform the actual math operations on the CheckedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+ static CheckedNumeric MathOp(const L lhs, const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ T result = 0;
+ bool is_valid =
+ Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+ return CheckedNumeric<T>(result, is_valid);
+ };
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ CheckedNumeric& MathOp(const R rhs) {
+ using Math = typename MathWrapper<M, T, R>::math;
+ T result = 0; // Using T as the destination saves a range check.
+ bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+ *this = CheckedNumeric<T>(result, is_valid);
+ return *this;
+ };
+
+ private:
+ CheckedNumericState<T> state_;
template <typename Src>
- static CheckedNumeric<T> cast(
- const CheckedNumeric<Src>& u,
- typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
- return u;
- }
+ constexpr CheckedNumeric(Src value, bool is_valid)
+ : state_(value, is_valid) {}
- static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+ // These wrappers allow us to handle state the same way for both
+ // CheckedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper {
+ static constexpr bool is_valid(Src) { return true; }
+ static constexpr Src value(Src value) { return value; }
+ };
- private:
- template <typename NumericType>
- struct UnderlyingType {
- using type = NumericType;
+ template <typename Src>
+ struct Wrapper<CheckedNumeric<Src>> {
+ static constexpr bool is_valid(const CheckedNumeric<Src> v) {
+ return v.IsValid();
+ }
+ static constexpr Src value(const CheckedNumeric<Src> v) {
+ return v.state_.value();
+ }
};
- template <typename NumericType>
- struct UnderlyingType<CheckedNumeric<NumericType>> {
- using type = NumericType;
+ template <typename Src>
+ struct Wrapper<StrictNumeric<Src>> {
+ static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+ static constexpr Src value(const StrictNumeric<Src> v) {
+ return static_cast<Src>(v);
+ }
};
+};
- CheckedNumericState<T> state_;
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
+ return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(
+ const CheckedNumeric<Src> value) {
+ return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(
+ const CheckedNumeric<Src> value,
+ const Default default_value) {
+ return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// These variadic templates work out the return types.
+// TODO(jschuh): Rip all this out once we have C++14 non-trailing auto support.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+struct ResultType;
+
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+struct ResultType<M, L, R> {
+ using type = typename MathWrapper<M, L, R>::type;
};
-// This is the boilerplate for the standard arithmetic operator overloads. A
-// macro isn't the prettiest solution, but it beats rewriting these five times.
-// Some details worth noting are:
-// * We apply the standard arithmetic promotions.
-// * We skip range checks for floating points.
-// * We skip range checks for destination integers with sufficient range.
-// TODO(jschuh): extract these out into templates.
-#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
- /* Binary arithmetic operator for CheckedNumerics of the same type. */ \
- template <typename T> \
- CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \
- const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T>::type Promotion; \
- /* Floating point always takes the fast path */ \
- if (std::numeric_limits<T>::is_iec559) \
- return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
- if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \
- return CheckedNumeric<Promotion>( \
- lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
- GetRangeConstraint(rhs.validity() | lhs.validity())); \
- RangeConstraint validity = RANGE_VALID; \
- T result = static_cast<T>( \
- Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \
- static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \
- return CheckedNumeric<Promotion>( \
- result, \
- GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \
- } \
- /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
- template <typename T> \
- template <typename Src> \
- CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \
- *this = CheckedNumeric<T>::cast(*this) \
- OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs); \
- return *this; \
- } \
- /* Binary arithmetic operator for CheckedNumeric of different type. */ \
- template <typename T, typename Src> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>( \
- lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
- GetRangeConstraint(rhs.validity() | lhs.validity())); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
- } \
- /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
- template <typename T, typename Src, \
- typename std::enable_if<std::is_arithmetic<Src>::value>::type* = \
- nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- const CheckedNumeric<T>& lhs, Src rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, \
- lhs.validity()); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
- } \
- /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
- template <typename T, typename Src, \
- typename std::enable_if<std::is_arithmetic<Src>::value>::type* = \
- nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- Src lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), \
- rhs.validity()); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
- }
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+struct ResultType {
+ using type =
+ typename ResultType<M, typename ResultType<M, L, R>::type, Args...>::type;
+};
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+// Convenience wrapper to return a new CheckedNumeric from the provided
+// arithmetic or CheckedNumeric type.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
+ const T value) {
+ return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+CheckedNumeric<typename MathWrapper<M, L, R>::type> ChkMathOp(const L lhs,
+ const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+ rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+CheckedNumeric<typename ResultType<M, L, R, Args...>::type>
+ChkMathOp(const L lhs, const R rhs, const Args... args) {
+ auto tmp = ChkMathOp<M>(lhs, rhs);
+ return tmp.IsValid() ? ChkMathOp<M>(tmp, args...)
+ : decltype(ChkMathOp<M>(tmp, args...))(tmp);
+};
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME) \
+ template <typename L, typename R, typename... Args> \
+ CheckedNumeric<typename ResultType<Checked##NAME##Op, L, R, Args...>::type> \
+ Check##NAME(const L lhs, const R rhs, const Args... args) { \
+ return ChkMathOp<Checked##NAME##Op, L, R, Args...>(lhs, rhs, args...); \
+ }
+
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
+ /* Binary arithmetic operator for all CheckedNumeric operations. */ \
+ template <typename L, typename R, \
+ typename std::enable_if<IsCheckedOp<L, R>::value>::type* = \
+ nullptr> \
+ CheckedNumeric<typename MathWrapper<Checked##NAME##Op, L, R>::type> \
+ operator OP(const L lhs, const R rhs) { \
+ return decltype(lhs OP rhs)::template MathOp<Checked##NAME##Op>(lhs, rhs); \
+ } \
+ /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
+ template <typename L> \
+ template <typename R> \
+ CheckedNumeric<L>& CheckedNumeric<L>::operator COMPOUND_OP(const R rhs) { \
+ return MathOp<Checked##NAME##Op>(rhs); \
+ } \
+ /* Variadic arithmetic functions that return CheckedNumeric. */ \
+ BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME)
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Min)
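+
+// An illustrative sketch (|a|, |b|, |c| are hypothetical): the variadic forms
+// fold left and carry validity through, so CheckAdd(a, b, c) computes
+// ((a + b) + c) and needs only one validity check at the end:
+//   int result = 0;
+//   if (!CheckAdd(a, b, c).AssignIfValid(&result))
+//     ...  // Some intermediate or final sum overflowed.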
+
+#undef BASE_NUMERIC_ARITHMETIC_VARIADIC
#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L* operator+(L* lhs, const StrictNumeric<R> rhs) {
+ uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
+ CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L*>(result);
+}
+
+template <typename L, typename R>
+L* operator-(L* lhs, const StrictNumeric<R> rhs) {
+ uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
+ CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L*>(result);
+}
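+
+// An illustrative sketch (|buf| and |untrusted_len| are hypothetical): pointer
+// arithmetic against a StrictNumeric result CHECKs rather than wrapping:
+//   uint8_t* end = buf + CheckAdd(untrusted_len, 1).ValueOrDie();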
+
} // namespace internal
using internal::CheckedNumeric;
+using internal::IsValidForType;
+using internal::ValueOrDieForType;
+using internal::ValueOrDefaultForType;
+using internal::MakeCheckedNum;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckAdd;
+using internal::CheckSub;
+using internal::CheckMul;
+using internal::CheckDiv;
+using internal::CheckMod;
+using internal::CheckLsh;
+using internal::CheckRsh;
+using internal::CheckAnd;
+using internal::CheckOr;
+using internal::CheckXor;
} // namespace base
diff --git a/base/numerics/safe_math_impl.h b/base/numerics/safe_math_impl.h
index f214f3fec2..a224f692dd 100644
--- a/base/numerics/safe_math_impl.h
+++ b/base/numerics/safe_math_impl.h
@@ -23,348 +23,486 @@ namespace internal {
// but it may not be fast. This code could be split based on
// platform/architecture and replaced with potentially faster implementations.
-// Integer promotion templates used by the portable checked integer arithmetic.
-template <size_t Size, bool IsSigned>
-struct IntegerForSizeAndSign;
-template <>
-struct IntegerForSizeAndSign<1, true> {
- typedef int8_t type;
-};
-template <>
-struct IntegerForSizeAndSign<1, false> {
- typedef uint8_t type;
-};
-template <>
-struct IntegerForSizeAndSign<2, true> {
- typedef int16_t type;
-};
-template <>
-struct IntegerForSizeAndSign<2, false> {
- typedef uint16_t type;
-};
-template <>
-struct IntegerForSizeAndSign<4, true> {
- typedef int32_t type;
-};
-template <>
-struct IntegerForSizeAndSign<4, false> {
- typedef uint32_t type;
-};
-template <>
-struct IntegerForSizeAndSign<8, true> {
- typedef int64_t type;
-};
-template <>
-struct IntegerForSizeAndSign<8, false> {
- typedef uint64_t type;
-};
-
-// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
-// support 128-bit math, then the ArithmeticPromotion template below will need
-// to be updated (or more likely replaced with a decltype expression).
-
-template <typename Integer>
-struct UnsignedIntegerForSize {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
-};
-
-template <typename Integer>
-struct SignedIntegerForSize {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
-};
-
-template <typename Integer>
-struct TwiceWiderInteger {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<
- sizeof(Integer) * 2,
- std::numeric_limits<Integer>::is_signed>::type>::type type;
-};
-
-template <typename Integer>
-struct PositionOfSignBit {
- static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
- size_t>::type value =
- CHAR_BIT * sizeof(Integer) - 1;
-};
-
// This is used for UnsignedAbs, where we need to support floating-point
// template instantiations even though we don't actually support the operations.
-// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs,
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric,
- bool IsInteger = std::numeric_limits<Numeric>::is_integer,
- bool IsFloat = std::numeric_limits<Numeric>::is_iec559>
+ bool IsInteger = std::is_integral<Numeric>::value,
+ bool IsFloat = std::is_floating_point<Numeric>::value>
struct UnsignedOrFloatForSize;
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, true, false> {
- typedef typename UnsignedIntegerForSize<Numeric>::type type;
+ using type = typename std::make_unsigned<Numeric>::type;
};
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, false, true> {
- typedef Numeric type;
+ using type = Numeric;
};
-// Helper templates for integer manipulations.
+// Probe for builtin math overflow support on Clang and version check on GCC.
+#if defined(__has_builtin)
+#define USE_OVERFLOW_BUILTINS (__has_builtin(__builtin_add_overflow))
+#elif defined(__GNUC__)
+#define USE_OVERFLOW_BUILTINS (__GNUC__ >= 5)
+#else
+#define USE_OVERFLOW_BUILTINS (0)
+#endif
template <typename T>
-constexpr bool HasSignBit(T x) {
- // Cast to unsigned since right shift on signed is undefined.
- return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
- PositionOfSignBit<T>::value);
-}
-
-// This wrapper undoes the standard integer promotions.
-template <typename T>
-constexpr T BinaryComplement(T x) {
- return static_cast<T>(~x);
-}
-
-// Here are the actual portable checked integer math implementations.
-// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
-// way to coalesce things into the CheckedNumericState specializations below.
-
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
-CheckedAdd(T x, T y, RangeConstraint* validity) {
+bool CheckedAddImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
- typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+ *result = static_cast<T>(uresult);
// Addition is valid if the sign of (x + y) is equal to either that of x or
// that of y.
- if (std::numeric_limits<T>::is_signed) {
- if (HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
- *validity = RANGE_VALID;
- } else { // Direction of wrap is inverse of result sign.
- *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) >= 0
+ : uresult >= uy; // Unsigned is either valid or underflow.
+}
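+
+// An illustrative walk-through of the sign test above (not from the original
+// source): for int8_t, 100 + 100 wraps to uresult = 200 (-56 as int8_t); both
+// (uresult ^ ux) and (uresult ^ uy) then have the sign bit set, so the signed
+// AND is negative and the addition is correctly reported as invalid.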
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp {};
+
+template <typename T, typename U>
+struct CheckedAddOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+ return !__builtin_add_overflow(x, y, result);
+#else
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedAddImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
}
- } else { // Unsigned is either valid or overflow.
- *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+#endif
}
- return static_cast<T>(uresult);
-}
+};
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
-CheckedSub(T x, T y, RangeConstraint* validity) {
+bool CheckedSubImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x-y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
- typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+ *result = static_cast<T>(uresult);
// Subtraction is valid if either x and y have the same sign, or (x-y) and x
// have the same sign.
- if (std::numeric_limits<T>::is_signed) {
- if (HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
- *validity = RANGE_VALID;
- } else { // Direction of wrap is inverse of result sign.
- *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) >= 0
+ : x >= y;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp {};
+
+template <typename T, typename U>
+struct CheckedSubOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+ return !__builtin_sub_overflow(x, y, result);
+#else
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedSubImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
}
- } else { // Unsigned is either valid or underflow.
- *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+#endif
}
- return static_cast<T>(uresult);
-}
+};
-// Integer multiplication is a bit complicated. In the fast case we just
-// we just promote to a twice wider type, and range check the result. In the
-// slow case we need to manually check that the result won't be truncated by
-// checking with division against the appropriate bound.
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- sizeof(T) * 2 <= sizeof(uintmax_t),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
- typedef typename TwiceWiderInteger<T>::type IntermediateType;
- IntermediateType tmp =
- static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
- *validity = DstRangeRelationToSrcRange<T>(tmp);
- return static_cast<T>(tmp);
+bool CheckedMulImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x*y is potentially undefined if we have a signed type,
+ // we compute it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = SafeUnsignedAbs(x);
+ const UnsignedDst uy = SafeUnsignedAbs(y);
+ UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+ const bool is_negative =
+ std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ *result = is_negative ? 0 - uresult : uresult;
+ // We have a fast out for unsigned identity or zero on the second operand.
+ // After that it's an unsigned overflow check on the absolute value, with
+ // a +1 bound for a negative result.
+ return uy <= UnsignedDst(!std::is_signed<T>::value || is_negative) ||
+ ux <= (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy;
}
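+
+// An illustrative walk-through of the bound above (not from the original
+// source): for int8_t, (-16) * 8 gives ux = 16, uy = 8, is_negative = true, so
+// the bound is (127 + 1) / 8 = 16 and ux <= 16 holds; the result -128
+// (INT8_MIN) is correctly accepted, while (-16) * 9 fails the bound.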
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed &&
- (sizeof(T) * 2 > sizeof(uintmax_t)),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
- // If either side is zero then the result will be zero.
- if (!x || !y) {
- *validity = RANGE_VALID;
- return static_cast<T>(0);
-
- } else if (x > 0) {
- if (y > 0)
- *validity =
- x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
- else
- *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
- : RANGE_UNDERFLOW;
-
- } else {
- if (y > 0)
- *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
- : RANGE_UNDERFLOW;
- else
- *validity =
- y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp {};
+
+template <typename T, typename U>
+struct CheckedMulOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+#if defined(__clang__)
+ // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+ // support full-width, mixed-sign multiply builtins.
+ // https://crbug.com/613003
+ static const bool kUseMaxInt =
+ // Narrower type than uintptr_t is always safe.
+ std::numeric_limits<__typeof__(x * y)>::digits <
+ std::numeric_limits<intptr_t>::digits ||
+ // Safe for intptr_t and uintptr_t if the sign matches.
+ (IntegerBitsPlusSign<__typeof__(x * y)>::value ==
+ IntegerBitsPlusSign<intptr_t>::value &&
+ std::is_signed<T>::value == std::is_signed<U>::value);
+#else
+ static const bool kUseMaxInt = true;
+#endif
+ if (kUseMaxInt)
+ return !__builtin_mul_overflow(x, y, result);
+#endif
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedMulImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
+};
- return static_cast<T>(x * y);
-}
+// Avoid polluting the namespace once we're done with the macro.
+#undef USE_OVERFLOW_BUILTINS
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed &&
- (sizeof(T) * 2 > sizeof(uintmax_t)),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
- *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
- ? RANGE_VALID
- : RANGE_OVERFLOW;
- return static_cast<T>(x * y);
+bool CheckedDivImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ if (y && (!std::is_signed<T>::value ||
+ x != std::numeric_limits<T>::lowest() || y != static_cast<T>(-1))) {
+ *result = x / y;
+ return true;
+ }
+ return false;
}
-// Division just requires a check for an invalid negation on signed min/-1.
-template <typename T>
-T CheckedDiv(T x,
- T y,
- RangeConstraint* validity,
- typename std::enable_if<std::numeric_limits<T>::is_integer,
- int>::type = 0) {
- if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
- y == static_cast<T>(-1)) {
- *validity = RANGE_OVERFLOW;
- return std::numeric_limits<T>::min();
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp {};
+
+template <typename T, typename U>
+struct CheckedDivOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+ is_valid &= CheckedDivImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
-
- *validity = RANGE_VALID;
- return static_cast<T>(x / y);
-}
+};
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- T>::type
-CheckedMod(T x, T y, RangeConstraint* validity) {
- *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
- return static_cast<T>(x % y);
+bool CheckedModImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ if (y > 0) {
+ *result = static_cast<T>(x % y);
+ return true;
+ }
+ return false;
}
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- T>::type
-CheckedMod(T x, T y, RangeConstraint* validity) {
- *validity = RANGE_VALID;
- return static_cast<T>(x % y);
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp {};
+
+template <typename T, typename U>
+struct CheckedModOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ bool is_valid = CheckedModImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- T>::type
-CheckedNeg(T value, RangeConstraint* validity) {
- *validity =
- value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
- // The negation of signed min is min, so catch that one.
- return static_cast<T>(-value);
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp {};
+
+// Left shift. Shift counts less than 0 or greater than or equal to the width
+// of the promoted type are undefined, as are shifts of negative values.
+// Otherwise the shift is defined when the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V>
+ static bool Do(T x, U shift, V* result) {
+ using ShiftType = typename std::make_unsigned<T>::type;
+ static const ShiftType kBitWidth = IntegerBitsPlusSign<T>::value;
+ const ShiftType real_shift = static_cast<ShiftType>(shift);
+ // Signed shift is not legal on negative values.
+ if (!IsValueNegative(x) && real_shift < kBitWidth) {
+ // Just use a multiplication because it's easy.
+ // TODO(jschuh): This could probably be made more efficient.
+ if (!std::is_signed<T>::value || real_shift != kBitWidth - 1)
+ return CheckedMulOp<T, T>::Do(x, static_cast<T>(1) << shift, result);
+ return !x; // Special case zero for a full width signed shift.
+ }
+ return false;
+ }
+};
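+
+// Illustrative only: with int operands, CheckLsh(3, 30) is rejected because it
+// is evaluated as 3 * (1 << 30), which overflows int, and CheckLsh(1, 31) is
+// likewise invalid: only zero may be shifted by width - 1 on a signed type.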
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- T>::type
-CheckedNeg(T value, RangeConstraint* validity) {
- // The only legal unsigned negation is zero.
- *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
- return static_cast<T>(
- -static_cast<typename SignedIntegerForSize<T>::type>(value));
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp {};
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V = result_type>
+ static bool Do(T x, U shift, V* result) {
+    // Use the type conversion to push negative values out of range.
+ using ShiftType = typename std::make_unsigned<T>::type;
+ if (static_cast<ShiftType>(shift) < IntegerBitsPlusSign<T>::value) {
+ T tmp = x >> shift;
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+ return false;
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- T>::type
-CheckedAbs(T value, RangeConstraint* validity) {
- *validity =
- value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
- return static_cast<T>(std::abs(value));
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- T>::type
-CheckedAbs(T value, RangeConstraint* validity) {
- // T is unsigned, so |value| must already be positive.
- *validity = RANGE_VALID;
- return value;
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- typename UnsignedIntegerForSize<T>::type>::type
-CheckedUnsignedAbs(T value) {
- typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
- return value == std::numeric_limits<T>::min()
- ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
- : static_cast<UnsignedT>(std::abs(value));
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- T>::type
-CheckedUnsignedAbs(T value) {
- // T is unsigned, so |value| must already be positive.
- return static_cast<T>(value);
-}
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp {};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ *result = IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
+ return true;
+ }
+};
-// These are the floating point stubs that the compiler needs to see. Only the
-// negation operation is ever called.
-#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
- template <typename T> \
- typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
- Checked##NAME(T, T, RangeConstraint*) { \
- NOTREACHED(); \
- return static_cast<T>(0); \
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp {};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ *result = IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
+ return true;
}
+};
-BASE_FLOAT_ARITHMETIC_STUBS(Add)
-BASE_FLOAT_ARITHMETIC_STUBS(Sub)
-BASE_FLOAT_ARITHMETIC_STUBS(Mul)
-BASE_FLOAT_ARITHMETIC_STUBS(Div)
-BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op< \
+ T, U, typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static bool Do(T x, U y, V* result) { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ Promotion presult = x OP y; \
+ *result = static_cast<V>(presult); \
+ return IsValueInRangeForNumericType<V>(presult); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ // This will compile to a NEG on Intel, and is normal negation on ARM.
+ return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+}
-#undef BASE_FLOAT_ARITHMETIC_STUBS
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+ return -value;
+}
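+
+// Illustrative only: NegateWrapper(std::numeric_limits<int>::min()) wraps back
+// to INT_MIN without undefined behavior; callers such as operator-() above
+// compare the result against lowest() and mark that case invalid.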
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
- T value,
- RangeConstraint*) {
- return static_cast<T>(-value);
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
+ return ~value;
}
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
- T value,
- RangeConstraint*) {
- return static_cast<T>(std::abs(value));
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+ return static_cast<T>(SafeUnsignedAbs(value));
+}
+
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+ return value < 0 ? -value : value;
}
// Floats carry around their validity state with them, but integers do not. So,
@@ -379,10 +517,10 @@ enum NumericRepresentation {
template <typename NumericType>
struct GetNumericRepresentation {
static const NumericRepresentation value =
- std::numeric_limits<NumericType>::is_integer
+ std::is_integral<NumericType>::value
? NUMERIC_INTEGER
- : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
- : NUMERIC_UNKNOWN);
+ : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
};
template <typename T, NumericRepresentation type =
@@ -393,41 +531,48 @@ class CheckedNumericState {};
template <typename T>
class CheckedNumericState<T, NUMERIC_INTEGER> {
private:
+  // is_valid_ precedes value_ because member initializers in the constructors
+ // are evaluated in field order, and is_valid_ must be read when initializing
+ // value_.
+ bool is_valid_;
T value_;
- RangeConstraint validity_ : CHAR_BIT; // Actually requires only two bits.
+
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrZero(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (std::is_integral<SrcType>::value || is_valid)
+ ? static_cast<T>(value)
+ : static_cast<T>(0);
+ }
public:
template <typename Src, NumericRepresentation type>
friend class CheckedNumericState;
- CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+ constexpr CheckedNumericState() : is_valid_(true), value_(0) {}
template <typename Src>
- CheckedNumericState(Src value, RangeConstraint validity)
- : value_(static_cast<T>(value)),
- validity_(GetRangeConstraint(validity |
- DstRangeRelationToSrcRange<T>(value))) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
}
// Copy constructor.
template <typename Src>
- CheckedNumericState(const CheckedNumericState<Src>& rhs)
- : value_(static_cast<T>(rhs.value())),
- validity_(GetRangeConstraint(
- rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : is_valid_(rhs.IsValid()),
+ value_(WellDefinedConversionOrZero(rhs.value(), is_valid_)) {}
template <typename Src>
- explicit CheckedNumericState(
- Src value,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0)
- : value_(static_cast<T>(value)),
- validity_(DstRangeRelationToSrcRange<T>(value)) {}
-
- RangeConstraint validity() const { return validity_; }
- T value() const { return value_; }
+ constexpr explicit CheckedNumericState(Src value)
+ : is_valid_(IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {}
+
+ constexpr bool is_valid() const { return is_valid_; }
+ constexpr T value() const { return value_; }
};
// Floating points maintain their own validity, but need translation wrappers.
@@ -436,94 +581,58 @@ class CheckedNumericState<T, NUMERIC_FLOATING> {
private:
T value_;
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrNaN(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED ||
+ is_valid)
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
+ }
+
public:
template <typename Src, NumericRepresentation type>
friend class CheckedNumericState;
- CheckedNumericState() : value_(0.0) {}
+ constexpr CheckedNumericState() : value_(0.0) {}
template <typename Src>
- CheckedNumericState(
- Src value,
- RangeConstraint /*validity*/,
- typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
- 0) {
- switch (DstRangeRelationToSrcRange<T>(value)) {
- case RANGE_VALID:
- value_ = static_cast<T>(value);
- break;
-
- case RANGE_UNDERFLOW:
- value_ = -std::numeric_limits<T>::infinity();
- break;
-
- case RANGE_OVERFLOW:
- value_ = std::numeric_limits<T>::infinity();
- break;
-
- case RANGE_INVALID:
- value_ = std::numeric_limits<T>::quiet_NaN();
- break;
-
- default:
- NOTREACHED();
- }
- }
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : value_(WellDefinedConversionOrNaN(value, is_valid)) {}
template <typename Src>
- explicit CheckedNumericState(
- Src value,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0)
- : value_(static_cast<T>(value)) {}
+ constexpr explicit CheckedNumericState(Src value)
+ : value_(WellDefinedConversionOrNaN(
+ value,
+ IsValueInRangeForNumericType<T>(value))) {}
// Copy constructor.
template <typename Src>
- CheckedNumericState(const CheckedNumericState<Src>& rhs)
- : value_(static_cast<T>(rhs.value())) {}
-
- RangeConstraint validity() const {
- return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
- value_ >= -std::numeric_limits<T>::max());
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(WellDefinedConversionOrNaN(
+ rhs.value(),
+ rhs.is_valid() && IsValueInRangeForNumericType<T>(rhs.value()))) {}
+
+ constexpr bool is_valid() const {
+ // Written this way because std::isfinite is not reliably constexpr.
+ // TODO(jschuh): Fix this if the libraries ever get fixed.
+ return value_ <= std::numeric_limits<T>::max() &&
+ value_ >= std::numeric_limits<T>::lowest();
}
- T value() const { return value_; }
-};
-
-// For integers less than 128-bit and floats 32-bit or larger, we have the type
-// with the larger maximum exponent take precedence.
-enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
-
-template <typename Lhs,
- typename Rhs = Lhs,
- ArithmeticPromotionCategory Promotion =
- (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
- ? LEFT_PROMOTION
- : RIGHT_PROMOTION>
-struct ArithmeticPromotion;
-
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
- typedef Lhs type;
-};
-
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
- typedef Rhs type;
+ constexpr T value() const { return value_; }
};
-// We can statically check if operations on the provided types can wrap, so we
-// can skip the checked operations if they're not needed. So, for an integer we
-// care if the destination type preserves the sign and is twice the width of
-// the source.
-template <typename T, typename Lhs, typename Rhs>
-struct IsIntegerArithmeticSafe {
- static const bool value = !std::numeric_limits<T>::is_iec559 &&
- StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Lhs)) &&
- StaticDstRangeRelationToSrcRange<T, Rhs>::value !=
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Rhs));
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+struct MathWrapper {
+ using math = M<typename UnderlyingType<L>::type,
+ typename UnderlyingType<R>::type,
+ void>;
+ using type = typename math::result_type;
};
} // namespace internal
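For context on the rewrite above: an out-of-range floating-point conversion now lands deterministically on quiet_NaN() instead of invoking undefined behavior, and validity is recomputed from the stored value. A minimal sketch against the public CheckedNumeric API (not part of the patch; assumes only safe_math.h):

    #include <cassert>
    #include <limits>
    #include "base/numerics/safe_math.h"

    void FloatValidityDemo() {
      // double's max cannot be represented as a float: the state becomes
      // invalid and the stored value is quiet_NaN(), never undefined behavior.
      base::CheckedNumeric<float> bad = std::numeric_limits<double>::max();
      assert(!bad.IsValid());
      // In-range values convert normally.
      base::CheckedNumeric<float> ok = 1.5;
      assert(ok.IsValid());
    }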
diff --git a/base/numerics/safe_numerics_unittest.cc b/base/numerics/safe_numerics_unittest.cc
index 4be7ab59d7..ec6d0037c9 100644
--- a/base/numerics/safe_numerics_unittest.cc
+++ b/base/numerics/safe_numerics_unittest.cc
@@ -9,8 +9,10 @@
#include <type_traits>
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
+#include "base/test/gtest_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -20,33 +22,44 @@
using std::numeric_limits;
using base::CheckedNumeric;
+using base::IsValidForType;
+using base::ValueOrDieForType;
+using base::ValueOrDefaultForType;
+using base::MakeCheckedNum;
+using base::CheckMax;
+using base::CheckMin;
+using base::CheckAdd;
+using base::CheckSub;
+using base::CheckMul;
+using base::CheckDiv;
+using base::CheckMod;
+using base::CheckLsh;
+using base::CheckRsh;
using base::checked_cast;
using base::IsValueInRangeForNumericType;
using base::IsValueNegative;
using base::SizeT;
using base::StrictNumeric;
+using base::MakeStrictNum;
using base::saturated_cast;
using base::strict_cast;
using base::internal::MaxExponent;
-using base::internal::RANGE_VALID;
-using base::internal::RANGE_INVALID;
-using base::internal::RANGE_OVERFLOW;
-using base::internal::RANGE_UNDERFLOW;
-using base::internal::SignedIntegerForSize;
-
-// These tests deliberately cause arithmetic overflows. If the compiler is
-// aggressive enough, it can const fold these overflows. Disable warnings about
-// overflows for const expressions.
+using base::internal::IntegerBitsPlusSign;
+using base::internal::RangeCheck;
+
+// These tests deliberately cause arithmetic boundary errors. If the compiler
+// is aggressive enough, it can detect these errors at compile time, so we
+// disable the relevant warnings.
#if defined(OS_WIN)
-#pragma warning(disable:4756)
+#pragma warning(disable : 4756) // Arithmetic overflow.
+#pragma warning(disable : 4293) // Invalid shift.
#endif
// This is a helper function for finding the maximum value in Src that can be
// wholly represented as the destination floating-point type.
template <typename Dst, typename Src>
Dst GetMaxConvertibleToFloat() {
- typedef numeric_limits<Dst> DstLimits;
- typedef numeric_limits<Src> SrcLimits;
+ using DstLimits = numeric_limits<Dst>;
+ using SrcLimits = numeric_limits<Src>;
static_assert(SrcLimits::is_specialized, "Source must be numeric.");
static_assert(DstLimits::is_specialized, "Destination must be numeric.");
CHECK(DstLimits::is_iec559);
@@ -61,20 +74,113 @@ Dst GetMaxConvertibleToFloat() {
return static_cast<Dst>(max);
}
+namespace base {
+namespace internal {
+
+// Test the corner-case promotions used by the checked arithmetic.
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int16_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int16_t>::value, "");
+static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int8_t>::value, "");
+static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
+static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
+static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
+static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
+static_assert(
+ std::is_same<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
+ int32_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
+ int64_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
+ intmax_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
+ uintmax_t>::value,
+ "");
+static_assert(FastIntegerArithmeticPromotion<int16_t, int8_t>::is_contained,
+ "");
+static_assert(FastIntegerArithmeticPromotion<int32_t, uint32_t>::is_contained,
+ "");
+static_assert(!FastIntegerArithmeticPromotion<intmax_t, int8_t>::is_contained,
+ "");
+static_assert(!FastIntegerArithmeticPromotion<uintmax_t, int8_t>::is_contained,
+ "");
+
+template <typename U>
+U GetNumericValueForTest(const CheckedNumeric<U>& src) {
+ return src.state_.value();
+}
+} // namespace internal
+} // namespace base
+
+using base::internal::GetNumericValueForTest;
+
+// Logs the ValueOrDie() failure instead of crashing.
+struct LogOnFailure {
+ template <typename T>
+ static T HandleFailure() {
+ LOG(WARNING) << "ValueOrDie() failed unexpectedly.";
+ return T();
+ }
+};
+
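ValueOrDie() takes the failure handler as its second template parameter, which is how LogOnFailure above gets wired into TEST_EXPECTED_VALUE. A hedged sketch of the same hook in ordinary code (ReturnMinusOne and SafeSum are illustrative names, not part of the patch):

    // Returns -1 instead of CHECK-crashing when the arithmetic fails.
    struct ReturnMinusOne {
      template <typename T>
      static T HandleFailure() { return T(-1); }
    };

    int SafeSum(int a, int b) {
      return (base::CheckedNumeric<int>(a) + b)
          .ValueOrDie<int, ReturnMinusOne>();
    }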
// Helper macros to wrap displaying the conversion types and line numbers.
#define TEST_EXPECTED_VALIDITY(expected, actual) \
- EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid()) \
- << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
- << " on line " << line;
+ EXPECT_EQ(expected, (actual).template Cast<Dst>().IsValid()) \
+ << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
+ << dst << " on line " << line
#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
-#define TEST_EXPECTED_VALUE(expected, actual) \
- EXPECT_EQ(static_cast<Dst>(expected), \
- CheckedNumeric<Dst>(actual).ValueUnsafe()) \
- << "Result test: Value " << +((actual).ValueUnsafe()) << " as " << dst \
- << " on line " << line;
+// We have to handle promotions, so infer the underlying type from |actual| below.
+#define TEST_EXPECTED_VALUE(expected, actual) \
+ EXPECT_EQ(static_cast<typename std::decay<decltype(actual)>::type::type>( \
+ expected), \
+ ((actual) \
+ .template ValueOrDie< \
+ typename std::decay<decltype(actual)>::type::type, \
+ LogOnFailure>())) \
+ << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
+ << dst << " on line " << line
+
+// Test the simple pointer arithmetic overrides.
+template <typename Dst>
+void TestStrictPointerMath() {
+ Dst dummy_value = 0;
+ Dst* dummy_ptr = &dummy_value;
+ static const Dst kDummyOffset = 2; // Don't want to go too far.
+ EXPECT_EQ(dummy_ptr + kDummyOffset,
+ dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_EQ(dummy_ptr - kDummyOffset,
+ dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_NE(dummy_ptr, dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_NE(dummy_ptr, dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_DEATH_IF_SUPPORTED(
+ dummy_ptr + StrictNumeric<size_t>(std::numeric_limits<size_t>::max()),
+ "");
+}
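TestStrictPointerMath exercises the pointer-arithmetic overloads: a StrictNumeric offset participates directly in raw pointer math, and the address computation is CHECKed rather than allowed to wrap. A minimal usage sketch, assuming those overloads:

    void PointerMathDemo() {
      char buffer[16] = {};
      char* p = buffer;
      // In-range offsets behave like ordinary pointer arithmetic...
      char* q = p + base::StrictNumeric<size_t>(8);
      (void)q;
      // ...while an offset that overflows the address computation
      // CHECK-crashes instead of wrapping (the EXPECT_DEATH case above).
    }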
// Signed integer arithmetic.
template <typename Dst>
@@ -84,34 +190,52 @@ static void TestSpecializedArithmetic(
typename std::enable_if<numeric_limits<Dst>::is_integer &&
numeric_limits<Dst>::is_signed,
int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::lowest()));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(-DstLimits::max()).Abs());
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
- -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+ DstLimits::lowest());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) - -1);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
- -DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::lowest());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) / -1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * -1);
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ CheckedNumeric<Dst>(DstLimits::lowest() + 1) * Dst(-1));
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ CheckedNumeric<Dst>(-1) * Dst(DstLimits::lowest() + 1));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ CheckedNumeric<Dst>(DstLimits::lowest()) * Dst(1));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ CheckedNumeric<Dst>(1) * Dst(DstLimits::lowest()));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).UnsignedAbs());
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -119,6 +243,30 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
CheckedNumeric<Dst> checked_dst = 1;
TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+  // Test that mod by 0 is avoided but returns an invalid result.
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+ // Test bit shifts.
+ volatile Dst negative_one = -1;
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+ << (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+ << IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+ TEST_EXPECTED_VALUE(
+ static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2),
+ CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2));
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0)
+ << (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+ TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+ IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_VALUE(
+ 0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+
+ TestStrictPointerMath<Dst>();
}
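To restate the shift rules this block encodes: a negative count, a count at or beyond the bit width, and shifting a set bit into or past the sign bit all invalidate the result rather than hitting undefined behavior. A small sketch with a concrete type (test-body style, not part of the patch):

    void ShiftRulesDemo() {
      base::CheckedNumeric<int32_t> one = 1;
      assert((one << 30).IsValid());   // Highest still-representable shift.
      assert(!(one << 31).IsValid());  // Would land on the sign bit.
      assert(!(one << -1).IsValid());  // Negative counts are rejected.
      assert(!(one >> 32).IsValid());  // Counts >= the bit width are rejected.
    }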
// Unsigned integer arithmetic.
@@ -129,24 +277,30 @@ static void TestSpecializedArithmetic(
typename std::enable_if<numeric_limits<Dst>::is_integer &&
!numeric_limits<Dst>::is_signed,
int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).UnsignedAbs());
TEST_EXPECTED_SUCCESS(
- CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
- std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
+ CheckedNumeric<typename std::make_signed<Dst>::type>(
+ std::numeric_limits<typename std::make_signed<Dst>::type>::lowest())
.UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -154,6 +308,49 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
CheckedNumeric<Dst> checked_dst = 1;
TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+  // Test that mod by 0 is avoided but returns an invalid result.
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+ // Test bit shifts.
+ volatile int negative_one = -1;
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+ << IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+ << IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+ TEST_EXPECTED_VALUE(
+ static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1),
+ CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+ TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+ IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_VALUE(
+ 0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) & 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ MakeCheckedNum(DstLimits::max()) & -1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 0);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) | 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) | 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ CheckedNumeric<Dst>(0) | static_cast<Dst>(-1));
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) ^ 1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) ^ 0);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) ^ 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) ^ 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ CheckedNumeric<Dst>(0) ^ static_cast<Dst>(-1));
+ TEST_EXPECTED_VALUE(DstLimits::max(), ~CheckedNumeric<Dst>(0));
+
+ TestStrictPointerMath<Dst>();
}
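The bitwise cases above all stay valid because the checked bitwise operators appear to work on the unsigned promotion of their operands and cannot themselves overflow (the exact promotion rules live in safe_math_impl.h). A minimal sketch in the same test-body style:

    void BitwiseDemo() {
      base::CheckedNumeric<uint8_t> mask = 0xF0;
      assert((mask & 0x0F).ValueOrDie() == 0);
      assert((mask | 0x0F).ValueOrDie() == 0xFF);
      assert((mask ^ 0xFF).ValueOrDie() == 0x0F);
    }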
// Floating point arithmetic.
@@ -162,32 +359,31 @@ void TestSpecializedArithmetic(
const char* dst,
int line,
typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
- -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+ DstLimits::lowest());
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
- -DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::lowest());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
DstLimits::max());
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
- EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
}
// Generic arithmetic tests.
template <typename Dst>
static void TestArithmetic(const char* dst, int line) {
- typedef numeric_limits<Dst> DstLimits;
+ using DstLimits = numeric_limits<Dst>;
EXPECT_EQ(true, CheckedNumeric<Dst>().IsValid());
EXPECT_EQ(false,
@@ -222,11 +418,13 @@ static void TestArithmetic(const char* dst, int line) {
TEST_EXPECTED_VALUE(1, checked_dst /= 1);
// Generic negation.
- TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
- TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
- TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
- TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
- -CheckedNumeric<Dst>(DstLimits::max()));
+ if (DstLimits::is_signed) {
+ TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
+ TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
+ TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
+ TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
+ -CheckedNumeric<Dst>(DstLimits::max()));
+ }
// Generic absolute value.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>().Abs());
@@ -237,32 +435,43 @@ static void TestArithmetic(const char* dst, int line) {
// Generic addition.
TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+ if (numeric_limits<Dst>::is_signed)
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + 1);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
DstLimits::max());
// Generic subtraction.
- TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
- TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
+ if (numeric_limits<Dst>::is_signed) {
+ TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
+ TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
+ } else {
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) - -1);
+ }
// Generic multiplication.
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>(1) * 1));
- TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
+ if (numeric_limits<Dst>::is_signed) {
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
+ TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
+ } else {
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) * -2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+ CheckedNumeric<uintmax_t>(-2));
+ }
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
DstLimits::max());
// Generic division.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
- TEST_EXPECTED_VALUE(DstLimits::min() / 2,
- CheckedNumeric<Dst>(DstLimits::min()) / 2);
+ TEST_EXPECTED_VALUE(DstLimits::lowest() / 2,
+ CheckedNumeric<Dst>(DstLimits::lowest()) / 2);
TEST_EXPECTED_VALUE(DstLimits::max() / 2,
CheckedNumeric<Dst>(DstLimits::max()) / 2);
@@ -304,28 +513,114 @@ enum NumericConversionType {
template <typename Dst, typename Src, NumericConversionType conversion>
struct TestNumericConversion {};
+enum RangeConstraint {
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+ RANGE_UNDERFLOW = 0x1, // Value would underflow.
+ RANGE_OVERFLOW = 0x2, // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// These are some wrappers to make the tests a bit cleaner.
+constexpr RangeConstraint RangeCheckToEnum(const RangeCheck constraint) {
+ return static_cast<RangeConstraint>(
+ static_cast<int>(constraint.IsOverflowFlagSet()) << 1 |
+ static_cast<int>(constraint.IsUnderflowFlagSet()));
+}
+
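RangeCheckToEnum exists because DstRangeRelationToSrcRange now returns a two-flag RangeCheck rather than the old enum; the wrapper repacks the flags so the RANGE_* expectations below keep reading naturally. A short sketch of direct use (relies on the using-declarations at the top of this file):

    void RangeCheckDemo() {
      using base::internal::DstRangeRelationToSrcRange;
      // 1000 overflows an int8_t destination, setting only the overflow flag.
      RangeCheck rc = DstRangeRelationToSrcRange<int8_t>(1000);
      assert(rc.IsOverflowFlagSet() && !rc.IsUnderflowFlagSet());
      assert(RangeCheckToEnum(rc) == RANGE_OVERFLOW);
    }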
// EXPECT_EQ wrappers providing specific detail on test failures.
-#define TEST_EXPECTED_RANGE(expected, actual) \
- EXPECT_EQ(expected, base::internal::DstRangeRelationToSrcRange<Dst>(actual)) \
- << "Conversion test: " << src << " value " << actual << " to " << dst \
- << " on line " << line;
+#define TEST_EXPECTED_RANGE(expected, actual) \
+ EXPECT_EQ(expected, \
+ RangeCheckToEnum( \
+ base::internal::DstRangeRelationToSrcRange<Dst>(actual))) \
+ << "Conversion test: " << src << " value " << actual << " to " << dst \
+ << " on line " << line
+
+template <typename Dst, typename Src>
+void TestStrictComparison() {
+ using DstLimits = numeric_limits<Dst>;
+ using SrcLimits = numeric_limits<Src>;
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < DstLimits::max(), "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < SrcLimits::max(), "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= DstLimits::max()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= SrcLimits::max()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= DstLimits::max(),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= SrcLimits::max(),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > DstLimits::max()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > SrcLimits::max()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) > DstLimits::lowest(), "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) > SrcLimits::lowest(), "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= DstLimits::lowest()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= SrcLimits::lowest()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) >= DstLimits::lowest(),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) >= SrcLimits::lowest(),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < DstLimits::lowest()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < SrcLimits::lowest()),
+ "");
+ static_assert(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(1),
+ "");
+ static_assert(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(0),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) != static_cast<Dst>(0),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) != DstLimits::lowest(),
+ "");
+ static_assert(
+ !(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(1)), "");
+ static_assert(
+ !(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(0)), "");
+
+ // Due to differences in float handling between compilers, these aren't
+ // compile-time constants everywhere. So, we use run-time tests.
+ EXPECT_EQ(
+ SrcLimits::max(),
+ MakeCheckedNum(SrcLimits::max()).Max(DstLimits::lowest()).ValueOrDie());
+ EXPECT_EQ(
+ DstLimits::max(),
+ MakeCheckedNum(SrcLimits::lowest()).Max(DstLimits::max()).ValueOrDie());
+ EXPECT_EQ(
+ DstLimits::lowest(),
+ MakeCheckedNum(SrcLimits::max()).Min(DstLimits::lowest()).ValueOrDie());
+ EXPECT_EQ(
+ SrcLimits::lowest(),
+ MakeCheckedNum(SrcLimits::lowest()).Min(DstLimits::max()).ValueOrDie());
+ EXPECT_EQ(SrcLimits::lowest(), CheckMin(MakeStrictNum(1), MakeCheckedNum(0),
+ DstLimits::max(), SrcLimits::lowest())
+ .ValueOrDie());
+ EXPECT_EQ(DstLimits::max(), CheckMax(MakeStrictNum(1), MakeCheckedNum(0),
+ DstLimits::max(), SrcLimits::lowest())
+ .ValueOrDie());
+}
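CheckMax and CheckMin fold an arbitrary mix of plain, Strict, and Checked operands into one checked result, which the run-time cases above lean on. A minimal sketch:

    void MinMaxDemo() {
      auto biggest = base::CheckMax(3, base::MakeCheckedNum(7), 5L);
      assert(biggest.ValueOrDie() == 7);
      auto smallest = base::CheckMin(3, base::MakeCheckedNum(7), 5L);
      assert(smallest.ValueOrDie() == 3);
    }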
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- // Integral to floating.
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ // Integral to floating.
static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
- // Not floating to integral and...
- (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
- // Same sign, same numeric, source is narrower or same.
- ((SrcLimits::is_signed == DstLimits::is_signed &&
- sizeof(Dst) >= sizeof(Src)) ||
- // Or signed destination and source is smaller
- (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
+ // Not floating to integral and...
+ (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+ // Same sign, same numeric, source is narrower or same.
+ ((SrcLimits::is_signed == DstLimits::is_signed &&
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value) ||
+ // Or signed destination and source is smaller
+ (DstLimits::is_signed &&
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value))),
"Comparison must be sign preserving and value preserving");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
TEST_EXPECTED_SUCCESS(checked_dst);
if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
@@ -350,7 +645,7 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
} else if (numeric_limits<Src>::is_signed) {
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
}
}
};
@@ -358,14 +653,15 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
static_assert(SrcLimits::is_signed == DstLimits::is_signed,
"Destination and source sign must be the same");
- static_assert(sizeof(Dst) < sizeof(Src) ||
- (DstLimits::is_integer && SrcLimits::is_iec559),
+ static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
"Destination must be narrower than source");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
@@ -389,15 +685,15 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
TEST_EXPECTED_RANGE(
RANGE_VALID,
static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
- TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
}
} else if (SrcLimits::is_signed) {
TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
} else {
TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
}
}
};
@@ -405,19 +701,21 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert(sizeof(Dst) >= sizeof(Src),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value >= MaxExponent<Src>::value,
"Destination must be equal or wider than source.");
static_assert(SrcLimits::is_signed, "Source must be signed");
static_assert(!DstLimits::is_signed, "Destination must be unsigned");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
@@ -427,24 +725,32 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert((DstLimits::is_integer && SrcLimits::is_iec559) ||
- (sizeof(Dst) < sizeof(Src)),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value < MaxExponent<Src>::value,
"Destination must be narrower than source.");
static_assert(SrcLimits::is_signed, "Source must be signed.");
static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+
+ // Additional saturation tests.
+ EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max())) << src;
+ EXPECT_EQ(DstLimits::lowest(), saturated_cast<Dst>(SrcLimits::lowest()));
+
if (SrcLimits::is_iec559) {
+ EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::quiet_NaN()));
+
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
@@ -459,10 +765,10 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
TEST_EXPECTED_RANGE(
RANGE_VALID,
static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
- TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
}
} else {
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
}
}
};
@@ -470,21 +776,27 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert(sizeof(Dst) <= sizeof(Src),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
"Destination must be narrower or equal to source.");
static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
static_assert(DstLimits::is_signed, "Destination must be signed.");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
- TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
+ TEST_EXPECTED_VALUE(SrcLimits::lowest(), checked_dst + SrcLimits::lowest());
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+
+ // Additional saturation tests.
+ EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max()));
+ EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::lowest()));
}
};
@@ -583,6 +895,43 @@ TEST(SafeNumerics, SizeTOperations) {
TEST_NUMERIC_CONVERSION(int, size_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
}
+// A one-off test to ensure StrictNumeric won't resolve to an incorrect type.
+// If this fails we'll just get a compiler error on an ambiguous overload.
+int TestOverload(int) { // Overload fails.
+ return 0;
+}
+uint8_t TestOverload(uint8_t) { // Overload fails.
+ return 0;
+}
+size_t TestOverload(size_t) { // Overload succeeds.
+ return 0;
+}
+
+static_assert(
+ std::is_same<decltype(TestOverload(StrictNumeric<int>())), int>::value,
+ "");
+static_assert(std::is_same<decltype(TestOverload(StrictNumeric<size_t>())),
+ size_t>::value,
+ "");
+
+template <typename T>
+struct CastTest1 {
+ static constexpr T NaN() { return -1; }
+ static constexpr T max() { return numeric_limits<T>::max() - 1; }
+ static constexpr T Overflow() { return max(); }
+ static constexpr T lowest() { return numeric_limits<T>::lowest() + 1; }
+ static constexpr T Underflow() { return lowest(); }
+};
+
+template <typename T>
+struct CastTest2 {
+ static constexpr T NaN() { return 11; }
+ static constexpr T max() { return 10; }
+ static constexpr T Overflow() { return max(); }
+ static constexpr T lowest() { return 1; }
+ static constexpr T Underflow() { return lowest(); }
+};
+
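CastTest1 and CastTest2 work because saturated_cast accepts its saturation policy as a template template parameter: max()/lowest() define the clamp window, and Overflow()/Underflow()/NaN() supply the replacement values. A hedged sketch of a custom policy (ClampToPercent is an illustrative name, not part of the patch):

    // Clamps to [0, 100] and maps NaN to 0.
    template <typename T>
    struct ClampToPercent {
      static constexpr T NaN() { return 0; }
      static constexpr T max() { return 100; }
      static constexpr T Overflow() { return max(); }
      static constexpr T lowest() { return 0; }
      static constexpr T Underflow() { return lowest(); }
    };

    // saturated_cast<int, ClampToPercent>(250.0) yields 100.
    // saturated_cast<int, ClampToPercent>(-3) yields 0.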
TEST(SafeNumerics, CastTests) {
// MSVC catches and warns that we're forcing saturation in these tests.
// Since that's intentional, we need to shut this warning off.
@@ -596,7 +945,7 @@ TEST(SafeNumerics, CastTests) {
double double_large = numeric_limits<double>::max();
double double_infinity = numeric_limits<float>::infinity();
double double_large_int = numeric_limits<int>::max();
- double double_small_int = numeric_limits<int>::min();
+ double double_small_int = numeric_limits<int>::lowest();
// Just test that the casts compile, since the other tests cover logic.
EXPECT_EQ(0, checked_cast<int>(static_cast<size_t>(0)));
@@ -612,9 +961,9 @@ TEST(SafeNumerics, CastTests) {
EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
EXPECT_TRUE(IsValueNegative(-1));
- EXPECT_TRUE(IsValueNegative(numeric_limits<int>::min()));
- EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::min()));
- EXPECT_TRUE(IsValueNegative(-numeric_limits<double>::max()));
+ EXPECT_TRUE(IsValueNegative(numeric_limits<int>::lowest()));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::lowest()));
+ EXPECT_TRUE(IsValueNegative(numeric_limits<double>::lowest()));
EXPECT_FALSE(IsValueNegative(0));
EXPECT_FALSE(IsValueNegative(1));
EXPECT_FALSE(IsValueNegative(0u));
@@ -641,27 +990,83 @@ TEST(SafeNumerics, CastTests) {
EXPECT_EQ(saturated_cast<int>(double_large), numeric_limits<int>::max());
EXPECT_EQ(saturated_cast<float>(double_large), double_infinity);
EXPECT_EQ(saturated_cast<float>(-double_large), -double_infinity);
- EXPECT_EQ(numeric_limits<int>::min(), saturated_cast<int>(double_small_int));
+ EXPECT_EQ(numeric_limits<int>::lowest(),
+ saturated_cast<int>(double_small_int));
EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
+ // Test the saturated cast overrides.
+ using FloatLimits = numeric_limits<float>;
+ using IntLimits = numeric_limits<int>;
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(FloatLimits::quiet_NaN())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(FloatLimits::infinity())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(FloatLimits::max())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(float(IntLimits::max()))));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(-FloatLimits::infinity())));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(FloatLimits::lowest())));
+ EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0.0)));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1.0)));
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1.0)));
+ EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0)));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1)));
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1)));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(float(IntLimits::lowest()))));
+ EXPECT_EQ(11, (saturated_cast<int, CastTest2>(FloatLimits::quiet_NaN())));
+ EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::infinity())));
+ EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::max())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(-FloatLimits::infinity())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(FloatLimits::lowest())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(0U)));
+
float not_a_number = std::numeric_limits<float>::infinity() -
std::numeric_limits<float>::infinity();
EXPECT_TRUE(std::isnan(not_a_number));
EXPECT_EQ(0, saturated_cast<int>(not_a_number));
-}
-
-#if GTEST_HAS_DEATH_TEST
-TEST(SafeNumerics, SaturatedCastChecks) {
- float not_a_number = std::numeric_limits<float>::infinity() -
- std::numeric_limits<float>::infinity();
- EXPECT_TRUE(std::isnan(not_a_number));
- EXPECT_DEATH((saturated_cast<int, base::SaturatedCastNaNBehaviorCheck>(
- not_a_number)), "");
+  // Test the CheckedNumeric value extraction functions.
+ auto int8_min = MakeCheckedNum(numeric_limits<int8_t>::lowest());
+ auto int8_max = MakeCheckedNum(numeric_limits<int8_t>::max());
+ auto double_max = MakeCheckedNum(numeric_limits<double>::max());
+ static_assert(
+ std::is_same<int16_t,
+ decltype(int8_min.ValueOrDie<int16_t>())::type>::value,
+ "ValueOrDie returning incorrect type.");
+ static_assert(
+ std::is_same<int16_t,
+ decltype(int8_min.ValueOrDefault<int16_t>(0))::type>::value,
+ "ValueOrDefault returning incorrect type.");
+ EXPECT_FALSE(IsValidForType<uint8_t>(int8_min));
+ EXPECT_TRUE(IsValidForType<uint8_t>(int8_max));
+ EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::lowest()),
+ ValueOrDieForType<int>(int8_min));
+ EXPECT_TRUE(IsValidForType<uint32_t>(int8_max));
+ EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::max()),
+ ValueOrDieForType<int>(int8_max));
+ EXPECT_EQ(0, ValueOrDefaultForType<int>(double_max, 0));
+ uint8_t uint8_dest = 0;
+ int16_t int16_dest = 0;
+ double double_dest = 0;
+ EXPECT_TRUE(int8_max.AssignIfValid(&uint8_dest));
+ EXPECT_EQ(static_cast<uint8_t>(numeric_limits<int8_t>::max()), uint8_dest);
+ EXPECT_FALSE(int8_min.AssignIfValid(&uint8_dest));
+ EXPECT_TRUE(int8_max.AssignIfValid(&int16_dest));
+ EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::max()), int16_dest);
+ EXPECT_TRUE(int8_min.AssignIfValid(&int16_dest));
+ EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::lowest()), int16_dest);
+ EXPECT_FALSE(double_max.AssignIfValid(&uint8_dest));
+ EXPECT_FALSE(double_max.AssignIfValid(&int16_dest));
+ EXPECT_TRUE(double_max.AssignIfValid(&double_dest));
+ EXPECT_EQ(numeric_limits<double>::max(), double_dest);
+ EXPECT_EQ(1, checked_cast<int>(StrictNumeric<int>(1)));
+ EXPECT_EQ(1, saturated_cast<int>(StrictNumeric<int>(1)));
+ EXPECT_EQ(1, strict_cast<int>(StrictNumeric<int>(1)));
}
-#endif // GTEST_HAS_DEATH_TEST
-
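Of the extraction helpers above, AssignIfValid is the one most call sites want: it collapses the IsValid-then-ValueOrDie pair into a single conditional write. A minimal sketch (StoreSize is an illustrative name):

    // Writes |*out| and returns true only if |size| is valid and fits.
    bool StoreSize(base::CheckedNumeric<int64_t> size, uint32_t* out) {
      return size.AssignIfValid(out);
    }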
TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
@@ -672,9 +1077,9 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
@@ -688,13 +1093,13 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest()) - 1));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
@@ -705,10 +1110,10 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
@@ -730,11 +1135,11 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(
IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
}
TEST(SafeNumerics, CompoundNumericOperations) {
@@ -760,3 +1165,22 @@ TEST(SafeNumerics, CompoundNumericOperations) {
too_large /= d;
EXPECT_FALSE(too_large.IsValid());
}
+
+TEST(SafeNumerics, VariadicNumericOperations) {
+ auto a = CheckAdd(1, 2UL, MakeCheckedNum(3LL), 4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(a)::type>(10), a);
+ auto b = CheckSub(MakeCheckedNum(20.0), 2UL, 4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(b)::type>(14.0), b);
+ auto c = CheckMul(20.0, MakeCheckedNum(1), 5, 3UL).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(c)::type>(300.0), c);
+ auto d = CheckDiv(20.0, 2.0, MakeCheckedNum(5LL), -4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(d)::type>(-.5), d);
+ auto e = CheckMod(MakeCheckedNum(20), 3).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(e)::type>(2), e);
+ auto f = CheckLsh(1, MakeCheckedNum(2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(f)::type>(4), f);
+ auto g = CheckRsh(4, MakeCheckedNum(2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(g)::type>(1), g);
+ auto h = CheckRsh(CheckAdd(1, 1, 1, 1), CheckSub(4, 2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(h)::type>(1), h);
+}
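Outside the tests, the variadic helpers compose without intermediate extraction, so the whole expression either stays valid or poisons the final result. A hedged sketch computing an allocation size (BufferBytes is an illustrative name):

    // Returns 0 instead of a wrapped value if any step overflows.
    size_t BufferBytes(size_t rows, size_t cols, size_t bpp) {
      return base::CheckAdd(base::CheckMul(rows, cols, bpp), 16 /* header */)
          .ValueOrDefault(0);
    }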
diff --git a/base/observer_list.h b/base/observer_list.h
index afe1f46cd6..0572ba6500 100644
--- a/base/observer_list.h
+++ b/base/observer_list.h
@@ -11,6 +11,7 @@
#include <limits>
#include <vector>
+#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
@@ -46,11 +47,14 @@
// }
//
// void NotifyFoo() {
-// FOR_EACH_OBSERVER(Observer, observer_list_, OnFoo(this));
+// for (auto& observer : observer_list_)
+// observer.OnFoo(this);
// }
//
// void NotifyBar(int x, int y) {
-// FOR_EACH_OBSERVER(Observer, observer_list_, OnBar(this, x, y));
+//     for (FooList::iterator i = observer_list_.begin(),
+//              e = observer_list_.end(); i != e; ++i)
+//       i->OnBar(this, x, y);
// }
//
// private:
@@ -80,20 +84,66 @@ class ObserverListBase
NOTIFY_EXISTING_ONLY
};
- // An iterator class that can be used to access the list of observers. See
- // also the FOR_EACH_OBSERVER macro defined below.
- class Iterator {
+ // An iterator class that can be used to access the list of observers.
+ template <class ContainerType>
+ class Iter {
public:
- explicit Iterator(ObserverListBase<ObserverType>* list);
- ~Iterator();
- ObserverType* GetNext();
+ Iter();
+ explicit Iter(ContainerType* list);
+ ~Iter();
+
+    // A workaround for C2244. MSVC requires a fully qualified type name for
+    // the return type on a function definition to match its declaration.
+ using ThisType =
+ typename ObserverListBase<ObserverType>::template Iter<ContainerType>;
+
+ bool operator==(const Iter& other) const;
+ bool operator!=(const Iter& other) const;
+ ThisType& operator++();
+ ObserverType* operator->() const;
+ ObserverType& operator*() const;
private:
+ FRIEND_TEST_ALL_PREFIXES(ObserverListTest, BasicStdIterator);
+ FRIEND_TEST_ALL_PREFIXES(ObserverListTest, StdIteratorRemoveFront);
+
+ ObserverType* GetCurrent() const;
+ void EnsureValidIndex();
+
+ size_t clamped_max_index() const {
+ return std::min(max_index_, list_->observers_.size());
+ }
+
+ bool is_end() const { return !list_ || index_ == clamped_max_index(); }
+
WeakPtr<ObserverListBase<ObserverType>> list_;
+ // When initially constructed and each time the iterator is incremented,
+    // |index_| is guaranteed to be the index of a non-null observer if the
+    // iterator has not reached the end of the ObserverList.
size_t index_;
size_t max_index_;
};
+ using Iterator = Iter<ObserverListBase<ObserverType>>;
+
+ using iterator = Iter<ObserverListBase<ObserverType>>;
+ iterator begin() {
+    // An optimization: do not involve weak pointers for an empty list.
+    // Note: can't use the ?: operator here due to an MSVC bug (unit tests
+    // fail).
+ if (observers_.empty())
+ return iterator();
+ return iterator(this);
+ }
+ iterator end() { return iterator(); }
+
+ using const_iterator = Iter<const ObserverListBase<ObserverType>>;
+ const_iterator begin() const {
+ if (observers_.empty())
+ return const_iterator();
+ return const_iterator(this);
+ }
+ const_iterator end() const { return const_iterator(); }
+
ObserverListBase() : notify_depth_(0), type_(NOTIFY_ALL) {}
explicit ObserverListBase(NotificationType type)
: notify_depth_(0), type_(type) {}
@@ -124,37 +174,99 @@ class ObserverListBase
int notify_depth_;
NotificationType type_;
- friend class ObserverListBase::Iterator;
+ template <class ContainerType>
+ friend class Iter;
DISALLOW_COPY_AND_ASSIGN(ObserverListBase);
};
template <class ObserverType>
-ObserverListBase<ObserverType>::Iterator::Iterator(
- ObserverListBase<ObserverType>* list)
- : list_(list->AsWeakPtr()),
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::Iter()
+ : index_(0), max_index_(0) {}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::Iter(ContainerType* list)
+ : list_(const_cast<ObserverListBase<ObserverType>*>(list)->AsWeakPtr()),
index_(0),
max_index_(list->type_ == NOTIFY_ALL ? std::numeric_limits<size_t>::max()
: list->observers_.size()) {
+ EnsureValidIndex();
+ DCHECK(list_);
++list_->notify_depth_;
}
template <class ObserverType>
-ObserverListBase<ObserverType>::Iterator::~Iterator() {
- if (list_.get() && --list_->notify_depth_ == 0)
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::~Iter() {
+ if (list_ && --list_->notify_depth_ == 0)
list_->Compact();
}
template <class ObserverType>
-ObserverType* ObserverListBase<ObserverType>::Iterator::GetNext() {
- if (!list_.get())
+template <class ContainerType>
+bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator==(
+ const Iter& other) const {
+ if (is_end() && other.is_end())
+ return true;
+ return list_.get() == other.list_.get() && index_ == other.index_;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator!=(
+ const Iter& other) const {
+ return !operator==(other);
+}
+
+template <class ObserverType>
+template <class ContainerType>
+typename ObserverListBase<ObserverType>::template Iter<ContainerType>&
+ ObserverListBase<ObserverType>::Iter<ContainerType>::operator++() {
+ if (list_) {
+ ++index_;
+ EnsureValidIndex();
+ }
+ return *this;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::operator->()
+ const {
+ ObserverType* current = GetCurrent();
+ DCHECK(current);
+ return current;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType& ObserverListBase<ObserverType>::Iter<ContainerType>::operator*()
+ const {
+ ObserverType* current = GetCurrent();
+ DCHECK(current);
+ return *current;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::GetCurrent()
+ const {
+ if (!list_)
return nullptr;
- ListType& observers = list_->observers_;
- // Advance if the current element is null
- size_t max_index = std::min(max_index_, observers.size());
- while (index_ < max_index && !observers[index_])
+ return index_ < clamped_max_index() ? list_->observers_[index_] : nullptr;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+void ObserverListBase<ObserverType>::Iter<ContainerType>::EnsureValidIndex() {
+ if (!list_)
+ return;
+
+ size_t max_index = clamped_max_index();
+ while (index_ < max_index && !list_->observers_[index_])
++index_;
- return index_ < max_index ? observers[index_++] : nullptr;
}
template <class ObserverType>
@@ -205,9 +317,8 @@ void ObserverListBase<ObserverType>::Clear() {
template <class ObserverType>
void ObserverListBase<ObserverType>::Compact() {
- observers_.erase(
- std::remove(observers_.begin(), observers_.end(), nullptr),
- observers_.end());
+ observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
+ observers_.end());
}
template <class ObserverType, bool check_empty = false>
@@ -233,17 +344,6 @@ class ObserverList : public ObserverListBase<ObserverType> {
}
};
-#define FOR_EACH_OBSERVER(ObserverType, observer_list, func) \
- do { \
- if ((observer_list).might_have_observers()) { \
- typename base::ObserverListBase<ObserverType>::Iterator \
- it_inside_observer_macro(&observer_list); \
- ObserverType* obs; \
- while ((obs = it_inside_observer_macro.GetNext()) != nullptr) \
- obs->func; \
- } \
- } while (0)
-
} // namespace base
#endif // BASE_OBSERVER_LIST_H_
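For call sites, removing the macro means every FOR_EACH_OBSERVER use becomes the range-based loop shown in the header comment. A before/after sketch of the mechanical migration (MyObserver/OnEvent are illustrative):

    // Before:
    //   FOR_EACH_OBSERVER(MyObserver, observers_, OnEvent(id));
    // After:
    for (auto& observer : observers_)
      observer.OnEvent(id);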
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index fe783542f4..afb1010b67 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -7,17 +7,17 @@
#include <algorithm>
#include <map>
+#include <memory>
#include <tuple>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -55,56 +55,27 @@
///////////////////////////////////////////////////////////////////////////////
namespace base {
-
-// Forward declaration for ObserverListThreadSafeTraits.
-template <class ObserverType>
-class ObserverListThreadSafe;
-
namespace internal {
-// An UnboundMethod is a wrapper for a method where the actual object is
-// provided at Run dispatch time.
-template <class T, class Method, class Params>
-class UnboundMethod {
- public:
- UnboundMethod(Method m, const Params& p) : m_(m), p_(p) {
- static_assert((internal::ParamsUseScopedRefptrCorrectly<Params>::value),
- "bad unbound method params");
- }
- void Run(T* obj) const {
- DispatchToMethod(obj, m_, p_);
+template <typename ObserverType, typename Method>
+struct Dispatcher;
+
+template <typename ObserverType, typename ReceiverType, typename... Params>
+struct Dispatcher<ObserverType, void(ReceiverType::*)(Params...)> {
+ static void Run(void(ReceiverType::* m)(Params...),
+ Params... params, ObserverType* obj) {
+ (obj->*m)(std::forward<Params>(params)...);
}
- private:
- Method m_;
- Params p_;
};
} // namespace internal
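The Dispatcher specialization deconstructs the method-pointer type so Notify (below) can Bind the method and its arguments up front, leaving only the observer to be supplied per delivery. A sketch of the call shape this produces, assuming an Observer with OnFoo(int):

    // Conceptually, Notify(FROM_HERE, &Observer::OnFoo, 42) builds:
    //   Callback<void(Observer*)> method =
    //       Bind(&internal::Dispatcher<Observer,
    //                                  void (Observer::*)(int)>::Run,
    //            &Observer::OnFoo, 42);
    // and each thread's task runner later invokes it once per observer.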
-// This class is used to work around VS2005 not accepting:
-//
-// friend class
-// base::RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
-//
-// Instead of friending the class, we could friend the actual function
-// which calls delete. However, this ends up being
-// RefCountedThreadSafe::DeleteInternal(), which is private. So we
-// define our own templated traits class so we can friend it.
-template <class T>
-struct ObserverListThreadSafeTraits {
- static void Destruct(const ObserverListThreadSafe<T>* x) {
- delete x;
- }
-};
-
template <class ObserverType>
class ObserverListThreadSafe
- : public RefCountedThreadSafe<
- ObserverListThreadSafe<ObserverType>,
- ObserverListThreadSafeTraits<ObserverType>> {
+ : public RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>> {
public:
- typedef typename ObserverList<ObserverType>::NotificationType
- NotificationType;
+ using NotificationType =
+ typename ObserverList<ObserverType>::NotificationType;
ObserverListThreadSafe()
: type_(ObserverListBase<ObserverType>::NOTIFY_ALL) {}
@@ -113,17 +84,19 @@ class ObserverListThreadSafe
// Add an observer to the list. An observer should not be added to
// the same list more than once.
void AddObserver(ObserverType* obs) {
- // If there is not a current MessageLoop, it is impossible to notify on it,
+ // If there is no ThreadTaskRunnerHandle, it is impossible to notify on it,
// so do not add the observer.
- if (!MessageLoop::current())
+ if (!ThreadTaskRunnerHandle::IsSet())
return;
ObserverList<ObserverType>* list = nullptr;
PlatformThreadId thread_id = PlatformThread::CurrentId();
{
AutoLock lock(list_lock_);
- if (observer_lists_.find(thread_id) == observer_lists_.end())
- observer_lists_[thread_id] = new ObserverListContext(type_);
+ if (observer_lists_.find(thread_id) == observer_lists_.end()) {
+ observer_lists_[thread_id] =
+ base::MakeUnique<ObserverListContext>(type_);
+ }
list = &(observer_lists_[thread_id]->list);
}
list->AddObserver(obs);
@@ -135,32 +108,24 @@ class ObserverListThreadSafe
// If the observer to be removed is in the list, RemoveObserver MUST
// be called from the same thread which called AddObserver.
void RemoveObserver(ObserverType* obs) {
- ObserverListContext* context = nullptr;
- ObserverList<ObserverType>* list = nullptr;
PlatformThreadId thread_id = PlatformThread::CurrentId();
{
AutoLock lock(list_lock_);
- typename ObserversListMap::iterator it = observer_lists_.find(thread_id);
+ auto it = observer_lists_.find(thread_id);
if (it == observer_lists_.end()) {
// This will happen if we try to remove an observer on a thread
// we never added an observer for.
return;
}
- context = it->second;
- list = &context->list;
+ ObserverList<ObserverType>& list = it->second->list;
+
+ list.RemoveObserver(obs);
- // If we're about to remove the last observer from the list,
- // then we can remove this observer_list entirely.
- if (list->HasObserver(obs) && list->size() == 1)
+ // If that was the last observer in the list, remove the ObserverList
+ // entirely.
+ if (list.size() == 0)
observer_lists_.erase(it);
}
- list->RemoveObserver(obs);
-
- // If RemoveObserver is called from a notification, the size will be
- // nonzero. Instead of deleting here, the NotifyWrapper will delete
- // when it finishes iterating.
- if (list->size() == 0)
- delete context;
}
// Verifies that the list is currently empty (i.e. there are no observers).
@@ -174,27 +139,25 @@ class ObserverListThreadSafe
// Note, these calls are effectively asynchronous. You cannot assume
// that at the completion of the Notify call all Observers have
// been notified. The notification may still be pending delivery.
- template <class Method, class... Params>
+ template <typename Method, typename... Params>
void Notify(const tracked_objects::Location& from_here,
- Method m,
- const Params&... params) {
- internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
- m, std::make_tuple(params...));
+ Method m, Params&&... params) {
+ Callback<void(ObserverType*)> method =
+ Bind(&internal::Dispatcher<ObserverType, Method>::Run,
+ m, std::forward<Params>(params)...);
AutoLock lock(list_lock_);
for (const auto& entry : observer_lists_) {
- ObserverListContext* context = entry.second;
+ ObserverListContext* context = entry.second.get();
context->task_runner->PostTask(
from_here,
- Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
- Method, std::tuple<Params...>>,
+ Bind(&ObserverListThreadSafe<ObserverType>::NotifyWrapper,
this, context, method));
}
}
private:
- // See comment above ObserverListThreadSafeTraits' definition.
- friend struct ObserverListThreadSafeTraits<ObserverType>;
+ friend class RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
struct ObserverListContext {
explicit ObserverListContext(NotificationType type)
@@ -208,35 +171,28 @@ class ObserverListThreadSafe
};
~ObserverListThreadSafe() {
- STLDeleteValues(&observer_lists_);
}
// Wrapper which is called to fire the notifications for each thread's
// ObserverList. This function MUST be called on the thread which owns
// the unsafe ObserverList.
- template <class Method, class Params>
- void NotifyWrapper(
- ObserverListContext* context,
- const internal::UnboundMethod<ObserverType, Method, Params>& method) {
+ void NotifyWrapper(ObserverListContext* context,
+ const Callback<void(ObserverType*)>& method) {
// Check that this list still needs notifications.
{
AutoLock lock(list_lock_);
- typename ObserversListMap::iterator it =
- observer_lists_.find(PlatformThread::CurrentId());
+ auto it = observer_lists_.find(PlatformThread::CurrentId());
// The ObserverList could have been removed already. In fact, it could
// have been removed and then re-added! If the master list's loop
// does not match this one, then we do not need to finish this
// notification.
- if (it == observer_lists_.end() || it->second != context)
+ if (it == observer_lists_.end() || it->second.get() != context)
return;
}
- {
- typename ObserverList<ObserverType>::Iterator it(&context->list);
- ObserverType* obs;
- while ((obs = it.GetNext()) != nullptr)
- method.Run(obs);
+ for (auto& observer : context->list) {
+ method.Run(&observer);
}
// If there are no more observers on the list, we can now delete it.
@@ -246,23 +202,22 @@ class ObserverListThreadSafe
// Remove |list| if it's not already removed.
// This can happen if multiple observers got removed in a notification.
// See http://crbug.com/55725.
- typename ObserversListMap::iterator it =
- observer_lists_.find(PlatformThread::CurrentId());
- if (it != observer_lists_.end() && it->second == context)
+ auto it = observer_lists_.find(PlatformThread::CurrentId());
+ if (it != observer_lists_.end() && it->second.get() == context)
observer_lists_.erase(it);
}
- delete context;
}
}
+ mutable Lock list_lock_; // Protects the observer_lists_.
+
// Key by PlatformThreadId because in tests, clients can attempt to remove
- // observers without a MessageLoop. If this were keyed by MessageLoop, that
- // operation would be silently ignored, leaving garbage in the ObserverList.
- typedef std::map<PlatformThreadId, ObserverListContext*>
- ObserversListMap;
+ // observers without a SingleThreadTaskRunner. If this were keyed by
+ // SingleThreadTaskRunner, that operation would be silently ignored, leaving
+ // garbage in the ObserverList.
+ std::map<PlatformThreadId, std::unique_ptr<ObserverListContext>>
+ observer_lists_;
- mutable Lock list_lock_; // Protects the observer_lists_.
- ObserversListMap observer_lists_;
const NotificationType type_;
DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
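With the Dispatcher rewrite above, Notify() now binds the method and its
arguments into a Callback<void(ObserverType*)> up front instead of packing
them into a tuple for later dispatch. A minimal usage sketch, reusing the Foo
interface from the unittest below:

    scoped_refptr<ObserverListThreadSafe<Foo>> list(
        new ObserverListThreadSafe<Foo>);
    // AddObserver() must run on a thread with a ThreadTaskRunnerHandle; the
    // notification is posted back to that thread's task runner.
    list->AddObserver(&some_foo);
    list->Notify(FROM_HERE, &Foo::Observe, 42);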
diff --git a/base/observer_list_unittest.cc b/base/observer_list_unittest.cc
index 097a2ed28b..c5e556bd9d 100644
--- a/base/observer_list_unittest.cc
+++ b/base/observer_list_unittest.cc
@@ -22,13 +22,17 @@ class Foo {
public:
virtual void Observe(int x) = 0;
virtual ~Foo() {}
+ virtual int GetValue() const { return 0; }
};
class Adder : public Foo {
public:
explicit Adder(int scaler) : total(0), scaler_(scaler) {}
- void Observe(int x) override { total += x * scaler_; }
~Adder() override {}
+
+ void Observe(int x) override { total += x * scaler_; }
+ int GetValue() const override { return total; }
+
int total;
private:
@@ -37,16 +41,28 @@ class Adder : public Foo {
class Disrupter : public Foo {
public:
+ Disrupter(ObserverList<Foo>* list, Foo* doomed, bool remove_self)
+ : list_(list), doomed_(doomed), remove_self_(remove_self) {}
Disrupter(ObserverList<Foo>* list, Foo* doomed)
- : list_(list),
- doomed_(doomed) {
- }
+ : Disrupter(list, doomed, false) {}
+ Disrupter(ObserverList<Foo>* list, bool remove_self)
+ : Disrupter(list, nullptr, remove_self) {}
+
~Disrupter() override {}
- void Observe(int x) override { list_->RemoveObserver(doomed_); }
+
+ void Observe(int x) override {
+ if (remove_self_)
+ list_->RemoveObserver(this);
+ if (doomed_)
+ list_->RemoveObserver(doomed_);
+ }
+
+ void SetDoomed(Foo* doomed) { doomed_ = doomed; }
private:
ObserverList<Foo>* list_;
Foo* doomed_;
+ bool remove_self_;
};
class ThreadSafeDisrupter : public Foo {
@@ -67,21 +83,19 @@ template <typename ObserverListType>
class AddInObserve : public Foo {
public:
explicit AddInObserve(ObserverListType* observer_list)
- : added(false),
- observer_list(observer_list),
- adder(1) {
- }
+ : observer_list(observer_list), to_add_() {}
+
+ void SetToAdd(Foo* to_add) { to_add_ = to_add; }
void Observe(int x) override {
- if (!added) {
- added = true;
- observer_list->AddObserver(&adder);
+ if (to_add_) {
+ observer_list->AddObserver(to_add_);
+ to_add_ = nullptr;
}
}
- bool added;
ObserverListType* observer_list;
- Adder adder;
+ Foo* to_add_;
};
@@ -112,8 +126,6 @@ class AddRemoveThread : public PlatformThread::Delegate,
FROM_HERE,
base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
RunLoop().Run();
- //LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
- // count_observes_ << ", " << count_addtask_;
delete loop_;
loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
delete this;
@@ -176,6 +188,8 @@ class AddRemoveThread : public PlatformThread::Delegate,
base::WeakPtrFactory<AddRemoveThread> weak_factory_;
};
+} // namespace
+
TEST(ObserverListTest, BasicTest) {
ObserverList<Foo> observer_list;
Adder a(1), b(-1), c(1), d(-1), e(-1);
@@ -187,7 +201,8 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_TRUE(observer_list.HasObserver(&a));
EXPECT_FALSE(observer_list.HasObserver(&c));
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+ for (auto& observer : observer_list)
+ observer.Observe(10);
observer_list.AddObserver(&evil);
observer_list.AddObserver(&c);
@@ -196,7 +211,8 @@ TEST(ObserverListTest, BasicTest) {
// Removing an observer not in the list should do nothing.
observer_list.RemoveObserver(&e);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+ for (auto& observer : observer_list)
+ observer.Observe(10);
EXPECT_EQ(20, a.total);
EXPECT_EQ(-20, b.total);
@@ -205,6 +221,52 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(0, e.total);
}
+TEST(ObserverListTest, DisruptSelf) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter evil(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ observer_list.AddObserver(&evil);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-20, b.total);
+ EXPECT_EQ(10, c.total);
+ EXPECT_EQ(-10, d.total);
+}
+
+TEST(ObserverListTest, DisruptBefore) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter evil(&observer_list, &b);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&evil);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-10, b.total);
+ EXPECT_EQ(20, c.total);
+ EXPECT_EQ(-20, d.total);
+}
+
TEST(ObserverListThreadSafeTest, BasicTest) {
MessageLoop loop;
@@ -433,20 +495,24 @@ TEST(ObserverListTest, Existing) {
ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
Adder a(1);
AddInObserve<ObserverList<Foo> > b(&observer_list);
+ Adder c(1);
+ b.SetToAdd(&c);
observer_list.AddObserver(&a);
observer_list.AddObserver(&b);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
- EXPECT_TRUE(b.added);
+ EXPECT_FALSE(b.to_add_);
// B's adder should not have been notified because it was added during
// notification.
- EXPECT_EQ(0, b.adder.total);
+ EXPECT_EQ(0, c.total);
// Notify again to make sure b's adder is notified.
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
- EXPECT_EQ(1, b.adder.total);
+ for (auto& observer : observer_list)
+ observer.Observe(1);
+ EXPECT_EQ(1, c.total);
}
// Same as above, but for ObserverListThreadSafe
@@ -456,6 +522,8 @@ TEST(ObserverListThreadSafeTest, Existing) {
new ObserverListThreadSafe<Foo>(ObserverList<Foo>::NOTIFY_EXISTING_ONLY));
Adder a(1);
AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
+ Adder c(1);
+ b.SetToAdd(&c);
observer_list->AddObserver(&a);
observer_list->AddObserver(&b);
@@ -463,15 +531,15 @@ TEST(ObserverListThreadSafeTest, Existing) {
observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
RunLoop().RunUntilIdle();
- EXPECT_TRUE(b.added);
+ EXPECT_FALSE(b.to_add_);
// B's adder should not have been notified because it was added during
// notification.
- EXPECT_EQ(0, b.adder.total);
+ EXPECT_EQ(0, c.total);
// Notify again to make sure b's adder is notified.
observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
RunLoop().RunUntilIdle();
- EXPECT_EQ(1, b.adder.total);
+ EXPECT_EQ(1, c.total);
}
class AddInClearObserve : public Foo {
@@ -501,7 +569,8 @@ TEST(ObserverListTest, ClearNotifyAll) {
observer_list.AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
EXPECT_TRUE(a.added());
EXPECT_EQ(1, a.adder().total)
<< "Adder should observe once and have sum of 1.";
@@ -513,7 +582,8 @@ TEST(ObserverListTest, ClearNotifyExistingOnly) {
observer_list.AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
EXPECT_TRUE(a.added());
EXPECT_EQ(0, a.adder().total)
<< "Adder should not observe, so sum should still be 0.";
@@ -536,10 +606,330 @@ TEST(ObserverListTest, IteratorOutlivesList) {
ListDestructor a(observer_list);
observer_list->AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, *observer_list, Observe(0));
+ for (auto& observer : *observer_list)
+ observer.Observe(0);
// If this test fails, there'll be Valgrind errors when this function goes out
// of scope.
}
-} // namespace
+TEST(ObserverListTest, BasicStdIterator) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+
+ // An optimization: begin() and end() do not involve weak pointers on
+ // an empty list.
+ EXPECT_FALSE(observer_list.begin().list_);
+ EXPECT_FALSE(observer_list.end().list_);
+
+ // Iterate over empty list: no effect, no crash.
+ for (auto& i : observer_list)
+ i.Observe(10);
+
+ Adder a(1), b(-1), c(1), d(-1);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+ i != e; ++i)
+ i->Observe(1);
+
+ EXPECT_EQ(1, a.total);
+ EXPECT_EQ(-1, b.total);
+ EXPECT_EQ(1, c.total);
+ EXPECT_EQ(-1, d.total);
+
+ // Check an iteration over a 'const view' for a given container.
+ const FooList& const_list = observer_list;
+ for (FooList::const_iterator i = const_list.begin(), e = const_list.end();
+ i != e; ++i) {
+ EXPECT_EQ(1, std::abs(i->GetValue()));
+ }
+
+ for (const auto& o : const_list)
+ EXPECT_EQ(1, std::abs(o.GetValue()));
+}
+
+TEST(ObserverListTest, StdIteratorRemoveItself) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBefore) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &b);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-1, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfter) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &c);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(0, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfterFront) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &a);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(1, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBeforeBack) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &d);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(0, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveFront) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ bool test_disruptor = true;
+ for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+ i != e; ++i) {
+ i->Observe(1);
+ // Check that a second call to i->Observe() would crash here.
+ if (test_disruptor) {
+ EXPECT_FALSE(i.GetCurrent());
+ test_disruptor = false;
+ }
+ }
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBack) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+ observer_list.AddObserver(&disrupter);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, NestedLoop) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list) {
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+ }
+
+ EXPECT_EQ(15, a.total);
+ EXPECT_EQ(-15, b.total);
+ EXPECT_EQ(15, c.total);
+ EXPECT_EQ(-15, d.total);
+}
+
+TEST(ObserverListTest, NonCompactList) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1);
+
+ Disrupter disrupter1(&observer_list, true);
+ Disrupter disrupter2(&observer_list, true);
+
+ // Disrupt itself and another one.
+ disrupter1.SetDoomed(&disrupter2);
+
+ observer_list.AddObserver(&disrupter1);
+ observer_list.AddObserver(&disrupter2);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ for (auto& o : observer_list) {
+ // Get the { nullptr, nullptr, &a, &b } non-compact list
+ // on the first inner pass.
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+ }
+
+ EXPECT_EQ(13, a.total);
+ EXPECT_EQ(-13, b.total);
+}
+
+TEST(ObserverListTest, BecomesEmptyThanNonEmpty) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1);
+
+ Disrupter disrupter1(&observer_list, true);
+ Disrupter disrupter2(&observer_list, true);
+
+ // Disrupt itself and another one.
+ disrupter1.SetDoomed(&disrupter2);
+
+ observer_list.AddObserver(&disrupter1);
+ observer_list.AddObserver(&disrupter2);
+
+ bool add_observers = true;
+ for (auto& o : observer_list) {
+ // Get the { nullptr, nullptr } empty list on the first inner pass.
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ if (add_observers) {
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ add_observers = false;
+ }
+ }
+
+ EXPECT_EQ(12, a.total);
+ EXPECT_EQ(-12, b.total);
+}
+
+TEST(ObserverListTest, AddObserverInTheLastObserve) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+
+ AddInObserve<FooList> a(&observer_list);
+ Adder b(-1);
+
+ a.SetToAdd(&b);
+ observer_list.AddObserver(&a);
+
+ auto it = observer_list.begin();
+ while (it != observer_list.end()) {
+ auto& observer = *it;
+ // Intentionally increment the iterator before calling Observe(). The
+ // ObserverList starts with only one observer, and it == observer_list.end()
+ // should be true after the next line.
+ ++it;
+ // However, the first Observe() call will add a second observer: at this
+ // point, it != observer_list.end() should be true, and Observe() should be
+ // called on the newly added observer on the next iteration of the loop.
+ observer.Observe(10);
+ }
+
+ EXPECT_EQ(-10, b.total);
+}
+
} // namespace base
diff --git a/base/optional.h b/base/optional.h
index b468964ae3..cf65ad7dac 100644
--- a/base/optional.h
+++ b/base/optional.h
@@ -8,7 +8,6 @@
#include <type_traits>
#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
#include "base/template_util.h"
namespace base {
@@ -35,28 +34,70 @@ namespace internal {
template <typename T, bool = base::is_trivially_destructible<T>::value>
struct OptionalStorage {
+ // Initialize |empty_| here, instead of using a default member initializer,
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
// When T is not trivially destructible we must call its
// destructor before deallocating its memory.
~OptionalStorage() {
if (!is_null_)
- buffer_.template data_as<T>()->~T();
+ value_.~T();
}
bool is_null_ = true;
- base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+ union {
+ // |empty_| exists so that the union will always be initialized, even when
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
+ T value_;
+ };
};
template <typename T>
struct OptionalStorage<T, true> {
- // When T is trivially destructible (i.e. its destructor does nothing)
- // there is no need to call it.
- // Since |base::AlignedMemory| is just an array its destructor
- // is trivial. Explicitly defaulting the destructor means it's not
- // user-provided. All of this together make this destructor trivial.
+ // Initialize |empty_| here, instead of using a default member initializer,
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
+ // When T is trivially destructible (i.e. its destructor does nothing) there
+ // is no need to call it. Explicitly defaulting the destructor means it's not
+ // user-provided. Those two together make this destructor trivial.
~OptionalStorage() = default;
bool is_null_ = true;
- base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+ union {
+ // |empty_| exists so that the union will always be initialized, even when
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
+ T value_;
+ };
};
} // namespace internal
@@ -79,8 +120,9 @@ class Optional {
public:
using value_type = T;
- constexpr Optional() = default;
- Optional(base::nullopt_t) : Optional() {}
+ constexpr Optional() {}
+
+ constexpr Optional(base::nullopt_t) {}
Optional(const Optional& other) {
if (!other.storage_.is_null_)
@@ -92,14 +134,15 @@ class Optional {
Init(std::move(other.value()));
}
- Optional(const T& value) { Init(value); }
+ constexpr Optional(const T& value) : storage_(value) {}
- Optional(T&& value) { Init(std::move(value)); }
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ Optional(T&& value) : storage_(std::move(value)) {}
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
- explicit Optional(base::in_place_t, Args&&... args) {
- emplace(std::forward<Args>(args)...);
- }
+ explicit Optional(base::in_place_t, Args&&... args)
+ : storage_(base::in_place, std::forward<Args>(args)...) {}
~Optional() = default;
@@ -163,30 +206,32 @@ class Optional {
constexpr explicit operator bool() const { return !storage_.is_null_; }
+ constexpr bool has_value() const { return !storage_.is_null_; }
+
// TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
// meant to be 'constexpr const'.
T& value() & {
DCHECK(!storage_.is_null_);
- return *storage_.buffer_.template data_as<T>();
+ return storage_.value_;
}
// TODO(mlamouri): can't use 'constexpr' with DCHECK.
const T& value() const& {
DCHECK(!storage_.is_null_);
- return *storage_.buffer_.template data_as<T>();
+ return storage_.value_;
}
// TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
// meant to be 'constexpr const'.
T&& value() && {
DCHECK(!storage_.is_null_);
- return std::move(*storage_.buffer_.template data_as<T>());
+ return std::move(storage_.value_);
}
// TODO(mlamouri): can't use 'constexpr' with DCHECK.
const T&& value() const&& {
DCHECK(!storage_.is_null_);
- return std::move(*storage_.buffer_.template data_as<T>());
+ return std::move(storage_.value_);
}
template <class U>
@@ -217,10 +262,10 @@ class Optional {
if (storage_.is_null_ != other.storage_.is_null_) {
if (storage_.is_null_) {
- Init(std::move(*other.storage_.buffer_.template data_as<T>()));
+ Init(std::move(other.storage_.value_));
other.FreeIfNeeded();
} else {
- other.Init(std::move(*storage_.buffer_.template data_as<T>()));
+ other.Init(std::move(storage_.value_));
FreeIfNeeded();
}
return;
@@ -231,6 +276,10 @@ class Optional {
swap(**this, *other);
}
+ void reset() {
+ FreeIfNeeded();
+ }
+
template <class... Args>
void emplace(Args&&... args) {
FreeIfNeeded();
@@ -240,20 +289,20 @@ class Optional {
private:
void Init(const T& value) {
DCHECK(storage_.is_null_);
- new (storage_.buffer_.void_data()) T(value);
+ new (&storage_.value_) T(value);
storage_.is_null_ = false;
}
void Init(T&& value) {
DCHECK(storage_.is_null_);
- new (storage_.buffer_.void_data()) T(std::move(value));
+ new (&storage_.value_) T(std::move(value));
storage_.is_null_ = false;
}
template <class... Args>
void Init(Args&&... args) {
DCHECK(storage_.is_null_);
- new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
+ new (&storage_.value_) T(std::forward<Args>(args)...);
storage_.is_null_ = false;
}
@@ -261,20 +310,20 @@ class Optional {
if (storage_.is_null_)
Init(value);
else
- *storage_.buffer_.template data_as<T>() = value;
+ storage_.value_ = value;
}
void InitOrAssign(T&& value) {
if (storage_.is_null_)
Init(std::move(value));
else
- *storage_.buffer_.template data_as<T>() = std::move(value);
+ storage_.value_ = std::move(value);
}
void FreeIfNeeded() {
if (storage_.is_null_)
return;
- storage_.buffer_.template data_as<T>()->~T();
+ storage_.value_.~T();
storage_.is_null_ = true;
}
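The move from AlignedMemory to an anonymous union is what makes the constexpr
constructors possible: a union member may be initialized in a constant
expression, whereas placement-new into a raw buffer may not. A small
illustration of what the new storage permits (not part of this patch):

    constexpr base::Optional<int> kEmpty;       // initializes the char empty_
    constexpr base::Optional<int> kAnswer(42);  // initializes value_ directly
    static_assert(kAnswer.has_value(), "constexpr construction");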
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
index d6bf263691..83025e8bda 100644
--- a/base/optional_unittest.cc
+++ b/base/optional_unittest.cc
@@ -98,7 +98,7 @@ static_assert(
TEST(OptionalTest, DefaultConstructor) {
{
- Optional<float> o;
+ constexpr Optional<float> o;
EXPECT_FALSE(o);
}
@@ -144,21 +144,28 @@ TEST(OptionalTest, CopyConstructor) {
TEST(OptionalTest, ValueConstructor) {
{
- Optional<float> o(0.1f);
+ constexpr float value = 0.1f;
+ constexpr Optional<float> o(value);
+
EXPECT_TRUE(o);
- EXPECT_EQ(o.value(), 0.1f);
+ EXPECT_EQ(value, o.value());
}
{
- Optional<std::string> o("foo");
+ std::string value("foo");
+ Optional<std::string> o(value);
+
EXPECT_TRUE(o);
- EXPECT_EQ(o.value(), "foo");
+ EXPECT_EQ(value, o.value());
}
{
- Optional<TestObject> o(TestObject(3, 0.1));
- EXPECT_TRUE(!!o);
- EXPECT_TRUE(o.value() == TestObject(3, 0.1));
+ TestObject value(3, 0.1);
+ Optional<TestObject> o(value);
+
+ EXPECT_TRUE(o);
+ EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
+ EXPECT_EQ(value, o.value());
}
}
@@ -198,35 +205,28 @@ TEST(OptionalTest, MoveConstructor) {
TEST(OptionalTest, MoveValueConstructor) {
{
- Optional<float> first(0.1f);
- Optional<float> second(std::move(first.value()));
+ float value = 0.1f;
+ Optional<float> o(std::move(value));
- EXPECT_TRUE(second);
- EXPECT_EQ(second.value(), 0.1f);
-
- EXPECT_TRUE(first);
+ EXPECT_TRUE(o);
+ EXPECT_EQ(0.1f, o.value());
}
{
- Optional<std::string> first("foo");
- Optional<std::string> second(std::move(first.value()));
-
- EXPECT_TRUE(second);
- EXPECT_EQ("foo", second.value());
+ std::string value("foo");
+ Optional<std::string> o(std::move(value));
- EXPECT_TRUE(first);
+ EXPECT_TRUE(o);
+ EXPECT_EQ("foo", o.value());
}
{
- Optional<TestObject> first(TestObject(3, 0.1));
- Optional<TestObject> second(std::move(first.value()));
-
- EXPECT_TRUE(!!second);
- EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
- EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+ TestObject value(3, 0.1);
+ Optional<TestObject> o(std::move(value));
- EXPECT_TRUE(!!first);
- EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ EXPECT_TRUE(o);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, o->state());
+ EXPECT_EQ(TestObject(3, 0.1), o.value());
}
}
@@ -251,7 +251,7 @@ TEST(OptionalTest, ConstructorForwardArguments) {
}
TEST(OptionalTest, NulloptConstructor) {
- Optional<int> a = base::nullopt;
+ constexpr Optional<int> a(base::nullopt);
EXPECT_FALSE(a);
}
@@ -1298,4 +1298,49 @@ TEST(OptionalTest, Hash_UseInSet) {
EXPECT_NE(setOptInt.end(), setOptInt.find(3));
}
+TEST(OptionalTest, HasValue) {
+ Optional<int> a;
+ EXPECT_FALSE(a.has_value());
+
+ a = 42;
+ EXPECT_TRUE(a.has_value());
+
+ a = nullopt;
+ EXPECT_FALSE(a.has_value());
+
+ a = 0;
+ EXPECT_TRUE(a.has_value());
+
+ a = Optional<int>();
+ EXPECT_FALSE(a.has_value());
+}
+
+TEST(OptionalTest, Reset_int) {
+ Optional<int> a(0);
+ EXPECT_TRUE(a.has_value());
+ EXPECT_EQ(0, a.value());
+
+ a.reset();
+ EXPECT_FALSE(a.has_value());
+ EXPECT_EQ(-1, a.value_or(-1));
+}
+
+TEST(OptionalTest, Reset_Object) {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ EXPECT_TRUE(a.has_value());
+ EXPECT_EQ(TestObject(0, 0.1), a.value());
+
+ a.reset();
+ EXPECT_FALSE(a.has_value());
+ EXPECT_EQ(TestObject(42, 0.0), a.value_or(TestObject(42, 0.0)));
+}
+
+TEST(OptionalTest, Reset_NoOp) {
+ Optional<int> a;
+ EXPECT_FALSE(a.has_value());
+
+ a.reset();
+ EXPECT_FALSE(a.has_value());
+}
+
} // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index 73834bd460..b2f95b4c45 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -4,22 +4,17 @@
#include "base/pending_task.h"
+#include "base/message_loop/message_loop.h"
#include "base/tracked_objects.h"
namespace base {
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- base::Closure task)
- : base::TrackingInfo(posted_from, TimeTicks()),
- task(std::move(task)),
- posted_from(posted_from),
- sequence_num(0),
- nestable(true),
- is_high_res(false) {
-}
+ OnceClosure task)
+ : PendingTask(posted_from, std::move(task), TimeTicks(), true) {}
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- base::Closure task,
+ OnceClosure task,
TimeTicks delayed_run_time,
bool nestable)
: base::TrackingInfo(posted_from, delayed_run_time),
@@ -28,6 +23,17 @@ PendingTask::PendingTask(const tracked_objects::Location& posted_from,
sequence_num(0),
nestable(nestable),
is_high_res(false) {
+ const PendingTask* parent_task =
+ MessageLoop::current() ? MessageLoop::current()->current_pending_task_
+ : nullptr;
+ if (parent_task) {
+ task_backtrace[0] = parent_task->posted_from.program_counter();
+ std::copy(parent_task->task_backtrace.begin(),
+ parent_task->task_backtrace.end() - 1,
+ task_backtrace.begin() + 1);
+ } else {
+ task_backtrace.fill(nullptr);
+ }
}
PendingTask::PendingTask(PendingTask&& other) = default;
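The new task_backtrace records the most recent posting sites: slot 0 holds the
parent task's posted_from program counter, and the parent's own trace shifts
down one slot. A sketch of the resulting chain, assuming task A posts B while
it runs, and B later posts C:

    // pc_A is where A was posted from, pc_B where B was posted from.
    //   B.task_backtrace == { pc_A, nullptr, nullptr, nullptr }
    //   C.task_backtrace == { pc_B, pc_A,    nullptr, nullptr }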
diff --git a/base/pending_task.h b/base/pending_task.h
index 5761653397..7f3fccd882 100644
--- a/base/pending_task.h
+++ b/base/pending_task.h
@@ -5,6 +5,7 @@
#ifndef BASE_PENDING_TASK_H_
#define BASE_PENDING_TASK_H_
+#include <array>
#include <queue>
#include "base/base_export.h"
@@ -18,10 +19,9 @@ namespace base {
// Contains data about a pending task. Stored in TaskQueue and DelayedTaskQueue
// for use by classes that queue and execute tasks.
struct BASE_EXPORT PendingTask : public TrackingInfo {
+ PendingTask(const tracked_objects::Location& posted_from, OnceClosure task);
PendingTask(const tracked_objects::Location& posted_from,
- Closure task);
- PendingTask(const tracked_objects::Location& posted_from,
- Closure task,
+ OnceClosure task,
TimeTicks delayed_run_time,
bool nestable);
PendingTask(PendingTask&& other);
@@ -33,11 +33,14 @@ struct BASE_EXPORT PendingTask : public TrackingInfo {
bool operator<(const PendingTask& other) const;
// The task to run.
- Closure task;
+ OnceClosure task;
// The site this PendingTask was posted from.
tracked_objects::Location posted_from;
+ // The posting sites of the last few tasks in the chain that led to this
+ // one; element 0 is the parent task's posting site (see the constructor).
+ std::array<const void*, 4> task_backtrace;
+
// Secondary sort key for run time.
int sequence_num;
diff --git a/base/pickle.cc b/base/pickle.cc
index 4ef167b089..0079b3979b 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -233,7 +233,6 @@ void PickleSizer::AddBytes(int length) {
void PickleSizer::AddAttachment() {
// From IPC::Message::WriteAttachment
- AddBool();
AddInt();
}
diff --git a/base/posix/global_descriptors.cc b/base/posix/global_descriptors.cc
index 6c187838ad..8da808e52d 100644
--- a/base/posix/global_descriptors.cc
+++ b/base/posix/global_descriptors.cc
@@ -47,6 +47,22 @@ int GlobalDescriptors::MaybeGet(Key key) const {
return -1;
}
+base::ScopedFD GlobalDescriptors::TakeFD(
+ Key key,
+ base::MemoryMappedFile::Region* region) {
+ base::ScopedFD fd;
+ for (Mapping::iterator i = descriptors_.begin(); i != descriptors_.end();
+ ++i) {
+ if (i->key == key) {
+ *region = i->region;
+ fd.reset(i->fd);
+ descriptors_.erase(i);
+ break;
+ }
+ }
+ return fd;
+}
+
void GlobalDescriptors::Set(Key key, int fd) {
Set(key, fd, base::MemoryMappedFile::Region::kWholeFile);
}
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
index edb299de5c..9d68761f23 100644
--- a/base/posix/global_descriptors.h
+++ b/base/posix/global_descriptors.h
@@ -13,6 +13,7 @@
#include <stdint.h>
#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_file.h"
#include "base/memory/singleton.h"
namespace base {
@@ -34,6 +35,10 @@ namespace base {
// It maps from an abstract key to a descriptor. If independent modules each
// need to define keys, then values should be chosen randomly so as not to
// collide.
+//
+// Note that this class is deprecated; passing file descriptors should ideally
+// be done through the command line, using FileDescriptorStore.
+// See https://crbug.com/692619
class BASE_EXPORT GlobalDescriptors {
public:
typedef uint32_t Key;
@@ -52,18 +57,7 @@ class BASE_EXPORT GlobalDescriptors {
// Often we want a canonical descriptor for a given Key. In this case, we add
// the following constant to the key value:
-#if !defined(OS_ANDROID)
static const int kBaseDescriptor = 3; // 0, 1, 2 are already taken.
-#else
- // 3 used by __android_log_write().
- // 4 used by... something important on Android M.
- // 5 used by... something important on Android L... on low-end devices.
- // TODO(amistry): On Android, this mechanism is only used for tests since the
- // content child launcher spawns a process by creating a new Activity using
- // the Android APIs. For tests, come up with a way that doesn't require using
- // a pre-defined fd.
- static const int kBaseDescriptor = 6;
-#endif
// Return the singleton instance of GlobalDescriptors.
static GlobalDescriptors* GetInstance();
@@ -74,6 +68,11 @@ class BASE_EXPORT GlobalDescriptors {
// Get a descriptor given a key. Returns -1 on error.
int MaybeGet(Key key) const;
+ // Returns the descriptor for |key| and removes it from this class's
+ // mappings, also populating |region|. Returns an invalid ScopedFD if the
+ // key is not known.
+ base::ScopedFD TakeFD(Key key, base::MemoryMappedFile::Region* region);
+
// Get a region given a key. It is a fatal error if the key is not known.
base::MemoryMappedFile::Region GetRegion(Key key) const;
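A minimal usage sketch for the new TakeFD(), with a caller-defined key
(kMyDescriptorKey is hypothetical):

    base::MemoryMappedFile::Region region;
    base::ScopedFD fd =
        base::GlobalDescriptors::GetInstance()->TakeFD(kMyDescriptorKey,
                                                       &region);
    // On success the key is removed from the global table and |fd| owns the
    // descriptor; for an unknown key, |fd| is left invalid.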
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
new file mode 100644
index 0000000000..1456129324
--- /dev/null
+++ b/base/post_task_and_reply_with_result_internal.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+#define BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+
+#include <utility>
+
+#include "base/callback.h"
+
+namespace base {
+
+namespace internal {
+
+// Adapts a function that produces a result via a return value to
+// one that returns via an output parameter.
+template <typename ReturnType>
+void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
+ ReturnType* result) {
+ *result = func.Run();
+}
+
+// Adapts a T* result to a callback that expects a T.
+template <typename TaskReturnType, typename ReplyArgType>
+void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+ TaskReturnType* result) {
+ callback.Run(std::move(*result));
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
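These two adapters are the plumbing behind PostTaskAndReplyWithResult(): the
task side writes its return value into heap storage, and the reply side moves
that value into the user's callback. A hedged sketch of the composition, with
Owned() keeping |result| alive across both closures:

    TaskReturnType* result = new TaskReturnType();
    task_runner->PostTaskAndReply(
        from_here,
        base::Bind(&base::internal::ReturnAsParamAdapter<TaskReturnType>,
                   task, result),
        base::Bind(&base::internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
                   reply, base::Owned(result)));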
diff --git a/base/power_monitor/power_monitor_device_source.h b/base/power_monitor/power_monitor_device_source.h
index 2dabac8865..4ecd2d981b 100644
--- a/base/power_monitor/power_monitor_device_source.h
+++ b/base/power_monitor/power_monitor_device_source.h
@@ -7,8 +7,6 @@
#include "base/base_export.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/observer_list_threadsafe.h"
#include "base/power_monitor/power_monitor_source.h"
#include "base/power_monitor/power_observer.h"
#include "build/build_config.h"
@@ -16,17 +14,8 @@
#if defined(OS_WIN)
#include <windows.h>
-// Windows HiRes timers drain the battery faster so we need to know the battery
-// status. This isn't true for other platforms.
-#define ENABLE_BATTERY_MONITORING 1
-#else
-#undef ENABLE_BATTERY_MONITORING
#endif // !OS_WIN
-#if defined(ENABLE_BATTERY_MONITORING)
-#include "base/timer/timer.h"
-#endif // defined(ENABLE_BATTERY_MONITORING)
-
#if defined(OS_IOS)
#include <objc/runtime.h>
#endif // OS_IOS
@@ -93,19 +82,11 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
// false otherwise.
bool IsOnBatteryPowerImpl() override;
- // Checks the battery status and notifies observers if the battery
- // status has changed.
- void BatteryCheck();
-
#if defined(OS_IOS)
// Holds pointers to system event notification observers.
std::vector<id> notification_observers_;
#endif
-#if defined(ENABLE_BATTERY_MONITORING)
- base::OneShotTimer delayed_battery_check_;
-#endif
-
#if defined(OS_WIN)
PowerMessageWindow power_message_window_;
#endif
diff --git a/base/power_monitor/power_monitor_source.h b/base/power_monitor/power_monitor_source.h
index e63f4f82bf..b69cbf8317 100644
--- a/base/power_monitor/power_monitor_source.h
+++ b/base/power_monitor/power_monitor_source.h
@@ -49,9 +49,14 @@ class BASE_EXPORT PowerMonitorSource {
// false otherwise.
virtual bool IsOnBatteryPowerImpl() = 0;
+ // Sets the initial state for |on_battery_power_|, which defaults to false
+ // since not all implementations can provide the value at construction. May
+ // only be called before a base::PowerMonitor has been created.
+ void SetInitialOnBatteryPowerState(bool on_battery_power);
+
private:
- bool on_battery_power_;
- bool suspended_;
+ bool on_battery_power_ = false;
+ bool suspended_ = false;
// This lock guards access to on_battery_power_, to ensure that
// IsOnBatteryPower can be called from any thread.
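A hedged sketch of how a platform-specific source might seed the initial state
from its constructor (MyPowerSource and QueryPlatformBatteryState() are
hypothetical):

    class MyPowerSource : public base::PowerMonitorSource {
     public:
      // Must run before a base::PowerMonitor is created; overrides of the
      // pure virtuals such as IsOnBatteryPowerImpl() are omitted here.
      MyPowerSource() {
        SetInitialOnBatteryPowerState(QueryPlatformBatteryState());
      }
    };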
diff --git a/base/process/internal_linux.cc b/base/process/internal_linux.cc
index d286f4e753..c7820040ce 100644
--- a/base/process/internal_linux.cc
+++ b/base/process/internal_linux.cc
@@ -133,9 +133,10 @@ size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
return StringToSizeT(proc_stats[field_num], &value) ? value : 0;
}
-int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
+int64_t ReadStatFileAndGetFieldAsInt64(const FilePath& stat_file,
+ ProcStatsFields field_num) {
std::string stats_data;
- if (!ReadProcStats(pid, &stats_data))
+ if (!ReadProcFile(stat_file, &stats_data))
return 0;
std::vector<std::string> proc_stats;
if (!ParseProcStats(stats_data, &proc_stats))
@@ -143,6 +144,16 @@ int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
return GetProcStatsFieldAsInt64(proc_stats, field_num);
}
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
+ FilePath stat_file = internal::GetProcPidDir(pid).Append(kStatFile);
+ return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
+}
+
+int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num) {
+ FilePath stat_file = FilePath(kProcDir).Append("self").Append(kStatFile);
+ return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
+}
+
size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
ProcStatsFields field_num) {
std::string stats_data;
@@ -170,6 +181,32 @@ Time GetBootTime() {
return Time::FromTimeT(btime);
}
+TimeDelta GetUserCpuTimeSinceBoot() {
+ FilePath path("/proc/stat");
+ std::string contents;
+ if (!ReadProcFile(path, &contents))
+ return TimeDelta();
+
+ ProcStatMap proc_stat;
+ ParseProcStat(contents, &proc_stat);
+ ProcStatMap::const_iterator cpu_it = proc_stat.find("cpu");
+ if (cpu_it == proc_stat.end())
+ return TimeDelta();
+
+ std::vector<std::string> cpu = SplitString(
+ cpu_it->second, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+ if (cpu.size() < 3 || cpu[0] != "cpu")
+ return TimeDelta();
+
+ uint64_t user;
+ uint64_t nice;
+ // cpu[0] is the "cpu" label itself; the user and nice times are the two
+ // fields that follow it.
+ if (!StringToUint64(cpu[1], &user) || !StringToUint64(cpu[2], &nice))
+ return TimeDelta();
+
+ return ClockTicksToTimeDelta(user + nice);
+}
+
TimeDelta ClockTicksToTimeDelta(int clock_ticks) {
// This queries the /proc-specific scaling factor which is
// conceptually the system hertz. To dump this value on another
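For reference, GetUserCpuTimeSinceBoot() parses the aggregate first line of
/proc/stat; the numbers below are illustrative:

    cpu  4705032 23147 1194094 104238745 ...
    // token[0] is the "cpu" label; token[1] (user) and token[2] (nice) are
    // summed and converted with ClockTicksToTimeDelta().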
diff --git a/base/process/internal_linux.h b/base/process/internal_linux.h
index ba793f7cc7..99d0fd5af1 100644
--- a/base/process/internal_linux.h
+++ b/base/process/internal_linux.h
@@ -72,9 +72,12 @@ int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
ProcStatsFields field_num);
-// Convenience wrapper around GetProcStatsFieldAsInt64(), ParseProcStats() and
+// Convenience wrappers around GetProcStatsFieldAsInt64(), ParseProcStats() and
// ReadProcStats(). See GetProcStatsFieldAsInt64() for details.
+int64_t ReadStatFileAndGetFieldAsInt64(const FilePath& stat_file,
+ ProcStatsFields field_num);
int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num);
+int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num);
// Same as ReadProcStatsAndGetFieldAsInt64() but for size_t values.
size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
@@ -83,6 +86,9 @@ size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
// Returns the time that the OS started. Clock ticks are relative to this.
Time GetBootTime();
+// Returns the amount of time spent in user space since boot across all CPUs.
+TimeDelta GetUserCpuTimeSinceBoot();
+
// Converts Linux clock ticks to a wall time delta.
TimeDelta ClockTicksToTimeDelta(int clock_ticks);
diff --git a/base/process/kill.h b/base/process/kill.h
index c664f33262..6d410e02a0 100644
--- a/base/process/kill.h
+++ b/base/process/kill.h
@@ -18,6 +18,16 @@ namespace base {
class ProcessFilter;
+#if defined(OS_WIN)
+namespace win {
+
+// See definition in sandbox/win/src/sandbox_types.h
+const DWORD kSandboxFatalMemoryExceeded = 7012;
+
+} // namespace win
+
+#endif // OS_WIN
+
// Return status values from GetTerminationStatus. Don't use these as
// exit code arguments to KillProcess*(), use platform/application
// specific values instead.
@@ -39,6 +49,7 @@ enum TerminationStatus {
TERMINATION_STATUS_OOM_PROTECTED, // child was protected from oom kill
#endif
TERMINATION_STATUS_LAUNCH_FAILED, // child process never launched
+ TERMINATION_STATUS_OOM, // Process died due to OOM
TERMINATION_STATUS_MAX_ENUM
};
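The new status pairs with the Windows-only constant above: a sandboxed child
that exits with kSandboxFatalMemoryExceeded can be reported as an OOM death. A
hedged sketch of such a mapping (the real lookup lives outside this diff):

    // Hypothetical classification helper.
    TerminationStatus ClassifyExitCode(DWORD exit_code) {
      if (exit_code == base::win::kSandboxFatalMemoryExceeded)
        return base::TERMINATION_STATUS_OOM;
      return base::TERMINATION_STATUS_NORMAL_TERMINATION;
    }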
diff --git a/base/process/kill_posix.cc b/base/process/kill_posix.cc
index 85470e05f9..4dc60ef2a2 100644
--- a/base/process/kill_posix.cc
+++ b/base/process/kill_posix.cc
@@ -52,6 +52,8 @@ TerminationStatus GetTerminationStatusImpl(ProcessHandle handle,
case SIGFPE:
case SIGILL:
case SIGSEGV:
+ case SIGTRAP:
+ case SIGSYS:
return TERMINATION_STATUS_PROCESS_CRASHED;
case SIGKILL:
#if defined(OS_CHROMEOS)
diff --git a/base/process/launch.cc b/base/process/launch.cc
index 3ca5155a12..c03e1a75db 100644
--- a/base/process/launch.cc
+++ b/base/process/launch.cc
@@ -7,43 +7,11 @@
namespace base {
-LaunchOptions::LaunchOptions()
- : wait(false),
-#if defined(OS_WIN)
- start_hidden(false),
- handles_to_inherit(NULL),
- inherit_handles(false),
- as_user(NULL),
- empty_desktop_name(false),
- job_handle(NULL),
- stdin_handle(NULL),
- stdout_handle(NULL),
- stderr_handle(NULL),
- force_breakaway_from_job_(false)
-#else
- clear_environ(false),
- fds_to_remap(NULL),
- maximize_rlimits(NULL),
- new_process_group(false)
-#if defined(OS_LINUX)
- , clone_flags(0)
- , allow_new_privs(false)
- , kill_on_parent_death(false)
-#endif // OS_LINUX
-#if defined(OS_POSIX)
- , pre_exec_delegate(NULL)
-#endif // OS_POSIX
-#if defined(OS_CHROMEOS)
- , ctrl_terminal_fd(-1)
-#endif // OS_CHROMEOS
-#endif // !defined(OS_WIN)
- {
-}
+LaunchOptions::LaunchOptions() = default;
LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
-LaunchOptions::~LaunchOptions() {
-}
+LaunchOptions::~LaunchOptions() = default;
LaunchOptions LaunchOptionsForTest() {
LaunchOptions options;
diff --git a/base/process/launch.h b/base/process/launch.h
index b8c02597a6..be8f6e73b9 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -63,17 +63,17 @@ struct BASE_EXPORT LaunchOptions {
~LaunchOptions();
// If true, wait for the process to complete.
- bool wait;
+ bool wait = false;
// If not empty, change to this directory before executing the new process.
base::FilePath current_directory;
#if defined(OS_WIN)
- bool start_hidden;
+ bool start_hidden = false;
// If non-null, inherit exactly the list of handles in this vector (these
// handles must be inheritable).
- HandlesToInheritVector* handles_to_inherit;
+ HandlesToInheritVector* handles_to_inherit = nullptr;
// If true, the new process inherits handles from the parent. In production
// code this flag should be used only when running short-lived, trusted
@@ -81,7 +81,7 @@ struct BASE_EXPORT LaunchOptions {
// leak to the child process, causing errors such as open socket hangs.
// Note: If |handles_to_inherit| is non-null, this flag is ignored and only
// those handles will be inherited.
- bool inherit_handles;
+ bool inherit_handles = false;
// If non-null, runs as if the user represented by the token had launched it.
// Whether the application is visible on the interactive desktop depends on
@@ -90,29 +90,29 @@ struct BASE_EXPORT LaunchOptions {
// To avoid hard to diagnose problems, when specified this loads the
// environment variables associated with the user and if this operation fails
// the entire call fails as well.
- UserTokenHandle as_user;
+ UserTokenHandle as_user = nullptr;
// If true, use an empty string for the desktop name.
- bool empty_desktop_name;
+ bool empty_desktop_name = false;
// If non-null, launches the application in that job object. The process will
// be terminated immediately and LaunchProcess() will fail if assignment to
// the job object fails.
- HANDLE job_handle;
+ HANDLE job_handle = nullptr;
// Handles for the redirection of stdin, stdout and stderr. The handles must
// be inheritable. Caller should either set all three of them or none (i.e.
// there is no way to redirect stderr without redirecting stdin). The
// |inherit_handles| flag must be set to true when redirecting stdio streams.
- HANDLE stdin_handle;
- HANDLE stdout_handle;
- HANDLE stderr_handle;
+ HANDLE stdin_handle = nullptr;
+ HANDLE stdout_handle = nullptr;
+ HANDLE stderr_handle = nullptr;
// If set to true, ensures that the child process is launched with the
// CREATE_BREAKAWAY_FROM_JOB flag which allows it to breakout of the parent
// job if any.
- bool force_breakaway_from_job_;
-#else
+ bool force_breakaway_from_job_ = false;
+#else // !defined(OS_WIN)
// Set/unset environment variables. These are applied on top of the parent
// process environment. Empty (the default) means to inherit the same
// environment. See AlterEnvironment().
@@ -120,53 +120,58 @@ struct BASE_EXPORT LaunchOptions {
// Clear the environment for the new process before processing changes from
// |environ|.
- bool clear_environ;
+ bool clear_environ = false;
// If non-null, remap file descriptors according to the mapping of
// src fd->dest fd to propagate FDs into the child process.
// This pointer is owned by the caller and must live through the
// call to LaunchProcess().
- const FileHandleMappingVector* fds_to_remap;
+ const FileHandleMappingVector* fds_to_remap = nullptr;
// Each element is an RLIMIT_* constant that should be raised to its
// rlim_max. This pointer is owned by the caller and must live through
// the call to LaunchProcess().
- const std::vector<int>* maximize_rlimits;
+ const std::vector<int>* maximize_rlimits = nullptr;
// If true, start the process in a new process group, instead of
// inheriting the parent's process group. The pgid of the child process
// will be the same as its pid.
- bool new_process_group;
+ bool new_process_group = false;
#if defined(OS_LINUX)
// If non-zero, start the process using clone(), using flags as provided.
// Unlike in clone, clone_flags may not contain a custom termination signal
// that is sent to the parent when the child dies. The termination signal will
// always be set to SIGCHLD.
- int clone_flags;
+ int clone_flags = 0;
// By default, child processes will have the PR_SET_NO_NEW_PRIVS bit set. If
// true, then this bit will not be set in the new child process.
- bool allow_new_privs;
+ bool allow_new_privs = false;
// Sets parent process death signal to SIGKILL.
- bool kill_on_parent_death;
+ bool kill_on_parent_death = false;
#endif // defined(OS_LINUX)
#if defined(OS_POSIX)
+ // If not empty, launch the specified executable instead of
+ // cmdline.GetProgram(). This is useful when it is necessary to pass a custom
+ // argv[0].
+ base::FilePath real_path;
+
// If non-null, a delegate to be run immediately prior to executing the new
// program in the child process.
//
// WARNING: If LaunchProcess is called in the presence of multiple threads,
// code running in this delegate essentially needs to be async-signal safe
// (see man 7 signal for a list of allowed functions).
- PreExecDelegate* pre_exec_delegate;
+ PreExecDelegate* pre_exec_delegate = nullptr;
#endif // defined(OS_POSIX)
#if defined(OS_CHROMEOS)
// If non-negative, the specified file descriptor will be set as the launched
// process' controlling terminal.
- int ctrl_terminal_fd;
+ int ctrl_terminal_fd = -1;
#endif // defined(OS_CHROMEOS)
#endif // !defined(OS_WIN)
};
@@ -270,6 +275,12 @@ BASE_EXPORT bool GetAppOutputWithExitCode(const CommandLine& cl,
BASE_EXPORT void RaiseProcessToHighPriority();
#if defined(OS_MACOSX)
+// An implementation of LaunchProcess() that uses posix_spawn() instead of
+// fork()+exec(). This does not support the |pre_exec_delegate| and
+// |current_directory| options.
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+ const LaunchOptions& options);
+
// Restore the default exception handler, setting it to Apple Crash Reporter
// (ReportCrash). When forking and execing a new process, the child will
// inherit the parent's exception ports, which may be set to the Breakpad
diff --git a/base/process/launch_mac.cc b/base/process/launch_mac.cc
index 5895eae435..3732bc1ecc 100644
--- a/base/process/launch_mac.cc
+++ b/base/process/launch_mac.cc
@@ -4,13 +4,75 @@
#include "base/process/launch.h"
+#include <crt_externs.h>
#include <mach/mach.h>
-#include <servers/bootstrap.h>
+#include <spawn.h>
+#include <string.h>
+#include <sys/wait.h>
#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/thread_restrictions.h"
namespace base {
+namespace {
+
+// DPSXCHECK is a Debug Posix Spawn Check macro. The posix_spawn* family of
+// functions return an errno value, as opposed to setting errno directly. This
+// macro emulates a DPCHECK().
+#define DPSXCHECK(expr) \
+ do { \
+ int rv = (expr); \
+ DCHECK_EQ(rv, 0) << #expr << ": -" << rv << " " << strerror(rv); \
+ } while (0)
+
+class PosixSpawnAttr {
+ public:
+ PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_init(&attr_)); }
+
+ ~PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_destroy(&attr_)); }
+
+ posix_spawnattr_t* get() { return &attr_; }
+
+ private:
+ posix_spawnattr_t attr_;
+};
+
+class PosixSpawnFileActions {
+ public:
+ PosixSpawnFileActions() {
+ DPSXCHECK(posix_spawn_file_actions_init(&file_actions_));
+ }
+
+ ~PosixSpawnFileActions() {
+ DPSXCHECK(posix_spawn_file_actions_destroy(&file_actions_));
+ }
+
+ void Open(int filedes, const char* path, int mode) {
+ DPSXCHECK(posix_spawn_file_actions_addopen(&file_actions_, filedes, path,
+ mode, 0));
+ }
+
+ void Dup2(int filedes, int newfiledes) {
+ DPSXCHECK(
+ posix_spawn_file_actions_adddup2(&file_actions_, filedes, newfiledes));
+ }
+
+ void Inherit(int filedes) {
+ DPSXCHECK(posix_spawn_file_actions_addinherit_np(&file_actions_, filedes));
+ }
+
+ const posix_spawn_file_actions_t* get() const { return &file_actions_; }
+
+ private:
+ posix_spawn_file_actions_t file_actions_;
+
+ DISALLOW_COPY_AND_ASSIGN(PosixSpawnFileActions);
+};
+
+} // namespace
+
void RestoreDefaultExceptionHandler() {
// This function is tailored to remove the Breakpad exception handler.
// exception_mask matches s_exception_mask in
@@ -28,4 +90,93 @@ void RestoreDefaultExceptionHandler() {
EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+ const LaunchOptions& options) {
+ DCHECK(!options.pre_exec_delegate)
+ << "LaunchProcessPosixSpawn does not support PreExecDelegate";
+ DCHECK(options.current_directory.empty())
+ << "LaunchProcessPosixSpawn does not support current_directory";
+
+ PosixSpawnAttr attr;
+
+ short flags = POSIX_SPAWN_CLOEXEC_DEFAULT;
+ if (options.new_process_group) {
+ flags |= POSIX_SPAWN_SETPGROUP;
+ DPSXCHECK(posix_spawnattr_setpgroup(attr.get(), 0));
+ }
+ DPSXCHECK(posix_spawnattr_setflags(attr.get(), flags));
+
+ PosixSpawnFileActions file_actions;
+
+ // Process file descriptors for the child. By default, LaunchProcess will
+ // open stdin to /dev/null and inherit stdout and stderr.
+ bool inherit_stdout = true, inherit_stderr = true;
+ bool null_stdin = true;
+ if (options.fds_to_remap) {
+ for (const auto& dup2_pair : *options.fds_to_remap) {
+ if (dup2_pair.second == STDIN_FILENO) {
+ null_stdin = false;
+ } else if (dup2_pair.second == STDOUT_FILENO) {
+ inherit_stdout = false;
+ } else if (dup2_pair.second == STDERR_FILENO) {
+ inherit_stderr = false;
+ }
+
+ if (dup2_pair.first == dup2_pair.second) {
+ file_actions.Inherit(dup2_pair.second);
+ } else {
+ file_actions.Dup2(dup2_pair.first, dup2_pair.second);
+ }
+ }
+ }
+
+ if (null_stdin) {
+ file_actions.Open(STDIN_FILENO, "/dev/null", O_RDONLY);
+ }
+ if (inherit_stdout) {
+ file_actions.Inherit(STDOUT_FILENO);
+ }
+ if (inherit_stderr) {
+ file_actions.Inherit(STDERR_FILENO);
+ }
+
+ std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
+ for (size_t i = 0; i < argv.size(); i++) {
+ argv_cstr[i] = const_cast<char*>(argv[i].c_str());
+ }
+ argv_cstr[argv.size()] = nullptr;
+
+ std::unique_ptr<char* []> owned_environ;
+ char** new_environ = options.clear_environ ? nullptr : *_NSGetEnviron();
+ if (!options.environ.empty()) {
+ owned_environ = AlterEnvironment(new_environ, options.environ);
+ new_environ = owned_environ.get();
+ }
+
+ const char* executable_path = !options.real_path.empty()
+ ? options.real_path.value().c_str()
+ : argv_cstr[0];
+
+ // Use posix_spawnp as some callers expect to have PATH consulted.
+ pid_t pid;
+ int rv = posix_spawnp(&pid, executable_path, file_actions.get(), attr.get(),
+ &argv_cstr[0], new_environ);
+
+ if (rv != 0) {
+ DLOG(ERROR) << "posix_spawnp(" << executable_path << "): -" << rv << " "
+ << strerror(rv);
+ return Process();
+ }
+
+ if (options.wait) {
+ // While this isn't strictly disk IO, waiting for another process to
+ // finish is the sort of thing ThreadRestrictions is trying to prevent.
+ base::ThreadRestrictions::AssertIOAllowed();
+ pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
+ DPCHECK(ret > 0);
+ }
+
+ return Process(pid);
+}
+
} // namespace base
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 4fb1018276..19effa2ce5 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -60,6 +60,8 @@
#if defined(OS_MACOSX)
#include <crt_externs.h>
#include <sys/event.h>
+
+#include "base/feature_list.h"
#else
extern char** environ;
#endif
@@ -70,6 +72,11 @@ namespace base {
namespace {
+#if defined(OS_MACOSX)
+const Feature kMacLaunchProcessPosixSpawn{"MacLaunchProcessPosixSpawn",
+ FEATURE_ENABLED_BY_DEFAULT};
+#endif
+
// Get the process's "environment" (i.e. the thing that setenv/getenv
// work with).
char** GetEnvironment() {
@@ -296,6 +303,15 @@ Process LaunchProcess(const CommandLine& cmdline,
Process LaunchProcess(const std::vector<std::string>& argv,
const LaunchOptions& options) {
+#if defined(OS_MACOSX)
+ if (FeatureList::IsEnabled(kMacLaunchProcessPosixSpawn)) {
+ // TODO(rsesek): Do this unconditionally. There is one user for each of
+ // these two options. https://crbug.com/179923.
+ if (!options.pre_exec_delegate && options.current_directory.empty())
+ return LaunchProcessPosixSpawn(argv, options);
+ }
+#endif
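Because the posix_spawn path is gated on a base::Feature, it can be toggled without a rebuild. A hedged sketch of forcing the legacy fork()+exec() path in a test, assuming the FeatureList API of this revision:

  // Hypothetical test setup; the feature name matches the definition above.
  auto feature_list = base::MakeUnique<base::FeatureList>();
  feature_list->InitializeFromCommandLine(
      std::string() /* enable_features */,
      "MacLaunchProcessPosixSpawn" /* disable_features */);
  base::FeatureList::SetInstance(std::move(feature_list));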
+
size_t fd_shuffle_size = 0;
if (options.fds_to_remap) {
fd_shuffle_size = options.fds_to_remap->size();
@@ -492,7 +508,10 @@ Process LaunchProcess(const std::vector<std::string>& argv,
options.pre_exec_delegate->RunAsyncSafe();
}
- execvp(argv_cstr[0], argv_cstr.get());
+ const char* executable_path = !options.real_path.empty() ?
+ options.real_path.value().c_str() : argv_cstr[0];
+
+ execvp(executable_path, argv_cstr.get());
RAW_LOG(ERROR, "LaunchProcess: failed to execvp:");
RAW_LOG(ERROR, argv_cstr[0]);
diff --git a/base/process/memory.cc b/base/process/memory.cc
new file mode 100644
index 0000000000..6349c08ca0
--- /dev/null
+++ b/base/process/memory.cc
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/process/memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Defined in memory_win.cc for Windows.
+#if !defined(OS_WIN)
+
+namespace {
+
+// Breakpad server classifies base::`anonymous namespace'::OnNoMemory as
+// an out-of-memory crash.

+NOINLINE void OnNoMemory(size_t size) {
+ size_t tmp_size = size;
+ base::debug::Alias(&tmp_size);
+ LOG(FATAL) << "Out of memory. size=" << tmp_size;
+}
+
+} // namespace
+
+void TerminateBecauseOutOfMemory(size_t size) {
+ OnNoMemory(size);
+}
+
+#endif
+
+// Defined in memory_mac.mm for Mac.
+#if !defined(OS_MACOSX)
+
+bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
+ const size_t alloc_size = num_items * size;
+
+ // Overflow check
+ if (size && ((alloc_size / size) != num_items)) {
+ *result = NULL;
+ return false;
+ }
+
+ if (!UncheckedMalloc(alloc_size, result))
+ return false;
+
+ memset(*result, 0, alloc_size);
+ return true;
+}
+
+#endif
+
+} // namespace base
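A worked instance of the overflow check in UncheckedCalloc() above, assuming a 32-bit size_t:

  // num_items = 0x10000, size = 0x10001:
  // alloc_size = 0x10000 * 0x10001 = 0x100010000, which truncates to 0x10000.
  // Then alloc_size / size = 0x10000 / 0x10001 = 0 != num_items, so the wrap
  // is detected and UncheckedCalloc() returns false without allocating.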
diff --git a/base/process/memory.h b/base/process/memory.h
new file mode 100644
index 0000000000..77911cfc35
--- /dev/null
+++ b/base/process/memory.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_MEMORY_H_
+#define BASE_PROCESS_MEMORY_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+#ifdef PVALLOC_AVAILABLE
+// Build config explicitly tells us whether or not pvalloc is available.
+#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+#define PVALLOC_AVAILABLE 1
+#else
+#define PVALLOC_AVAILABLE 0
+#endif
+
+namespace base {
+
+// Enables 'terminate on heap corruption' flag. Helps protect against heap
+// overflow. Has no effect if the OS doesn't provide the necessary facility.
+BASE_EXPORT void EnableTerminationOnHeapCorruption();
+
+// Turns on process termination if memory runs out.
+BASE_EXPORT void EnableTerminationOnOutOfMemory();
+
+// Terminates process. Should be called only for out of memory errors.
+// Crash reporting classifies such crashes as OOM.
+BASE_EXPORT void TerminateBecauseOutOfMemory(size_t size);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+BASE_EXPORT extern size_t g_oom_size;
+
+// The maximum allowed value for the OOM score.
+const int kMaxOomScore = 1000;
+
+// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
+// prefer to kill certain process types over others. The range for the
+// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
+// If the Linux system doesn't support the newer oom_score_adj range
+// of [0, 1000], then we revert to using the older oom_adj, and
+// translate the given value into [0, 15]. Some aliasing of values
+// may occur in that case, of course.
+BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
+#endif
+
+#if defined(OS_WIN)
+namespace win {
+
+// Custom Windows exception code chosen to indicate an out of memory error.
+// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
+// "To make sure that you do not define a code that conflicts with an existing
+// exception code" ... "The resulting error code should therefore have the
+// highest four bits set to hexadecimal E."
+// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
+const DWORD kOomExceptionCode = 0xe0000008;
+
+} // namespace win
+#endif
+
+// Special allocator functions for callers that want to check for OOM.
+// These will not abort if the allocation fails even if
+// EnableTerminationOnOutOfMemory has been called.
+// This can be useful for huge and/or unpredictable size memory allocations.
+// Please only use this if you really handle the case when the allocation
+// fails. Doing otherwise would risk security.
+// These functions may still crash on OOM when running under memory tools,
+// specifically ASan and other sanitizers.
+// Return value tells whether the allocation succeeded. If it fails |result| is
+// set to NULL, otherwise it holds the memory address.
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedMalloc(size_t size,
+ void** result);
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedCalloc(size_t num_items,
+ size_t size,
+ void** result);
+
+} // namespace base
+
+#endif // BASE_PROCESS_MEMORY_H_
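A minimal usage sketch for the Unchecked* API declared above (the size is an arbitrary illustration):

  void* buf = nullptr;
  if (!base::UncheckedMalloc(64 * 1024 * 1024, &buf)) {
    // Allocation failed; |buf| is null and the process was not terminated.
    return;
  }
  // ... use |buf| ...
  free(buf);  // Unchecked allocations are released with free().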
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
new file mode 100644
index 0000000000..985bc54eb6
--- /dev/null
+++ b/base/process/memory_linux.cc
@@ -0,0 +1,212 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <stddef.h>
+
+#include <new>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/features.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
+#endif
+
+namespace base {
+
+size_t g_oom_size = 0U;
+
+namespace {
+
+void OnNoMemorySize(size_t size) {
+ g_oom_size = size;
+
+ if (size != 0)
+ LOG(FATAL) << "Out of memory, size = " << size;
+ LOG(FATAL) << "Out of memory.";
+}
+
+void OnNoMemory() {
+ OnNoMemorySize(0);
+}
+
+} // namespace
+
+// TODO(primiano): Once the unified shim is on by default (crbug.com/550886)
+// get rid of the code in this entire #if section. The whole termination-on-OOM
+// logic is implemented in the shim.
+#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER) && !defined(LEAK_SANITIZER) && \
+ !BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+
+#if defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_realloc(void* ptr, size_t size);
+void* __libc_calloc(size_t nmemb, size_t size);
+void* __libc_valloc(size_t size);
+#if PVALLOC_AVAILABLE == 1
+void* __libc_pvalloc(size_t size);
+#endif
+void* __libc_memalign(size_t alignment, size_t size);
+
+// Overriding the system memory allocation functions:
+//
+// For security reasons, we want malloc failures to be fatal. Too much code
+// doesn't check for a NULL return value from malloc and unconditionally uses
+// the resulting pointer. If the first offset that they try to access is
+// attacker controlled, then the attacker can direct the code to access any
+// part of memory.
+//
+// Thus, we define all the standard malloc functions here and mark them as
+// visibility 'default'. This means that they replace the malloc functions for
+// all Chromium code and also for all code in shared libraries. There are tests
+// for this in process_util_unittest.cc.
+//
+// If we are using tcmalloc, then the problem is moot since tcmalloc handles
+// this for us. Thus this code is in a !defined(USE_TCMALLOC) block.
+//
+// If we are testing the binary with AddressSanitizer, we should not
+// redefine malloc but let AddressSanitizer do it instead.
+//
+// We call the real libc functions in this code by using __libc_malloc etc.
+// Previously we tried using dlsym(RTLD_NEXT, ...) but that failed depending on
+// the link order. Since ld.so needs calloc during symbol resolution, it
+// defines its own versions of several of these functions in dl-minimal.c.
+// Depending on the runtime library order, dlsym ended up giving us those
+// functions and bad things happened. See crbug.com/31809
+//
+// This means that any code which calls __libc_* gets the raw libc versions of
+// these functions.
+
+#define DIE_ON_OOM_1(function_name) \
+ void* function_name(size_t) __attribute__ ((visibility("default"))); \
+ \
+ void* function_name(size_t size) { \
+ void* ret = __libc_##function_name(size); \
+ if (ret == NULL && size != 0) \
+ OnNoMemorySize(size); \
+ return ret; \
+ }
+
+#define DIE_ON_OOM_2(function_name, arg1_type) \
+ void* function_name(arg1_type, size_t) \
+ __attribute__ ((visibility("default"))); \
+ \
+ void* function_name(arg1_type arg1, size_t size) { \
+ void* ret = __libc_##function_name(arg1, size); \
+ if (ret == NULL && size != 0) \
+ OnNoMemorySize(size); \
+ return ret; \
+ }
+
+DIE_ON_OOM_1(malloc)
+DIE_ON_OOM_1(valloc)
+#if PVALLOC_AVAILABLE == 1
+DIE_ON_OOM_1(pvalloc)
+#endif
+
+DIE_ON_OOM_2(calloc, size_t)
+DIE_ON_OOM_2(realloc, void*)
+DIE_ON_OOM_2(memalign, size_t)
+
+// posix_memalign has a unique signature and doesn't have a __libc_ variant.
+int posix_memalign(void** ptr, size_t alignment, size_t size)
+ __attribute__ ((visibility("default")));
+
+int posix_memalign(void** ptr, size_t alignment, size_t size) {
+ // This will use the safe version of memalign, above.
+ *ptr = memalign(alignment, size);
+ return 0;
+}
+
+} // extern C
+
+#else
+
+// TODO(mostynb@opera.com): dlsym dance
+
+#endif // LIBC_GLIBC && !USE_TCMALLOC
+
+#endif // !*_SANITIZER
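For clarity, DIE_ON_OOM_1(malloc) above expands to roughly the following (whitespace adjusted):

  void* malloc(size_t) __attribute__((visibility("default")));

  void* malloc(size_t size) {
    void* ret = __libc_malloc(size);
    if (ret == NULL && size != 0)
      OnNoMemorySize(size);
    return ret;
  }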
+
+void EnableTerminationOnHeapCorruption() {
+  // On Linux, there is nothing to do AFAIK.
+}
+
+void EnableTerminationOnOutOfMemory() {
+ // Set the new-out of memory handler.
+ std::set_new_handler(&OnNoMemory);
+ // If we're using glibc's allocator, the above functions will override
+ // malloc and friends and make them die on out of memory.
+
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ allocator::SetCallNewHandlerOnMallocFailure(true);
+#elif defined(USE_TCMALLOC)
+ // For tcmalloc, we need to tell it to behave like new.
+ tc_set_new_mode(1);
+#endif
+}
+
+// NOTE: This is not the only version of this function in the source:
+// the setuid sandbox (in process_util_linux.c, in the sandbox source)
+// also has its own C version.
+bool AdjustOOMScore(ProcessId process, int score) {
+ if (score < 0 || score > kMaxOomScore)
+ return false;
+
+ FilePath oom_path(internal::GetProcPidDir(process));
+
+ // Attempt to write the newer oom_score_adj file first.
+ FilePath oom_file = oom_path.AppendASCII("oom_score_adj");
+ if (PathExists(oom_file)) {
+ std::string score_str = IntToString(score);
+ DVLOG(1) << "Adjusting oom_score_adj of " << process << " to "
+ << score_str;
+ int score_len = static_cast<int>(score_str.length());
+ return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
+ }
+
+ // If the oom_score_adj file doesn't exist, then we write the old
+ // style file and translate the oom_adj score to the range 0-15.
+ oom_file = oom_path.AppendASCII("oom_adj");
+ if (PathExists(oom_file)) {
+ // Max score for the old oom_adj range. Used for conversion of new
+ // values to old values.
+ const int kMaxOldOomScore = 15;
+
+ int converted_score = score * kMaxOldOomScore / kMaxOomScore;
+ std::string score_str = IntToString(converted_score);
+ DVLOG(1) << "Adjusting oom_adj of " << process << " to " << score_str;
+ int score_len = static_cast<int>(score_str.length());
+ return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
+ }
+
+ return false;
+}
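A worked instance of the fallback translation above: for score = 300 on a kernel without oom_score_adj, converted_score = 300 * 15 / 1000 = 4 (integer division truncates 4.5), so "4" is written to /proc/<pid>/oom_adj.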
+
+bool UncheckedMalloc(size_t size, void** result) {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ *result = allocator::UncheckedAlloc(size);
+#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
+ (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
+ *result = malloc(size);
+#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+ *result = __libc_malloc(size);
+#elif defined(USE_TCMALLOC)
+ *result = tc_malloc_skip_new_handler(size);
+#endif
+ return *result != NULL;
+}
+
+} // namespace base
diff --git a/base/process/memory_stubs.cc b/base/process/memory_stubs.cc
new file mode 100644
index 0000000000..67deb4f58b
--- /dev/null
+++ b/base/process/memory_stubs.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+
+namespace base {
+
+void EnableTerminationOnOutOfMemory() {
+}
+
+void EnableTerminationOnHeapCorruption() {
+}
+
+bool AdjustOOMScore(ProcessId process, int score) {
+ ALLOW_UNUSED_PARAM(process);
+ ALLOW_UNUSED_PARAM(score);
+ return false;
+}
+
+void TerminateBecauseOutOfMemory(size_t size) {
+ ALLOW_UNUSED_PARAM(size);
+ abort();
+}
+
+// UncheckedMalloc and Calloc exist so that platforms making use of
+// EnableTerminationOnOutOfMemory have a way to allocate memory without
+// crashing. This _stubs.cc file is for platforms that do not support
+// EnableTerminationOnOutOfMemory (note the empty implementation above). As
+// such, these two Unchecked*alloc functions need only pass through trivially
+// to their respective stdlib functions, since those return null on a failure
+// to allocate.
+
+bool UncheckedMalloc(size_t size, void** result) {
+ *result = malloc(size);
+ return *result != nullptr;
+}
+
+bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
+ *result = calloc(num_items, size);
+ return *result != nullptr;
+}
+
+} // namespace base
diff --git a/base/process/port_provider_mac.cc b/base/process/port_provider_mac.cc
index ac13949ac8..23d214c3f3 100644
--- a/base/process/port_provider_mac.cc
+++ b/base/process/port_provider_mac.cc
@@ -21,7 +21,8 @@ void PortProvider::RemoveObserver(Observer* observer) {
void PortProvider::NotifyObservers(ProcessHandle process) {
base::AutoLock l(lock_);
- FOR_EACH_OBSERVER(Observer, observer_list_, OnReceivedTaskPort(process));
+ for (auto& observer : observer_list_)
+ observer.OnReceivedTaskPort(process);
}
} // namespace base
diff --git a/base/process/process.h b/base/process/process.h
index 70c8260193..fc2add24c5 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -15,8 +15,17 @@
#include "base/win/scoped_handle.h"
#endif
+#if defined(OS_MACOSX)
+#include "base/feature_list.h"
+#include "base/process/port_provider_mac.h"
+#endif
+
namespace base {
+#if defined(OS_MACOSX)
+extern const Feature kMacAllowBackgroundingProcesses;
+#endif
+
// Provides a move-only encapsulation of a process.
//
// This object is not tied to the lifetime of the underlying process: the
@@ -67,6 +76,9 @@ class BASE_EXPORT Process {
// Returns true if processes can be backgrounded.
static bool CanBackgroundProcesses();
+ // Terminates the current process immediately with |exit_code|.
+ static void TerminateCurrentProcessImmediately(int exit_code);
+
  // Returns true if this object represents a valid process.
bool IsValid() const;
@@ -99,13 +111,35 @@ class BASE_EXPORT Process {
// any process.
// NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
// not required.
- bool WaitForExit(int* exit_code);
+ bool WaitForExit(int* exit_code) const;
// Same as WaitForExit() but only waits for up to |timeout|.
// NOTE: |exit_code| is optional, nullptr can be passed if the exit code
// is not required.
- bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
-
+ bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
+
+#if defined(OS_MACOSX)
+ // The Mac needs a Mach port in order to manipulate a process's priority,
+ // and there's no good way to get that from base given the pid. These Mac
+ // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
+ // a port provider for this reason. See crbug.com/460102
+ //
+ // A process is backgrounded when its task priority is
+ // |TASK_BACKGROUND_APPLICATION|.
+ //
+ // Returns true if the port_provider can locate a task port for the process
+ // and it is backgrounded. If port_provider is null, returns false.
+ bool IsProcessBackgrounded(PortProvider* port_provider) const;
+
+ // Set the process as backgrounded. If value is
+ // true, the priority of the associated task will be set to
+ // TASK_BACKGROUND_APPLICATION. If value is false, the
+ // priority of the process will be set to TASK_FOREGROUND_APPLICATION.
+ //
+ // Returns true if the priority was changed, false otherwise. If
+ // |port_provider| is null, this is a no-op and it returns false.
+ bool SetProcessBackgrounded(PortProvider* port_provider, bool value);
+#else
  // A process is backgrounded when its priority is lower than normal.
  // Returns true if this process is backgrounded, false otherwise.
bool IsProcessBackgrounded() const;
@@ -115,7 +149,7 @@ class BASE_EXPORT Process {
// will be made "normal" - equivalent to default process priority.
// Returns true if the priority was changed, false otherwise.
bool SetProcessBackgrounded(bool value);
-
+#endif // defined(OS_MACOSX)
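A hedged usage sketch for the Mac variants above; |port_provider| is assumed to be a live PortProvider that can resolve the target's task port:

  if (!child.SetProcessBackgrounded(port_provider, true)) {
    // Either |port_provider| was null or no task port was found;
    // the priority is unchanged.
  }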
// Returns an integer representing the priority of a process. The meaning
// of this value is OS dependent.
int GetPriority() const;
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index 0b38726431..a38930a208 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -46,7 +46,7 @@ std::unique_ptr<Value> SystemMetrics::ToValue() const {
return std::move(res);
}
-ProcessMetrics* ProcessMetrics::CreateCurrentProcessMetrics() {
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateCurrentProcessMetrics() {
#if !defined(OS_MACOSX) || defined(OS_IOS)
return CreateProcessMetrics(base::GetCurrentProcessHandle());
#else
@@ -84,8 +84,9 @@ int ProcessMetrics::CalculateIdleWakeupsPerSecond(
last_idle_wakeups_time_ = time;
last_absolute_idle_wakeups_ = absolute_idle_wakeups;
- // Round to average wakeups per second.
int64_t wakeups_delta_for_ms = wakeups_delta * Time::kMicrosecondsPerSecond;
+  // Round the result to the nearest integer by adding 1/2 before dividing
+  // (the second term resolves to 1/2 without dropping down into floating
+  // point).
return (wakeups_delta_for_ms + time_delta / 2) / time_delta;
}
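A worked instance of the fixed-point rounding above: with wakeups_delta = 3 and time_delta = 2,000,000 us, wakeups_delta_for_ms = 3,000,000, and (3,000,000 + 1,000,000) / 2,000,000 = 2, i.e. 1.5 wakeups/s rounds to 2 rather than truncating to 1.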
#else
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 57cb3abec0..71d6042e00 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -11,6 +11,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <memory>
#include <string>
#include "base/base_export.h"
@@ -103,22 +104,22 @@ class BASE_EXPORT ProcessMetrics {
~ProcessMetrics();
// Creates a ProcessMetrics for the specified process.
- // The caller owns the returned object.
#if !defined(OS_MACOSX) || defined(OS_IOS)
- static ProcessMetrics* CreateProcessMetrics(ProcessHandle process);
+ static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
+ ProcessHandle process);
#else
// The port provider needs to outlive the ProcessMetrics object returned by
// this function. If NULL is passed as provider, the returned object
// only returns valid metrics if |process| is the current process.
- static ProcessMetrics* CreateProcessMetrics(ProcessHandle process,
- PortProvider* port_provider);
+ static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
+ ProcessHandle process,
+ PortProvider* port_provider);
#endif // !defined(OS_MACOSX) || defined(OS_IOS)
  // Creates a ProcessMetrics for the current process. This is a cross-platform
// convenience wrapper for CreateProcessMetrics().
- // The caller owns the returned object.
- static ProcessMetrics* CreateCurrentProcessMetrics();
+ static std::unique_ptr<ProcessMetrics> CreateCurrentProcessMetrics();
// Returns the current space allocated for the pagefile, in bytes (these pages
// may or may not be in memory). On Linux, this returns the total virtual
@@ -135,8 +136,7 @@ class BASE_EXPORT ProcessMetrics {
// memory currently allocated to a process that cannot be shared. Returns
// false on platform specific error conditions. Note: |private_bytes|
// returns 0 on unsupported OSes: prior to XP SP2.
- bool GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes);
+ bool GetMemoryBytes(size_t* private_bytes, size_t* shared_bytes) const;
// Fills a CommittedKBytes with both resident and paged
// memory usage as per definition of CommittedBytes.
void GetCommittedKBytes(CommittedKBytes* usage) const;
@@ -144,6 +144,9 @@ class BASE_EXPORT ProcessMetrics {
// usage in bytes, as per definition of WorkingSetBytes. Note that this
// function is somewhat expensive on Windows (a few ms per process).
bool GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const;
+ // Computes pss (proportional set size) of a process. Note that this
+ // function is somewhat expensive on Windows (a few ms per process).
+ bool GetProportionalSetSizeBytes(uint64_t* pss_bytes) const;
#if defined(OS_MACOSX)
// Fills both CommitedKBytes and WorkingSetKBytes in a single operation. This
@@ -151,6 +154,10 @@ class BASE_EXPORT ProcessMetrics {
// system call.
bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
WorkingSetKBytes* ws_usage) const;
+ // Returns private, shared, and total resident bytes.
+ bool GetMemoryBytes(size_t* private_bytes,
+ size_t* shared_bytes,
+ size_t* resident_bytes) const;
#endif
// Returns the CPU usage in percent since the last time this method or
@@ -295,9 +302,9 @@ struct BASE_EXPORT SystemMemoryInfoKB {
int dirty;
// vmstats data.
- int pswpin;
- int pswpout;
- int pgmajfault;
+ unsigned long pswpin;
+ unsigned long pswpout;
+ unsigned long pgmajfault;
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_CHROMEOS)
@@ -374,6 +381,9 @@ BASE_EXPORT bool IsValidDiskName(const std::string& candidate);
// Retrieves data from /proc/diskstats about system-wide disk I/O.
// Fills in the provided |diskinfo| structure. Returns true on success.
BASE_EXPORT bool GetSystemDiskInfo(SystemDiskInfo* diskinfo);
+
+// Returns the amount of time spent in user space since boot across all CPUs.
+BASE_EXPORT TimeDelta GetUserCpuTimeSinceBoot();
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_CHROMEOS)
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index 3d27656d6a..5d542cc675 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -17,6 +17,7 @@
#include "base/files/dir_reader_posix.h"
#include "base/files/file_util.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/process/internal_linux.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -72,8 +73,8 @@ size_t ReadProcStatusAndGetFieldAsSizeT(pid_t pid, const std::string& field) {
const std::string& key = pairs[i].first;
const std::string& value_str = pairs[i].second;
if (key == field) {
- std::vector<StringPiece> split_value_str = SplitStringPiece(
- value_str, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ std::vector<StringPiece> split_value_str =
+ SplitStringPiece(value_str, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (split_value_str.size() != 2 || split_value_str[1] != "kB") {
NOTREACHED();
return 0;
@@ -163,8 +164,9 @@ int GetProcessCPU(pid_t pid) {
} // namespace
// static
-ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
- return new ProcessMetrics(process);
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+ ProcessHandle process) {
+ return WrapUnique(new ProcessMetrics(process));
}
// On linux, we return vsize.
@@ -190,7 +192,7 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
}
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
WorkingSetKBytes ws_usage;
if (!GetWorkingSetKBytes(&ws_usage))
return false;
@@ -354,8 +356,7 @@ bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
}
std::vector<std::string> totmaps_fields = SplitString(
- totmaps_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
- base::SPLIT_WANT_NONEMPTY);
+ totmaps_data, kWhitespaceASCII, KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
DCHECK_EQ("Pss:", totmaps_fields[kPssIndex-1]);
DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
@@ -406,8 +407,8 @@ bool ProcessMetrics::GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage)
return false;
}
- std::vector<StringPiece> statm_vec = SplitStringPiece(
- statm, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ std::vector<StringPiece> statm_vec =
+ SplitStringPiece(statm, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (statm_vec.size() != 7)
return false; // Not the format we expect.
@@ -686,12 +687,16 @@ bool ParseProcVmstat(const std::string& vmstat_data,
if (tokens.size() != 2)
continue;
+ uint64_t val;
+ if (!StringToUint64(tokens[1], &val))
+ continue;
+
if (tokens[0] == "pswpin") {
- StringToInt(tokens[1], &meminfo->pswpin);
+ meminfo->pswpin = val;
} else if (tokens[0] == "pswpout") {
- StringToInt(tokens[1], &meminfo->pswpout);
+ meminfo->pswpout = val;
} else if (tokens[0] == "pgmajfault") {
- StringToInt(tokens[1], &meminfo->pgmajfault);
+ meminfo->pgmajfault = val;
}
}
@@ -907,6 +912,10 @@ bool GetSystemDiskInfo(SystemDiskInfo* diskinfo) {
return true;
}
+TimeDelta GetUserCpuTimeSinceBoot() {
+ return internal::GetUserCpuTimeSinceBoot();
+}
+
#if defined(OS_CHROMEOS)
std::unique_ptr<Value> SwapInfo::ToValue() const {
std::unique_ptr<DictionaryValue> res(new DictionaryValue());
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index 8b5d5644ff..51f5fd4e16 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -15,6 +15,7 @@
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ptr_util.h"
#include "base/sys_info.h"
#if !defined(TASK_POWER_INFO)
@@ -79,10 +80,7 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() {
- total = 0;
- free = 0;
-}
+SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
default;
@@ -94,10 +92,10 @@ SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
// otherwise return 0.
// static
-ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
ProcessHandle process,
PortProvider* port_provider) {
- return new ProcessMetrics(process, port_provider);
+ return WrapUnique(new ProcessMetrics(process, port_provider));
}
size_t ProcessMetrics::GetPagefileUsage() const {
@@ -112,10 +110,12 @@ size_t ProcessMetrics::GetPeakPagefileUsage() const {
}
size_t ProcessMetrics::GetWorkingSetSize() const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
+ size_t private_bytes = 0;
+ size_t shared_bytes = 0;
+ size_t resident_bytes = 0;
+ if (!GetMemoryBytes(&private_bytes, &shared_bytes, &resident_bytes))
return 0;
- return task_info_data.resident_size;
+ return resident_bytes;
}
size_t ProcessMetrics::GetPeakWorkingSetSize() const {
@@ -126,7 +126,7 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
size_t private_pages_count = 0;
size_t shared_pages_count = 0;
@@ -145,7 +145,7 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
// The same region can be referenced multiple times. To avoid double counting
// we need to keep track of which regions we've already counted.
- base::hash_set<int> seen_objects;
+ hash_set<int> seen_objects;
// We iterate through each VM region in the task's address map. For shared
// memory we add up all the pages that are marked as shared. Like libtop we
@@ -191,6 +191,7 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
info.share_mode = SM_PRIVATE;
switch (info.share_mode) {
+ case SM_LARGE_PAGE:
case SM_PRIVATE:
private_pages_count += info.private_pages_resident;
private_pages_count += info.shared_pages_resident;
@@ -199,6 +200,9 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
private_pages_count += info.private_pages_resident;
// Fall through
case SM_SHARED:
+ case SM_PRIVATE_ALIASED:
+ case SM_TRUESHARED:
+ case SM_SHARED_ALIASED:
if (seen_objects.count(info.obj_id) == 0) {
// Only count the first reference to this region.
seen_objects.insert(info.obj_id);
@@ -248,6 +252,15 @@ bool ProcessMetrics::GetCommittedAndWorkingSetKBytes(
return true;
}
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+ size_t* shared_bytes,
+ size_t* resident_bytes) const {
+ if (!GetMemoryBytes(private_bytes, shared_bytes))
+ return false;
+ *resident_bytes = *private_bytes + *shared_bytes;
+ return true;
+}
+
#define TIME_VALUE_TO_TIMEVAL(a, r) do { \
(r)->tv_sec = (a)->seconds; \
(r)->tv_usec = (a)->microseconds; \
@@ -326,6 +339,17 @@ int ProcessMetrics::GetIdleWakeupsPerSecond() {
// where TASK_POWER_INFO isn't supported yet.
return 0;
}
+
+ // The task_power_info struct contains two wakeup counters:
+ // task_interrupt_wakeups and task_platform_idle_wakeups.
+ // task_interrupt_wakeups is the total number of wakeups generated by the
+ // process, and is the number that Activity Monitor reports.
+ // task_platform_idle_wakeups is a subset of task_interrupt_wakeups that
+ // tallies the number of times the processor was taken out of its low-power
+ // idle state to handle a wakeup. task_platform_idle_wakeups therefore result
+ // in a greater power increase than the other interrupts which occur while the
+ // CPU is already working, and reducing them has a greater overall impact on
+ // power usage. See the powermetrics man page for more info.
return CalculateIdleWakeupsPerSecond(
power_info_data.task_platform_idle_wakeups);
}
diff --git a/base/process/process_metrics_posix.cc b/base/process/process_metrics_posix.cc
index fad581eece..13acf2ea34 100644
--- a/base/process/process_metrics_posix.cc
+++ b/base/process/process_metrics_posix.cc
@@ -33,6 +33,8 @@ static const rlim_t kSystemDefaultMaxFds = 256;
static const rlim_t kSystemDefaultMaxFds = 8192;
#elif defined(OS_FREEBSD)
static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_NETBSD)
+static const rlim_t kSystemDefaultMaxFds = 1024;
#elif defined(OS_OPENBSD)
static const rlim_t kSystemDefaultMaxFds = 256;
#elif defined(OS_ANDROID)
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 94a2ffe7f8..b0bd7ea80b 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -286,13 +286,13 @@ TEST_F(SystemMetricsTest, ParseVmstat) {
"pgrefill_high 0\n"
"pgrefill_movable 0\n";
EXPECT_TRUE(ParseProcVmstat(valid_input1, &meminfo));
- EXPECT_EQ(meminfo.pswpin, 179);
- EXPECT_EQ(meminfo.pswpout, 406);
- EXPECT_EQ(meminfo.pgmajfault, 487192);
+ EXPECT_EQ(179LU, meminfo.pswpin);
+ EXPECT_EQ(406LU, meminfo.pswpout);
+ EXPECT_EQ(487192LU, meminfo.pgmajfault);
EXPECT_TRUE(ParseProcVmstat(valid_input2, &meminfo));
- EXPECT_EQ(meminfo.pswpin, 12);
- EXPECT_EQ(meminfo.pswpout, 901);
- EXPECT_EQ(meminfo.pgmajfault, 2023);
+ EXPECT_EQ(12LU, meminfo.pswpin);
+ EXPECT_EQ(901LU, meminfo.pswpout);
+ EXPECT_EQ(2023LU, meminfo.pgmajfault);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -485,10 +485,13 @@ MULTIPROCESS_TEST_MAIN(ChildMain) {
} // namespace
+// Arc++ note: not compiled here, since SpawnMultiProcessTestChild pulls in a
+// lot of extra dependencies.
+#if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
TEST(ProcessMetricsTest, GetOpenFdCount) {
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- const FilePath temp_path = temp_dir.path();
+ const FilePath temp_path = temp_dir.GetPath();
CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
Process child = SpawnMultiProcessTestChild(
@@ -513,6 +516,8 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
EXPECT_EQ(0, open_fds);
ASSERT_TRUE(child.Terminate(0, true));
}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+
#endif // defined(OS_LINUX)
} // namespace debug
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index ba9b5447c0..a1d84e9128 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -9,6 +9,7 @@
#include <sys/resource.h>
#include <sys/wait.h>
+#include "base/debug/activity_tracker.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
@@ -266,12 +267,17 @@ Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
return Process(handle);
}
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX)
// static
bool Process::CanBackgroundProcesses() {
return false;
}
-#endif // !defined(OS_LINUX)
+#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
+
+// static
+void Process::TerminateCurrentProcessImmediately(int exit_code) {
+ _exit(exit_code);
+}
bool Process::IsValid() const {
return process_ != kNullProcessHandle;
@@ -314,6 +320,12 @@ bool Process::Terminate(int /*exit_code*/, bool wait) const {
if (result && wait) {
int tries = 60;
+ if (RunningOnValgrind()) {
+ // Wait for some extra time when running under Valgrind since the child
+ // processes may take some time doing leak checking.
+ tries *= 2;
+ }
+
unsigned sleep_ms = 4;
// The process may not end immediately due to pending I/O
@@ -353,15 +365,18 @@ bool Process::Terminate(int /*exit_code*/, bool wait) const {
}
#endif // !defined(OS_NACL_NONSFI)
-bool Process::WaitForExit(int* exit_code) {
+bool Process::WaitForExit(int* exit_code) const {
return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);
}
-bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedProcessWaitActivity process_activity(this);
+
return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);
}
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX)
bool Process::IsProcessBackgrounded() const {
// See SetProcessBackgrounded().
DCHECK(IsValid());
@@ -369,13 +384,13 @@ bool Process::IsProcessBackgrounded() const {
}
bool Process::SetProcessBackgrounded(bool /*value*/) {
- // Not implemented for POSIX systems other than Linux. With POSIX, if we were
- // to lower the process priority we wouldn't be able to raise it back to its
- // initial priority.
+ // Not implemented for POSIX systems other than Linux and Mac. With POSIX, if
+ // we were to lower the process priority we wouldn't be able to raise it back
+ // to its initial priority.
NOTIMPLEMENTED();
return false;
}
-#endif // !defined(OS_LINUX)
+#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
int Process::GetPriority() const {
DCHECK(IsValid());
diff --git a/base/profiler/scoped_profile.h b/base/profiler/scoped_profile.h
index 657150a0f1..4df6a1bc02 100644
--- a/base/profiler/scoped_profile.h
+++ b/base/profiler/scoped_profile.h
@@ -16,28 +16,38 @@
#include "base/location.h"
#include "base/macros.h"
#include "base/profiler/tracked_time.h"
+#include "base/trace_event/heap_profiler.h"
#include "base/tracked_objects.h"
-#define PASTE_LINE_NUMBER_ON_NAME(name, line) name##line
+// Two level indirection is required for correct macro substitution.
+#define PASTE_COUNTER_ON_NAME2(name, counter) name##counter
+#define PASTE_COUNTER_ON_NAME(name, counter) \
+ PASTE_COUNTER_ON_NAME2(name, counter)
-#define LINE_BASED_VARIABLE_NAME_FOR_PROFILING \
- PASTE_LINE_NUMBER_ON_NAME(some_profiler_variable_, __LINE__)
+#define COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING \
+ PASTE_COUNTER_ON_NAME(some_profiler_variable_, __COUNTER__)
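To see why the two-level indirection is needed: with a single-level paste, the argument is concatenated before __COUNTER__ is expanded.

  // Hypothetical single-level macro:
  //   #define PASTE1(name, counter) name##counter
  //   PASTE1(var_, __COUNTER__)  ->  var___COUNTER__  (counter not expanded)
  // The two-level form above expands __COUNTER__ first:
  //   PASTE_COUNTER_ON_NAME(var_, __COUNTER__)  ->  var_0, var_1, ...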
// Defines the containing scope as a profiled region. This allows developers to
// profile their code and see results on their about:profiler page, as well as
-// on the UMA dashboard.
-#define TRACK_RUN_IN_THIS_SCOPED_REGION(dispatch_function_name) \
- ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
- FROM_HERE_WITH_EXPLICIT_FUNCTION(#dispatch_function_name), \
- ::tracked_objects::ScopedProfile::ENABLED)
+// on the UMA dashboard and heap profiler.
+#define TRACK_RUN_IN_THIS_SCOPED_REGION(dispatch_function_name) \
+ const ::tracked_objects::Location& location = \
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(#dispatch_function_name); \
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+ COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING(location.file_name()); \
+ ::tracked_objects::ScopedProfile COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING( \
+ location, ::tracked_objects::ScopedProfile::ENABLED)
// Same as TRACK_RUN_IN_THIS_SCOPED_REGION except that there's an extra param
// which is concatenated with the function name for better filtering.
-#define TRACK_SCOPED_REGION(category_name, dispatch_function_name) \
- ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
- FROM_HERE_WITH_EXPLICIT_FUNCTION( \
- "[" category_name "]" dispatch_function_name), \
- ::tracked_objects::ScopedProfile::ENABLED)
+#define TRACK_SCOPED_REGION(category_name, dispatch_function_name) \
+ const ::tracked_objects::Location& location = \
+ FROM_HERE_WITH_EXPLICIT_FUNCTION("[" category_name \
+ "]" dispatch_function_name); \
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+ COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING(location.file_name()); \
+ ::tracked_objects::ScopedProfile COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING( \
+ location, ::tracked_objects::ScopedProfile::ENABLED)
namespace tracked_objects {
class Births;
diff --git a/base/rand_util_posix.cc b/base/rand_util_posix.cc
index 6a6e05ada8..469f7af9bf 100644
--- a/base/rand_util_posix.cc
+++ b/base/rand_util_posix.cc
@@ -13,6 +13,7 @@
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
namespace {
@@ -22,7 +23,7 @@ namespace {
// we can use LazyInstance to handle opening it on the first access.
class URandomFd {
public:
- URandomFd() : fd_(open("/dev/urandom", O_RDONLY)) {
+ URandomFd() : fd_(HANDLE_EINTR(open("/dev/urandom", O_RDONLY | O_CLOEXEC))) {
DCHECK_GE(fd_, 0) << "Cannot open /dev/urandom: " << errno;
}
diff --git a/base/run_loop.cc b/base/run_loop.cc
index a2322f8495..4c19d3589f 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -19,12 +19,14 @@ RunLoop::RunLoop()
running_(false),
quit_when_idle_received_(false),
weak_factory_(this) {
+ DCHECK(loop_);
}
RunLoop::~RunLoop() {
}
void RunLoop::Run() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!BeforeRun())
return;
@@ -44,6 +46,7 @@ void RunLoop::RunUntilIdle() {
}
void RunLoop::Quit() {
+ DCHECK(thread_checker_.CalledOnValidThread());
quit_called_ = true;
if (running_ && loop_->run_loop_ == this) {
// This is the inner-most RunLoop, so quit now.
@@ -52,6 +55,7 @@ void RunLoop::Quit() {
}
void RunLoop::QuitWhenIdle() {
+ DCHECK(thread_checker_.CalledOnValidThread());
quit_when_idle_received_ = true;
}
diff --git a/base/run_loop.h b/base/run_loop.h
index 635018f434..077d097ba9 100644
--- a/base/run_loop.h
+++ b/base/run_loop.h
@@ -10,6 +10,7 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_checker.h"
#include "build/build_config.h"
namespace base {
@@ -105,6 +106,8 @@ class BASE_EXPORT RunLoop {
// that we should quit Run once it becomes idle.
bool quit_when_idle_received_;
+ base::ThreadChecker thread_checker_;
+
// WeakPtrFactory for QuitClosure safety.
base::WeakPtrFactory<RunLoop> weak_factory_;
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
index 84de6b7d50..c2d51cfdb4 100644
--- a/base/scoped_generic.h
+++ b/base/scoped_generic.h
@@ -14,7 +14,7 @@
namespace base {
-// This class acts like ScopedPtr with a custom deleter (although is slightly
+// This class acts like unique_ptr with a custom deleter (although is slightly
// less fancy in some of the more esoteric respects) except that it keeps a
// copy of the object rather than a pointer, and we require that the contained
// object has some kind of "invalid" value.
@@ -22,12 +22,12 @@ namespace base {
// Defining a scoper based on this class allows you to get a scoper for
// non-pointer types without having to write custom code for set, reset, and
// move, etc. and get almost identical semantics that people are used to from
-// scoped_ptr.
+// unique_ptr.
//
// It is intended that you will typedef this class with an appropriate deleter
// to implement clean up tasks for objects that act like pointers from a
// resource management standpoint but aren't, such as file descriptors and
-// various types of operating system handles. Using scoped_ptr for these
+// various types of operating system handles. Using unique_ptr for these
// things requires that you keep a pointer to the handle valid for the lifetime
// of the scoper (which is easy to mess up).
//
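As a hedged sketch of the typedef pattern this comment describes (the traits shape is inferred from the class template; base::ScopedFD is the in-tree analogue):

  struct ScopedFDTraits {
    static int InvalidValue() { return -1; }
    static void Free(int fd) { close(fd); }  // from <unistd.h>
  };
  using ScopedFD = base::ScopedGeneric<int, ScopedFDTraits>;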
@@ -97,7 +97,7 @@ class ScopedGeneric {
}
// Frees the currently owned object, if any. Then takes ownership of a new
- // object, if given. Self-resets are not allowd as on scoped_ptr. See
+  // object, if given. Self-resets are not allowed as on unique_ptr. See
// http://crbug.com/162971
void reset(const element_type& value = traits_type::InvalidValue()) {
if (data_.generic != traits_type::InvalidValue() && data_.generic == value)
diff --git a/base/scoped_observer.h b/base/scoped_observer.h
index 13d7ca8bb1..7f1d6fba96 100644
--- a/base/scoped_observer.h
+++ b/base/scoped_observer.h
@@ -47,7 +47,7 @@ class ScopedObserver {
}
bool IsObserving(Source* source) const {
- return ContainsValue(sources_, source);
+ return base::ContainsValue(sources_, source);
}
bool IsObservingSources() const { return !sources_.empty(); }
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index af9d2bf19d..519c997eb0 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -87,31 +87,30 @@ void OverflowTestsSoftExpectTrue(bool overflow_detected) {
}
}
-#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_LINUX)
+#if defined(OS_IOS) || defined(ADDRESS_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
#define MAYBE_NewOverflow DISABLED_NewOverflow
#else
#define MAYBE_NewOverflow NewOverflow
#endif
// Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
// IOS doesn't honor nothrow, so disable the test there.
-// Crashes on Windows Dbg builds, disable there as well.
-// Disabled on Linux because failing Linux Valgrind bot, and Valgrind exclusions
-// are not currently read. See http://crbug.com/582398
+// Disabled under XSan because asan aborts when new returns nullptr,
+// https://bugs.chromium.org/p/chromium/issues/detail?id=690271#c15
TEST(SecurityTest, MAYBE_NewOverflow) {
const size_t kArraySize = 4096;
// We want something "dynamic" here, so that the compiler doesn't
// immediately reject crazy arrays.
const size_t kDynamicArraySize = HideValueFromCompiler(kArraySize);
- // numeric_limits are still not constexpr until we switch to C++11, so we
- // use an ugly cast.
- const size_t kMaxSizeT = ~static_cast<size_t>(0);
- ASSERT_EQ(numeric_limits<size_t>::max(), kMaxSizeT);
+ const size_t kMaxSizeT = std::numeric_limits<size_t>::max();
const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
{
std::unique_ptr<char[][kArraySize]> array_pointer(
new (nothrow) char[kDynamicArraySize2][kArraySize]);
- OverflowTestsSoftExpectTrue(!array_pointer);
+ // Prevent clang from optimizing away the whole test.
+ char* volatile p = reinterpret_cast<char*>(array_pointer.get());
+ OverflowTestsSoftExpectTrue(!p);
}
// On windows, the compiler prevents static array sizes of more than
// 0x7fffffff (error C2148).
@@ -121,7 +120,9 @@ TEST(SecurityTest, MAYBE_NewOverflow) {
{
std::unique_ptr<char[][kArraySize2]> array_pointer(
new (nothrow) char[kDynamicArraySize][kArraySize2]);
- OverflowTestsSoftExpectTrue(!array_pointer);
+ // Prevent clang from optimizing away the whole test.
+ char* volatile p = reinterpret_cast<char*>(array_pointer.get());
+ OverflowTestsSoftExpectTrue(!p);
}
#endif // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
}
diff --git a/base/sequence_checker.h b/base/sequence_checker.h
index ad0182825c..471631844b 100644
--- a/base/sequence_checker.h
+++ b/base/sequence_checker.h
@@ -5,13 +5,6 @@
#ifndef BASE_SEQUENCE_CHECKER_H_
#define BASE_SEQUENCE_CHECKER_H_
-// See comments for the similar block in thread_checker.h.
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
-#define ENABLE_SEQUENCE_CHECKER 1
-#else
-#define ENABLE_SEQUENCE_CHECKER 0
-#endif
-
#include "base/sequence_checker_impl.h"
namespace base {
@@ -22,23 +15,22 @@ namespace base {
// the right version for your build configuration.
class SequenceCheckerDoNothing {
public:
- bool CalledOnValidSequencedThread() const {
- return true;
- }
+ bool CalledOnValidSequence() const { return true; }
void DetachFromSequence() {}
};
-// SequenceChecker is a helper class used to help verify that some
-// methods of a class are called in sequence -- that is, called from
-// the same SequencedTaskRunner. It is a generalization of
-// ThreadChecker; see comments in sequence_checker_impl.h for details.
+// SequenceChecker is a helper class to verify that calls to some methods of a
+// class are sequenced. Calls are sequenced when they are issued:
+// - From tasks posted to SequencedTaskRunners or SingleThreadTaskRunners bound
+// to the same sequence, or,
+// - From a single thread outside of any task.
//
// Example:
// class MyClass {
// public:
// void Foo() {
-// DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+// DCHECK(sequence_checker_.CalledOnValidSequence());
// ... (do stuff) ...
// }
//
@@ -46,16 +38,14 @@ class SequenceCheckerDoNothing {
// SequenceChecker sequence_checker_;
// }
//
-// In Release mode, CalledOnValidSequencedThread() will always return true.
-#if ENABLE_SEQUENCE_CHECKER
+// In Release mode, CalledOnValidSequence() will always return true.
+#if DCHECK_IS_ON()
class SequenceChecker : public SequenceCheckerImpl {
};
#else
class SequenceChecker : public SequenceCheckerDoNothing {
};
-#endif // ENABLE_SEQUENCE_CHECKER
-
-#undef ENABLE_SEQUENCE_CHECKER
+#endif // DCHECK_IS_ON()
} // namespace base
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
index e95b8ee5f3..df2a8cb24f 100644
--- a/base/sequence_checker_impl.cc
+++ b/base/sequence_checker_impl.cc
@@ -4,43 +4,66 @@
#include "base/sequence_checker_impl.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/sequence_token.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_checker_impl.h"
+
namespace base {
-SequenceCheckerImpl::SequenceCheckerImpl()
- : sequence_token_assigned_(false) {
- AutoLock auto_lock(lock_);
- EnsureSequenceTokenAssigned();
-}
+class SequenceCheckerImpl::Core {
+ public:
+ Core()
+ : sequence_token_(SequenceToken::GetForCurrentThread()),
+ sequenced_worker_pool_token_(
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread()) {
+ // SequencedWorkerPool doesn't use SequenceToken and code outside of
+    // SequencedWorkerPool doesn't set a SequencedWorkerPool token.
+ DCHECK(!sequence_token_.IsValid() ||
+ !sequenced_worker_pool_token_.IsValid());
+ }
-SequenceCheckerImpl::~SequenceCheckerImpl() {}
+ ~Core() = default;
-bool SequenceCheckerImpl::CalledOnValidSequencedThread() const {
- AutoLock auto_lock(lock_);
- EnsureSequenceTokenAssigned();
+ bool CalledOnValidThread() const {
+ if (sequence_token_.IsValid())
+ return sequence_token_ == SequenceToken::GetForCurrentThread();
- // If this thread is not associated with a SequencedWorkerPool,
- // SequenceChecker behaves as a ThreadChecker. See header for details.
- if (!sequence_token_.IsValid())
+ if (sequenced_worker_pool_token_.IsValid()) {
+ return sequenced_worker_pool_token_.Equals(
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread());
+ }
+
+ // SequenceChecker behaves as a ThreadChecker when it is not bound to a
+ // valid sequence token.
return thread_checker_.CalledOnValidThread();
+ }
- return sequence_token_.Equals(
- SequencedWorkerPool::GetSequenceTokenForCurrentThread());
-}
+ private:
+ SequenceToken sequence_token_;
-void SequenceCheckerImpl::DetachFromSequence() {
+ // TODO(gab): Remove this when SequencedWorkerPool is deprecated in favor of
+ // TaskScheduler. crbug.com/622400
+ SequencedWorkerPool::SequenceToken sequenced_worker_pool_token_;
+
+ // Used when |sequenced_worker_pool_token_| and |sequence_token_| are invalid.
+ ThreadCheckerImpl thread_checker_;
+};
+
+SequenceCheckerImpl::SequenceCheckerImpl() : core_(MakeUnique<Core>()) {}
+SequenceCheckerImpl::~SequenceCheckerImpl() = default;
+
+bool SequenceCheckerImpl::CalledOnValidSequence() const {
AutoLock auto_lock(lock_);
- thread_checker_.DetachFromThread();
- sequence_token_assigned_ = false;
- sequence_token_ = SequencedWorkerPool::SequenceToken();
+ if (!core_)
+ core_ = MakeUnique<Core>();
+ return core_->CalledOnValidThread();
}
-void SequenceCheckerImpl::EnsureSequenceTokenAssigned() const {
- lock_.AssertAcquired();
- if (sequence_token_assigned_)
- return;
-
- sequence_token_assigned_ = true;
- sequence_token_ = SequencedWorkerPool::GetSequenceTokenForCurrentThread();
+void SequenceCheckerImpl::DetachFromSequence() {
+ AutoLock auto_lock(lock_);
+ core_.reset();
}
} // namespace base
diff --git a/base/sequence_checker_impl.h b/base/sequence_checker_impl.h
index e3c5fed508..a54c388451 100644
--- a/base/sequence_checker_impl.h
+++ b/base/sequence_checker_impl.h
@@ -5,44 +5,40 @@
#ifndef BASE_SEQUENCE_CHECKER_IMPL_H_
#define BASE_SEQUENCE_CHECKER_IMPL_H_
+#include <memory>
+
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
-#include "base/threading/sequenced_worker_pool.h"
-#include "base/threading/thread_checker_impl.h"
namespace base {
-// SequenceCheckerImpl is used to help verify that some methods of a
-// class are called in sequence -- that is, called from the same
-// SequencedTaskRunner. It is a generalization of ThreadChecker; in
-// particular, it behaves exactly like ThreadChecker if constructed
-// on a thread that is not part of a SequencedWorkerPool.
+// Real implementation of SequenceChecker for use in debug mode or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
+//
+// Note: You should almost always use the SequenceChecker class to get the right
+// version for your build configuration.
class BASE_EXPORT SequenceCheckerImpl {
public:
SequenceCheckerImpl();
~SequenceCheckerImpl();
- // Returns whether the we are being called on the same sequence token
- // as previous calls. If there is no associated sequence, then returns
- // whether we are being called on the underlying ThreadChecker's thread.
- bool CalledOnValidSequencedThread() const;
+ // Returns true if called in sequence with previous calls to this method and
+ // the constructor.
+ bool CalledOnValidSequence() const WARN_UNUSED_RESULT;
- // Unbinds the checker from the currently associated sequence. The
- // checker will be re-bound on the next call to CalledOnValidSequence().
+ // Unbinds the checker from the currently associated sequence. The checker
+ // will be re-bound on the next call to CalledOnValidSequence().
void DetachFromSequence();
private:
- void EnsureSequenceTokenAssigned() const;
+ class Core;
// Guards all variables below.
mutable Lock lock_;
-
- // Used if |sequence_token_| is not valid.
- ThreadCheckerImpl thread_checker_;
- mutable bool sequence_token_assigned_;
-
- mutable SequencedWorkerPool::SequenceToken sequence_token_;
+ mutable std::unique_ptr<Core> core_;
DISALLOW_COPY_AND_ASSIGN(SequenceCheckerImpl);
};
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
index 196bb1cc79..86e9298d97 100644
--- a/base/sequence_checker_unittest.cc
+++ b/base/sequence_checker_unittest.cc
@@ -2,334 +2,257 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/sequence_checker.h"
-
#include <stddef.h>
#include <memory>
-#include <utility>
+#include <string>
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/location.h"
-#include "base/logging.h"
+#include "base/callback_forward.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/sequence_checker_impl.h"
+#include "base/sequence_token.h"
#include "base/single_thread_task_runner.h"
#include "base/test/sequenced_worker_pool_owner.h"
-#include "base/threading/thread.h"
+#include "base/threading/simple_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
-// Duplicated from base/sequence_checker.h so that we can be good citizens
-// there and undef the macro.
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
-#define ENABLE_SEQUENCE_CHECKER 1
-#else
-#define ENABLE_SEQUENCE_CHECKER 0
-#endif
-
namespace base {
namespace {
-const size_t kNumWorkerThreads = 3;
+constexpr size_t kNumWorkerThreads = 3;
-// Simple class to exercise the basics of SequenceChecker.
-// DoStuff should verify that it's called on a valid sequenced thread.
-// SequenceCheckedObject can be destroyed on any thread (like WeakPtr).
-class SequenceCheckedObject {
+// Runs a callback on another thread.
+class RunCallbackThread : public SimpleThread {
public:
- SequenceCheckedObject() {}
- ~SequenceCheckedObject() {}
-
- // Verifies that it was called on the same thread as the constructor.
- void DoStuff() {
- DCHECK(sequence_checker_.CalledOnValidSequencedThread());
- }
-
- void DetachFromSequence() {
- sequence_checker_.DetachFromSequence();
+ explicit RunCallbackThread(const Closure& callback)
+ : SimpleThread("RunCallbackThread"), callback_(callback) {
+ Start();
+ Join();
}
private:
- SequenceChecker sequence_checker_;
+ // SimpleThread:
+ void Run() override { callback_.Run(); }
+
+ const Closure callback_;
- DISALLOW_COPY_AND_ASSIGN(SequenceCheckedObject);
+ DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
};
class SequenceCheckerTest : public testing::Test {
- public:
- SequenceCheckerTest() : other_thread_("sequence_checker_test_other_thread") {}
-
- void SetUp() override {
- other_thread_.Start();
- ResetPool();
- }
-
- void TearDown() override {
- other_thread_.Stop();
- }
-
protected:
- base::Thread* other_thread() { return &other_thread_; }
+ SequenceCheckerTest() : pool_owner_(kNumWorkerThreads, "test") {}
- const scoped_refptr<SequencedWorkerPool>& pool() {
- return pool_owner_->pool();
+ void PostToSequencedWorkerPool(const Closure& callback,
+ const std::string& token_name) {
+ pool_owner_.pool()->PostNamedSequencedWorkerTask(token_name, FROM_HERE,
+ callback);
}
- void PostDoStuffToWorkerPool(SequenceCheckedObject* sequence_checked_object,
- const std::string& token_name) {
- pool()->PostNamedSequencedWorkerTask(
- token_name,
- FROM_HERE,
- base::Bind(&SequenceCheckedObject::DoStuff,
- base::Unretained(sequence_checked_object)));
+ void FlushSequencedWorkerPoolForTesting() {
+ pool_owner_.pool()->FlushForTesting();
}
- void PostDoStuffToOtherThread(
- SequenceCheckedObject* sequence_checked_object) {
- other_thread()->task_runner()->PostTask(
- FROM_HERE, base::Bind(&SequenceCheckedObject::DoStuff,
- base::Unretained(sequence_checked_object)));
- }
-
- void PostDeleteToOtherThread(
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
- other_thread()->message_loop()->task_runner()->DeleteSoon(
- FROM_HERE, sequence_checked_object.release());
- }
-
- // Destroys the SequencedWorkerPool instance, blocking until it is fully shut
- // down, and creates a new instance.
- void ResetPool() {
- pool_owner_.reset(new SequencedWorkerPoolOwner(kNumWorkerThreads, "test"));
- }
-
- void MethodOnDifferentThreadDeathTest();
- void DetachThenCallFromDifferentThreadDeathTest();
- void DifferentSequenceTokensDeathTest();
- void WorkerPoolAndSimpleThreadDeathTest();
- void TwoDifferentWorkerPoolsDeathTest();
-
private:
MessageLoop message_loop_; // Needed by SequencedWorkerPool to function.
- base::Thread other_thread_;
- std::unique_ptr<SequencedWorkerPoolOwner> pool_owner_;
+ SequencedWorkerPoolOwner pool_owner_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceCheckerTest);
};
-TEST_F(SequenceCheckerTest, CallsAllowedOnSameThread) {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+void ExpectCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
+ ASSERT_TRUE(sequence_checker);
- // Verify that DoStuff doesn't assert.
- sequence_checked_object->DoStuff();
+ // This should bind |sequence_checker| to the current sequence if it wasn't
+ // already bound to a sequence.
+ EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
- // Verify that the destructor doesn't assert.
- sequence_checked_object.reset();
+ // Since |sequence_checker| is now bound to the current sequence, another call
+ // to CalledOnValidSequence() should return true.
+ EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
}
-TEST_F(SequenceCheckerTest, DestructorAllowedOnDifferentThread) {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+void ExpectCalledOnValidSequenceWithSequenceToken(
+ SequenceCheckerImpl* sequence_checker,
+ SequenceToken sequence_token) {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ ExpectCalledOnValidSequence(sequence_checker);
+}
- // Verify the destructor doesn't assert when called on a different thread.
- PostDeleteToOtherThread(std::move(sequence_checked_object));
- other_thread()->Stop();
+void ExpectNotCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
+ ASSERT_TRUE(sequence_checker);
+ EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
}
-TEST_F(SequenceCheckerTest, DetachFromSequence) {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+} // namespace
- // Verify that DoStuff doesn't assert when called on a different thread after
- // a call to DetachFromSequence.
- sequence_checked_object->DetachFromSequence();
+TEST_F(SequenceCheckerTest, CallsAllowedOnSameThreadNoSequenceToken) {
+ SequenceCheckerImpl sequence_checker;
+ EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
+}
- PostDoStuffToOtherThread(sequence_checked_object.get());
- other_thread()->Stop();
+TEST_F(SequenceCheckerTest, CallsAllowedOnSameThreadSameSequenceToken) {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ SequenceCheckerImpl sequence_checker;
+ EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
}
-TEST_F(SequenceCheckerTest, SameSequenceTokenValid) {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest, CallsDisallowedOnDifferentThreadsNoSequenceToken) {
+ SequenceCheckerImpl sequence_checker;
+ RunCallbackThread thread(
+ Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)));
+}
+
+TEST_F(SequenceCheckerTest, CallsAllowedOnDifferentThreadsSameSequenceToken) {
+ const SequenceToken sequence_token(SequenceToken::Create());
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- pool()->FlushForTesting();
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ SequenceCheckerImpl sequence_checker;
+ EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
- PostDeleteToOtherThread(std::move(sequence_checked_object));
- other_thread()->Stop();
+ RunCallbackThread thread(Bind(&ExpectCalledOnValidSequenceWithSequenceToken,
+ Unretained(&sequence_checker), sequence_token));
}
-TEST_F(SequenceCheckerTest, DetachSequenceTokenValid) {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest, CallsDisallowedOnSameThreadDifferentSequenceToken) {
+ std::unique_ptr<SequenceCheckerImpl> sequence_checker;
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- pool()->FlushForTesting();
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ sequence_checker.reset(new SequenceCheckerImpl);
+ }
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
- pool()->FlushForTesting();
+ {
+ // Different SequenceToken.
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
+ }
- PostDeleteToOtherThread(std::move(sequence_checked_object));
- other_thread()->Stop();
+ // No SequenceToken.
+ EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
}
-#if GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, DetachFromSequence) {
+ std::unique_ptr<SequenceCheckerImpl> sequence_checker;
-void SequenceCheckerTest::MethodOnDifferentThreadDeathTest() {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ sequence_checker.reset(new SequenceCheckerImpl);
+ }
- // DoStuff should assert in debug builds only when called on a
- // different thread.
- PostDoStuffToOtherThread(sequence_checked_object.get());
- other_thread()->Stop();
-}
+ sequence_checker->DetachFromSequence();
-#if ENABLE_SEQUENCE_CHECKER
-TEST_F(SequenceCheckerTest, MethodNotAllowedOnDifferentThreadDeathTestInDebug) {
- // The default style "fast" does not support multi-threaded tests.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- ASSERT_DEATH({
- MethodOnDifferentThreadDeathTest();
- }, "");
-}
-#else
-TEST_F(SequenceCheckerTest, MethodAllowedOnDifferentThreadDeathTestInRelease) {
- MethodOnDifferentThreadDeathTest();
+ {
+ // Verify that CalledOnValidSequence() returns true when called with
+ // a different sequence token after a call to DetachFromSequence().
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+ }
}
-#endif // ENABLE_SEQUENCE_CHECKER
-void SequenceCheckerTest::DetachThenCallFromDifferentThreadDeathTest() {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest, DetachFromSequenceNoSequenceToken) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
- // DoStuff doesn't assert when called on a different thread
- // after a call to DetachFromSequence.
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToOtherThread(sequence_checked_object.get());
- other_thread()->Stop();
+ // Verify that CalledOnValidSequence() returns true when called on a
+ // different thread after a call to DetachFromSequence().
+ RunCallbackThread thread(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)));
- // DoStuff should assert in debug builds only after moving to
- // another thread.
- sequence_checked_object->DoStuff();
+ EXPECT_FALSE(sequence_checker.CalledOnValidSequence());
}
-#if ENABLE_SEQUENCE_CHECKER
-TEST_F(SequenceCheckerTest, DetachFromSequenceDeathTestInDebug) {
- // The default style "fast" does not support multi-threaded tests.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- ASSERT_DEATH({
- DetachThenCallFromDifferentThreadDeathTest();
- }, "");
-}
-#else
-TEST_F(SequenceCheckerTest, DetachFromThreadDeathTestInRelease) {
- DetachThenCallFromDifferentThreadDeathTest();
+TEST_F(SequenceCheckerTest, SequencedWorkerPool_SameSequenceTokenValid) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
+
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ FlushSequencedWorkerPoolForTesting();
}
-#endif // ENABLE_SEQUENCE_CHECKER
-void SequenceCheckerTest::DifferentSequenceTokensDeathTest() {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest, SequencedWorkerPool_DetachSequenceTokenValid) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
- pool()->FlushForTesting();
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ FlushSequencedWorkerPoolForTesting();
- PostDeleteToOtherThread(std::move(sequence_checked_object));
- other_thread()->Stop();
-}
+ sequence_checker.DetachFromSequence();
-#if ENABLE_SEQUENCE_CHECKER
-TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInDebug) {
- // The default style "fast" does not support multi-threaded tests.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- ASSERT_DEATH({
- DifferentSequenceTokensDeathTest();
- }, "");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "B");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "B");
+ FlushSequencedWorkerPoolForTesting();
}
-#else
-TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInRelease) {
- DifferentSequenceTokensDeathTest();
-}
-#endif // ENABLE_SEQUENCE_CHECKER
-void SequenceCheckerTest::WorkerPoolAndSimpleThreadDeathTest() {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest,
+ SequencedWorkerPool_DifferentSequenceTokensInvalid) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
+
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ FlushSequencedWorkerPoolForTesting();
+
+ PostToSequencedWorkerPool(
+ Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)),
+ "B");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)),
+ "B");
+ FlushSequencedWorkerPoolForTesting();
+}
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- pool()->FlushForTesting();
+TEST_F(SequenceCheckerTest,
+ SequencedWorkerPool_WorkerPoolAndSimpleThreadInvalid) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
- PostDoStuffToOtherThread(sequence_checked_object.get());
- other_thread()->Stop();
-}
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ FlushSequencedWorkerPoolForTesting();
-#if ENABLE_SEQUENCE_CHECKER
-TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInDebug) {
- // The default style "fast" does not support multi-threaded tests.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- ASSERT_DEATH({
- WorkerPoolAndSimpleThreadDeathTest();
- }, "");
+ EXPECT_FALSE(sequence_checker.CalledOnValidSequence());
}
-#else
-TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInRelease) {
- WorkerPoolAndSimpleThreadDeathTest();
-}
-#endif // ENABLE_SEQUENCE_CHECKER
-void SequenceCheckerTest::TwoDifferentWorkerPoolsDeathTest() {
- std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
- new SequenceCheckedObject);
+TEST_F(SequenceCheckerTest,
+ SequencedWorkerPool_TwoDifferentWorkerPoolsInvalid) {
+ SequenceCheckerImpl sequence_checker;
+ sequence_checker.DetachFromSequence();
- sequence_checked_object->DetachFromSequence();
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
- pool()->FlushForTesting();
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ PostToSequencedWorkerPool(
+ Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
+ FlushSequencedWorkerPoolForTesting();
SequencedWorkerPoolOwner second_pool_owner(kNumWorkerThreads, "test2");
second_pool_owner.pool()->PostNamedSequencedWorkerTask(
- "A",
- FROM_HERE,
- base::Bind(&SequenceCheckedObject::DoStuff,
- base::Unretained(sequence_checked_object.get())));
+ "A", FROM_HERE, base::Bind(&ExpectNotCalledOnValidSequence,
+ base::Unretained(&sequence_checker)));
second_pool_owner.pool()->FlushForTesting();
}
-#if ENABLE_SEQUENCE_CHECKER
-TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInDebug) {
- // The default style "fast" does not support multi-threaded tests.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- ASSERT_DEATH({
- TwoDifferentWorkerPoolsDeathTest();
- }, "");
-}
-#else
-TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInRelease) {
- TwoDifferentWorkerPoolsDeathTest();
-}
-#endif // ENABLE_SEQUENCE_CHECKER
-
-#endif // GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
-
-} // namespace
-
} // namespace base
-
-// Just in case we ever get lumped together with other compilation units.
-#undef ENABLE_SEQUENCE_CHECKER
diff --git a/base/sequence_token.cc b/base/sequence_token.cc
new file mode 100644
index 0000000000..264e3b65e3
--- /dev/null
+++ b/base/sequence_token.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_token.h"
+
+#include "base/atomic_sequence_num.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+base::StaticAtomicSequenceNumber g_sequence_token_generator;
+
+base::StaticAtomicSequenceNumber g_task_token_generator;
+
+LazyInstance<ThreadLocalPointer<const SequenceToken>>::Leaky
+ tls_current_sequence_token = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalPointer<const TaskToken>>::Leaky
+ tls_current_task_token = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+bool SequenceToken::operator==(const SequenceToken& other) const {
+ return token_ == other.token_ && IsValid();
+}
+
+bool SequenceToken::operator!=(const SequenceToken& other) const {
+ return !(*this == other);
+}
+
+bool SequenceToken::IsValid() const {
+ return token_ != kInvalidSequenceToken;
+}
+
+int SequenceToken::ToInternalValue() const {
+ return token_;
+}
+
+SequenceToken SequenceToken::Create() {
+ return SequenceToken(g_sequence_token_generator.GetNext());
+}
+
+SequenceToken SequenceToken::GetForCurrentThread() {
+ const SequenceToken* current_sequence_token =
+ tls_current_sequence_token.Get().Get();
+ return current_sequence_token ? *current_sequence_token : SequenceToken();
+}
+
+bool TaskToken::operator==(const TaskToken& other) const {
+ return token_ == other.token_ && IsValid();
+}
+
+bool TaskToken::operator!=(const TaskToken& other) const {
+ return !(*this == other);
+}
+
+bool TaskToken::IsValid() const {
+ return token_ != kInvalidTaskToken;
+}
+
+TaskToken TaskToken::Create() {
+ return TaskToken(g_task_token_generator.GetNext());
+}
+
+TaskToken TaskToken::GetForCurrentThread() {
+ const TaskToken* current_task_token = tls_current_task_token.Get().Get();
+ return current_task_token ? *current_task_token : TaskToken();
+}
+
+ScopedSetSequenceTokenForCurrentThread::ScopedSetSequenceTokenForCurrentThread(
+ const SequenceToken& sequence_token)
+ : sequence_token_(sequence_token), task_token_(TaskToken::Create()) {
+ DCHECK(!tls_current_sequence_token.Get().Get());
+ DCHECK(!tls_current_task_token.Get().Get());
+ tls_current_sequence_token.Get().Set(&sequence_token_);
+ tls_current_task_token.Get().Set(&task_token_);
+}
+
+ScopedSetSequenceTokenForCurrentThread::
+ ~ScopedSetSequenceTokenForCurrentThread() {
+ DCHECK_EQ(tls_current_sequence_token.Get().Get(), &sequence_token_);
+ DCHECK_EQ(tls_current_task_token.Get().Get(), &task_token_);
+ tls_current_sequence_token.Get().Set(nullptr);
+ tls_current_task_token.Get().Set(nullptr);
+}
+
+} // namespace base
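
The constructor/destructor DCHECKs encode a non-nesting invariant: at most one scope may be live per thread at a time. A sketch of what they allow and reject (illustrative only):

  void RunTwoTasksBackToBack() {
    {
      base::ScopedSetSequenceTokenForCurrentThread scope_a(
          base::SequenceToken::Create());
    }  // scope_a must end before another scope begins on this thread...
    {
      base::ScopedSetSequenceTokenForCurrentThread scope_b(
          base::SequenceToken::Create());
    }  // ...constructing scope_b inside scope_a would trip the DCHECKs above.
  }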
diff --git a/base/sequence_token.h b/base/sequence_token.h
new file mode 100644
index 0000000000..6e7d191ae8
--- /dev/null
+++ b/base/sequence_token.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_TOKEN_H_
+#define BASE_SEQUENCE_TOKEN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+// A token that identifies a series of sequenced tasks (i.e. tasks that run one
+// at a time in posting order).
+class BASE_EXPORT SequenceToken {
+ public:
+ // Instantiates an invalid SequenceToken.
+ SequenceToken() = default;
+
+ // Explicitly allow copy.
+ SequenceToken(const SequenceToken& other) = default;
+ SequenceToken& operator=(const SequenceToken& other) = default;
+
+ // An invalid SequenceToken is not equal to any other SequenceToken, including
+ // other invalid SequenceTokens.
+ bool operator==(const SequenceToken& other) const;
+ bool operator!=(const SequenceToken& other) const;
+
+ // Returns true if this is a valid SequenceToken.
+ bool IsValid() const;
+
+ // Returns the integer uniquely representing this SequenceToken. This method
+ // should only be used for tracing and debugging.
+ int ToInternalValue() const;
+
+ // Returns a valid SequenceToken which isn't equal to any previously returned
+ // SequenceToken.
+ static SequenceToken Create();
+
+ // Returns the SequenceToken associated with the task running on the current
+ // thread, as determined by the active ScopedSetSequenceTokenForCurrentThread
+ // if any.
+ static SequenceToken GetForCurrentThread();
+
+ private:
+ explicit SequenceToken(int token) : token_(token) {}
+
+ static constexpr int kInvalidSequenceToken = -1;
+ int token_ = kInvalidSequenceToken;
+};
+
+// A token that identifies a task.
+//
+// This is used by ThreadCheckerImpl to determine whether calls to
+// CalledOnValidThread() come from the same task and hence are deterministically
+// single-threaded (vs. calls coming from different sequenced or parallel tasks,
+// which may or may not run on the same thread).
+class BASE_EXPORT TaskToken {
+ public:
+ // Instantiates an invalid TaskToken.
+ TaskToken() = default;
+
+ // Explicitly allow copy.
+ TaskToken(const TaskToken& other) = default;
+ TaskToken& operator=(const TaskToken& other) = default;
+
+ // An invalid TaskToken is not equal to any other TaskToken, including
+ // other invalid TaskTokens.
+ bool operator==(const TaskToken& other) const;
+ bool operator!=(const TaskToken& other) const;
+
+ // Returns true if this is a valid TaskToken.
+ bool IsValid() const;
+
+ // In the scope of a ScopedSetSequenceTokenForCurrentThread, returns a valid
+ // TaskToken which isn't equal to any TaskToken returned in the scope of a
+ // different ScopedSetSequenceTokenForCurrentThread. Otherwise, returns an
+ // invalid TaskToken.
+ static TaskToken GetForCurrentThread();
+
+ private:
+ friend class ScopedSetSequenceTokenForCurrentThread;
+
+ explicit TaskToken(int token) : token_(token) {}
+
+ // Returns a valid TaskToken which isn't equal to any previously returned
+ // TaskToken. This is private as it only meant to be instantiated by
+ // ScopedSetSequenceTokenForCurrentThread.
+ static TaskToken Create();
+
+ static constexpr int kInvalidTaskToken = -1;
+ int token_ = kInvalidTaskToken;
+};
+
+// Instantiate this in the scope where a single task runs.
+class BASE_EXPORT ScopedSetSequenceTokenForCurrentThread {
+ public:
+ // Throughout the lifetime of the constructed object,
+ // SequenceToken::GetForCurrentThread() will return |sequence_token| and
+ // TaskToken::GetForCurrentThread() will return a TaskToken which is not equal
+ // to any TaskToken returned in the scope of another
+ // ScopedSetSequenceTokenForCurrentThread.
+ ScopedSetSequenceTokenForCurrentThread(const SequenceToken& sequence_token);
+ ~ScopedSetSequenceTokenForCurrentThread();
+
+ private:
+ const SequenceToken sequence_token_;
+ const TaskToken task_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetSequenceTokenForCurrentThread);
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_TOKEN_H_
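
A sketch of the intended flow in a task-running loop (RunSequencedTask is hypothetical; the scoper is normally owned by the scheduler, not by user code):

  void RunSequencedTask(const base::SequenceToken& token,
                        const base::Closure& task) {
    base::ScopedSetSequenceTokenForCurrentThread scope(token);
    // Within |task|, SequenceToken::GetForCurrentThread() returns |token|
    // and TaskToken::GetForCurrentThread() returns a token unique to this
    // scope, so SequenceChecker/ThreadChecker can identify the sequence.
    task.Run();
  }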
diff --git a/base/sequence_token_unittest.cc b/base/sequence_token_unittest.cc
new file mode 100644
index 0000000000..b0e69de42b
--- /dev/null
+++ b/base/sequence_token_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_token.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SequenceTokenTest, IsValid) {
+ EXPECT_FALSE(SequenceToken().IsValid());
+ EXPECT_TRUE(SequenceToken::Create().IsValid());
+}
+
+TEST(SequenceTokenTest, OperatorEquals) {
+ SequenceToken invalid_a;
+ SequenceToken invalid_b;
+ const SequenceToken valid_a = SequenceToken::Create();
+ const SequenceToken valid_b = SequenceToken::Create();
+
+ EXPECT_FALSE(invalid_a == invalid_a);
+ EXPECT_FALSE(invalid_a == invalid_b);
+ EXPECT_FALSE(invalid_a == valid_a);
+ EXPECT_FALSE(invalid_a == valid_b);
+
+ EXPECT_FALSE(valid_a == invalid_a);
+ EXPECT_FALSE(valid_a == invalid_b);
+ EXPECT_EQ(valid_a, valid_a);
+ EXPECT_FALSE(valid_a == valid_b);
+}
+
+TEST(SequenceTokenTest, OperatorNotEquals) {
+ SequenceToken invalid_a;
+ SequenceToken invalid_b;
+ const SequenceToken valid_a = SequenceToken::Create();
+ const SequenceToken valid_b = SequenceToken::Create();
+
+ EXPECT_NE(invalid_a, invalid_a);
+ EXPECT_NE(invalid_a, invalid_b);
+ EXPECT_NE(invalid_a, valid_a);
+ EXPECT_NE(invalid_a, valid_b);
+
+ EXPECT_NE(valid_a, invalid_a);
+ EXPECT_NE(valid_a, invalid_b);
+ EXPECT_FALSE(valid_a != valid_a);
+ EXPECT_NE(valid_a, valid_b);
+}
+
+TEST(SequenceTokenTest, GetForCurrentThread) {
+ const SequenceToken token = SequenceToken::Create();
+
+ EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(token);
+ EXPECT_TRUE(SequenceToken::GetForCurrentThread().IsValid());
+ EXPECT_EQ(token, SequenceToken::GetForCurrentThread());
+ }
+
+ EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+}
+
+TEST(SequenceTokenTest, ToInternalValue) {
+ const SequenceToken token1 = SequenceToken::Create();
+ const SequenceToken token2 = SequenceToken::Create();
+
+ // Confirm that internal values are unique.
+ EXPECT_NE(token1.ToInternalValue(), token2.ToInternalValue());
+}
+
+// Expect a default-constructed TaskToken to be invalid and not equal to
+// another invalid TaskToken.
+TEST(TaskTokenTest, InvalidDefaultConstructed) {
+ EXPECT_FALSE(TaskToken().IsValid());
+ EXPECT_NE(TaskToken(), TaskToken());
+}
+
+// Expect a TaskToken returned by TaskToken::GetForCurrentThread() outside the
+// scope of a ScopedSetSequenceTokenForCurrentThread to be invalid.
+TEST(TaskTokenTest, InvalidOutsideScope) {
+ EXPECT_FALSE(TaskToken::GetForCurrentThread().IsValid());
+}
+
+// Expect an invalid TaskToken not to be equal to a valid TaskToken.
+TEST(TaskTokenTest, ValidNotEqualsInvalid) {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ TaskToken valid = TaskToken::GetForCurrentThread();
+ TaskToken invalid;
+ EXPECT_NE(valid, invalid);
+}
+
+// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
+// of the same ScopedSetSequenceTokenForCurrentThread instance to be
+// valid and equal to each other.
+TEST(TaskTokenTest, EqualInSameScope) {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+
+ const TaskToken token_a = TaskToken::GetForCurrentThread();
+ const TaskToken token_b = TaskToken::GetForCurrentThread();
+
+ EXPECT_TRUE(token_a.IsValid());
+ EXPECT_TRUE(token_b.IsValid());
+ EXPECT_EQ(token_a, token_b);
+}
+
+// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
+// of different ScopedSetSequenceTokenForCurrentThread instances to be
+// valid but not equal to each other.
+TEST(TaskTokenTest, NotEqualInDifferentScopes) {
+ TaskToken token_a;
+ TaskToken token_b;
+
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ token_a = TaskToken::GetForCurrentThread();
+ }
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ token_b = TaskToken::GetForCurrentThread();
+ }
+
+ EXPECT_TRUE(token_a.IsValid());
+ EXPECT_TRUE(token_b.IsValid());
+ EXPECT_NE(token_a, token_b);
+}
+
+} // namespace base
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index 00d4048815..dc11ebc3f1 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -14,18 +14,24 @@ bool SequencedTaskRunner::PostNonNestableTask(
return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
}
-bool SequencedTaskRunner::DeleteSoonInternal(
+bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
const tracked_objects::Location& from_here,
- void(*deleter)(const void*),
+ void (*deleter)(const void*),
const void* object) {
return PostNonNestableTask(from_here, Bind(deleter, object));
}
-bool SequencedTaskRunner::ReleaseSoonInternal(
- const tracked_objects::Location& from_here,
- void(*releaser)(const void*),
- const void* object) {
- return PostNonNestableTask(from_here, Bind(releaser, object));
+OnTaskRunnerDeleter::OnTaskRunnerDeleter(
+ scoped_refptr<SequencedTaskRunner> task_runner)
+ : task_runner_(std::move(task_runner)) {
+}
+
+OnTaskRunnerDeleter::~OnTaskRunnerDeleter() {
}
+OnTaskRunnerDeleter::OnTaskRunnerDeleter(OnTaskRunnerDeleter&&) = default;
+
+OnTaskRunnerDeleter& OnTaskRunnerDeleter::operator=(
+ OnTaskRunnerDeleter&&) = default;
+
} // namespace base
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index 6bb3f2b871..6b2726ed4f 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -122,9 +122,8 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
template <class T>
bool DeleteSoon(const tracked_objects::Location& from_here,
const T* object) {
- return
- subtle::DeleteHelperInternal<T, bool>::DeleteViaSequencedTaskRunner(
- this, from_here, object);
+ return DeleteOrReleaseSoonInternal(from_here, &DeleteHelper<T>::DoDelete,
+ object);
}
// Submits a non-nestable task to release the given object. Returns
@@ -132,26 +131,34 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
// and false if the object definitely will not be released.
template <class T>
bool ReleaseSoon(const tracked_objects::Location& from_here,
- T* object) {
- return
- subtle::ReleaseHelperInternal<T, bool>::ReleaseViaSequencedTaskRunner(
- this, from_here, object);
+ const T* object) {
+ return DeleteOrReleaseSoonInternal(from_here, &ReleaseHelper<T>::DoRelease,
+ object);
}
protected:
~SequencedTaskRunner() override {}
private:
- template <class T, class R> friend class subtle::DeleteHelperInternal;
- template <class T, class R> friend class subtle::ReleaseHelperInternal;
+ bool DeleteOrReleaseSoonInternal(const tracked_objects::Location& from_here,
+ void (*deleter)(const void*),
+ const void* object);
+};
+
+struct BASE_EXPORT OnTaskRunnerDeleter {
+ explicit OnTaskRunnerDeleter(scoped_refptr<SequencedTaskRunner> task_runner);
+ ~OnTaskRunnerDeleter();
- bool DeleteSoonInternal(const tracked_objects::Location& from_here,
- void(*deleter)(const void*),
- const void* object);
+ OnTaskRunnerDeleter(OnTaskRunnerDeleter&&);
+ OnTaskRunnerDeleter& operator=(OnTaskRunnerDeleter&&);
+
+ template <typename T>
+ void operator()(const T* ptr) {
+ if (ptr)
+ task_runner_->DeleteSoon(FROM_HERE, ptr);
+ }
- bool ReleaseSoonInternal(const tracked_objects::Location& from_here,
- void(*releaser)(const void*),
- const void* object);
+ scoped_refptr<SequencedTaskRunner> task_runner_;
};
} // namespace base
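
OnTaskRunnerDeleter is meant to serve as a std::unique_ptr deleter; a sketch, assuming a Foo type and a |task_runner| already in scope:

  std::unique_ptr<Foo, base::OnTaskRunnerDeleter> foo(
      new Foo, base::OnTaskRunnerDeleter(task_runner));
  // Wherever |foo| is destroyed, ~Foo runs as a non-nestable task posted
  // to |task_runner| via DeleteSoon().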
diff --git a/base/sequenced_task_runner_helpers.h b/base/sequenced_task_runner_helpers.h
index 7980b46b6c..18ec0e26f5 100644
--- a/base/sequenced_task_runner_helpers.h
+++ b/base/sequenced_task_runner_helpers.h
@@ -5,23 +5,9 @@
#ifndef BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
#define BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
-#include "base/debug/alias.h"
-#include "base/macros.h"
-
-// TODO(akalin): Investigate whether it's possible to just have
-// SequencedTaskRunner use these helpers (instead of MessageLoop).
-// Then we can just move these to sequenced_task_runner.h.
-
-namespace tracked_objects {
-class Location;
-}
-
namespace base {
-namespace subtle {
-template <class T, class R> class DeleteHelperInternal;
-template <class T, class R> class ReleaseHelperInternal;
-}
+class SequencedTaskRunner;
// Template helpers which use function indirection to erase T from the
// function signature while still remembering it so we can call the
@@ -34,80 +20,23 @@ template <class T, class R> class ReleaseHelperInternal;
template <class T>
class DeleteHelper {
private:
- template <class T2, class R> friend class subtle::DeleteHelperInternal;
-
static void DoDelete(const void* object) {
- delete reinterpret_cast<const T*>(object);
+ delete static_cast<const T*>(object);
}
- DISALLOW_COPY_AND_ASSIGN(DeleteHelper);
+ friend class SequencedTaskRunner;
};
template <class T>
class ReleaseHelper {
private:
- template <class T2, class R> friend class subtle::ReleaseHelperInternal;
-
static void DoRelease(const void* object) {
- reinterpret_cast<const T*>(object)->Release();
+ static_cast<const T*>(object)->Release();
}
- DISALLOW_COPY_AND_ASSIGN(ReleaseHelper);
+ friend class SequencedTaskRunner;
};
-namespace subtle {
-
-// An internal SequencedTaskRunner-like class helper for DeleteHelper
-// and ReleaseHelper. We don't want to expose the Do*() functions
-// directly directly since the void* argument makes it possible to
-// pass/ an object of the wrong type to delete. Instead, we force
-// callers to go through these internal helpers for type
-// safety. SequencedTaskRunner-like classes which expose DeleteSoon or
-// ReleaseSoon methods should friend the appropriate helper and
-// implement a corresponding *Internal method with the following
-// signature:
-//
-// bool(const tracked_objects::Location&,
-// void(*function)(const void*),
-// void* object)
-//
-// An implementation of this function should simply create a
-// base::Closure from (function, object) and return the result of
-// posting the task.
-template <class T, class ReturnType>
-class DeleteHelperInternal {
- public:
- template <class SequencedTaskRunnerType>
- static ReturnType DeleteViaSequencedTaskRunner(
- SequencedTaskRunnerType* sequenced_task_runner,
- const tracked_objects::Location& from_here,
- const T* object) {
- return sequenced_task_runner->DeleteSoonInternal(
- from_here, &DeleteHelper<T>::DoDelete, object);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(DeleteHelperInternal);
-};
-
-template <class T, class ReturnType>
-class ReleaseHelperInternal {
- public:
- template <class SequencedTaskRunnerType>
- static ReturnType ReleaseViaSequencedTaskRunner(
- SequencedTaskRunnerType* sequenced_task_runner,
- const tracked_objects::Location& from_here,
- const T* object) {
- return sequenced_task_runner->ReleaseSoonInternal(
- from_here, &ReleaseHelper<T>::DoRelease, object);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReleaseHelperInternal);
-};
-
-} // namespace subtle
-
} // namespace base
#endif // BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
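
The type-erasure contract is unchanged; only the gatekeeper moved. Instead of the subtle:: wrappers, the DeleteSoon()/ReleaseSoon() templates are now the only code that can pair an object with its typed deleter. Restated as a sketch:

  // Inside SequencedTaskRunner::DeleteSoon<T>(from_here, object):
  //   DeleteOrReleaseSoonInternal(from_here,
  //                               &DeleteHelper<T>::DoDelete,  // erases T
  //                               object);                     // const void*
  // DoDelete() recovers the type with static_cast<const T*>(object), so a
  // mismatched (function, object) pair cannot be formed outside the template.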
diff --git a/base/sha1_portable.cc b/base/sha1.cc
index dd2ab6fe17..a710001ab7 100644
--- a/base/sha1_portable.cc
+++ b/base/sha1.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <string.h>
+#include "base/sys_byteorder.h"
namespace base {
@@ -92,10 +93,6 @@ static inline uint32_t K(uint32_t t) {
}
}
-static inline void swapends(uint32_t* t) {
- *t = (*t >> 24) | ((*t >> 8) & 0xff00) | ((*t & 0xff00) << 8) | (*t << 24);
-}
-
const int SecureHashAlgorithm::kDigestSizeBytes = 20;
void SecureHashAlgorithm::Init() {
@@ -118,7 +115,7 @@ void SecureHashAlgorithm::Final() {
Process();
for (int t = 0; t < 5; ++t)
- swapends(&H[t]);
+ H[t] = ByteSwap(H[t]);
}
void SecureHashAlgorithm::Update(const void* data, size_t nbytes) {
@@ -165,7 +162,7 @@ void SecureHashAlgorithm::Process() {
// W and M are in a union, so no need to memcpy.
// memcpy(W, M, sizeof(M));
for (t = 0; t < 16; ++t)
- swapends(&W[t]);
+ W[t] = ByteSwap(W[t]);
// b.
for (t = 16; t < 80; ++t)
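
ByteSwap(uint32_t) from base/sys_byteorder.h computes the same full 32-bit reversal as the deleted swapends(); as a sanity sketch:

  uint32_t x = 0x11223344;
  DCHECK_EQ(base::ByteSwap(x), 0x44332211u);
  DCHECK_EQ(base::ByteSwap(x),
            (x >> 24) | ((x >> 8) & 0xff00) | ((x & 0xff00) << 8) | (x << 24));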
diff --git a/base/stl_util.h b/base/stl_util.h
index 12e226a9db..b0670b295e 100644
--- a/base/stl_util.h
+++ b/base/stl_util.h
@@ -8,13 +8,37 @@
#define BASE_STL_UTIL_H_
#include <algorithm>
+#include <deque>
+#include <forward_list>
#include <functional>
#include <iterator>
+#include <list>
+#include <map>
+#include <set>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/logging.h"
+namespace base {
+
+namespace internal {
+
+// Calls erase on iterators of matching elements.
+template <typename Container, typename Predicate>
+void IterateAndEraseIf(Container& container, Predicate pred) {
+ for (auto it = container.begin(); it != container.end();) {
+ if (pred(*it))
+ it = container.erase(it);
+ else
+ ++it;
+ }
+}
+
+} // namespace internal
+
// Clears internal memory of an STL object.
// STL clear()/reserve(0) does not always free internal memory allocated
// This function uses swap/destructor to ensure the internal memory is freed.
@@ -27,69 +51,6 @@ void STLClearObject(T* obj) {
obj->reserve(0);
}
-// For a range within a container of pointers, calls delete (non-array version)
-// on these pointers.
-// NOTE: for these three functions, we could just implement a DeleteObject
-// functor and then call for_each() on the range and functor, but this
-// requires us to pull in all of algorithm.h, which seems expensive.
-// For hash_[multi]set, it is important that this deletes behind the iterator
-// because the hash_set may call the hash function on the iterator when it is
-// advanced, which could result in the hash function trying to deference a
-// stale pointer.
-template <class ForwardIterator>
-void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
- while (begin != end) {
- ForwardIterator temp = begin;
- ++begin;
- delete *temp;
- }
-}
-
-// For a range within a container of pairs, calls delete (non-array version) on
-// BOTH items in the pairs.
-// NOTE: Like STLDeleteContainerPointers, it is important that this deletes
-// behind the iterator because if both the key and value are deleted, the
-// container may call the hash function on the iterator when it is advanced,
-// which could result in the hash function trying to dereference a stale
-// pointer.
-template <class ForwardIterator>
-void STLDeleteContainerPairPointers(ForwardIterator begin,
- ForwardIterator end) {
- while (begin != end) {
- ForwardIterator temp = begin;
- ++begin;
- delete temp->first;
- delete temp->second;
- }
-}
-
-// For a range within a container of pairs, calls delete (non-array version) on
-// the FIRST item in the pairs.
-// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
-template <class ForwardIterator>
-void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
- ForwardIterator end) {
- while (begin != end) {
- ForwardIterator temp = begin;
- ++begin;
- delete temp->first;
- }
-}
-
-// For a range within a container of pairs, calls delete.
-// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
-// Deleting the value does not always invalidate the iterator, but it may
-// do so if the key is a pointer into the value object.
-template <class ForwardIterator>
-void STLDeleteContainerPairSecondPointers(ForwardIterator begin,
- ForwardIterator end) {
- while (begin != end) {
- ForwardIterator temp = begin;
- ++begin;
- delete temp->second;
- }
-}
-
// Counts the number of instances of val in a container.
template <typename Container, typename T>
typename std::iterator_traits<
@@ -115,74 +76,6 @@ inline char* string_as_array(std::string* str) {
return str->empty() ? NULL : &*str->begin();
}
-// The following functions are useful for cleaning up STL containers whose
-// elements point to allocated memory.
-
-// STLDeleteElements() deletes all the elements in an STL container and clears
-// the container. This function is suitable for use with a vector, set,
-// hash_set, or any other STL container which defines sensible begin(), end(),
-// and clear() methods.
-//
-// If container is NULL, this function is a no-op.
-//
-// As an alternative to calling STLDeleteElements() directly, consider
-// STLElementDeleter (defined below), which ensures that your container's
-// elements are deleted when the STLElementDeleter goes out of scope.
-template <class T>
-void STLDeleteElements(T* container) {
- if (!container)
- return;
- STLDeleteContainerPointers(container->begin(), container->end());
- container->clear();
-}
-
-// Given an STL container consisting of (key, value) pairs, STLDeleteValues
-// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
-template <class T>
-void STLDeleteValues(T* container) {
- if (!container)
- return;
- STLDeleteContainerPairSecondPointers(container->begin(), container->end());
- container->clear();
-}
-
-
-// The following classes provide a convenient way to delete all elements or
-// values from STL containers when they goes out of scope. This greatly
-// simplifies code that creates temporary objects and has multiple return
-// statements. Example:
-//
-// vector<MyProto *> tmp_proto;
-// STLElementDeleter<vector<MyProto *> > d(&tmp_proto);
-// if (...) return false;
-// ...
-// return success;
-
-// Given a pointer to an STL container this class will delete all the element
-// pointers when it goes out of scope.
-template<class T>
-class STLElementDeleter {
- public:
- STLElementDeleter<T>(T* container) : container_(container) {}
- ~STLElementDeleter<T>() { STLDeleteElements(container_); }
-
- private:
- T* container_;
-};
-
-// Given a pointer to an STL container this class will delete all the value
-// pointers when it goes out of scope.
-template<class T>
-class STLValueDeleter {
- public:
- STLValueDeleter<T>(T* container) : container_(container) {}
- ~STLValueDeleter<T>() { STLDeleteValues(container_); }
-
- private:
- T* container_;
-};
-
// Test to see if a set, map, hash_set or hash_map contains a particular key.
// Returns true if the key is in the collection.
template <typename Collection, typename Key>
@@ -198,8 +91,6 @@ bool ContainsValue(const Collection& collection, const Value& value) {
collection.end();
}
-namespace base {
-
// Returns true if the container is sorted.
template <typename Container>
bool STLIsSorted(const Container& cont) {
@@ -257,6 +148,145 @@ bool STLIncludes(const Arg1& a1, const Arg2& a2) {
a2.begin(), a2.end());
}
+// Erase/EraseIf are based on library fundamentals ts v2 erase/erase_if
+// http://en.cppreference.com/w/cpp/experimental/lib_extensions_2
+// They provide a generic way to erase elements from a container.
+// The functions here implement these for the standard containers until those
+// functions are available in the C++ standard.
+// For Chromium containers overloads should be defined in their own headers
+// (like standard containers).
+// Note: there is no std::erase for standard associative containers so we don't
+// have it either.
+
+template <typename CharT, typename Traits, typename Allocator, typename Value>
+void Erase(std::basic_string<CharT, Traits, Allocator>& container,
+ const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <typename CharT, typename Traits, typename Allocator, class Predicate>
+void EraseIf(std::basic_string<CharT, Traits, Allocator>& container,
+ Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::deque<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::deque<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::vector<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::forward_list<T, Allocator>& container, const Value& value) {
+ // Unlike std::forward_list::remove, this function template accepts
+ // heterogeneous types and does not force a conversion to the container's
+ // value type before invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::list<T, Allocator>& container, const Value& value) {
+ // Unlike std::list::remove, this function template accepts heterogeneous
+ // types and does not force a conversion to the container's value type before
+ // invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::map<Key, T, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multimap<Key, T, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::set<Key, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multiset<Key, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_map<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(
+ std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_set<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
} // namespace base
#endif // BASE_STL_UTIL_H_
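
Usage sketch of the new helpers (values illustrative):

  std::vector<int> v = {1, 2, 3, 2};
  base::Erase(v, 2);   // v == {1, 3}

  std::map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
  base::EraseIf(m, [](const std::pair<const int, int>& e) {
    return e.first % 2 == 0;
  });                  // m == {{1, 1}, {3, 3}}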
diff --git a/base/stl_util_unittest.cc b/base/stl_util_unittest.cc
index 42004eb869..48d0f660b5 100644
--- a/base/stl_util_unittest.cc
+++ b/base/stl_util_unittest.cc
@@ -4,8 +4,20 @@
#include "base/stl_util.h"
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <iterator>
+#include <list>
+#include <map>
#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -28,6 +40,55 @@ class ComparableValue {
int value_;
};
+template <typename Container>
+void RunEraseTest() {
+ const std::pair<Container, Container> test_data[] = {
+ {Container(), Container()}, {{1, 2, 3}, {1, 3}}, {{1, 2, 3, 2}, {1, 3}}};
+
+ for (auto test_case : test_data) {
+ base::Erase(test_case.first, 2);
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+// This test is written for containers of std::pair<int, int> to support maps.
+template <typename Container>
+void RunEraseIfTest() {
+ struct {
+ Container input;
+ Container erase_even;
+ Container erase_odd;
+ } test_data[] = {
+ {Container(), Container(), Container()},
+ {{{1, 1}, {2, 2}, {3, 3}}, {{1, 1}, {3, 3}}, {{2, 2}}},
+ {{{1, 1}, {2, 2}, {3, 3}, {4, 4}}, {{1, 1}, {3, 3}}, {{2, 2}, {4, 4}}},
+ };
+
+ for (auto test_case : test_data) {
+ base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+ return !(elem.first & 1);
+ });
+ EXPECT_EQ(test_case.erase_even, test_case.input);
+ }
+
+ for (auto test_case : test_data) {
+ base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+ return elem.first & 1;
+ });
+ EXPECT_EQ(test_case.erase_odd, test_case.input);
+ }
+}
+
+struct CustomIntHash {
+ size_t operator()(int elem) const { return std::hash<int>()(elem) + 1; }
+};
+
+struct HashByFirst {
+ size_t operator()(const std::pair<int, int>& elem) const {
+ return std::hash<int>()(elem.first);
+ }
+};
+
} // namespace
namespace base {
@@ -263,5 +324,100 @@ TEST(StringAsArrayTest, WriteCopy) {
EXPECT_EQ("abc", s2);
}
+TEST(Erase, String) {
+ const std::pair<std::string, std::string> test_data[] = {
+ {"", ""}, {"abc", "bc"}, {"abca", "bc"},
+ };
+
+ for (auto test_case : test_data) {
+ Erase(test_case.first, 'a');
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+
+ for (auto test_case : test_data) {
+ EraseIf(test_case.first, [](char elem) { return elem < 'b'; });
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+TEST(Erase, String16) {
+ std::pair<base::string16, base::string16> test_data[] = {
+ {base::string16(), base::string16()},
+ {UTF8ToUTF16("abc"), UTF8ToUTF16("bc")},
+ {UTF8ToUTF16("abca"), UTF8ToUTF16("bc")},
+ };
+
+ const base::string16 letters = UTF8ToUTF16("ab");
+ for (auto test_case : test_data) {
+ Erase(test_case.first, letters[0]);
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+
+ for (auto test_case : test_data) {
+ EraseIf(test_case.first, [&](short elem) { return elem < letters[1]; });
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+TEST(Erase, Deque) {
+ RunEraseTest<std::deque<int>>();
+ RunEraseIfTest<std::deque<std::pair<int, int>>>();
+}
+
+TEST(Erase, Vector) {
+ RunEraseTest<std::vector<int>>();
+ RunEraseIfTest<std::vector<std::pair<int, int>>>();
+}
+
+TEST(Erase, ForwardList) {
+ RunEraseTest<std::forward_list<int>>();
+ RunEraseIfTest<std::forward_list<std::pair<int, int>>>();
+}
+
+TEST(Erase, List) {
+ RunEraseTest<std::list<int>>();
+ RunEraseIfTest<std::list<std::pair<int, int>>>();
+}
+
+TEST(Erase, Map) {
+ RunEraseIfTest<std::map<int, int>>();
+ RunEraseIfTest<std::map<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Multimap) {
+ RunEraseIfTest<std::multimap<int, int>>();
+ RunEraseIfTest<std::multimap<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Set) {
+ RunEraseIfTest<std::set<std::pair<int, int>>>();
+ RunEraseIfTest<
+ std::set<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, Multiset) {
+ RunEraseIfTest<std::multiset<std::pair<int, int>>>();
+ RunEraseIfTest<
+ std::multiset<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, UnorderedMap) {
+ RunEraseIfTest<std::unordered_map<int, int>>();
+ RunEraseIfTest<std::unordered_map<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedMultimap) {
+ RunEraseIfTest<std::unordered_multimap<int, int>>();
+ RunEraseIfTest<std::unordered_multimap<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedSet) {
+ RunEraseIfTest<std::unordered_set<std::pair<int, int>, HashByFirst>>();
+}
+
+TEST(Erase, UnorderedMultiset) {
+ RunEraseIfTest<std::unordered_multiset<std::pair<int, int>, HashByFirst>>();
+}
+
} // namespace
} // namespace base
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index 09aeb444d6..adb4bdb8d2 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -10,11 +10,11 @@
#include <wctype.h>
#include <limits>
+#include <type_traits>
#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "base/scoped_clear_errno.h"
-#include "base/scoped_clear_errno.h"
namespace base {
@@ -35,7 +35,8 @@ struct IntToStringT {
// The ValueOrDie call below can never fail, because UnsignedAbs is valid
// for all valid inputs.
- auto res = CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+ typename std::make_unsigned<INT>::type res =
+ CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
CHR* end = outbuf + kOutputBufSize;
CHR* i = end;
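
Spelling the type out guarantees |res| is the unsigned counterpart of INT, so the digit loop's modulus and division are well-defined even for the most negative value. A sketch of the guarantee:

  static_assert(std::is_same<std::make_unsigned<int>::type, unsigned>::value,
                "the digit loop must operate on the unsigned counterpart");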
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index 91191e07e1..b4c3068f36 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -719,19 +719,41 @@ TEST(StringNumberConversionsTest, StringToDouble) {
double output;
bool success;
} cases[] = {
+ // Test different forms of zero.
{"0", 0.0, true},
+ {"+0", 0.0, true},
+ {"-0", 0.0, true},
+ {"0.0", 0.0, true},
+ {"000000000000000000000000000000.0", 0.0, true},
+ {"0.000000000000000000000000000", 0.0, true},
+
+ // Test the answer.
{"42", 42.0, true},
{"-42", -42.0, true},
+
+ // Test variances of an ordinary number.
{"123.45", 123.45, true},
{"-123.45", -123.45, true},
{"+123.45", 123.45, true},
+
+ // Test different forms of representation.
{"2.99792458e8", 299792458.0, true},
{"149597870.691E+3", 149597870691.0, true},
{"6.", 6.0, true},
- {"9e99999999999999999999", std::numeric_limits<double>::infinity(),
- false},
- {"-9e99999999999999999999", -std::numeric_limits<double>::infinity(),
- false},
+
+ // Test around the largest/smallest value that a double can represent.
+ {"9e307", 9e307, true},
+ {"1.7976e308", 1.7976e308, true},
+ {"1.7977e308", HUGE_VAL, false},
+ {"1.797693134862315807e+308", HUGE_VAL, true},
+ {"1.797693134862315808e+308", HUGE_VAL, false},
+ {"9e308", HUGE_VAL, false},
+ {"9e309", HUGE_VAL, false},
+ {"9e999", HUGE_VAL, false},
+ {"9e1999", HUGE_VAL, false},
+ {"9e19999", HUGE_VAL, false},
+ {"9e99999999999999999999", std::numeric_limits<double>::infinity(), false},
+ {"-9e99999999999999999999", -std::numeric_limits<double>::infinity(), false},
{"1e-2", 0.01, true},
{"42 ", 42.0, false},
{" 1e-2", 0.01, false},
@@ -739,6 +761,9 @@ TEST(StringNumberConversionsTest, StringToDouble) {
{"-1E-7", -0.0000001, true},
{"01e02", 100, true},
{"2.3e15", 2.3e15, true},
+ {"100e-309", 100e-309, true},
+
+ // Test some invalid cases.
{"\t\n\v\f\r -123.45e2", -12345.0, false},
{"+123 e4", 123.0, false},
{"123e ", 123.0, false},
@@ -749,6 +774,10 @@ TEST(StringNumberConversionsTest, StringToDouble) {
{"-", 0.0, false},
{"+", 0.0, false},
{"", 0.0, false},
+
+ // crbug.org/588726
+ {"-0.0010000000000000000000000000000000000000001e-256",
+ -1.0000000000000001e-259, true},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
@@ -809,4 +838,54 @@ TEST(StringNumberConversionsTest, HexEncode) {
EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
}
+// Test cases of known-bad strtod conversions that motivated the use of dmg_fp.
+// See https://bugs.chromium.org/p/chromium/issues/detail?id=593512.
+TEST(StringNumberConversionsTest, StrtodFailures) {
+ static const struct {
+ const char* input;
+ uint64_t expected;
+ } cases[] = {
+ // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-visual-c-plus-plus/
+ {"9214843084008499", 0x43405e6cec57761aULL},
+ {"0.500000000000000166533453693773481063544750213623046875",
+ 0x3fe0000000000002ULL},
+ {"30078505129381147446200", 0x44997a3c7271b021ULL},
+ {"1777820000000000000001", 0x4458180d5bad2e3eULL},
+ {"0.500000000000000166547006220929549868969843373633921146392822265625",
+ 0x3fe0000000000002ULL},
+ {"0.50000000000000016656055874808561867439493653364479541778564453125",
+ 0x3fe0000000000002ULL},
+ {"0.3932922657273", 0x3fd92bb352c4623aULL},
+
+ // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-gcc-and-glibc/
+ {"0.500000000000000166533453693773481063544750213623046875",
+ 0x3fe0000000000002ULL},
+ {"3.518437208883201171875e13", 0x42c0000000000002ULL},
+ {"62.5364939768271845828", 0x404f44abd5aa7ca4ULL},
+ {"8.10109172351e-10", 0x3e0bd5cbaef0fd0cULL},
+ {"1.50000000000000011102230246251565404236316680908203125",
+ 0x3ff8000000000000ULL},
+ {"9007199254740991.4999999999999999999999999999999995",
+ 0x433fffffffffffffULL},
+
+ // http://www.exploringbinary.com/incorrect-decimal-to-floating-point-conversion-in-sqlite/
+ {"1e-23", 0x3b282db34012b251ULL},
+ {"8.533e+68", 0x4e3fa69165a8eea2ULL},
+ {"4.1006e-184", 0x19dbe0d1c7ea60c9ULL},
+ {"9.998e+307", 0x7fe1cc0a350ca87bULL},
+ {"9.9538452227e-280", 0x0602117ae45cde43ULL},
+ {"6.47660115e-260", 0x0a1fdd9e333badadULL},
+ {"7.4e+47", 0x49e033d7eca0adefULL},
+ {"5.92e+48", 0x4a1033d7eca0adefULL},
+ {"7.35e+66", 0x4dd172b70eababa9ULL},
+ {"8.32116e+55", 0x4b8b2628393e02cdULL},
+ };
+
+ for (const auto& test : cases) {
+ double output;
+ EXPECT_TRUE(StringToDouble(test.input, &output));
+ EXPECT_EQ(bit_cast<uint64_t>(output), test.expected);
+ }
+}
+
} // namespace base
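
The test above compares doubles bit-for-bit via bit_cast so that one-ULP rounding errors cannot hide behind floating-point equality. A self-contained sketch of that comparison using std::memcpy (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Reinterpret a double's object representation as a uint64_t without
    // violating strict aliasing rules.
    uint64_t DoubleBits(double d) {
      static_assert(sizeof(uint64_t) == sizeof(double),
                    "double must be 64-bit");
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    // A correctly rounded parse of "1e-23" yields bits 0x3b282db34012b251.
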
diff --git a/base/strings/string_split.cc b/base/strings/string_split.cc
index 6c949b989a..a8180b24d3 100644
--- a/base/strings/string_split.cc
+++ b/base/strings/string_split.cc
@@ -227,18 +227,22 @@ bool SplitStringIntoKeyValuePairs(StringPiece input,
return success;
}
-void SplitStringUsingSubstr(StringPiece16 input,
- StringPiece16 delimiter,
- std::vector<string16>* result) {
- SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
- result);
+std::vector<string16> SplitStringUsingSubstr(StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ std::vector<string16> result;
+ SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+ return result;
}
-void SplitStringUsingSubstr(StringPiece input,
- StringPiece delimiter,
- std::vector<std::string>* result) {
- SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
- result);
+std::vector<std::string> SplitStringUsingSubstr(StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ std::vector<std::string> result;
+ SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+ return result;
}
std::vector<StringPiece16> SplitStringPieceUsingSubstr(
diff --git a/base/strings/string_split.h b/base/strings/string_split.h
index ec9f24604a..24b9dfa1e9 100644
--- a/base/strings/string_split.h
+++ b/base/strings/string_split.h
@@ -90,16 +90,16 @@ BASE_EXPORT bool SplitStringIntoKeyValuePairs(StringPiece input,
// Similar to SplitString, but use a substring delimiter instead of a list of
// characters that are all possible delimiters.
-//
-// TODO(brettw) this should probably be changed and expanded to provide a
-// mirror of the SplitString[Piece] API above, just with the different
-// delimiter handling.
-BASE_EXPORT void SplitStringUsingSubstr(StringPiece16 input,
- StringPiece16 delimiter,
- std::vector<string16>* result);
-BASE_EXPORT void SplitStringUsingSubstr(StringPiece input,
- StringPiece delimiter,
- std::vector<std::string>* result);
+BASE_EXPORT std::vector<string16> SplitStringUsingSubstr(
+ StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+BASE_EXPORT std::vector<std::string> SplitStringUsingSubstr(
+ StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
// Like SplitStringUsingSubstr above except it returns a vector of StringPieces
// which reference the original buffer without copying. Although you have to be
diff --git a/base/strings/string_split_unittest.cc b/base/strings/string_split_unittest.cc
index 657a2db7b5..bf09aa5497 100644
--- a/base/strings/string_split_unittest.cc
+++ b/base/strings/string_split_unittest.cc
@@ -150,8 +150,8 @@ TEST_F(SplitStringIntoKeyValuePairsTest, DelimiterInValue) {
}
TEST(SplitStringUsingSubstrTest, EmptyString) {
- std::vector<std::string> results;
- SplitStringUsingSubstr(std::string(), "DELIMITER", &results);
+ std::vector<std::string> results = SplitStringUsingSubstr(
+ std::string(), "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
ASSERT_EQ(1u, results.size());
EXPECT_THAT(results, ElementsAre(""));
}
@@ -231,38 +231,33 @@ TEST(StringUtilTest, SplitString_WhitespaceAndResultType) {
}
TEST(SplitStringUsingSubstrTest, StringWithNoDelimiter) {
- std::vector<std::string> results;
- SplitStringUsingSubstr("alongwordwithnodelimiter", "DELIMITER", &results);
+ std::vector<std::string> results = SplitStringUsingSubstr(
+ "alongwordwithnodelimiter", "DELIMITER", TRIM_WHITESPACE,
+ SPLIT_WANT_ALL);
ASSERT_EQ(1u, results.size());
EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
}
TEST(SplitStringUsingSubstrTest, LeadingDelimitersSkipped) {
- std::vector<std::string> results;
- SplitStringUsingSubstr(
+ std::vector<std::string> results = SplitStringUsingSubstr(
"DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree",
- "DELIMITER",
- &results);
+ "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
ASSERT_EQ(6u, results.size());
EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
}
TEST(SplitStringUsingSubstrTest, ConsecutiveDelimitersSkipped) {
- std::vector<std::string> results;
- SplitStringUsingSubstr(
+ std::vector<std::string> results = SplitStringUsingSubstr(
"unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
- "DELIMITER",
- &results);
+ "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
ASSERT_EQ(7u, results.size());
EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
}
TEST(SplitStringUsingSubstrTest, TrailingDelimitersSkipped) {
- std::vector<std::string> results;
- SplitStringUsingSubstr(
+ std::vector<std::string> results = SplitStringUsingSubstr(
"unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
- "DELIMITER",
- &results);
+ "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
ASSERT_EQ(7u, results.size());
EXPECT_THAT(
results, ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
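
With the new signature the split is returned by value and the whitespace/result policies are explicit at every call site. A usage sketch:

    #include "base/strings/string_split.h"

    std::vector<std::string> parts = base::SplitStringUsingSubstr(
        "unoDELIMITERdos", "DELIMITER", base::TRIM_WHITESPACE,
        base::SPLIT_WANT_ALL);
    // parts == {"uno", "dos"}
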
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
index cb668ed7ff..71ae894dd6 100644
--- a/base/strings/string_util.cc
+++ b/base/strings/string_util.cc
@@ -64,6 +64,21 @@ static bool CompareParameter(const ReplacementOffset& elem1,
return elem1.parameter < elem2.parameter;
}
+// Overloaded function to append one string onto the end of another. Having a
+// separate overload for |source| as both string and StringPiece allows for more
+// efficient usage from functions templated to work with either type (avoiding a
+// redundant call to the BasicStringPiece constructor in both cases).
+template <typename string_type>
+inline void AppendToString(string_type* target, const string_type& source) {
+ target->append(source);
+}
+
+template <typename string_type>
+inline void AppendToString(string_type* target,
+ const BasicStringPiece<string_type>& source) {
+ source.AppendToString(target);
+}
+
// Assuming that a pointer is the size of a "machine word", then
// uintptr_t is an integer type that is also a machine word.
typedef uintptr_t MachineWord;
@@ -853,21 +868,40 @@ char16* WriteInto(string16* str, size_t length_with_null) {
return WriteIntoT(str, length_with_null);
}
-template<typename STR>
-static STR JoinStringT(const std::vector<STR>& parts,
- BasicStringPiece<STR> sep) {
- if (parts.empty())
- return STR();
+// Generic version for all JoinString overloads. |list_type| must be a sequence
+// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
+// string16, StringPiece or StringPiece16). |string_type| is either std::string
+// or string16.
+template <typename list_type, typename string_type>
+static string_type JoinStringT(const list_type& parts,
+ BasicStringPiece<string_type> sep) {
+ if (parts.size() == 0)
+ return string_type();
+
+ // Pre-allocate the eventual size of the string. Start with the size of all of
+ // the separators (note that this *assumes* parts.size() > 0).
+ size_t total_size = (parts.size() - 1) * sep.size();
+ for (const auto& part : parts)
+ total_size += part.size();
+ string_type result;
+ result.reserve(total_size);
- STR result(parts[0]);
auto iter = parts.begin();
+ DCHECK(iter != parts.end());
+ AppendToString(&result, *iter);
++iter;
for (; iter != parts.end(); ++iter) {
sep.AppendToString(&result);
- result += *iter;
+ // Using the overloaded AppendToString allows this template function to work
+ // on both strings and StringPieces without creating an intermediate
+ // StringPiece object.
+ AppendToString(&result, *iter);
}
+ // Sanity-check that we pre-allocated correctly.
+ DCHECK_EQ(total_size, result.size());
+
return result;
}
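
The reserve-then-append strategy above performs a single allocation instead of letting the string regrow once per part. The same pattern in a self-contained sketch, stripped of the template machinery:

    #include <string>
    #include <vector>

    std::string Join(const std::vector<std::string>& parts,
                     const std::string& sep) {
      if (parts.empty())
        return std::string();
      // Final length = all separators plus all parts.
      size_t total = (parts.size() - 1) * sep.size();
      for (const auto& part : parts)
        total += part.size();
      std::string result;
      result.reserve(total);  // One allocation up front.
      result += parts[0];
      for (size_t i = 1; i < parts.size(); ++i) {
        result += sep;
        result += parts[i];
      }
      return result;
    }
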
@@ -881,6 +915,26 @@ string16 JoinString(const std::vector<string16>& parts,
return JoinStringT(parts, separator);
}
+std::string JoinString(const std::vector<StringPiece>& parts,
+ StringPiece separator) {
+ return JoinStringT(parts, separator);
+}
+
+string16 JoinString(const std::vector<StringPiece16>& parts,
+ StringPiece16 separator) {
+ return JoinStringT(parts, separator);
+}
+
+std::string JoinString(std::initializer_list<StringPiece> parts,
+ StringPiece separator) {
+ return JoinStringT(parts, separator);
+}
+
+string16 JoinString(std::initializer_list<StringPiece16> parts,
+ StringPiece16 separator) {
+ return JoinStringT(parts, separator);
+}
+
template<class FormatStringType, class OutStringType>
OutStringType DoReplaceStringPlaceholders(
const FormatStringType& format_string,
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index 0ee077c62b..29076ed913 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -12,6 +12,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <initializer_list>
#include <string>
#include <vector>
@@ -290,8 +291,6 @@ BASE_EXPORT bool ContainsOnlyChars(const StringPiece16& input,
BASE_EXPORT bool IsStringUTF8(const StringPiece& str);
BASE_EXPORT bool IsStringASCII(const StringPiece& str);
BASE_EXPORT bool IsStringASCII(const StringPiece16& str);
-// A convenience adaptor for WebStrings, as they don't convert into
-// StringPieces directly.
BASE_EXPORT bool IsStringASCII(const string16& str);
#if defined(WCHAR_T_IS_UTF32)
BASE_EXPORT bool IsStringASCII(const std::wstring& str);
@@ -437,11 +436,30 @@ BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
#endif
-// Does the opposite of SplitString().
+// Does the opposite of SplitString()/SplitStringPiece(). Joins a vector or list
+// of strings into a single string, inserting |separator| (which may be empty)
+// in between all elements.
+//
+// If possible, callers should build a vector of StringPieces and use the
+// StringPiece variant, so that they do not create unnecessary copies of
+// strings. For example, instead of using SplitString, modifying the vector,
+// then using JoinString, use SplitStringPiece followed by JoinString so that no
+// copies of those strings are created until the final join operation.
BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
StringPiece separator);
BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
StringPiece16 separator);
+BASE_EXPORT std::string JoinString(const std::vector<StringPiece>& parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<StringPiece16>& parts,
+ StringPiece16 separator);
+// Explicit initializer_list overloads are required to break ambiguity when used
+// with a literal initializer list (otherwise the compiler would not be able to
+// decide between the string and StringPiece overloads).
+BASE_EXPORT std::string JoinString(std::initializer_list<StringPiece> parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(std::initializer_list<StringPiece16> parts,
+ StringPiece16 separator);
// Replace $1-$2-$3..$9 in the format string with values from |subst|.
// Additionally, any number of consecutive '$' characters is replaced by that
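
With the initializer_list overloads declared above, a braced literal now picks an overload unambiguously. Usage sketch:

    #include "base/strings/string_util.h"

    std::string path = base::JoinString({"usr", "local", "bin"}, "/");
    // path == "usr/local/bin"
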
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index df2226e48b..6ac307ec2b 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -676,6 +676,10 @@ TEST(StringUtilTest, JoinString) {
std::vector<std::string> parts;
EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.push_back(std::string());
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.clear();
+
parts.push_back("a");
EXPECT_EQ("a", JoinString(parts, separator));
@@ -694,6 +698,10 @@ TEST(StringUtilTest, JoinString16) {
std::vector<string16> parts;
EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.push_back(string16());
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.clear();
+
parts.push_back(ASCIIToUTF16("a"));
EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
@@ -707,6 +715,108 @@ TEST(StringUtilTest, JoinString16) {
EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
}
+TEST(StringUtilTest, JoinStringPiece) {
+ std::string separator(", ");
+ std::vector<StringPiece> parts;
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ parts.push_back(StringPiece());
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.clear();
+
+ parts.push_back("a");
+ EXPECT_EQ("a", JoinString(parts, separator));
+
+ parts.push_back("b");
+ parts.push_back("c");
+ EXPECT_EQ("a, b, c", JoinString(parts, separator));
+
+ parts.push_back(StringPiece());
+ EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
+ parts.push_back(" ");
+ EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
+}
+
+TEST(StringUtilTest, JoinStringPiece16) {
+ string16 separator = ASCIIToUTF16(", ");
+ std::vector<StringPiece16> parts;
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ parts.push_back(StringPiece16());
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.clear();
+
+ const string16 kA = ASCIIToUTF16("a");
+ parts.push_back(kA);
+ EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
+
+ const string16 kB = ASCIIToUTF16("b");
+ parts.push_back(kB);
+ const string16 kC = ASCIIToUTF16("c");
+ parts.push_back(kC);
+ EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
+
+ parts.push_back(StringPiece16());
+ EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
+ const string16 kSpace = ASCIIToUTF16(" ");
+ parts.push_back(kSpace);
+ EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
+}
+
+TEST(StringUtilTest, JoinStringInitializerList) {
+ std::string separator(", ");
+ EXPECT_EQ(std::string(), JoinString({}, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ EXPECT_EQ(std::string(), JoinString({StringPiece()}, separator));
+
+ // With const char*s.
+ EXPECT_EQ("a", JoinString({"a"}, separator));
+ EXPECT_EQ("a, b, c", JoinString({"a", "b", "c"}, separator));
+ EXPECT_EQ("a, b, c, ", JoinString({"a", "b", "c", StringPiece()}, separator));
+ EXPECT_EQ("a|b|c|| ", JoinString({"a", "b", "c", StringPiece(), " "}, "|"));
+
+ // With std::strings.
+ const std::string kA = "a";
+ const std::string kB = "b";
+ EXPECT_EQ("a, b", JoinString({kA, kB}, separator));
+
+ // With StringPieces.
+ const StringPiece kPieceA = kA;
+ const StringPiece kPieceB = kB;
+ EXPECT_EQ("a, b", JoinString({kPieceA, kPieceB}, separator));
+}
+
+TEST(StringUtilTest, JoinStringInitializerList16) {
+ string16 separator = ASCIIToUTF16(", ");
+ EXPECT_EQ(string16(), JoinString({}, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ EXPECT_EQ(string16(), JoinString({StringPiece16()}, separator));
+
+ // With string16s.
+ const string16 kA = ASCIIToUTF16("a");
+ EXPECT_EQ(ASCIIToUTF16("a"), JoinString({kA}, separator));
+
+ const string16 kB = ASCIIToUTF16("b");
+ const string16 kC = ASCIIToUTF16("c");
+ EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString({kA, kB, kC}, separator));
+
+ EXPECT_EQ(ASCIIToUTF16("a, b, c, "),
+ JoinString({kA, kB, kC, StringPiece16()}, separator));
+ const string16 kSpace = ASCIIToUTF16(" ");
+ EXPECT_EQ(
+ ASCIIToUTF16("a|b|c|| "),
+ JoinString({kA, kB, kC, StringPiece16(), kSpace}, ASCIIToUTF16("|")));
+
+ // With StringPiece16s.
+ const StringPiece16 kPieceA = kA;
+ const StringPiece16 kPieceB = kB;
+ EXPECT_EQ(ASCIIToUTF16("a, b"), JoinString({kPieceA, kPieceB}, separator));
+}
+
TEST(StringUtilTest, StartsWith) {
EXPECT_TRUE(StartsWith("javascript:url", "javascript",
base::CompareCase::SENSITIVE));
diff --git a/base/strings/utf_string_conversion_utils.h b/base/strings/utf_string_conversion_utils.h
index c716404539..2d95870c58 100644
--- a/base/strings/utf_string_conversion_utils.h
+++ b/base/strings/utf_string_conversion_utils.h
@@ -5,7 +5,8 @@
#ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
#define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
-// This should only be used by the various UTF string conversion files.
+// Low-level UTF handling functions. Most code will want to use the functions
+// in utf_string_conversions.h.
#include <stddef.h>
#include <stdint.h>
diff --git a/base/strings/utf_string_conversions.cc b/base/strings/utf_string_conversions.cc
index 6b17eacd6c..85450c6566 100644
--- a/base/strings/utf_string_conversions.cc
+++ b/base/strings/utf_string_conversions.cc
@@ -180,10 +180,6 @@ bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
}
std::string UTF16ToUTF8(StringPiece16 utf16) {
- if (IsStringASCII(utf16)) {
- return std::string(utf16.begin(), utf16.end());
- }
-
std::string ret;
// Ignore the success flag of this call, it will do the best it can for
// invalid input, which is what we want here.
diff --git a/base/synchronization/atomic_flag.cc b/base/synchronization/atomic_flag.cc
new file mode 100644
index 0000000000..8c2018d369
--- /dev/null
+++ b/base/synchronization/atomic_flag.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/atomic_flag.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+AtomicFlag::AtomicFlag() {
+ // It doesn't matter where the AtomicFlag is built so long as it is always
+ // Set() from the same sequence afterwards. Note: the sequencing requirement
+ // is necessary for IsSet()'s callers to know which sequence's memory
+ // operations they are synchronized with.
+ set_sequence_checker_.DetachFromSequence();
+}
+
+void AtomicFlag::Set() {
+ DCHECK(set_sequence_checker_.CalledOnValidSequence());
+ base::subtle::Release_Store(&flag_, 1);
+}
+
+bool AtomicFlag::IsSet() const {
+ return base::subtle::Acquire_Load(&flag_) != 0;
+}
+
+void AtomicFlag::UnsafeResetForTesting() {
+ base::subtle::Release_Store(&flag_, 0);
+}
+
+} // namespace base
diff --git a/base/synchronization/atomic_flag.h b/base/synchronization/atomic_flag.h
new file mode 100644
index 0000000000..ff175e190c
--- /dev/null
+++ b/base/synchronization/atomic_flag.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+// A flag that can safely be set from one thread and read from other threads.
+//
+// This class IS NOT intended for synchronization between threads.
+class BASE_EXPORT AtomicFlag {
+ public:
+ AtomicFlag();
+ ~AtomicFlag() = default;
+
+ // Set the flag. Must always be called from the same sequence.
+ void Set();
+
+ // Returns true iff the flag was set. If this returns true, the current thread
+ // is guaranteed to be synchronized with all memory operations on the sequence
+ // which invoked Set() up until at least the first call to Set() on it.
+ bool IsSet() const;
+
+ // Resets the flag. Be careful when using this: callers might not expect
+ // IsSet() to return false after returning true once.
+ void UnsafeResetForTesting();
+
+ private:
+ base::subtle::Atomic32 flag_ = 0;
+ SequenceChecker set_sequence_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
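
A minimal usage sketch for the new AtomicFlag (the worker-loop shape is illustrative; only Set()/IsSet() come from the header above):

    #include "base/synchronization/atomic_flag.h"

    base::AtomicFlag quit_flag;

    // Any thread may poll the flag.
    void WorkerLoop() {
      while (!quit_flag.IsSet()) {
        // ... perform one unit of work ...
      }
    }

    // But Set() must always be invoked from the same sequence:
    //   quit_flag.Set();
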
diff --git a/base/synchronization/atomic_flag_unittest.cc b/base/synchronization/atomic_flag_unittest.cc
new file mode 100644
index 0000000000..a3aa3341a0
--- /dev/null
+++ b/base/synchronization/atomic_flag_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/atomic_flag.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void ExpectSetFlagDeath(AtomicFlag* flag) {
+ ASSERT_TRUE(flag);
+ EXPECT_DCHECK_DEATH(flag->Set());
+}
+
+// Busy waits until |tested_flag| is set (explicitly avoiding synchronization
+// constructs that would defeat the purpose of testing atomics), then verifies
+// that the non-atomic |*expected_after_flag| is true and, if |done_flag| is
+// non-null, sets it before returning.
+void BusyWaitUntilFlagIsSet(AtomicFlag* tested_flag, bool* expected_after_flag,
+ AtomicFlag* done_flag) {
+ while (!tested_flag->IsSet())
+ PlatformThread::YieldCurrentThread();
+
+ EXPECT_TRUE(*expected_after_flag);
+ if (done_flag)
+ done_flag->Set();
+}
+
+} // namespace
+
+TEST(AtomicFlagTest, SimpleSingleThreadedTest) {
+ AtomicFlag flag;
+ ASSERT_FALSE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(AtomicFlagTest, DoubleSetTest) {
+ AtomicFlag flag;
+ ASSERT_FALSE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(AtomicFlagTest, ReadFromDifferentThread) {
+ // |tested_flag| is the one being tested below.
+ AtomicFlag tested_flag;
+ // |expected_after_flag| is used to confirm that sequential consistency is
+ // obtained around |tested_flag|.
+ bool expected_after_flag = false;
+ // |reset_flag| is used to confirm the test flows as intended without using
+ // synchronization constructs which would defeat the purpose of exercising
+ // atomics.
+ AtomicFlag reset_flag;
+
+ Thread thread("AtomicFlagTest.ReadFromDifferentThread");
+ ASSERT_TRUE(thread.Start());
+ thread.task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
+ &reset_flag));
+
+ // To verify that IsSet() fetches the flag's value from memory every time it
+ // is called (not just the first time that it is called on a thread), sleep
+ // before setting the flag.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+ // |expected_after_flag| is used to verify that all memory operations
+ // performed before |tested_flag| is Set() are visible to threads that
+ // observe IsSet() returning true.
+ expected_after_flag = true;
+ tested_flag.Set();
+
+ // Sleep again to give the busy loop time to observe the flag and verify
+ // expectations.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+ // Use |reset_flag| to confirm that the above completed (which the rest of
+ // this test assumes).
+ ASSERT_TRUE(reset_flag.IsSet());
+
+ tested_flag.UnsafeResetForTesting();
+ EXPECT_FALSE(tested_flag.IsSet());
+ expected_after_flag = false;
+
+ // Perform the same test again after the controlled UnsafeResetForTesting();
+ // |thread| is guaranteed to be synchronized past the
+ // |UnsafeResetForTesting()| call when the task runs, per the implicit
+ // synchronization in the post-task mechanism.
+ thread.task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
+ nullptr));
+
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+ expected_after_flag = true;
+ tested_flag.Set();
+
+ // The |thread|'s destructor will block until the posted task completes, so
+ // the test will time out if it fails to see the flag be set.
+}
+
+TEST(AtomicFlagTest, SetOnDifferentSequenceDeathTest) {
+ // Checks that Set() can't be called from another sequence after being called
+ // on this one. AtomicFlag should die on a DCHECK if Set() is called again
+ // from another sequence.
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+ Thread t("AtomicFlagTest.SetOnDifferentThreadDeathTest");
+ ASSERT_TRUE(t.Start());
+ EXPECT_TRUE(t.WaitUntilThreadStarted());
+
+ AtomicFlag flag;
+ flag.Set();
+ t.task_runner()->PostTask(FROM_HERE, Bind(&ExpectSetFlagDeath, &flag));
+}
+
+} // namespace base
diff --git a/base/synchronization/cancellation_flag.cc b/base/synchronization/cancellation_flag.cc
deleted file mode 100644
index ca5c0a8283..0000000000
--- a/base/synchronization/cancellation_flag.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/synchronization/cancellation_flag.h"
-
-#include "base/logging.h"
-
-namespace base {
-
-void CancellationFlag::Set() {
-#if !defined(NDEBUG)
- DCHECK_EQ(set_on_, PlatformThread::CurrentId());
-#endif
- base::subtle::Release_Store(&flag_, 1);
-}
-
-bool CancellationFlag::IsSet() const {
- return base::subtle::Acquire_Load(&flag_) != 0;
-}
-
-void CancellationFlag::UnsafeResetForTesting() {
- base::subtle::Release_Store(&flag_, 0);
-}
-
-} // namespace base
diff --git a/base/synchronization/cancellation_flag.h b/base/synchronization/cancellation_flag.h
index f2f83f47da..39094e2dc0 100644
--- a/base/synchronization/cancellation_flag.h
+++ b/base/synchronization/cancellation_flag.h
@@ -5,44 +5,15 @@
#ifndef BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
#define BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/threading/platform_thread.h"
+#include "base/synchronization/atomic_flag.h"
namespace base {
-// CancellationFlag allows one thread to cancel jobs executed on some worker
-// thread. Calling Set() from one thread and IsSet() from a number of threads
-// is thread-safe.
-//
-// This class IS NOT intended for synchronization between threads.
-class BASE_EXPORT CancellationFlag {
- public:
- CancellationFlag() : flag_(false) {
-#if !defined(NDEBUG)
- set_on_ = PlatformThread::CurrentId();
-#endif
- }
- ~CancellationFlag() {}
-
- // Set the flag. May only be called on the thread which owns the object.
- void Set();
- bool IsSet() const; // Returns true iff the flag was set.
-
- // For subtle reasons that may be different on different architectures,
- // a different thread testing IsSet() may erroneously read 'true' after
- // this method has been called.
- void UnsafeResetForTesting();
-
- private:
- base::subtle::Atomic32 flag_;
-#if !defined(NDEBUG)
- PlatformThreadId set_on_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(CancellationFlag);
-};
+// Use inheritance instead of "using" to allow forward declaration of "class
+// CancellationFlag".
+// TODO(fdoray): Replace CancellationFlag with AtomicFlag throughout the
+// codebase and delete this file. crbug.com/630251
+class CancellationFlag : public AtomicFlag {};
} // namespace base
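
The inheritance trick works because a class type can be forward-declared while an alias cannot be redeclared as a class. A sketch of the distinction:

    namespace base {
    class CancellationFlag;  // OK: matches the derived-class definition above.
    }

    // Had the name been introduced as an alias instead,
    //   using CancellationFlag = AtomicFlag;
    // the forward declaration above would be ill-formed.
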
diff --git a/base/synchronization/cancellation_flag_unittest.cc b/base/synchronization/cancellation_flag_unittest.cc
deleted file mode 100644
index 13c74bcbd4..0000000000
--- a/base/synchronization/cancellation_flag_unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Tests of CancellationFlag class.
-
-#include "base/synchronization/cancellation_flag.h"
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/spin_wait.h"
-#include "base/threading/thread.h"
-#include "base/time/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/platform_test.h"
-
-namespace base {
-
-namespace {
-
-//------------------------------------------------------------------------------
-// Define our test class.
-//------------------------------------------------------------------------------
-
-void CancelHelper(CancellationFlag* flag) {
-#if GTEST_HAS_DEATH_TEST
- ASSERT_DEBUG_DEATH(flag->Set(), "");
-#endif
-}
-
-TEST(CancellationFlagTest, SimpleSingleThreadedTest) {
- CancellationFlag flag;
- ASSERT_FALSE(flag.IsSet());
- flag.Set();
- ASSERT_TRUE(flag.IsSet());
-}
-
-TEST(CancellationFlagTest, DoubleSetTest) {
- CancellationFlag flag;
- ASSERT_FALSE(flag.IsSet());
- flag.Set();
- ASSERT_TRUE(flag.IsSet());
- flag.Set();
- ASSERT_TRUE(flag.IsSet());
-}
-
-TEST(CancellationFlagTest, SetOnDifferentThreadDeathTest) {
- // Checks that Set() can't be called from any other thread.
- // CancellationFlag should die on a DCHECK if Set() is called from
- // other thread.
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- Thread t("CancellationFlagTest.SetOnDifferentThreadDeathTest");
- ASSERT_TRUE(t.Start());
- ASSERT_TRUE(t.message_loop());
- ASSERT_TRUE(t.IsRunning());
-
- CancellationFlag flag;
- t.task_runner()->PostTask(FROM_HERE, base::Bind(&CancelHelper, &flag));
-}
-
-} // namespace
-
-} // namespace base
diff --git a/base/synchronization/condition_variable.h b/base/synchronization/condition_variable.h
index ebf90d249a..b567751172 100644
--- a/base/synchronization/condition_variable.h
+++ b/base/synchronization/condition_variable.h
@@ -91,11 +91,13 @@ class BASE_EXPORT ConditionVariable {
~ConditionVariable();
// Wait() releases the caller's critical section atomically as it starts to
- // sleep, and the reacquires it when it is signaled.
+ // sleep, and then reacquires it when it is signaled. The wait functions are
+ // susceptible to spurious wakeups. (See usage note 1 for more details.)
void Wait();
void TimedWait(const TimeDelta& max_time);
- // Broadcast() revives all waiting threads.
+ // Broadcast() revives all waiting threads. (See usage note 2 for more
+ // details.)
void Broadcast();
// Signal() revives one waiting thread.
void Signal();
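
Since Wait()/TimedWait() may wake spuriously, callers re-check their predicate in a loop. A sketch of the canonical pattern (the lock, condition variable, and predicate are assumed members):

    void WaitForWork() {
      base::AutoLock auto_lock(lock_);
      while (!work_ready_)  // Re-check: Wait() can return without a Signal().
        work_cv_.Wait();
      // ... consume the work while still holding |lock_| ...
    }
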
diff --git a/base/synchronization/condition_variable_posix.cc b/base/synchronization/condition_variable_posix.cc
index d86fd180ec..d07c671810 100644
--- a/base/synchronization/condition_variable_posix.cc
+++ b/base/synchronization/condition_variable_posix.cc
@@ -118,6 +118,8 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
#endif // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
#endif // OS_MACOSX
+ // On failure, we only expect the CV to timeout. Any other error value means
+ // that we've unexpectedly woken up.
DCHECK(rv == 0 || rv == ETIMEDOUT);
#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
index fbf6cef769..599984e8b6 100644
--- a/base/synchronization/lock.h
+++ b/base/synchronization/lock.h
@@ -61,6 +61,23 @@ class BASE_EXPORT Lock {
void AssertAcquired() const;
#endif // DCHECK_IS_ON()
+ // Whether Lock mitigates priority inversion when used from different thread
+ // priorities.
+ static bool HandlesMultipleThreadPriorities() {
+#if defined(OS_POSIX)
+ // POSIX mitigates priority inversion by setting the priority of a thread
+ // holding a Lock to the maximum priority of any other thread waiting on it.
+ return internal::LockImpl::PriorityInheritanceAvailable();
+#elif defined(OS_WIN)
+ // Windows mitigates priority inversion by randomly boosting the priority of
+ // ready threads.
+ // https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
+ return true;
+#else
+#error Unsupported platform
+#endif
+ }
+
#if defined(OS_POSIX) || defined(OS_WIN)
// Both Windows and POSIX implementations of ConditionVariable need to be
// able to see our lock and tweak our debugging counters, as they release and
diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
index cbaabc784b..603585a050 100644
--- a/base/synchronization/lock_impl.h
+++ b/base/synchronization/lock_impl.h
@@ -48,6 +48,11 @@ class BASE_EXPORT LockImpl {
// unnecessary.
NativeHandle* native_handle() { return &native_handle_; }
+#if defined(OS_POSIX)
+ // Whether this lock will attempt to use priority inheritance.
+ static bool PriorityInheritanceAvailable();
+#endif
+
private:
NativeHandle native_handle_;
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
index 5619adaf5d..ff997ea65f 100644
--- a/base/synchronization/lock_impl_posix.cc
+++ b/base/synchronization/lock_impl_posix.cc
@@ -7,27 +7,45 @@
#include <errno.h>
#include <string.h>
+#include "base/debug/activity_tracker.h"
#include "base/logging.h"
+#include "base/synchronization/lock.h"
namespace base {
namespace internal {
+// Determines which platforms can consider using priority inheritance locks. Use
+// this define for platform code that may not compile if priority inheritance
+// locks aren't available. For this platform code,
+// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
+// Lock::PriorityInheritanceAvailable still must be checked as the code may
+// compile but the underlying platform still may not correctly support priority
+// inheritance locks.
+#if defined(OS_NACL) || defined(OS_ANDROID) || defined(__ANDROID__)
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
+#else
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
+#endif
+
LockImpl::LockImpl() {
-#ifndef NDEBUG
- // In debug, setup attributes for lock error checking.
pthread_mutexattr_t mta;
int rv = pthread_mutexattr_init(&mta);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+ if (PriorityInheritanceAvailable()) {
+ rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
+ DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+ }
+#endif
+#ifndef NDEBUG
+ // In debug, setup attributes for lock error checking.
rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+#endif
rv = pthread_mutex_init(&native_handle_, &mta);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
rv = pthread_mutexattr_destroy(&mta);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
-#else
- // In release, go with the default lock attributes.
- pthread_mutex_init(&native_handle_, NULL);
-#endif
}
LockImpl::~LockImpl() {
@@ -42,6 +60,7 @@ bool LockImpl::Try() {
}
void LockImpl::Lock() {
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
int rv = pthread_mutex_lock(&native_handle_);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
}
@@ -51,5 +70,29 @@ void LockImpl::Unlock() {
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
}
+// static
+bool LockImpl::PriorityInheritanceAvailable() {
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+ return true;
+#else
+ // Security concerns prevent the use of priority inheritance mutexes on Linux.
+ // * CVE-2010-0622 - wake_futex_pi unlocks incorrect, possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
+ // * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
+ // * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
+ //
+ // If the above were all addressed, we still need a runtime check to deal with
+ // the bug below.
+ // * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
+ // Fixed in glibc 2.17.
+ // Priority inheritance mutexes may deadlock with condition variables
+ // during recacquisition of the mutex after the condition variable is
+ // signalled.
+ return false;
+#endif
+}
+
} // namespace internal
} // namespace base
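
For reference, the attribute setup the new LockImpl constructor performs reduces to this standalone POSIX sketch (error handling elided; as the comments above note, platform support must still be checked at runtime):

    #include <pthread.h>

    void InitPriorityInheritanceMutex(pthread_mutex_t* mutex) {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      // The mutex holder inherits the priority of the highest-priority
      // waiter, mitigating priority inversion.
      pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
      pthread_mutex_init(mutex, &attr);
      pthread_mutexattr_destroy(&attr);
    }
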
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 3863e98455..761965f03a 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -25,6 +25,7 @@
namespace base {
class TimeDelta;
+class TimeTicks;
// A WaitableEvent can be a useful thread synchronization tool when you want to
// allow one thread to wait for another thread to finish some work. For
@@ -86,12 +87,17 @@ class BASE_EXPORT WaitableEvent {
// delete e;
void Wait();
- // Wait up until max_time has passed for the event to be signaled. Returns
- // true if the event was signaled. If this method returns false, then it
- // does not necessarily mean that max_time was exceeded.
+ // Wait up until wait_delta has passed for the event to be signaled. Returns
+ // true if the event was signaled.
//
// TimedWait can synchronise its own destruction like |Wait|.
- bool TimedWait(const TimeDelta& max_time);
+ bool TimedWait(const TimeDelta& wait_delta);
+
+ // Wait up until end_time deadline has passed for the event to be signaled.
+ // Return true if the event was signaled.
+ //
+ // TimedWaitUntil can synchronise its own destruction like |Wait|.
+ bool TimedWaitUntil(const TimeTicks& end_time);
#if defined(OS_WIN)
HANDLE handle() const { return handle_.Get(); }
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index b32c882711..5dfff468ad 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <vector>
+#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
@@ -152,14 +153,22 @@ class SyncWaiter : public WaitableEvent::Waiter {
};
void WaitableEvent::Wait() {
- bool result = TimedWait(TimeDelta::FromSeconds(-1));
+ bool result = TimedWaitUntil(TimeTicks::Max());
DCHECK(result) << "TimedWait() should never fail with infinite timeout";
}
-bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+ // TimeTicks takes care of overflow, including the case when |wait_delta|
+ // is a maximum value.
+ return TimedWaitUntil(TimeTicks::Now() + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
base::ThreadRestrictions::AssertWaitAllowed();
- const TimeTicks end_time(TimeTicks::Now() + max_time);
- const bool finite_time = max_time.ToInternalValue() >= 0;
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedEventWaitActivity event_activity(this);
+
+ const bool finite_time = !end_time.is_max();
kernel_->lock_.Acquire();
if (kernel_->signaled_) {
@@ -232,6 +241,9 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
base::ThreadRestrictions::AssertWaitAllowed();
DCHECK(count) << "Cannot wait on no events";
+ // Record an event (the first) that this thread is blocking upon.
+ base::debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
+
// We need to acquire the locks in a globally consistent order. Thus we sort
// the array of waitables by address. We actually sort a pairs so that we can
// map back to the original index values later.
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index ac5c9f1255..c0e280aa97 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -136,13 +136,7 @@ TEST(WaitableEventTest, WaitMany) {
// Tests that using TimeDelta::Max() on TimedWait() is not the same as passing
// a timeout of 0. (crbug.com/465948)
-#if defined(OS_POSIX)
-// crbug.com/465948 not fixed yet.
-#define MAYBE_TimedWait DISABLED_TimedWait
-#else
-#define MAYBE_TimedWait TimedWait
-#endif
-TEST(WaitableEventTest, MAYBE_TimedWait) {
+TEST(WaitableEventTest, TimedWait) {
WaitableEvent* ev =
new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -153,11 +147,58 @@ TEST(WaitableEventTest, MAYBE_TimedWait) {
TimeTicks start = TimeTicks::Now();
PlatformThread::Create(0, &signaler, &thread);
- ev->TimedWait(TimeDelta::Max());
+ EXPECT_TRUE(ev->TimedWait(TimeDelta::Max()));
EXPECT_GE(TimeTicks::Now() - start, thread_delay);
delete ev;
PlatformThread::Join(thread);
}
+// Tests that a sub-ms TimedWait doesn't time out promptly.
+TEST(WaitableEventTest, SubMsTimedWait) {
+ WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ TimeDelta delay = TimeDelta::FromMicroseconds(900);
+ TimeTicks start_time = TimeTicks::Now();
+ ev.TimedWait(delay);
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+}
+
+// Tests that TimedWaitUntil can be safely used with various end_time deadline
+// values.
+TEST(WaitableEventTest, TimedWaitUntil) {
+ WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ TimeTicks start_time(TimeTicks::Now());
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+
+ // Should be OK to wait for the current time or time in the past.
+ // That should end promptly and be equivalent to IsSignalled.
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time));
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time - delay));
+
+ // Should be OK to wait for zero TimeTicks().
+ EXPECT_FALSE(ev.TimedWaitUntil(TimeTicks()));
+
+ // Waiting for a time in the future shouldn't end before the deadline
+ // if the event isn't signalled.
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time + delay));
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+ // Test that passing TimeTicks::Max to TimedWaitUntil is valid and isn't
+ // the same as passing TimeTicks(). Also verifies that signaling event
+ // ends the wait promptly.
+ WaitableEventSignaler signaler(delay, &ev);
+ PlatformThreadHandle thread;
+ start_time = TimeTicks::Now();
+ PlatformThread::Create(0, &signaler, &thread);
+
+ EXPECT_TRUE(ev.TimedWaitUntil(TimeTicks::Max()));
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+ PlatformThread::Join(thread);
+}
+
} // namespace base
diff --git a/base/synchronization/waitable_event_watcher.h b/base/synchronization/waitable_event_watcher.h
index eb51effa49..44ef5047ed 100644
--- a/base/synchronization/waitable_event_watcher.h
+++ b/base/synchronization/waitable_event_watcher.h
@@ -6,13 +6,14 @@
#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/win/object_watcher.h"
#else
#include "base/callback.h"
-#include "base/message_loop/message_loop.h"
#include "base/synchronization/waitable_event.h"
#endif
@@ -20,14 +21,13 @@ namespace base {
class Flag;
class AsyncWaiter;
-class AsyncCallbackTask;
class WaitableEvent;
// This class provides a way to wait on a WaitableEvent asynchronously.
//
// Each instance of this object can be waiting on a single WaitableEvent. When
-// the waitable event is signaled, a callback is made in the thread of a given
-// MessageLoop. This callback can be deleted by deleting the waiter.
+// the waitable event is signaled, a callback is invoked on the sequence that
+// called StartWatching(). This callback can be deleted by deleting the waiter.
//
// Typical usage:
//
@@ -60,53 +60,56 @@ class WaitableEvent;
class BASE_EXPORT WaitableEventWatcher
#if defined(OS_WIN)
- : public win::ObjectWatcher::Delegate {
-#else
- : public MessageLoop::DestructionObserver {
+ : public win::ObjectWatcher::Delegate
#endif
+{
public:
typedef Callback<void(WaitableEvent*)> EventCallback;
WaitableEventWatcher();
+
+#if defined(OS_WIN)
~WaitableEventWatcher() override;
+#else
+ ~WaitableEventWatcher();
+#endif
- // When @event is signaled, the given callback is called on the thread of the
- // current message loop when StartWatching is called.
+ // When |event| is signaled, |callback| is called on the sequence that called
+ // StartWatching().
bool StartWatching(WaitableEvent* event, const EventCallback& callback);
- // Cancel the current watch. Must be called from the same thread which
+ // Cancel the current watch. Must be called from the same sequence which
// started the watch.
//
// Does nothing if no event is being watched, nor if the watch has completed.
// The callback will *not* be called for the current watch after this
- // function returns. Since the callback runs on the same thread as this
+ // function returns. Since the callback runs on the same sequence as this
// function, it cannot be called during this function either.
void StopWatching();
- // Return the currently watched event, or NULL if no object is currently being
- // watched.
- WaitableEvent* GetWatchedEvent();
-
- // Return the callback that will be invoked when the event is
- // signaled.
- const EventCallback& callback() const { return callback_; }
-
private:
#if defined(OS_WIN)
void OnObjectSignaled(HANDLE h) override;
+
win::ObjectWatcher watcher_;
+ EventCallback callback_;
+ WaitableEvent* event_ = nullptr;
#else
- // Implementation of MessageLoop::DestructionObserver
- void WillDestroyCurrentMessageLoop() override;
-
- MessageLoop* message_loop_;
+ // Instantiated in StartWatching(). Set before the callback runs. Reset in
+ // StopWatching() or StartWatching().
scoped_refptr<Flag> cancel_flag_;
- AsyncWaiter* waiter_;
- base::Closure internal_callback_;
+
+ // Enqueued in the wait list of the watched WaitableEvent.
+ AsyncWaiter* waiter_ = nullptr;
+
+ // Kernel of the watched WaitableEvent.
scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_;
+
+ // Ensures that StartWatching() and StopWatching() are called on the same
+ // sequence.
+ SequenceChecker sequence_checker_;
#endif
- WaitableEvent* event_;
- EventCallback callback_;
+ DISALLOW_COPY_AND_ASSIGN(WaitableEventWatcher);
};
} // namespace base
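
Typical sequence-bound usage of the reworked watcher (class and member names are illustrative):

    // |watcher_| and |event_| are members of a sequence-bound object.
    void MyObject::WatchEvent() {
      watcher_.StartWatching(
          &event_,
          base::Bind(&MyObject::OnEventSignaled, base::Unretained(this)));
    }

    // Runs on the same sequence that called StartWatching().
    void MyObject::OnEventSignaled(base::WaitableEvent* event) {
      // ...
    }
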
diff --git a/base/synchronization/waitable_event_watcher_posix.cc b/base/synchronization/waitable_event_watcher_posix.cc
index 7cf8688d4c..3adbc5f977 100644
--- a/base/synchronization/waitable_event_watcher_posix.cc
+++ b/base/synchronization/waitable_event_watcher_posix.cc
@@ -4,12 +4,12 @@
#include "base/synchronization/waitable_event_watcher.h"
+#include <utility>
+
#include "base/bind.h"
-#include "base/location.h"
-#include "base/macros.h"
-#include "base/single_thread_task_runner.h"
+#include "base/logging.h"
#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
@@ -17,14 +17,15 @@ namespace base {
// WaitableEventWatcher (async waits).
//
// The basic design is that we add an AsyncWaiter to the wait-list of the event.
-// That AsyncWaiter has a pointer to MessageLoop, and a Task to be posted to it.
-// The MessageLoop ends up running the task, which calls the delegate.
+// That AsyncWaiter has a pointer to SequencedTaskRunner, and a Task to be
+// posted to it. The task ends up calling the callback when it runs on the
+// sequence.
//
// Since the wait can be canceled, we have a thread-safe Flag object which is
// set when the wait has been canceled. At each stage in the above, we check the
// flag before going onto the next stage. Since the wait may only be canceled in
-// the MessageLoop which runs the Task, we are assured that the delegate cannot
-// be called after canceling...
+// the sequence which runs the Task, we are assured that the callback cannot be
+// called after canceling...
// -----------------------------------------------------------------------------
// A thread-safe, reference-counted, write-once flag.
@@ -54,23 +55,22 @@ class Flag : public RefCountedThreadSafe<Flag> {
};
// -----------------------------------------------------------------------------
-// This is an asynchronous waiter which posts a task to a MessageLoop when
-// fired. An AsyncWaiter may only be in a single wait-list.
+// This is an asynchronous waiter which posts a task to a SequencedTaskRunner
+// when fired. An AsyncWaiter may only be in a single wait-list.
// -----------------------------------------------------------------------------
class AsyncWaiter : public WaitableEvent::Waiter {
public:
- AsyncWaiter(MessageLoop* message_loop,
+ AsyncWaiter(scoped_refptr<SequencedTaskRunner> task_runner,
const base::Closure& callback,
Flag* flag)
- : message_loop_(message_loop),
+ : task_runner_(std::move(task_runner)),
callback_(callback),
- flag_(flag) { }
+ flag_(flag) {}
bool Fire(WaitableEvent* event) override {
// Post the callback if we haven't been cancelled.
- if (!flag_->value()) {
- message_loop_->task_runner()->PostTask(FROM_HERE, callback_);
- }
+ if (!flag_->value())
+ task_runner_->PostTask(FROM_HERE, callback_);
// We are removed from the wait-list by the WaitableEvent itself. It only
// remains to delete ourselves.
@@ -85,37 +85,37 @@ class AsyncWaiter : public WaitableEvent::Waiter {
bool Compare(void* tag) override { return tag == flag_.get(); }
private:
- MessageLoop *const message_loop_;
- base::Closure callback_;
- scoped_refptr<Flag> flag_;
+ const scoped_refptr<SequencedTaskRunner> task_runner_;
+ const base::Closure callback_;
+ const scoped_refptr<Flag> flag_;
};
// -----------------------------------------------------------------------------
-// For async waits we need to make a callback in a MessageLoop thread. We do
-// this by posting a callback, which calls the delegate and keeps track of when
-// the event is canceled.
+// For async waits we need to run a callback on a sequence. We do this by
+// posting an AsyncCallbackHelper task, which calls the callback and keeps track
+// of when the event is canceled.
// -----------------------------------------------------------------------------
void AsyncCallbackHelper(Flag* flag,
const WaitableEventWatcher::EventCallback& callback,
WaitableEvent* event) {
- // Runs in MessageLoop thread.
+ // Runs on the sequence that called StartWatching().
if (!flag->value()) {
- // This is to let the WaitableEventWatcher know that the event has occured
- // because it needs to be able to return NULL from GetWatchedObject
+ // This is to let the WaitableEventWatcher know that the event has occurred.
flag->Set();
callback.Run(event);
}
}
-WaitableEventWatcher::WaitableEventWatcher()
- : message_loop_(NULL),
- cancel_flag_(NULL),
- waiter_(NULL),
- event_(NULL) {
+WaitableEventWatcher::WaitableEventWatcher() {
+ sequence_checker_.DetachFromSequence();
}
WaitableEventWatcher::~WaitableEventWatcher() {
- StopWatching();
+ // When there is no active watch, the destructor may be called from a
+ // different sequence than StartWatching(). To avoid triggering a DCHECK in
+ // StopWatching(), skip it in that case.
+ if (cancel_flag_ && !cancel_flag_->value())
+ StopWatching();
}
// -----------------------------------------------------------------------------
@@ -125,61 +125,44 @@ WaitableEventWatcher::~WaitableEventWatcher() {
bool WaitableEventWatcher::StartWatching(
WaitableEvent* event,
const EventCallback& callback) {
- MessageLoop *const current_ml = MessageLoop::current();
- DCHECK(current_ml) << "Cannot create WaitableEventWatcher without a "
- "current MessageLoop";
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK(SequencedTaskRunnerHandle::Get());
// A user may call StartWatching from within the callback function. In this
// case, we won't know that we have finished watching, except that the Flag
// will have been set in AsyncCallbackHelper().
- if (cancel_flag_.get() && cancel_flag_->value()) {
- if (message_loop_) {
- message_loop_->RemoveDestructionObserver(this);
- message_loop_ = NULL;
- }
-
- cancel_flag_ = NULL;
- }
+ if (cancel_flag_.get() && cancel_flag_->value())
+ cancel_flag_ = nullptr;
- DCHECK(!cancel_flag_.get()) << "StartWatching called while still watching";
+ DCHECK(!cancel_flag_) << "StartWatching called while still watching";
cancel_flag_ = new Flag;
- callback_ = callback;
- internal_callback_ = base::Bind(
- &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback_, event);
+ const Closure internal_callback = base::Bind(
+ &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback, event);
WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
AutoLock locked(kernel->lock_);
- event_ = event;
-
if (kernel->signaled_) {
if (!kernel->manual_reset_)
kernel->signaled_ = false;
// No hairpinning - we can't call the delegate directly here. We have to
- // enqueue a task on the MessageLoop as normal.
- current_ml->task_runner()->PostTask(FROM_HERE, internal_callback_);
+ // post a task to the SequencedTaskRunnerHandle as usual.
+ SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE, internal_callback);
return true;
}
- message_loop_ = current_ml;
- current_ml->AddDestructionObserver(this);
-
kernel_ = kernel;
- waiter_ = new AsyncWaiter(current_ml, internal_callback_, cancel_flag_.get());
+ waiter_ = new AsyncWaiter(SequencedTaskRunnerHandle::Get(), internal_callback,
+ cancel_flag_.get());
event->Enqueue(waiter_);
return true;
}
void WaitableEventWatcher::StopWatching() {
- callback_.Reset();
-
- if (message_loop_) {
- message_loop_->RemoveDestructionObserver(this);
- message_loop_ = NULL;
- }
+ DCHECK(sequence_checker_.CalledOnValidSequence());
if (!cancel_flag_.get()) // if not currently watching...
return;
@@ -227,44 +210,24 @@ void WaitableEventWatcher::StopWatching() {
// have been enqueued with the MessageLoop because the waiter was never
// signaled)
delete waiter_;
- internal_callback_.Reset();
cancel_flag_ = NULL;
return;
}
- // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may
- // not have run yet, so we set the flag to tell it not to bother enqueuing the
- // task on the MessageLoop, but to delete it instead. The Waiter deletes
- // itself once run.
+ // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may not
+ // have run yet, so we set the flag to tell it not to bother enqueuing the
+ // task on the SequencedTaskRunner, but to delete it instead. The Waiter
+ // deletes itself once run.
cancel_flag_->Set();
cancel_flag_ = NULL;
// If the waiter has already run then the task has been enqueued. If the Task
// hasn't yet run, the flag will stop the delegate from getting called. (This
- // is thread safe because one may only delete a Handle from the MessageLoop
- // thread.)
+ // is thread safe because one may only delete a Handle from the sequence that
+ // called StartWatching()).
//
// If the delegate has already been called then we have nothing to do. The
// task has been deleted by the MessageLoop.
}
-WaitableEvent* WaitableEventWatcher::GetWatchedEvent() {
- if (!cancel_flag_.get())
- return NULL;
-
- if (cancel_flag_->value())
- return NULL;
-
- return event_;
-}
-
-// -----------------------------------------------------------------------------
-// This is called when the MessageLoop which the callback will be run it is
-// deleted. We need to cancel the callback as if we had been deleted, but we
-// will still be deleted at some point in the future.
-// -----------------------------------------------------------------------------
-void WaitableEventWatcher::WillDestroyCurrentMessageLoop() {
- StopWatching();
-}
-
} // namespace base
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
index 8d9066c702..9ee1827e1e 100644
--- a/base/sys_byteorder.h
+++ b/base/sys_byteorder.h
@@ -13,6 +13,7 @@
#include <stdint.h>
+#include "base/logging.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
@@ -46,6 +47,21 @@ inline uint64_t ByteSwap(uint64_t x) {
#endif
}
+inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
+ // We do it this way because some build configurations are ILP32 even when
+ // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
+ // because these conditionals are constexprs, the irrelevant branches will
+ // likely be optimized away, so this construction should not result in code
+ // bloat.
+ if (sizeof(uintptr_t) == 4) {
+ return ByteSwap(static_cast<uint32_t>(x));
+ } else if (sizeof(uintptr_t) == 8) {
+ return ByteSwap(static_cast<uint64_t>(x));
+ } else {
+ NOTREACHED();
+ return x;  // Unreachable, but keeps every path of this value-returning
+            // function well-defined in builds where NOTREACHED() is a no-op.
+ }
+}
+
// Converts the bytes in |x| from host order (endianness) to little endian, and
// returns the result.
inline uint16_t ByteSwapToLE16(uint16_t x) {
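A hedged sketch of what the size dispatch in ByteSwapUintPtrT() yields; the literal values are illustrative:

#include <assert.h>
#include <stdint.h>

#include "base/sys_byteorder.h"

void ByteSwapUintPtrTSketch() {
  const uintptr_t swapped = base::ByteSwapUintPtrT(0x01020304u);
  if (sizeof(uintptr_t) == 4) {
    assert(swapped == 0x04030201u);          // Swapped as a uint32_t.
  } else {
    assert(swapped == 0x0403020100000000u);  // Swapped as a uint64_t.
  }
}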
diff --git a/base/sys_info.h b/base/sys_info.h
index b10747703d..e35feff735 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -107,9 +107,19 @@ class BASE_EXPORT SysInfo {
static bool GetLsbReleaseValue(const std::string& key, std::string* value);
// Convenience function for GetLsbReleaseValue("CHROMEOS_RELEASE_BOARD",...).
- // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+ // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set. Otherwise returns
+ // the full name of the board. WARNING: the returned value often differs in
+ // developer built system compared to devices that use the official version.
+ // E.g. for developer built version, the function could return 'glimmer' while
+ // for officially used versions it would be like 'glimmer-signed-mp-v4keys'.
+ // Use GetStrippedReleaseBoard() function if you need only the short name of
+ // the board (would be 'glimmer' in the case described above).
static std::string GetLsbReleaseBoard();
+ // Convenience function for GetLsbReleaseBoard() removing trailing "-signed-*"
+ // if present. Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+ static std::string GetStrippedReleaseBoard();
+
// Returns the creation time of /etc/lsb-release. (Used to get the date and
// time of the Chrome OS build).
static Time GetLsbReleaseTime();
diff --git a/base/sys_info_chromeos.cc b/base/sys_info_chromeos.cc
index 3794ed96c6..29f83845dc 100644
--- a/base/sys_info_chromeos.cc
+++ b/base/sys_info_chromeos.cc
@@ -163,7 +163,7 @@ class ChromeOSVersionInfo {
bool is_running_on_chromeos_;
};
-static LazyInstance<ChromeOSVersionInfo>
+static LazyInstance<ChromeOSVersionInfo>::Leaky
g_chrome_os_version_info = LAZY_INSTANCE_INITIALIZER;
ChromeOSVersionInfo& GetChromeOSVersionInfo() {
@@ -200,6 +200,16 @@ std::string SysInfo::GetLsbReleaseBoard() {
}
// static
+std::string SysInfo::GetStrippedReleaseBoard() {
+ std::string board = GetLsbReleaseBoard();
+ const size_t index = board.find("-signed-");
+ if (index != std::string::npos)
+ board.resize(index);
+
+ return base::ToLowerASCII(board);
+}
+
+// static
Time SysInfo::GetLsbReleaseTime() {
return GetChromeOSVersionInfo().lsb_release_time();
}
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
index 102d99f3d0..f8d668c8ff 100644
--- a/base/sys_info_mac.mm
+++ b/base/sys_info_mac.mm
@@ -47,18 +47,12 @@ void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
*minor_version = version.minorVersion;
*bugfix_version = version.patchVersion;
} else {
-#else
- // Android buildbots are too old and have trouble using the forward
- // declarations for some reason. Conditionally-compile the above block
- // only when building on a more modern version of OS X.
- if (true) {
-#endif
// -[NSProcessInfo operatingSystemVersion] is documented as available in 10.10.
// It's also available via a private API since 10.9.2. For the remaining
// cases in 10.9, rely on ::Gestalt(..). Since this code is only needed for
// 10.9.0 and 10.9.1 and uses the recommended replacement thereafter,
// suppress the warning for this fallback case.
- DCHECK(base::mac::IsOSMavericks());
+ DCHECK(base::mac::IsOS10_9());
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
Gestalt(gestaltSystemVersionMajor,
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index 5d1c450139..cbdfa3f7a9 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -28,6 +28,11 @@
#include <sys/statvfs.h>
#endif
+#if defined(OS_LINUX)
+#include <linux/magic.h>
+#include <sys/vfs.h>
+#endif
+
namespace {
#if !defined(OS_OPENBSD)
@@ -73,6 +78,23 @@ base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
+#if defined(OS_LINUX)
+bool IsStatsZeroIfUnlimited(const base::FilePath& path) {
+ struct statfs stats;
+
+ if (HANDLE_EINTR(statfs(path.value().c_str(), &stats)) != 0)
+ return false;
+
+ switch (static_cast<uint32_t>(stats.f_type)) {
+ case TMPFS_MAGIC:
+ case HUGETLBFS_MAGIC:
+ case RAMFS_MAGIC:
+ return true;
+ }
+ return false;
+}
+#endif
+
bool GetDiskSpaceInfo(const base::FilePath& path,
int64_t* available_bytes,
int64_t* total_bytes) {
@@ -80,10 +102,25 @@ bool GetDiskSpaceInfo(const base::FilePath& path,
if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
return false;
- if (available_bytes)
- *available_bytes = static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
- if (total_bytes)
- *total_bytes = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+#if defined(OS_LINUX)
+ const bool zero_size_means_unlimited =
+ stats.f_blocks == 0 && IsStatsZeroIfUnlimited(path);
+#else
+ const bool zero_size_means_unlimited = false;
+#endif
+
+ if (available_bytes) {
+ *available_bytes =
+ zero_size_means_unlimited
+ ? std::numeric_limits<int64_t>::max()
+ : static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+ }
+
+ if (total_bytes) {
+ *total_bytes = zero_size_means_unlimited
+ ? std::numeric_limits<int64_t>::max()
+ : static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+ }
return true;
}
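A standalone, Linux-only sketch of the same zero-blocks heuristic used by IsStatsZeroIfUnlimited() above (the function name here is illustrative):

#include <linux/magic.h>
#include <stdint.h>
#include <sys/vfs.h>

// Returns true if |path| lives on a memory-backed filesystem where
// f_blocks == 0 means "no limit" rather than "no space".
bool LooksUnlimited(const char* path) {
  struct statfs stats;
  if (statfs(path, &stats) != 0)
    return false;  // On error, assume a regular, bounded filesystem.
  switch (static_cast<uint32_t>(stats.f_type)) {
    case TMPFS_MAGIC:
    case HUGETLBFS_MAGIC:
    case RAMFS_MAGIC:
      return true;
  }
  return false;
}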
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index 0231df6379..c3b8507707 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -156,4 +156,14 @@ TEST_F(SysInfoTest, IsRunningOnChromeOS) {
EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
}
+TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
+ const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
+ EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+
+ const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
+ EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+}
+
#endif // OS_CHROMEOS
diff --git a/base/task/cancelable_task_tracker.cc b/base/task/cancelable_task_tracker.cc
index 6f394100d5..9999c18303 100644
--- a/base/task/cancelable_task_tracker.cc
+++ b/base/task/cancelable_task_tracker.cc
@@ -8,21 +8,14 @@
#include <utility>
-#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
-using base::Bind;
-using base::CancellationFlag;
-using base::Closure;
-using base::hash_map;
-using base::TaskRunner;
+namespace base {
namespace {
@@ -57,8 +50,6 @@ void RunOrPostToTaskRunner(TaskRunner* task_runner, const Closure& closure) {
} // namespace
-namespace base {
-
// static
const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
@@ -66,7 +57,7 @@ CancelableTaskTracker::CancelableTaskTracker()
: next_id_(1), weak_factory_(this) {}
CancelableTaskTracker::~CancelableTaskTracker() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
TryCancelAll();
}
@@ -75,7 +66,7 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
TaskRunner* task_runner,
const tracked_objects::Location& from_here,
const Closure& task) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return PostTaskAndReply(task_runner, from_here, task, Bind(&base::DoNothing));
}
@@ -83,12 +74,12 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
TaskRunner* task_runner,
const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ Closure task,
+ Closure reply) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
- // We need a MessageLoop to run reply.
- DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+ // We need a SequencedTaskRunnerHandle to run |reply|.
+ DCHECK(base::SequencedTaskRunnerHandle::IsSet());
// Owned by reply callback below.
CancellationFlag* flag = new CancellationFlag();
@@ -96,15 +87,12 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
TaskId id = next_id_;
next_id_++; // int64_t is big enough that we ignore the potential overflow.
- const Closure& untrack_closure =
+ Closure untrack_closure =
Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id);
- bool success =
- task_runner->PostTaskAndReply(from_here,
- Bind(&RunIfNotCanceled, flag, task),
- Bind(&RunIfNotCanceledThenUntrack,
- base::Owned(flag),
- reply,
- untrack_closure));
+ bool success = task_runner->PostTaskAndReply(
+ from_here, Bind(&RunIfNotCanceled, flag, std::move(task)),
+ Bind(&RunIfNotCanceledThenUntrack, base::Owned(flag), std::move(reply),
+ std::move(untrack_closure)));
if (!success)
return kBadTaskId;
@@ -115,8 +103,8 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
IsCanceledCallback* is_canceled_cb) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK(base::SequencedTaskRunnerHandle::IsSet());
TaskId id = next_id_;
next_id_++; // int64_t is big enough that we ignore the potential overflow.
@@ -129,11 +117,11 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id),
flag);
- // Will always run |untrack_and_delete_flag| on current MessageLoop.
+ // Will always run |untrack_and_delete_flag| on current sequence.
base::ScopedClosureRunner* untrack_and_delete_flag_runner =
new base::ScopedClosureRunner(
Bind(&RunOrPostToTaskRunner,
- RetainedRef(base::ThreadTaskRunnerHandle::Get()),
+ RetainedRef(base::SequencedTaskRunnerHandle::Get()),
untrack_and_delete_flag));
*is_canceled_cb =
@@ -144,7 +132,7 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
}
void CancelableTaskTracker::TryCancel(TaskId id) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
hash_map<TaskId, CancellationFlag*>::const_iterator it = task_flags_.find(id);
if (it == task_flags_.end()) {
@@ -160,7 +148,7 @@ void CancelableTaskTracker::TryCancel(TaskId id) {
}
void CancelableTaskTracker::TryCancelAll() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
for (hash_map<TaskId, CancellationFlag*>::const_iterator it =
task_flags_.begin();
@@ -171,19 +159,19 @@ void CancelableTaskTracker::TryCancelAll() {
}
bool CancelableTaskTracker::HasTrackedTasks() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return !task_flags_.empty();
}
void CancelableTaskTracker::Track(TaskId id, CancellationFlag* flag) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
bool success = task_flags_.insert(std::make_pair(id, flag)).second;
DCHECK(success);
}
void CancelableTaskTracker::Untrack(TaskId id) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
size_t num = task_flags_.erase(id);
DCHECK_EQ(1u, num);
}
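A hedged usage sketch of the tracker after the sequence migration; |worker| and the two callbacks are illustrative:

#include "base/bind.h"
#include "base/location.h"
#include "base/task/cancelable_task_tracker.h"
#include "base/task_runner.h"

namespace {

void Compute() {}  // Runs on |worker|.
void Done() {}     // Reply; runs on the posting sequence.

void TrackerSketch(base::TaskRunner* worker) {
  // Must be created and used on a single sequence, per the SequenceChecker.
  base::CancelableTaskTracker tracker;
  const base::CancelableTaskTracker::TaskId id = tracker.PostTaskAndReply(
      worker, FROM_HERE, base::Bind(&Compute), base::Bind(&Done));
  if (id != base::CancelableTaskTracker::kBadTaskId)
    tracker.TryCancel(id);  // Best effort: skipped if |Compute| already ran.
}

}  // namespace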
diff --git a/base/task/cancelable_task_tracker.h b/base/task/cancelable_task_tracker.h
index 86b5a45845..4f64a24060 100644
--- a/base/task/cancelable_task_tracker.h
+++ b/base/task/cancelable_task_tracker.h
@@ -15,36 +15,38 @@
//
// CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
// preferred solutions for canceling a task. However, they don't support
-// cancelation from another thread. This is sometimes a performance critical
+// cancelation from another sequence. This is sometimes a performance critical
// requirement. E.g. we need to cancel a database lookup task on the DB thread
// when the user changes the input text. If it is performance critical to do a best-effort
-// cancelation of a task, then CancelableTaskTracker is appropriate,
-// otherwise use one of the other mechanisms.
+// cancelation of a task, then CancelableTaskTracker is appropriate, otherwise
+// use one of the other mechanisms.
//
// THREAD-SAFETY:
//
-// 1. CancelableTaskTracker objects are not thread safe. They must
-// be created, used, and destroyed on the originating thread that posts the
-// task. It's safe to destroy a CancelableTaskTracker while there
-// are outstanding tasks. This is commonly used to cancel all outstanding
-// tasks.
+// 1. A CancelableTaskTracker object must be created, used, and destroyed on a
+// single sequence.
//
-// 2. Both task and reply are deleted on the originating thread.
+// 2. It's safe to destroy a CancelableTaskTracker while there are outstanding
+// tasks. This is commonly used to cancel all outstanding tasks.
//
-// 3. IsCanceledCallback is thread safe and can be run or deleted on any
-// thread.
+// 3. Both task and reply are deleted on the originating sequence.
+//
+// 4. IsCanceledCallback can be run or deleted on any sequence.
#ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#include <stdint.h>
+#include <utility>
+
#include "base/base_export.h"
+#include "base/bind.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
-#include "base/task_runner_util.h"
-#include "base/threading/thread_checker.h"
+#include "base/post_task_and_reply_with_result_internal.h"
+#include "base/sequence_checker.h"
namespace tracked_objects {
class Location;
@@ -74,25 +76,21 @@ class BASE_EXPORT CancelableTaskTracker {
TaskId PostTaskAndReply(base::TaskRunner* task_runner,
const tracked_objects::Location& from_here,
- const base::Closure& task,
- const base::Closure& reply);
+ base::Closure task,
+ base::Closure reply);
template <typename TaskReturnType, typename ReplyArgType>
- TaskId PostTaskAndReplyWithResult(
- base::TaskRunner* task_runner,
- const tracked_objects::Location& from_here,
- const base::Callback<TaskReturnType(void)>& task,
- const base::Callback<void(ReplyArgType)>& reply) {
+ TaskId PostTaskAndReplyWithResult(base::TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ base::Callback<TaskReturnType()> task,
+ base::Callback<void(ReplyArgType)> reply) {
TaskReturnType* result = new TaskReturnType();
return PostTaskAndReply(
- task_runner,
- from_here,
+ task_runner, from_here,
base::Bind(&base::internal::ReturnAsParamAdapter<TaskReturnType>,
- task,
- base::Unretained(result)),
+ std::move(task), base::Unretained(result)),
base::Bind(&base::internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
- reply,
- base::Owned(result)));
+ std::move(reply), base::Owned(result)));
}
// Creates a tracked TaskId and an associated IsCanceledCallback. Client can
@@ -130,7 +128,7 @@ class BASE_EXPORT CancelableTaskTracker {
base::hash_map<TaskId, base::CancellationFlag*> task_flags_;
TaskId next_id_;
- base::ThreadChecker thread_checker_;
+ SequenceChecker sequence_checker_;
base::WeakPtrFactory<CancelableTaskTracker> weak_factory_;
diff --git a/base/task/cancelable_task_tracker_unittest.cc b/base/task/cancelable_task_tracker_unittest.cc
index ff9e40b855..fd480f3687 100644
--- a/base/task/cancelable_task_tracker_unittest.cc
+++ b/base/task/cancelable_task_tracker_unittest.cc
@@ -15,6 +15,7 @@
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/test/gtest_util.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -112,7 +113,7 @@ TEST_F(CancelableTaskTrackerTest, CancelPostedTask) {
test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE));
EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
- EXPECT_EQ(1U, test_task_runner->GetPendingTasks().size());
+ EXPECT_EQ(1U, test_task_runner->NumPendingTasks());
task_tracker_.TryCancel(task_id);
@@ -344,23 +345,11 @@ class CancelableTaskTrackerDeathTest : public CancelableTaskTrackerTest {
}
};
-// Duplicated from base/threading/thread_checker.h so that we can be
-// good citizens there and undef the macro.
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-#define ENABLE_THREAD_CHECKER 1
-#else
-#define ENABLE_THREAD_CHECKER 0
-#endif
-
// Runs |fn| with |task_tracker|, expecting it to crash in debug mode.
void MaybeRunDeadlyTaskTrackerMemberFunction(
CancelableTaskTracker* task_tracker,
const Callback<void(CancelableTaskTracker*)>& fn) {
-// CancelableTask uses DCHECKs with its ThreadChecker (itself only
-// enabled in debug mode).
-#if ENABLE_THREAD_CHECKER
- EXPECT_DEATH_IF_SUPPORTED(fn.Run(task_tracker), "");
-#endif
+ EXPECT_DCHECK_DEATH(fn.Run(task_tracker));
}
void PostDoNothingTask(CancelableTaskTracker* task_tracker) {
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 262e1f8b09..35c0a23274 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -4,6 +4,8 @@
#include "base/task_runner.h"
+#include <utility>
+
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/threading/post_task_and_reply_impl.h"
@@ -45,12 +47,11 @@ bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
return PostDelayedTask(from_here, task, base::TimeDelta());
}
-bool TaskRunner::PostTaskAndReply(
- const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply) {
+bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
+ Closure task,
+ Closure reply) {
return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
- from_here, task, reply);
+ from_here, std::move(task), std::move(reply));
}
TaskRunner::TaskRunner() {}
diff --git a/base/task_runner.h b/base/task_runner.h
index 9593835eeb..be3039d372 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -8,7 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
@@ -123,8 +123,8 @@ class BASE_EXPORT TaskRunner
// and the reply will cancel itself safely because it is bound to a
// WeakPtr<>.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply);
+ Closure task,
+ Closure reply);
protected:
friend struct TaskRunnerTraits;
diff --git a/base/task_runner_util.h b/base/task_runner_util.h
index ba8e120c6f..7fda07624d 100644
--- a/base/task_runner_util.h
+++ b/base/task_runner_util.h
@@ -5,37 +5,17 @@
#ifndef BASE_TASK_RUNNER_UTIL_H_
#define BASE_TASK_RUNNER_UTIL_H_
+#include <utility>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/logging.h"
+#include "base/post_task_and_reply_with_result_internal.h"
#include "base/task_runner.h"
namespace base {
-namespace internal {
-
-// Adapts a function that produces a result via a return value to
-// one that returns via an output parameter.
-template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
- ReturnType* result) {
- *result = func.Run();
-}
-
-// Adapts a T* result to a callblack that expects a T.
-template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
- TaskReturnType* result) {
- // TODO(ajwong): Remove this conditional and add a DCHECK to enforce that
- // |reply| must be non-null in PostTaskAndReplyWithResult() below after
- // current code that relies on this API softness has been removed.
- // http://crbug.com/162712
- if (!callback.is_null())
- callback.Run(std::move(*result));
-}
-
-} // namespace internal
-
// When you have these methods
//
// R DoWorkAndReturn();
@@ -51,18 +31,18 @@ void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
// Bind(&DoWorkAndReturn),
// Bind(&Callback));
template <typename TaskReturnType, typename ReplyArgType>
-bool PostTaskAndReplyWithResult(
- TaskRunner* task_runner,
- const tracked_objects::Location& from_here,
- const Callback<TaskReturnType(void)>& task,
- const Callback<void(ReplyArgType)>& reply) {
+bool PostTaskAndReplyWithResult(TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ Callback<TaskReturnType()> task,
+ Callback<void(ReplyArgType)> reply) {
+ DCHECK(task);
+ DCHECK(reply);
TaskReturnType* result = new TaskReturnType();
return task_runner->PostTaskAndReply(
- from_here,
- base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>, task,
- result),
- base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>, reply,
- base::Owned(result)));
+ from_here, base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>,
+ std::move(task), result),
+ base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+ std::move(reply), base::Owned(result)));
}
} // namespace base
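Filling out the usage comment above into a complete, hedged call; DoWorkAndReturn and Callback follow the names used in that comment, while |worker| is illustrative:

#include "base/bind.h"
#include "base/location.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"

namespace {

int DoWorkAndReturn() { return 42; }
void Callback(int result) {}  // Runs where the post originated.

void PostWithResultSketch(base::TaskRunner* worker) {
  base::PostTaskAndReplyWithResult(worker, FROM_HERE,
                                   base::Bind(&DoWorkAndReturn),
                                   base::Bind(&Callback));
}

}  // namespace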
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
index 7480e18da1..d60f25939b 100644
--- a/base/task_scheduler/scheduler_lock_impl.cc
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -67,19 +67,30 @@ class SafeAcquisitionTracker {
// Otherwise, make sure that the previous lock acquired is an allowed
// predecessor.
AutoLock auto_lock(allowed_predecessor_map_lock_);
+ // Using at() is exception-safe here as |lock| was registered already.
const SchedulerLockImpl* allowed_predecessor =
allowed_predecessor_map_.at(lock);
DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
}
+ // Asserts that |lock|'s registered predecessor is safe. Because
+ // SchedulerLocks are registered at construction time and any predecessor
+ // specified on a SchedulerLock must already exist, the first registered
+ // SchedulerLock in a potential chain must have a null predecessor and is thus
+ // cycle-free. Any subsequent SchedulerLock with a predecessor must come from
+ // the set of registered SchedulerLocks. Since the registered SchedulerLocks
+ // only contain cycle-free SchedulerLocks, this subsequent SchedulerLock is
+ // itself cycle-free and may be safely added to the registered SchedulerLock
+ // set.
void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
allowed_predecessor_map_lock_.AssertAcquired();
- for (const SchedulerLockImpl* predecessor =
- allowed_predecessor_map_.at(lock);
- predecessor != nullptr;
- predecessor = allowed_predecessor_map_.at(predecessor)) {
- DCHECK_NE(predecessor, lock) <<
- "Scheduler lock predecessor cycle detected.";
+ // Using at() is exception-safe here as |lock| was registered already.
+ const SchedulerLockImpl* predecessor = allowed_predecessor_map_.at(lock);
+ if (predecessor) {
+ DCHECK(allowed_predecessor_map_.find(predecessor) !=
+ allowed_predecessor_map_.end())
+ << "SchedulerLock was registered before its predecessor. "
+ << "Potential cycle detected";
}
}
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
index daa50257f1..55182479aa 100644
--- a/base/task_scheduler/scheduler_lock_unittest.cc
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -10,7 +10,7 @@
#include "base/macros.h"
#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
-#include "base/task_scheduler/test_utils.h"
+#include "base/test/gtest_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -131,7 +131,7 @@ TEST(TaskSchedulerLock, AcquirePredecessorWrongOrder) {
EXPECT_DCHECK_DEATH({
lock.Acquire();
predecessor.Acquire();
- }, "");
+ });
}
TEST(TaskSchedulerLock, AcquireNonPredecessor) {
@@ -140,7 +140,7 @@ TEST(TaskSchedulerLock, AcquireNonPredecessor) {
EXPECT_DCHECK_DEATH({
lock1.Acquire();
lock2.Acquire();
- }, "");
+ });
}
TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
@@ -172,7 +172,7 @@ TEST(TaskSchedulerLock, AcquireMultipleLocksNoTransitivity) {
EXPECT_DCHECK_DEATH({
lock1.Acquire();
lock3.Acquire();
- }, "");
+ });
}
TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
@@ -258,7 +258,7 @@ TEST(TaskSchedulerLock, SelfReferentialLock) {
SchedulerLock lock;
};
- EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; }, "");
+ EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; });
}
TEST(TaskSchedulerLock, PredecessorCycle) {
@@ -269,7 +269,7 @@ TEST(TaskSchedulerLock, PredecessorCycle) {
SchedulerLock lock2;
};
- EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+ EXPECT_DCHECK_DEATH({ LockCycle cycle; });
}
TEST(TaskSchedulerLock, PredecessorLongerCycle) {
@@ -288,7 +288,7 @@ TEST(TaskSchedulerLock, PredecessorLongerCycle) {
SchedulerLock lock5;
};
- EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+ EXPECT_DCHECK_DEATH({ LockCycle cycle; });
}
} // namespace
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
new file mode 100644
index 0000000000..a163863d0f
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<const TaskPriority>>::Leaky
+ tls_task_priority_for_current_thread = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+ScopedSetTaskPriorityForCurrentThread::ScopedSetTaskPriorityForCurrentThread(
+ TaskPriority priority)
+ : priority_(priority) {
+ DCHECK(!tls_task_priority_for_current_thread.Get().Get());
+ tls_task_priority_for_current_thread.Get().Set(&priority_);
+}
+
+ScopedSetTaskPriorityForCurrentThread::
+ ~ScopedSetTaskPriorityForCurrentThread() {
+ DCHECK_EQ(&priority_, tls_task_priority_for_current_thread.Get().Get());
+ tls_task_priority_for_current_thread.Get().Set(nullptr);
+}
+
+TaskPriority GetTaskPriorityForCurrentThread() {
+ const TaskPriority* priority =
+ tls_task_priority_for_current_thread.Get().Get();
+ return priority ? *priority : TaskPriority::USER_VISIBLE;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.h b/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
new file mode 100644
index 0000000000..4508911d9c
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+#define BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+class BASE_EXPORT ScopedSetTaskPriorityForCurrentThread {
+ public:
+ // Within the scope of this object, GetTaskPriorityForCurrentThread() will
+ // return |priority|.
+ ScopedSetTaskPriorityForCurrentThread(TaskPriority priority);
+ ~ScopedSetTaskPriorityForCurrentThread();
+
+ private:
+ const TaskPriority priority_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetTaskPriorityForCurrentThread);
+};
+
+// Returns the priority of the TaskScheduler task running on the current thread,
+// or TaskPriority::USER_VISIBLE if no TaskScheduler task is running on the
+// current thread.
+BASE_EXPORT TaskPriority GetTaskPriorityForCurrentThread();
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
new file mode 100644
index 0000000000..c497af6770
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerScopedSetTaskPriorityForCurrentThreadTest,
+ ScopedSetTaskPriorityForCurrentThread) {
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+ {
+ ScopedSetTaskPriorityForCurrentThread
+ scoped_set_task_priority_for_current_thread(
+ TaskPriority::USER_BLOCKING);
+ EXPECT_EQ(TaskPriority::USER_BLOCKING, GetTaskPriorityForCurrentThread());
+ }
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index 4ecb60568c..601b5402d0 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -26,24 +26,30 @@ bool Sequence::PushTask(std::unique_ptr<Task> task) {
return queue_.size() == 1;
}
-const Task* Sequence::PeekTask() const {
- AutoSchedulerLock auto_lock(lock_);
-
- if (queue_.empty())
- return nullptr;
-
- return queue_.front().get();
-}
-
-bool Sequence::PopTask() {
+std::unique_ptr<Task> Sequence::TakeTask() {
AutoSchedulerLock auto_lock(lock_);
DCHECK(!queue_.empty());
+ DCHECK(queue_.front());
const int priority_index =
static_cast<int>(queue_.front()->traits.priority());
DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
--num_tasks_per_priority_[priority_index];
+ return std::move(queue_.front());
+}
+
+TaskTraits Sequence::PeekTaskTraits() const {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!queue_.empty());
+ DCHECK(queue_.front());
+ return queue_.front()->traits;
+}
+
+bool Sequence::Pop() {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!queue_.empty());
+ DCHECK(!queue_.front());
queue_.pop();
return queue_.empty();
}
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
index 3fa037fa35..ed1d0ac401 100644
--- a/base/task_scheduler/sequence.h
+++ b/base/task_scheduler/sequence.h
@@ -13,6 +13,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/sequence_token.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task.h"
@@ -21,7 +22,10 @@
namespace base {
namespace internal {
-// A sequence holds tasks that must be executed in posting order.
+// A Sequence holds slots each containing up to a single Task that must be
+// executed in posting order.
+//
+// In comments below, an "empty Sequence" is a Sequence with no slot.
//
// Note: there is a known refcounted-ownership cycle in the Scheduler
// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
@@ -40,26 +44,38 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
public:
Sequence();
- // Adds |task| at the end of the sequence's queue. Returns true if the
- // sequence was empty before this operation.
+ // Adds |task| in a new slot at the end of the Sequence. Returns true if the
+ // Sequence was empty before this operation.
bool PushTask(std::unique_ptr<Task> task);
- // Returns the task in front of the sequence's queue, if any.
- const Task* PeekTask() const;
+ // Transfers ownership of the Task in the front slot of the Sequence to the
+ // caller. The front slot of the Sequence will be nullptr and remain until
+ // Pop(). Cannot be called on an empty Sequence or a Sequence whose front slot
+ // is already nullptr.
+ std::unique_ptr<Task> TakeTask();
+
+ // Returns the TaskTraits of the Task in front of the Sequence. Cannot be
+ // called on an empty Sequence or on a Sequence whose front slot is empty.
+ TaskTraits PeekTaskTraits() const;
- // Removes the task in front of the sequence's queue. Returns true if the
- // sequence is empty after this operation. Cannot be called on an empty
- // sequence.
- bool PopTask();
+ // Removes the front slot of the Sequence. The front slot must have been
+ // emptied by TakeTask() before this is called. Cannot be called on an empty
+ // Sequence. Returns true if the Sequence is empty after this operation.
+ bool Pop();
- // Returns a SequenceSortKey representing the priority of the sequence. Cannot
- // be called on an empty sequence.
+ // Returns a SequenceSortKey representing the priority of the Sequence. Cannot
+ // be called on an empty Sequence.
SequenceSortKey GetSortKey() const;
+ // Returns a token that uniquely identifies this Sequence.
+ const SequenceToken& token() const { return token_; }
+
private:
friend class RefCountedThreadSafe<Sequence>;
~Sequence();
+ const SequenceToken token_ = SequenceToken::Create();
+
// Synchronizes access to all members.
mutable SchedulerLock lock_;
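A hedged sketch of the two-step consume protocol this API change introduces (the scheduler's real worker loop has more bookkeeping):

#include <memory>

#include "base/memory/ref_counted.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"

// TakeTask() empties the front slot, the Task runs without the Sequence lock
// held, then Pop() discards the emptied slot.
void RunOneTask(scoped_refptr<base::internal::Sequence> sequence) {
  std::unique_ptr<base::internal::Task> task = sequence->TakeTask();
  task->task.Run();  // |task| inherits the Closure member from PendingTask.
  if (!sequence->Pop()) {
    // Sequence still has tasks; the scheduler would re-enqueue it here.
  }
}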
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index 6a15299e1e..c45d8a87d0 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -4,7 +4,12 @@
#include "base/task_scheduler/sequence.h"
+#include <utility>
+
+#include "base/bind.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/test/gtest_util.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -13,6 +18,7 @@ namespace internal {
namespace {
+
class TaskSchedulerSequenceTest : public testing::Test {
public:
TaskSchedulerSequenceTest()
@@ -56,7 +62,7 @@ class TaskSchedulerSequenceTest : public testing::Test {
std::unique_ptr<Task> task_e_owned_;
// Raw pointers to those same tasks for verification. This is needed because
- // the scoped_ptrs above no longer point to the tasks once they have been
+ // the unique_ptrs above no longer point to the tasks once they have been
// moved into a Sequence.
const Task* task_a_;
const Task* task_b_;
@@ -70,54 +76,54 @@ class TaskSchedulerSequenceTest : public testing::Test {
} // namespace
-TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
+TEST_F(TaskSchedulerSequenceTest, PushTakeRemove) {
scoped_refptr<Sequence> sequence(new Sequence);
// Push task A in the sequence. Its sequenced time should be updated and it
// should be in front of the sequence.
EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
EXPECT_FALSE(task_a_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
// Push task B, C and D in the sequence. Their sequenced time should be
// updated and task A should always remain in front of the sequence.
EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
EXPECT_FALSE(task_b_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
EXPECT_FALSE(task_c_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
EXPECT_FALSE(task_d_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
- // Pop task A. Task B should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_b_, sequence->PeekTask());
+ // Get the task in front of the sequence. It should be task A.
+ EXPECT_EQ(task_a_, sequence->TakeTask().get());
- // Pop task B. Task C should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_c_, sequence->PeekTask());
+ // Remove the empty slot. Task B should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_b_, sequence->TakeTask().get());
- // Pop task C. Task D should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_d_, sequence->PeekTask());
+ // Remove the empty slot. Task C should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_c_, sequence->TakeTask().get());
- // Push task E in the sequence. Its sequenced time should be updated and
- // task D should remain in front.
+ // Remove the empty slot. Task D should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_d_, sequence->TakeTask().get());
+
+ // Push task E in the sequence. Its sequenced time should be updated.
EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
EXPECT_FALSE(task_e_->sequenced_time.is_null());
- EXPECT_EQ(task_d_, sequence->PeekTask());
- // Pop task D. Task E should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_e_, sequence->PeekTask());
+ // Remove the empty slot. Task E should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_e_, sequence->TakeTask().get());
- // Pop task E. The sequence should now be empty.
- EXPECT_TRUE(sequence->PopTask());
- EXPECT_EQ(nullptr, sequence->PeekTask());
+ // Remove the empty slot. The sequence should now be empty.
+ EXPECT_TRUE(sequence->Pop());
}
TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
@@ -152,21 +158,24 @@ TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
// Pop task A. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task B.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
sequence->GetSortKey());
// Pop task B. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task C.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
sequence->GetSortKey());
// Pop task C. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task D.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
sequence->GetSortKey());
@@ -180,10 +189,38 @@ TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
// Pop task D. The highest priority is now from task E (BACKGROUND). The
// task in front of the sequence is now task E.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
sequence->GetSortKey());
}
+// Verify that a DCHECK fires if Pop() is called on a sequence whose front slot
+// isn't empty.
+TEST_F(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+ sequence->PushTask(
+ MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
+
+ EXPECT_DCHECK_DEATH({ sequence->Pop(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
+// slot is empty.
+TEST_F(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+ sequence->PushTask(
+ MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
+
+ EXPECT_TRUE(sequence->TakeTask());
+ EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
+TEST_F(TaskSchedulerSequenceTest, TakeEmptySequence) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+ EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
+}
+
} // namespace internal
} // namespace base
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index 8a589a2021..3780c16dcb 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -10,12 +10,20 @@ namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
const Closure& task,
const TaskTraits& traits,
- const TimeDelta& delay)
+ TimeDelta delay)
: PendingTask(posted_from,
task,
delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
false), // Not nestable.
- traits(traits) {}
+ // Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
+ // being scheduled by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
+ traits(!delay.is_zero() &&
+ traits.shutdown_behavior() ==
+ TaskShutdownBehavior::BLOCK_SHUTDOWN
+ ? TaskTraits(traits).WithShutdownBehavior(
+ TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
+ : traits),
+ delay(delay) {}
Task::~Task() = default;
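A hedged check of the demotion behavior implemented above (a delayed BLOCK_SHUTDOWN task becomes SKIP_ON_SHUTDOWN):

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/task_scheduler/task.h"
#include "base/task_scheduler/task_traits.h"
#include "base/time/time.h"

void DelayedTraitsSketch() {
  base::internal::Task delayed(
      FROM_HERE, base::Bind(&base::DoNothing),
      base::TaskTraits().WithShutdownBehavior(
          base::TaskShutdownBehavior::BLOCK_SHUTDOWN),
      base::TimeDelta::FromSeconds(10));
  // delayed.traits.shutdown_behavior() is now SKIP_ON_SHUTDOWN, so the
  // pending delayed task cannot hold up shutdown before being scheduled.
}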
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index 2b53c690fd..c5b9bdb53b 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -23,17 +23,22 @@ namespace internal {
// profiling inherited from PendingTask.
struct BASE_EXPORT Task : public PendingTask {
// |posted_from| is the site the task was posted from. |task| is the closure
- // to run. |traits| is metadata about the task. |delay| is a delay that must
- // expire before the Task runs.
+ // to run. |traits| is metadata about the task. |delay| is a delay that
+ // must expire before the Task runs. If |delay| is non-zero and the shutdown
+ // behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
+ // automatically adjusted to SKIP_ON_SHUTDOWN.
Task(const tracked_objects::Location& posted_from,
const Closure& task,
const TaskTraits& traits,
- const TimeDelta& delay);
+ TimeDelta delay);
~Task();
// The TaskTraits of this task.
const TaskTraits traits;
+ // The delay that must expire before the task runs.
+ const TimeDelta delay;
+
// The time at which the task was inserted in its sequence. For an undelayed
// task, this happens at post time. For a delayed task, this happens some
// time after the task's delay has expired. If the task hasn't been inserted
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
index dd55535852..6acf3244f5 100644
--- a/base/task_scheduler/task_traits.cc
+++ b/base/task_scheduler/task_traits.cc
@@ -8,20 +8,29 @@
#include <ostream>
+#include "base/logging.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
namespace base {
// Do not rely on defaults hard-coded below beyond the guarantees described in
// the header; anything else is subject to change. Tasks should explicitly
// request defaults if the behavior is critical to the task.
TaskTraits::TaskTraits()
- : with_file_io_(false),
- priority_(TaskPriority::BACKGROUND),
+ : may_block_(false),
+ with_base_sync_primitives_(false),
+ priority_(internal::GetTaskPriorityForCurrentThread()),
shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
TaskTraits::~TaskTraits() = default;
-TaskTraits& TaskTraits::WithFileIO() {
- with_file_io_ = true;
+TaskTraits& TaskTraits::MayBlock() {
+ may_block_ = true;
+ return *this;
+}
+
+TaskTraits& TaskTraits::WithBaseSyncPrimitives() {
+ with_base_sync_primitives_ = true;
return *this;
}
@@ -36,34 +45,41 @@ TaskTraits& TaskTraits::WithShutdownBehavior(
return *this;
}
-std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
+const char* TaskPriorityToString(TaskPriority task_priority) {
switch (task_priority) {
case TaskPriority::BACKGROUND:
- os << "BACKGROUND";
- break;
+ return "BACKGROUND";
case TaskPriority::USER_VISIBLE:
- os << "USER_VISIBLE";
- break;
+ return "USER_VISIBLE";
case TaskPriority::USER_BLOCKING:
- os << "USER_BLOCKING";
- break;
+ return "USER_BLOCKING";
}
- return os;
+ NOTREACHED();
+ return "";
}
-std::ostream& operator<<(std::ostream& os,
- const TaskShutdownBehavior& shutdown_behavior) {
+const char* TaskShutdownBehaviorToString(
+ TaskShutdownBehavior shutdown_behavior) {
switch (shutdown_behavior) {
case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
- os << "CONTINUE_ON_SHUTDOWN";
- break;
+ return "CONTINUE_ON_SHUTDOWN";
case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
- os << "SKIP_ON_SHUTDOWN";
- break;
+ return "SKIP_ON_SHUTDOWN";
case TaskShutdownBehavior::BLOCK_SHUTDOWN:
- os << "BLOCK_SHUTDOWN";
- break;
+ return "BLOCK_SHUTDOWN";
}
+ NOTREACHED();
+ return "";
+}
+
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
+ os << TaskPriorityToString(task_priority);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const TaskShutdownBehavior& shutdown_behavior) {
+ os << TaskShutdownBehaviorToString(shutdown_behavior);
return os;
}
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
index 0c0d304dcf..435fdac9af 100644
--- a/base/task_scheduler/task_traits.h
+++ b/base/task_scheduler/task_traits.h
@@ -78,19 +78,57 @@ enum class TaskShutdownBehavior {
// Describes metadata for a single task or a group of tasks.
class BASE_EXPORT TaskTraits {
public:
- // Constructs a default TaskTraits for tasks with
- // (1) no I/O,
- // (2) low priority, and
- // (3) may block shutdown or be skipped on shutdown.
- // Tasks that require stricter guarantees should highlight those by requesting
+ // Constructs a default TaskTraits for tasks that
+ // (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
+ // (2) prefer inheriting the current priority to specifying their own, and
+ // (3) can either block shutdown or be skipped on shutdown
+ // (TaskScheduler implementation is free to choose a fitting default).
+ // Tasks that require stricter guarantees and/or know the specific
+ // TaskPriority appropriate for them should highlight those by requesting
// explicit traits below.
TaskTraits();
TaskTraits(const TaskTraits& other) = default;
TaskTraits& operator=(const TaskTraits& other) = default;
~TaskTraits();
- // Allows tasks with these traits to do file I/O.
- TaskTraits& WithFileIO();
+ // Tasks with this trait may block. This includes but is not limited to tasks
+ // that wait on synchronous file I/O operations: read or write a file from
+ // disk, interact with a pipe or a socket, rename or delete a file, enumerate
+ // files in a directory, etc. This trait isn't required for the mere use of
+ // locks. For tasks that block on base/ synchronization primitives, see
+ // WithBaseSyncPrimitives().
+ TaskTraits& MayBlock();
+
+ // Tasks with this trait will pass base::AssertWaitAllowed(), i.e. will be
+ // allowed to call the following methods:
+ // - base::WaitableEvent::Wait
+ // - base::ConditionVariable::Wait
+ // - base::PlatformThread::Join
+ // - base::PlatformThread::Sleep
+ // - base::Process::WaitForExit
+ // - base::Process::WaitForExitWithTimeout
+ //
+ // Tasks should generally not use these methods.
+ //
+ // Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
+ // that should happen after the wait in a callback and post that callback from
+ // where the WaitableEvent or ConditionVariable would have been signaled. If
+ // something needs to be scheduled after many tasks have executed, use
+ // base::BarrierClosure.
+ //
+ // Avoid creating threads. Instead, use
+ // base::Create(Sequenced|SingleThreaded)TaskRunnerWithTraits(). If a thread is
+ // really needed, make it non-joinable and add cleanup work at the end of the
+ // thread's main function (if using base::Thread, override Cleanup()).
+ //
+ // On Windows, join processes asynchronously using base::win::ObjectWatcher.
+ //
+ // MayBlock() must be specified in conjunction with this trait if and only if
+ // removing the usage of the methods listed above from the labeled tasks
+ // would still leave tasks that may block (per MayBlock()'s definition).
+ //
+ // When in doubt, consult with //base/task_scheduler/OWNERS.
+ TaskTraits& WithBaseSyncPrimitives();
// Applies |priority| to tasks with these traits.
TaskTraits& WithPriority(TaskPriority priority);
@@ -98,8 +136,11 @@ class BASE_EXPORT TaskTraits {
// Applies |shutdown_behavior| to tasks with these traits.
TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
- // Returns true if file I/O is allowed by these traits.
- bool with_file_io() const { return with_file_io_; }
+ // Returns true if tasks with these traits may block.
+ bool may_block() const { return may_block_; }
+
+ // Returns true if tasks with these traits may use base/ sync primitives.
+ bool with_base_sync_primitives() const { return with_base_sync_primitives_; }
// Returns the priority of tasks with these traits.
TaskPriority priority() const { return priority_; }
@@ -108,29 +149,22 @@ class BASE_EXPORT TaskTraits {
TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
private:
- bool with_file_io_;
+ bool may_block_;
+ bool with_base_sync_primitives_;
TaskPriority priority_;
TaskShutdownBehavior shutdown_behavior_;
};
-// Describes how tasks are executed by a task runner.
-enum class ExecutionMode {
- // Can execute multiple tasks at a time in any order.
- PARALLEL,
-
- // Executes one task at a time in posting order. The sequence’s priority is
- // equivalent to the highest priority pending task in the sequence.
- SEQUENCED,
+// Returns string literals for the enums defined in this file. These methods
+// should only be used for tracing and debugging.
+BASE_EXPORT const char* TaskPriorityToString(TaskPriority task_priority);
+BASE_EXPORT const char* TaskShutdownBehaviorToString(
+ TaskShutdownBehavior shutdown_behavior);
- // Executes one task at a time on a single thread in posting order.
- SINGLE_THREADED,
-};
-
-// Stream operators so TaskPriority and TaskShutdownBehavior can be used in
-// DCHECK statements.
+// Stream operators so that the enums defined in this file can be used in
+// DCHECK and EXPECT statements.
BASE_EXPORT std::ostream& operator<<(std::ostream& os,
const TaskPriority& task_priority);
-
BASE_EXPORT std::ostream& operator<<(
std::ostream& os,
const TaskShutdownBehavior& shutdown_behavior);
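A hedged sketch of the renamed builder-style traits API; the particular combination below is illustrative:

#include "base/task_scheduler/task_traits.h"

// Traits for a task that performs file I/O (MayBlock() replaces the old
// WithFileIO()) and must complete before shutdown.
base::TaskTraits MakeIoTraits() {
  return base::TaskTraits()
      .MayBlock()
      .WithPriority(base::TaskPriority::USER_VISIBLE)
      .WithShutdownBehavior(base::TaskShutdownBehavior::BLOCK_SHUTDOWN);
}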
diff --git a/base/task_scheduler/test_utils.h b/base/task_scheduler/test_utils.h
index bafd09aa2a..dbd1227f52 100644
--- a/base/task_scheduler/test_utils.h
+++ b/base/task_scheduler/test_utils.h
@@ -5,15 +5,16 @@
#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
-#include "base/logging.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
+namespace base {
+namespace internal {
+namespace test {
-// Death tests misbehave on Android.
-#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
-#else
-#define EXPECT_DCHECK_DEATH(statement, regex)
-#endif
+// An enumeration of possible task scheduler TaskRunner types. Used to
+// parametrize relevant task_scheduler tests.
+enum class ExecutionMode { PARALLEL, SEQUENCED, SINGLE_THREADED };
+
+} // namespace test
+} // namespace internal
+} // namespace base
#endif // BASE_TASK_SCHEDULER_TEST_UTILS_H_
diff --git a/base/template_util.h b/base/template_util.h
index 1bfc1ac814..42552107cf 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -23,6 +23,28 @@
#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
#endif
+// Some versions of libstdc++ have partial support for type_traits, but miss
+// a smaller subset while removing some of the older non-standard stuff. Assume
+// that all versions below 5.0 fall in this category, along with one 5.0
+// experimental release. Test for this by consulting compiler major version,
+// the only reliable option available, so theoretically this could fail should
+// you attempt to mix an earlier version of libstdc++ with >= GCC5. But
+// that's unlikely to work out, especially as GCC5 changed ABI.
+#define CR_GLIBCXX_5_0_0 20150123
+#if (defined(__GNUC__) && __GNUC__ < 5) || \
+ (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
+#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+#endif
+
+// This hacks around using gcc with libc++, which has some incompatibilities.
+// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that works with older
+// gcc versions.
+#if !defined(__clang__) && defined(_LIBCPP_VERSION)
+#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#endif
+
namespace base {
template <class T> struct is_non_const_reference : std::false_type {};
@@ -126,8 +148,53 @@ template <class T>
using is_trivially_destructible = std::is_trivially_destructible<T>;
#endif
+// is_trivially_copyable is especially hard to get right.
+// - Older versions of libstdc++ will fail to have it like they do for other
+// type traits. In this case we should provide it based on compiler
+// intrinsics. This is covered by the CR_USE_FALLBACKS_FOR_OLD_GLIBCXX define.
+// - An experimental release of gcc includes most of type_traits but misses
+// is_trivially_copyable, so we still have to avoid using libstdc++ in this
+// case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
+// - When compiling libc++ from before r239653, with a gcc compiler, the
+// std::is_trivially_copyable can fail. So we need to work around that by not
+// using the one in libc++ in this case. This is covered by the
+// CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
+// in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
+// - In both of the above cases we are using the gcc compiler. When defining
+// this ourselves on compiler intrinsics, the __is_trivially_copyable()
+// intrinsic is not available on gcc before version 5.1 (see the discussion in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
+// that version.
+// - When __is_trivially_copyable() is not available because we are on gcc older
+// than 5.1, we need to fall back to something, so we use __has_trivial_copy()
+// instead based on what was done one-off in bit_cast() previously.
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace and it works with gcc as needed.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX) || \
+ defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+ defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
+template <typename T>
+struct is_trivially_copyable {
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that does this for
+// us.
+#if _GNUC_VER >= 501
+ static constexpr bool value = __is_trivially_copyable(T);
+#else
+ static constexpr bool value = __has_trivial_copy(T);
+#endif
+};
+#else
+template <class T>
+using is_trivially_copyable = std::is_trivially_copyable<T>;
+#endif
+
} // namespace base
#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
#endif // BASE_TEMPLATE_UTIL_H_
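A hedged compile-time check of what the fallback guarantees regardless of which branch is selected:

#include <string>

#include "base/template_util.h"

static_assert(base::is_trivially_copyable<int>::value,
              "int is trivially copyable on every supported toolchain");
static_assert(!base::is_trivially_copyable<std::string>::value,
              "std::string has a non-trivial copy constructor");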
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 51863a2a0c..844707ebd1 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -22,11 +22,16 @@ static_library("test_config") {
]
}
-# GYP: //base/base.gyp:test_support_base
static_library("test_support") {
testonly = true
sources = [
"../trace_event/trace_config_memory_test_util.h",
+ "android/java_handler_thread_for_testing.cc",
+ "android/java_handler_thread_for_testing.h",
+ "android/test_system_message_handler_link_android.cc",
+ "android/test_system_message_handler_link_android.h",
+ "fuzzed_data_provider.cc",
+ "fuzzed_data_provider.h",
"gtest_util.cc",
"gtest_util.h",
"gtest_xml_unittest_result_printer.cc",
@@ -35,12 +40,15 @@ static_library("test_support") {
"gtest_xml_util.h",
"histogram_tester.cc",
"histogram_tester.h",
+ "icu_test_util.cc",
+ "icu_test_util.h",
"ios/wait_util.h",
"ios/wait_util.mm",
"launcher/test_result.cc",
"launcher/test_result.h",
"launcher/test_results_tracker.h",
"launcher/unit_test_launcher.h",
+ "mock_callback.h",
"mock_chrome_application_mac.h",
"mock_chrome_application_mac.mm",
"mock_devices_changed_observer.cc",
@@ -49,7 +57,9 @@ static_library("test_support") {
"mock_entropy_provider.h",
"mock_log.cc",
"mock_log.h",
+ "multiprocess_test.cc",
"multiprocess_test.h",
+ "multiprocess_test_android.cc",
"null_task_runner.cc",
"null_task_runner.h",
"opaque_ref_counted.cc",
@@ -62,12 +72,20 @@ static_library("test_support") {
"perf_time_logger.h",
"power_monitor_test_base.cc",
"power_monitor_test_base.h",
+ "scoped_async_task_scheduler.cc",
+ "scoped_async_task_scheduler.h",
"scoped_command_line.cc",
"scoped_command_line.h",
+ "scoped_feature_list.cc",
+ "scoped_feature_list.h",
"scoped_locale.cc",
"scoped_locale.h",
+ "scoped_mock_time_message_loop_task_runner.cc",
+ "scoped_mock_time_message_loop_task_runner.h",
"scoped_path_override.cc",
"scoped_path_override.h",
+ "scoped_task_scheduler.cc",
+ "scoped_task_scheduler.h",
"sequenced_task_runner_test_template.cc",
"sequenced_task_runner_test_template.h",
"sequenced_worker_pool_owner.cc",
@@ -133,8 +151,6 @@ static_library("test_support") {
"launcher/test_launcher_tracer.h",
"launcher/test_results_tracker.cc",
"launcher/unit_test_launcher.cc",
- "multiprocess_test.cc",
- "multiprocess_test_android.cc",
]
}
@@ -178,7 +194,11 @@ static_library("test_support") {
}
if (is_android) {
- deps += [ ":base_unittests_jni_headers" ]
+ deps += [
+ ":base_unittests_jni_headers",
+ ":test_support_jni_headers",
+ ]
+ public_deps += [ ":test_support_java" ]
}
if (is_nacl_nonsfi) {
@@ -191,6 +211,8 @@ static_library("test_support") {
sources -= [
"gtest_xml_util.cc",
"gtest_xml_util.h",
+ "icu_test_util.cc",
+ "icu_test_util.h",
"perf_test_suite.cc",
"perf_test_suite.h",
"scoped_path_override.cc",
@@ -255,6 +277,41 @@ static_library("run_all_unittests") {
]
}
+# These sources are linked into both the base_unittests binary and the test
+# shared library target below.
+source_set("native_library_test_utils") {
+ testonly = true
+ sources = [
+ "native_library_test_utils.cc",
+ "native_library_test_utils.h",
+ ]
+}
+
+# This shared library is dynamically loaded by NativeLibrary unittests.
+shared_library("test_shared_library") {
+ testonly = true
+ sources = [
+ "test_shared_library.cc",
+ ]
+
+ deps = [
+ ":native_library_test_utils",
+ ]
+}
+
+static_library("run_all_base_unittests") {
+ # Only targets in base should depend on this, targets outside base
+ # should depend on run_all_unittests above.
+ visibility = [ "//base/*" ]
+ testonly = true
+ sources = [
+ "run_all_base_unittests.cc",
+ ]
+ deps = [
+ ":test_support",
+ ]
+}
+
if (is_linux) {
shared_library("malloc_wrapper") {
testonly = true
@@ -272,8 +329,47 @@ if (is_android) {
generate_jni("base_unittests_jni_headers") {
sources = [
"android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "android/java/src/org/chromium/base/TestSystemMessageHandler.java",
"android/java/src/org/chromium/base/TestUiThread.java",
]
jni_package = "base"
}
+
+ generate_jni("test_support_jni_headers") {
+ sources = [
+ "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+ ]
+ jni_package = "base"
+ }
+
+ android_library("test_support_java") {
+ testonly = true
+ deps = [
+ "//base:base_java",
+ "//testing/android/native_test:native_main_runner_java",
+ "//third_party/android_tools:android_support_annotations_java",
+ "//third_party/jsr-305:jsr_305_javalib",
+ ]
+ srcjar_deps = [ ":test_support_java_aidl" ]
+ java_files = [
+ "android/java/src/org/chromium/base/FileDescriptorInfo.java",
+ "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService0.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService1.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService2.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService3.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService4.java",
+ ]
+ }
+
+ android_aidl("test_support_java_aidl") {
+ testonly = true
+ import_include = [ "android/java/src" ]
+ sources = [
+ "android/java/src/org/chromium/base/ITestClient.aidl",
+ ]
+ }
}
diff --git a/base/test/gtest_util.cc b/base/test/gtest_util.cc
new file mode 100644
index 0000000000..6da902da2e
--- /dev/null
+++ b/base/test/gtest_util.cc
@@ -0,0 +1,112 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_util.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/json/json_file_value_serializer.h"
+#include "base/strings/string_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TestIdentifier::TestIdentifier() {
+}
+
+TestIdentifier::TestIdentifier(const TestIdentifier& other) = default;
+
+std::string FormatFullTestName(const std::string& test_case_name,
+ const std::string& test_name) {
+ return test_case_name + "." + test_name;
+}
+
+std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name) {
+ std::string test_name_no_disabled(full_test_name);
+ ReplaceSubstringsAfterOffset(&test_name_no_disabled, 0, "DISABLED_", "");
+ return test_name_no_disabled;
+}
+
+std::vector<TestIdentifier> GetCompiledInTests() {
+ testing::UnitTest* const unit_test = testing::UnitTest::GetInstance();
+
+ std::vector<TestIdentifier> tests;
+ for (int i = 0; i < unit_test->total_test_case_count(); ++i) {
+ const testing::TestCase* test_case = unit_test->GetTestCase(i);
+ for (int j = 0; j < test_case->total_test_count(); ++j) {
+ const testing::TestInfo* test_info = test_case->GetTestInfo(j);
+ TestIdentifier test_data;
+ test_data.test_case_name = test_case->name();
+ test_data.test_name = test_info->name();
+ test_data.file = test_info->file();
+ test_data.line = test_info->line();
+ tests.push_back(test_data);
+ }
+ }
+ return tests;
+}
+
+bool WriteCompiledInTestsToFile(const FilePath& path) {
+ std::vector<TestIdentifier> tests(GetCompiledInTests());
+
+ ListValue root;
+ for (size_t i = 0; i < tests.size(); ++i) {
+ std::unique_ptr<DictionaryValue> test_info(new DictionaryValue);
+ test_info->SetString("test_case_name", tests[i].test_case_name);
+ test_info->SetString("test_name", tests[i].test_name);
+ test_info->SetString("file", tests[i].file);
+ test_info->SetInteger("line", tests[i].line);
+ root.Append(std::move(test_info));
+ }
+
+ JSONFileValueSerializer serializer(path);
+ return serializer.Serialize(root);
+}
+
+bool ReadTestNamesFromFile(const FilePath& path,
+ std::vector<TestIdentifier>* output) {
+ JSONFileValueDeserializer deserializer(path);
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<base::Value> value =
+ deserializer.Deserialize(&error_code, &error_message);
+ if (!value.get())
+ return false;
+
+ base::ListValue* tests = nullptr;
+ if (!value->GetAsList(&tests))
+ return false;
+
+ std::vector<base::TestIdentifier> result;
+ for (base::ListValue::iterator i = tests->begin(); i != tests->end(); ++i) {
+ base::DictionaryValue* test = nullptr;
+ if (!(*i)->GetAsDictionary(&test))
+ return false;
+
+ TestIdentifier test_data;
+
+ if (!test->GetStringASCII("test_case_name", &test_data.test_case_name))
+ return false;
+
+ if (!test->GetStringASCII("test_name", &test_data.test_name))
+ return false;
+
+ if (!test->GetStringASCII("file", &test_data.file))
+ return false;
+
+ if (!test->GetInteger("line", &test_data.line))
+ return false;
+
+ result.push_back(test_data);
+ }
+
+ output->swap(result);
+ return true;
+}
+
+} // namespace base
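A hedged round-trip sketch for the helpers above. WriteCompiledInTestsToFile() emits a JSON list of dictionaries with the keys "test_case_name", "test_name", "file", and "line", which ReadTestNamesFromFile() parses back; the path here is hypothetical:

  base::FilePath path(FILE_PATH_LITERAL("/tmp/compiled_tests.json"));
  CHECK(base::WriteCompiledInTestsToFile(path));

  std::vector<base::TestIdentifier> tests;
  if (base::ReadTestNamesFromFile(path, &tests)) {
    for (const base::TestIdentifier& t : tests)
      LOG(INFO) << base::FormatFullTestName(t.test_case_name, t.test_name);
  }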
diff --git a/base/test/gtest_util.h b/base/test/gtest_util.h
new file mode 100644
index 0000000000..8dfb1f236f
--- /dev/null
+++ b/base/test/gtest_util.h
@@ -0,0 +1,102 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_GTEST_UTIL_H_
+#define BASE_TEST_GTEST_UTIL_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// EXPECT/ASSERT_DCHECK_DEATH is intended to replace EXPECT/ASSERT_DEBUG_DEATH
+// when the death is expected to be caused by a DCHECK. Unlike
+// EXPECT/ASSERT_DEBUG_DEATH, however, it doesn't execute the statement in
+// non-DCHECK builds, as DCHECKs are intended to catch things that should
+// never happen; executing the statement would therefore result in undefined
+// behavior (|statement| is still compiled in unsupported configurations).
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// EXPECT/ASSERT_DCHECK_DEATH tests verify that a DCHECK is hit ("Check failed"
+// is part of the error message), but intentionally do not expose the gtest
+// death test's full |regex| parameter to avoid users having to verify the exact
+// syntax of the error message produced by the DCHECK.
+#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed")
+#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed")
+
+#else
+// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// Macro copied from gtest-death-test-internal.h as it's (1) internal for now
+// and (2) only defined if !GTEST_HAS_DEATH_TEST which is only a subset of the
+// conditions in which it's needed here.
+// TODO(gab): Expose macro in upstream gtest repo for consumers like us that
+// want more specific death tests and remove this hack.
+# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
+#define EXPECT_DCHECK_DEATH(statement) \
+ GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", )
+#define ASSERT_DCHECK_DEATH(statement) \
+ GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", return)
+
+#endif
+// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+namespace base {
+
+class FilePath;
+
+struct TestIdentifier {
+ TestIdentifier();
+ TestIdentifier(const TestIdentifier& other);
+
+ std::string test_case_name;
+ std::string test_name;
+ std::string file;
+ int line;
+};
+
+// Constructs a full test name given a test case name and a test name,
+// e.g. for test case "A" and test name "B" returns "A.B".
+std::string FormatFullTestName(const std::string& test_case_name,
+ const std::string& test_name);
+
+// Returns the full test name with the "DISABLED_" prefix stripped out.
+// e.g. for the full test names "A.DISABLED_B", "DISABLED_A.B", and
+// "DISABLED_A.DISABLED_B", returns "A.B".
+std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name);
+
+// Returns a vector of gtest-based tests compiled into the current
+// executable.
+std::vector<TestIdentifier> GetCompiledInTests();
+
+// Writes the list of gtest-based tests compiled into the current
+// executable as a JSON file. Returns true on success.
+bool WriteCompiledInTestsToFile(const FilePath& path) WARN_UNUSED_RESULT;
+
+// Reads the list of gtest-based tests from |path| into |output|.
+// Returns true on success.
+bool ReadTestNamesFromFile(
+ const FilePath& path,
+ std::vector<TestIdentifier>* output) WARN_UNUSED_RESULT;
+
+} // namespace base
+
+#endif // BASE_TEST_GTEST_UTIL_H_
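A usage sketch for the macros above; DoomedFunction is hypothetical:

  void DoomedFunction(int* p) {
    DCHECK(p);  // The DCHECK whose death the test expects.
    *p = 1;
  }

  TEST(DoomedFunctionTest, NullPointerDchecks) {
    // Dies with a "Check failed" message in DCHECK-enabled builds; where
    // death tests are unsupported (e.g. Android), this expands to a warning
    // and the statement is not executed.
    EXPECT_DCHECK_DEATH(DoomedFunction(nullptr));
  }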
diff --git a/base/test/mock_entropy_provider.cc b/base/test/mock_entropy_provider.cc
new file mode 100644
index 0000000000..5ebf19a7c7
--- /dev/null
+++ b/base/test/mock_entropy_provider.cc
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_entropy_provider.h"
+
+namespace base {
+
+MockEntropyProvider::MockEntropyProvider() : entropy_value_(0.5) {}
+MockEntropyProvider::MockEntropyProvider(double entropy_value)
+ : entropy_value_(entropy_value) {}
+MockEntropyProvider::~MockEntropyProvider() {}
+
+double MockEntropyProvider::GetEntropyForTrial(
+ const std::string& trial_name,
+ uint32_t randomization_seed) const {
+ return entropy_value_;
+}
+
+} // namespace base
diff --git a/base/test/mock_entropy_provider.h b/base/test/mock_entropy_provider.h
new file mode 100644
index 0000000000..ca2b4bc8fe
--- /dev/null
+++ b/base/test/mock_entropy_provider.h
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
+#define BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
+
+#include <stdint.h>
+
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
+class MockEntropyProvider : public base::FieldTrial::EntropyProvider {
+ public:
+ MockEntropyProvider();
+ explicit MockEntropyProvider(double entropy_value);
+ ~MockEntropyProvider() override;
+
+ // base::FieldTrial::EntropyProvider:
+ double GetEntropyForTrial(const std::string& trial_name,
+ uint32_t randomization_seed) const override;
+
+ private:
+ double entropy_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockEntropyProvider);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
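A minimal sketch of the fixed-entropy behavior; the trial name and seed are arbitrary:

  base::MockEntropyProvider provider(0.25);
  // Returns the configured value regardless of the trial name or seed.
  double entropy = provider.GetEntropyForTrial("MyTrial", 0u);
  DCHECK_EQ(0.25, entropy);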
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index de56e7f6be..fcc4d123ed 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -26,6 +26,19 @@ Process SpawnMultiProcessTestChild(
return LaunchProcess(command_line, options);
}
+
+bool WaitForMultiprocessTestChildExit(const Process& process,
+ TimeDelta timeout,
+ int* exit_code) {
+ return process.WaitForExitWithTimeout(timeout, exit_code);
+}
+
+bool TerminateMultiProcessTestChild(const Process& process,
+ int exit_code,
+ bool wait) {
+ return process.Terminate(exit_code, wait);
+}
+
#endif // !OS_ANDROID && !__ANDROID__ && !__ANDROID_HOST__
CommandLine GetMultiProcessTestChildBaseCommandLine() {
@@ -39,6 +52,8 @@ CommandLine GetMultiProcessTestChildBaseCommandLine() {
MultiProcessTest::MultiProcessTest() {
}
+// Don't compile on Arc++.
+#if 0
Process MultiProcessTest::SpawnChild(const std::string& procname) {
LaunchOptions options;
#if defined(OS_WIN)
@@ -52,6 +67,7 @@ Process MultiProcessTest::SpawnChildWithOptions(
const LaunchOptions& options) {
return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
}
+#endif
CommandLine MultiProcessTest::MakeCmdLine(const std::string& procname) {
CommandLine command_line = GetMultiProcessTestChildBaseCommandLine();
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index ae4c3eb2ef..bf9663759e 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -40,7 +40,7 @@ class CommandLine;
// // Do stuff involving |test_child_process| and the child process....
//
// int rv = -1;
-// ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+// ASSERT_TRUE(base::WaitForMultiprocessTestChildExit(test_child_process,
// TestTimeouts::action_timeout(), &rv));
// EXPECT_EQ(0, rv);
// }
@@ -51,6 +51,10 @@ class CommandLine;
// // Code here runs in a child process....
// return 0;
// }
+//
+// If you need to terminate the child process, use the
+// TerminateMultiProcessTestChild method to ensure that the test will work on
+// Android.
// Spawns a child process and executes the function |procname| declared using
// |MULTIPROCESS_TEST_MAIN()| or |MULTIPROCESS_TEST_MAIN_WITH_SETUP()|.
@@ -66,24 +70,17 @@ Process SpawnMultiProcessTestChild(
// may add any flags needed for your child process.
CommandLine GetMultiProcessTestChildBaseCommandLine();
-#if defined(OS_ANDROID)
-
-// Enable the alternate test child implementation which support spawning a child
-// after threads have been created. If used, this MUST be the first line of
-// main(). The main function is passed in to avoid a link-time dependency in
-// component builds.
-void InitAndroidMultiProcessTestHelper(int (*main)(int, char**));
-
-// Returns true if the current process is a test child.
-bool AndroidIsChildProcess();
-
-// Wait for a test child to exit if the alternate test child implementation is
-// being used.
-bool AndroidWaitForChildExitWithTimeout(
- const Process& process, TimeDelta timeout, int* exit_code)
- WARN_UNUSED_RESULT;
-
-#endif // defined(OS_ANDROID)
+// Waits for the child process to exit. Returns true if the process exited
+// within |timeout|, and sets |exit_code| if it is non-null.
+bool WaitForMultiprocessTestChildExit(const Process& process,
+ TimeDelta timeout,
+ int* exit_code);
+
+// Terminates |process| with |exit_code|. If |wait| is true, this call blocks
+// until the process actually terminates.
+bool TerminateMultiProcessTestChild(const Process& process,
+ int exit_code,
+ bool wait);
// MultiProcessTest ------------------------------------------------------------
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index f58b452d1c..c74f013da1 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -4,451 +4,87 @@
#include "base/test/multiprocess_test.h"
-#include <errno.h>
#include <string.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <memory>
-#include <utility>
#include <vector>
+#include "base/android/context_utils.h"
+#include "base/android/jni_android.h"
+#include "base/android/jni_array.h"
+#include "base/android/scoped_java_ref.h"
#include "base/base_switches.h"
#include "base/command_line.h"
-#include "base/containers/hash_tables.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/macros.h"
-#include "base/pickle.h"
-#include "base/posix/global_descriptors.h"
-#include "base/posix/unix_domain_socket_linux.h"
-#include "testing/multiprocess_func_list.h"
+#include "jni/MainReturnCodeResult_jni.h"
+#include "jni/MultiprocessTestClientLauncher_jni.h"
namespace base {
-namespace {
-
-const int kMaxMessageSize = 1024 * 1024;
-const int kFragmentSize = 4096;
-
-// Message sent between parent process and helper child process.
-enum class MessageType : uint32_t {
- START_REQUEST,
- START_RESPONSE,
- WAIT_REQUEST,
- WAIT_RESPONSE,
-};
-
-struct MessageHeader {
- uint32_t size;
- MessageType type;
-};
-
-struct StartProcessRequest {
- MessageHeader header =
- {sizeof(StartProcessRequest), MessageType::START_REQUEST};
-
- uint32_t num_args = 0;
- uint32_t num_fds = 0;
-};
-
-struct StartProcessResponse {
- MessageHeader header =
- {sizeof(StartProcessResponse), MessageType::START_RESPONSE};
-
- pid_t child_pid;
-};
-
-struct WaitProcessRequest {
- MessageHeader header =
- {sizeof(WaitProcessRequest), MessageType::WAIT_REQUEST};
-
- pid_t pid;
- uint64_t timeout_ms;
-};
-
-struct WaitProcessResponse {
- MessageHeader header =
- {sizeof(WaitProcessResponse), MessageType::WAIT_RESPONSE};
-
- bool success = false;
- int32_t exit_code = 0;
-};
-
-// Helper class that implements an alternate test child launcher for
-// multi-process tests. The default implementation doesn't work if the child is
-// launched after starting threads. However, for some tests (i.e. Mojo), this
-// is necessary. This implementation works around that issue by forking a helper
-// process very early in main(), before any real work is done. Then, when a
-// child needs to be spawned, a message is sent to that helper process, which
-// then forks and returns the result to the parent. The forked child then calls
-// main() and things look as though a brand new process has been fork/exec'd.
-class LaunchHelper {
- public:
- using MainFunction = int (*)(int, char**);
-
- LaunchHelper() {}
-
- // Initialise the alternate test child implementation.
- void Init(MainFunction main);
-
- // Starts a child test helper process.
- Process StartChildTestHelper(const std::string& procname,
- const CommandLine& base_command_line,
- const LaunchOptions& options);
-
- // Waits for a child test helper process.
- bool WaitForChildExitWithTimeout(const Process& process, TimeDelta timeout,
- int* exit_code);
-
- bool IsReady() const { return child_fd_ != -1; }
- bool IsChild() const { return is_child_; }
-
- private:
- // Wrappers around sendmsg/recvmsg that supports message fragmentation.
- void Send(int fd, const MessageHeader* msg, const std::vector<int>& fds);
- ssize_t Recv(int fd, void* buf, std::vector<ScopedFD>* fds);
-
- // Parent process implementation.
- void DoParent(int fd);
- // Helper process implementation.
- void DoHelper(int fd);
-
- void StartProcessInHelper(const StartProcessRequest* request,
- std::vector<ScopedFD> fds);
- void WaitForChildInHelper(const WaitProcessRequest* request);
-
- bool is_child_ = false;
-
- // Parent vars.
- int child_fd_ = -1;
-
- // Helper vars.
- int parent_fd_ = -1;
- MainFunction main_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(LaunchHelper);
-};
-
-void LaunchHelper::Init(MainFunction main) {
- main_ = main;
-
- // Create a communication channel between the parent and child launch helper.
- // fd[0] belongs to the parent, fd[1] belongs to the child.
- int fds[2] = {-1, -1};
- int rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
- PCHECK(rv == 0);
- CHECK_NE(-1, fds[0]);
- CHECK_NE(-1, fds[1]);
-
- pid_t pid = fork();
- PCHECK(pid >= 0) << "Fork failed";
- if (pid) {
- // Parent.
- rv = close(fds[1]);
- PCHECK(rv == 0);
- DoParent(fds[0]);
- } else {
- // Helper.
- rv = close(fds[0]);
- PCHECK(rv == 0);
- DoHelper(fds[1]);
- NOTREACHED();
- _exit(0);
- }
-}
-
-void LaunchHelper::Send(
- int fd, const MessageHeader* msg, const std::vector<int>& fds) {
- uint32_t bytes_remaining = msg->size;
- const char* buf = reinterpret_cast<const char*>(msg);
- while (bytes_remaining) {
- size_t send_size =
- (bytes_remaining > kFragmentSize) ? kFragmentSize : bytes_remaining;
- bool success = UnixDomainSocket::SendMsg(
- fd, buf, send_size,
- (bytes_remaining == msg->size) ? fds : std::vector<int>());
- CHECK(success);
- bytes_remaining -= send_size;
- buf += send_size;
- }
-}
-
-ssize_t LaunchHelper::Recv(int fd, void* buf, std::vector<ScopedFD>* fds) {
- ssize_t size = UnixDomainSocket::RecvMsg(fd, buf, kFragmentSize, fds);
- if (size <= 0)
- return size;
-
- const MessageHeader* header = reinterpret_cast<const MessageHeader*>(buf);
- CHECK(header->size < kMaxMessageSize);
- uint32_t bytes_remaining = header->size - size;
- char* buffer = reinterpret_cast<char*>(buf);
- buffer += size;
- while (bytes_remaining) {
- std::vector<ScopedFD> dummy_fds;
- size = UnixDomainSocket::RecvMsg(fd, buffer, kFragmentSize, &dummy_fds);
- if (size <= 0)
- return size;
-
- CHECK(dummy_fds.empty());
- CHECK(size == kFragmentSize ||
- static_cast<size_t>(size) == bytes_remaining);
- bytes_remaining -= size;
- buffer += size;
- }
- return header->size;
-}
-
-void LaunchHelper::DoParent(int fd) {
- child_fd_ = fd;
-}
-
-void LaunchHelper::DoHelper(int fd) {
- parent_fd_ = fd;
- is_child_ = true;
- std::unique_ptr<char[]> buf(new char[kMaxMessageSize]);
- while (true) {
- // Wait for a message from the parent.
- std::vector<ScopedFD> fds;
- ssize_t size = Recv(parent_fd_, buf.get(), &fds);
- if (size == 0 || (size < 0 && errno == ECONNRESET)) {
- _exit(0);
- }
- PCHECK(size > 0);
-
- const MessageHeader* header =
- reinterpret_cast<const MessageHeader*>(buf.get());
- CHECK_EQ(static_cast<ssize_t>(header->size), size);
- switch (header->type) {
- case MessageType::START_REQUEST:
- StartProcessInHelper(
- reinterpret_cast<const StartProcessRequest*>(buf.get()),
- std::move(fds));
- break;
- case MessageType::WAIT_REQUEST:
- WaitForChildInHelper(
- reinterpret_cast<const WaitProcessRequest*>(buf.get()));
- break;
- default:
- LOG(FATAL) << "Unsupported message type: "
- << static_cast<uint32_t>(header->type);
- }
- }
-}
-
-void LaunchHelper::StartProcessInHelper(const StartProcessRequest* request,
- std::vector<ScopedFD> fds) {
- pid_t pid = fork();
- PCHECK(pid >= 0) << "Fork failed";
- if (pid) {
- // Helper.
- StartProcessResponse resp;
- resp.child_pid = pid;
- Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
- std::vector<int>());
- } else {
- // Child.
- PCHECK(close(parent_fd_) == 0);
- parent_fd_ = -1;
- CommandLine::Reset();
-
- Pickle serialised_extra(reinterpret_cast<const char*>(request + 1),
- request->header.size - sizeof(StartProcessRequest));
- PickleIterator iter(serialised_extra);
- std::vector<std::string> args;
- for (size_t i = 0; i < request->num_args; i++) {
- std::string arg;
- CHECK(iter.ReadString(&arg));
- args.push_back(std::move(arg));
- }
-
- CHECK_EQ(request->num_fds, fds.size());
- for (size_t i = 0; i < request->num_fds; i++) {
- int new_fd;
- CHECK(iter.ReadInt(&new_fd));
- int old_fd = fds[i].release();
- if (new_fd != old_fd) {
- if (dup2(old_fd, new_fd) < 0) {
- PLOG(FATAL) << "dup2";
- }
- PCHECK(close(old_fd) == 0);
- }
- }
+// A very basic implementation for Android. On Android tests can run in an APK
+// and we don't have an executable to exec*. This implementation does the bare
+// minimum to execute the method specified by procname (in the child process).
+// - All options except |fds_to_remap| are ignored.
+//
+// NOTE: This MUST NOT run on the main thread of the NativeTest application.
+Process SpawnMultiProcessTestChild(const std::string& procname,
+ const CommandLine& base_command_line,
+ const LaunchOptions& options) {
+ JNIEnv* env = android::AttachCurrentThread();
+ DCHECK(env);
- // argv has argc+1 elements, where the last element is NULL.
- std::unique_ptr<char*[]> argv(new char*[args.size() + 1]);
- for (size_t i = 0; i < args.size(); i++) {
- argv[i] = const_cast<char*>(args[i].c_str());
+ std::vector<int> fd_keys;
+ std::vector<int> fd_fds;
+ if (options.fds_to_remap) {
+ for (auto& iter : *options.fds_to_remap) {
+ fd_keys.push_back(iter.second);
+ fd_fds.push_back(iter.first);
}
- argv[args.size()] = nullptr;
- _exit(main_(args.size(), argv.get()));
- NOTREACHED();
}
-}
-
-void LaunchHelper::WaitForChildInHelper(const WaitProcessRequest* request) {
- Process process(request->pid);
- TimeDelta timeout = TimeDelta::FromMilliseconds(request->timeout_ms);
- int exit_code = -1;
- bool success = process.WaitForExitWithTimeout(timeout, &exit_code);
-
- WaitProcessResponse resp;
- resp.exit_code = exit_code;
- resp.success = success;
- Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
- std::vector<int>());
-}
-Process LaunchHelper::StartChildTestHelper(const std::string& procname,
- const CommandLine& base_command_line,
- const LaunchOptions& options) {
+ android::ScopedJavaLocalRef<jobjectArray> fds =
+ android::Java_MultiprocessTestClientLauncher_makeFdInfoArray(
+ env, base::android::ToJavaIntArray(env, fd_keys),
+ base::android::ToJavaIntArray(env, fd_fds));
CommandLine command_line(base_command_line);
- if (!command_line.HasSwitch(switches::kTestChildProcess))
+ if (!command_line.HasSwitch(switches::kTestChildProcess)) {
command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
-
- StartProcessRequest request;
- Pickle serialised_extra;
- const CommandLine::StringVector& argv = command_line.argv();
- for (const auto& arg : argv)
- CHECK(serialised_extra.WriteString(arg));
- request.num_args = argv.size();
-
- std::vector<int> fds_to_send;
- if (options.fds_to_remap) {
- for (auto p : *options.fds_to_remap) {
- CHECK(serialised_extra.WriteInt(p.second));
- fds_to_send.push_back(p.first);
- }
- request.num_fds = options.fds_to_remap->size();
}
- size_t buf_size = sizeof(StartProcessRequest) + serialised_extra.size();
- request.header.size = buf_size;
- std::unique_ptr<char[]> buffer(new char[buf_size]);
- memcpy(buffer.get(), &request, sizeof(StartProcessRequest));
- memcpy(buffer.get() + sizeof(StartProcessRequest), serialised_extra.data(),
- serialised_extra.size());
-
- // Send start message.
- Send(child_fd_, reinterpret_cast<const MessageHeader*>(buffer.get()),
- fds_to_send);
-
- // Synchronously get response.
- StartProcessResponse response;
- std::vector<ScopedFD> recv_fds;
- ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
- PCHECK(resp_size == sizeof(StartProcessResponse));
-
- return Process(response.child_pid);
+ android::ScopedJavaLocalRef<jobjectArray> j_argv =
+ android::ToJavaArrayOfStrings(env, command_line.argv());
+ jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
+ env, android::GetApplicationContext(), j_argv, fds);
+ return Process(pid);
}
-bool LaunchHelper::WaitForChildExitWithTimeout(
- const Process& process, TimeDelta timeout, int* exit_code) {
-
- WaitProcessRequest request;
- request.pid = process.Handle();
- request.timeout_ms = timeout.InMilliseconds();
-
- Send(child_fd_, reinterpret_cast<const MessageHeader*>(&request),
- std::vector<int>());
-
- WaitProcessResponse response;
- std::vector<ScopedFD> recv_fds;
- ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
- PCHECK(resp_size == sizeof(WaitProcessResponse));
-
- if (!response.success)
+bool WaitForMultiprocessTestChildExit(const Process& process,
+ TimeDelta timeout,
+ int* exit_code) {
+ JNIEnv* env = android::AttachCurrentThread();
+ DCHECK(env);
+
+ base::android::ScopedJavaLocalRef<jobject> result_code =
+ android::Java_MultiprocessTestClientLauncher_waitForMainToReturn(
+ env, android::GetApplicationContext(), process.Pid(),
+ static_cast<int32_t>(timeout.InMilliseconds()));
+ if (result_code.is_null() ||
+ Java_MainReturnCodeResult_hasTimedOut(env, result_code)) {
return false;
-
- *exit_code = response.exit_code;
+ }
+ if (exit_code) {
+ *exit_code = Java_MainReturnCodeResult_getReturnCode(env, result_code);
+ }
return true;
}
-LazyInstance<LaunchHelper>::Leaky g_launch_helper;
-
-} // namespace
-
-void InitAndroidMultiProcessTestHelper(int (*main)(int, char**)) {
- DCHECK(main);
- // Don't allow child processes to themselves create new child processes.
- if (g_launch_helper.Get().IsChild())
- return;
- g_launch_helper.Get().Init(main);
-}
-
-bool AndroidIsChildProcess() {
- return g_launch_helper.Get().IsChild();
-}
-
-bool AndroidWaitForChildExitWithTimeout(
- const Process& process, TimeDelta timeout, int* exit_code) {
- CHECK(g_launch_helper.Get().IsReady());
- return g_launch_helper.Get().WaitForChildExitWithTimeout(
- process, timeout, exit_code);
-}
-
-// A very basic implementation for Android. On Android tests can run in an APK
-// and we don't have an executable to exec*. This implementation does the bare
-// minimum to execute the method specified by procname (in the child process).
-// - All options except |fds_to_remap| are ignored.
-Process SpawnMultiProcessTestChild(const std::string& procname,
- const CommandLine& base_command_line,
- const LaunchOptions& options) {
- if (g_launch_helper.Get().IsReady()) {
- return g_launch_helper.Get().StartChildTestHelper(
- procname, base_command_line, options);
- }
-
- // TODO(viettrungluu): The FD-remapping done below is wrong in the presence of
- // cycles (e.g., fd1 -> fd2, fd2 -> fd1). crbug.com/326576
- FileHandleMappingVector empty;
- const FileHandleMappingVector* fds_to_remap =
- options.fds_to_remap ? options.fds_to_remap : &empty;
-
- pid_t pid = fork();
-
- if (pid < 0) {
- PLOG(ERROR) << "fork";
- return Process();
- }
- if (pid > 0) {
- // Parent process.
- return Process(pid);
- }
- // Child process.
- base::hash_set<int> fds_to_keep_open;
- for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
- it != fds_to_remap->end(); ++it) {
- fds_to_keep_open.insert(it->first);
- }
- // Keep standard FDs (stdin, stdout, stderr, etc.) open since this
- // is not meant to spawn a daemon.
- int base = GlobalDescriptors::kBaseDescriptor;
- for (int fd = base; fd < sysconf(_SC_OPEN_MAX); ++fd) {
- if (fds_to_keep_open.find(fd) == fds_to_keep_open.end()) {
- close(fd);
- }
- }
- for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
- it != fds_to_remap->end(); ++it) {
- int old_fd = it->first;
- int new_fd = it->second;
- if (dup2(old_fd, new_fd) < 0) {
- PLOG(FATAL) << "dup2";
- }
- close(old_fd);
- }
- CommandLine::Reset();
- CommandLine::Init(0, nullptr);
- CommandLine* command_line = CommandLine::ForCurrentProcess();
- command_line->InitFromArgv(base_command_line.argv());
- if (!command_line->HasSwitch(switches::kTestChildProcess))
- command_line->AppendSwitchASCII(switches::kTestChildProcess, procname);
+bool TerminateMultiProcessTestChild(const Process& process,
+ int exit_code,
+ bool wait) {
+ JNIEnv* env = android::AttachCurrentThread();
+ DCHECK(env);
- _exit(multi_process_function_list::InvokeChildProcessTest(procname));
- return Process();
+ return android::Java_MultiprocessTestClientLauncher_terminate(
+ env, android::GetApplicationContext(), process.Pid(), exit_code, wait);
}
} // namespace base
diff --git a/base/test/opaque_ref_counted.cc b/base/test/opaque_ref_counted.cc
index ed6c36f1a2..36253e5ef9 100644
--- a/base/test/opaque_ref_counted.cc
+++ b/base/test/opaque_ref_counted.cc
@@ -11,17 +11,31 @@ namespace base {
class OpaqueRefCounted : public RefCounted<OpaqueRefCounted> {
public:
- OpaqueRefCounted() {}
+ OpaqueRefCounted() = default;
int Return42() { return 42; }
private:
- virtual ~OpaqueRefCounted() {}
+ friend class RefCounted<OpaqueRefCounted>;
+ ~OpaqueRefCounted() = default;
- friend RefCounted<OpaqueRefCounted>;
DISALLOW_COPY_AND_ASSIGN(OpaqueRefCounted);
};
+class OpaqueRefCountedThreadSafe
+ : public RefCounted<OpaqueRefCountedThreadSafe> {
+ public:
+ OpaqueRefCountedThreadSafe() = default;
+
+ int Return42() { return 42; }
+
+ private:
+ friend class RefCounted<OpaqueRefCountedThreadSafe>;
+ ~OpaqueRefCountedThreadSafe() = default;
+
+ DISALLOW_COPY_AND_ASSIGN(OpaqueRefCountedThreadSafe);
+};
+
scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted() {
return new OpaqueRefCounted();
}
@@ -30,6 +44,16 @@ void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p) {
EXPECT_EQ(42, p->Return42());
}
+scoped_refptr<OpaqueRefCountedThreadSafe> MakeOpaqueRefCountedThreadSafe() {
+ return new OpaqueRefCountedThreadSafe();
+}
+
+void TestOpaqueRefCountedThreadSafe(
+ scoped_refptr<OpaqueRefCountedThreadSafe> p) {
+ EXPECT_EQ(42, p->Return42());
+}
+
} // namespace base
template class scoped_refptr<base::OpaqueRefCounted>;
+template class scoped_refptr<base::OpaqueRefCountedThreadSafe>;
diff --git a/base/test/opaque_ref_counted.h b/base/test/opaque_ref_counted.h
index faf6a650fd..c0ddc87fe1 100644
--- a/base/test/opaque_ref_counted.h
+++ b/base/test/opaque_ref_counted.h
@@ -12,13 +12,18 @@ namespace base {
// OpaqueRefCounted is a test class for scoped_refptr to ensure it still works
// when the pointed-to type is opaque (i.e., incomplete).
class OpaqueRefCounted;
+class OpaqueRefCountedThreadSafe;
// Test functions that return and accept scoped_refptr<OpaqueRefCounted> values.
scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted();
void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p);
+scoped_refptr<OpaqueRefCountedThreadSafe> MakeOpaqueRefCountedThreadSafe();
+void TestOpaqueRefCountedThreadSafe(
+ scoped_refptr<OpaqueRefCountedThreadSafe> p);
} // namespace base
extern template class scoped_refptr<base::OpaqueRefCounted>;
+extern template class scoped_refptr<base::OpaqueRefCountedThreadSafe>;
#endif // BASE_TEST_OPAQUE_REF_COUNTED_H_
diff --git a/base/test/scoped_feature_list.cc b/base/test/scoped_feature_list.cc
new file mode 100644
index 0000000000..f0f3f4edfb
--- /dev/null
+++ b/base/test/scoped_feature_list.cc
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_feature_list.h"
+
+#include <string>
+
+namespace base {
+namespace test {
+
+namespace {
+
+static std::string GetFeatureString(
+ const std::initializer_list<base::Feature>& features) {
+ std::string output;
+ for (const base::Feature& feature : features) {
+ if (!output.empty())
+ output += ",";
+ output += feature.name;
+ }
+ return output;
+}
+
+} // namespace
+
+ScopedFeatureList::ScopedFeatureList() {}
+
+ScopedFeatureList::~ScopedFeatureList() {
+ if (original_feature_list_) {
+ base::FeatureList::ClearInstanceForTesting();
+ base::FeatureList::RestoreInstanceForTesting(
+ std::move(original_feature_list_));
+ }
+}
+
+void ScopedFeatureList::Init() {
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ feature_list->InitializeFromCommandLine(std::string(), std::string());
+ InitWithFeatureList(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitWithFeatures(
+ const std::initializer_list<base::Feature>& enabled_features,
+ const std::initializer_list<base::Feature>& disabled_features) {
+ InitFromCommandLine(GetFeatureString(enabled_features),
+ GetFeatureString(disabled_features));
+}
+
+void ScopedFeatureList::InitWithFeatureList(
+ std::unique_ptr<FeatureList> feature_list) {
+ DCHECK(!original_feature_list_);
+ original_feature_list_ = base::FeatureList::ClearInstanceForTesting();
+ base::FeatureList::SetInstance(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitFromCommandLine(
+ const std::string& enable_features,
+ const std::string& disable_features) {
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ feature_list->InitializeFromCommandLine(enable_features, disable_features);
+ InitWithFeatureList(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitAndEnableFeature(const base::Feature& feature) {
+ InitFromCommandLine(feature.name, std::string());
+}
+
+void ScopedFeatureList::InitAndDisableFeature(const base::Feature& feature) {
+ InitFromCommandLine(std::string(), feature.name);
+}
+
+} // namespace test
+} // namespace base
diff --git a/base/test/scoped_feature_list.h b/base/test/scoped_feature_list.h
new file mode 100644
index 0000000000..99e07f5374
--- /dev/null
+++ b/base/test/scoped_feature_list.h
@@ -0,0 +1,59 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_FEATURE_LIST_H_
+#define BASE_TEST_SCOPED_FEATURE_LIST_H_
+
+#include <initializer_list>
+
+#include "base/feature_list.h"
+
+namespace base {
+namespace test {
+
+// ScopedFeatureList resets the global FeatureList instance to a new empty
+// instance and restores the original instance upon destruction.
+// Note: Re-using the same object is not allowed. To reset the feature
+// list and initialize it anew, destroy an existing scoped list and init
+// a new one.
+class ScopedFeatureList final {
+ public:
+ ScopedFeatureList();
+ ~ScopedFeatureList();
+
+ // Initializes and registers a FeatureList instance with no overrides.
+ void Init();
+
+ // Initializes and registers the given FeatureList instance.
+ void InitWithFeatureList(std::unique_ptr<FeatureList> feature_list);
+
+ // Initializes and registers a FeatureList instance with the given enabled
+ // and disabled features.
+ void InitWithFeatures(
+ const std::initializer_list<base::Feature>& enabled_features,
+ const std::initializer_list<base::Feature>& disabled_features);
+
+ // Initializes and registers a FeatureList instance with the given
+ // enabled and disabled features (comma-separated names).
+ void InitFromCommandLine(const std::string& enable_features,
+ const std::string& disable_features);
+
+ // Initializes and registers a FeatureList instance enabling a single
+ // feature.
+ void InitAndEnableFeature(const base::Feature& feature);
+
+ // Initializes and registers a FeatureList instance disabling a single
+ // feature.
+ void InitAndDisableFeature(const base::Feature& feature);
+
+ private:
+ std::unique_ptr<FeatureList> original_feature_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedFeatureList);
+};
+
+} // namespace test
+} // namespace base
+
+#endif // BASE_TEST_SCOPED_FEATURE_LIST_H_
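A typical test usage, assuming a hypothetical feature declaration:

  const base::Feature kMyFeature{"MyFeature",
                                 base::FEATURE_DISABLED_BY_DEFAULT};

  TEST(MyFeatureTest, EnabledViaScopedList) {
    base::test::ScopedFeatureList feature_list;
    feature_list.InitAndEnableFeature(kMyFeature);
    EXPECT_TRUE(base::FeatureList::IsEnabled(kMyFeature));
    // The previous FeatureList instance is restored when |feature_list|
    // goes out of scope.
  }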
diff --git a/base/test/sequenced_worker_pool_owner.cc b/base/test/sequenced_worker_pool_owner.cc
index 8781495d7d..324d071352 100644
--- a/base/test/sequenced_worker_pool_owner.cc
+++ b/base/test/sequenced_worker_pool_owner.cc
@@ -14,7 +14,10 @@ SequencedWorkerPoolOwner::SequencedWorkerPoolOwner(
size_t max_threads,
const std::string& thread_name_prefix)
: constructor_message_loop_(MessageLoop::current()),
- pool_(new SequencedWorkerPool(max_threads, thread_name_prefix, this)),
+ pool_(new SequencedWorkerPool(max_threads,
+ thread_name_prefix,
+ TaskPriority::USER_VISIBLE,
+ this)),
has_work_call_count_(0) {}
SequencedWorkerPoolOwner::~SequencedWorkerPoolOwner() {
@@ -25,7 +28,8 @@ SequencedWorkerPoolOwner::~SequencedWorkerPoolOwner() {
exit_loop_.Run();
}
-const scoped_refptr<SequencedWorkerPool>& SequencedWorkerPoolOwner::pool() {
+const scoped_refptr<SequencedWorkerPool>& SequencedWorkerPoolOwner::pool()
+ const {
return pool_;
}
diff --git a/base/test/sequenced_worker_pool_owner.h b/base/test/sequenced_worker_pool_owner.h
index 05fc7505fb..28a6cf070a 100644
--- a/base/test/sequenced_worker_pool_owner.h
+++ b/base/test/sequenced_worker_pool_owner.h
@@ -39,7 +39,7 @@ class SequencedWorkerPoolOwner : public SequencedWorkerPool::TestingObserver {
~SequencedWorkerPoolOwner() override;
// Don't change the returned pool's testing observer.
- const scoped_refptr<SequencedWorkerPool>& pool();
+ const scoped_refptr<SequencedWorkerPool>& pool() const;
// The given callback will be called on WillWaitForShutdown().
void SetWillWaitForShutdownCallback(const Closure& callback);
diff --git a/base/test/test_file_util.h b/base/test/test_file_util.h
index 7042e48484..d9172d757e 100644
--- a/base/test/test_file_util.h
+++ b/base/test/test_file_util.h
@@ -20,6 +20,10 @@
#include <jni.h>
#endif
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
namespace base {
class FilePath;
@@ -40,14 +44,12 @@ bool DieFileDie(const FilePath& file, bool recurse);
bool EvictFileFromSystemCache(const FilePath& file);
#if defined(OS_WIN)
-// Returns true if the volume supports Alternate Data Streams.
-bool VolumeSupportsADS(const FilePath& path);
-
-// Returns true if the ZoneIdentifier is correctly set to "Internet" (3).
-// Note that this function must be called from the same process as
-// the one that set the zone identifier. I.e. don't use it in UI/automation
-// based tests.
-bool HasInternetZoneIdentifier(const FilePath& full_path);
+// Deny |permission| on the file |path| for the current user. |permission| is an
+// ACCESS_MASK structure which is defined in
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374892.aspx
+// Refer to https://msdn.microsoft.com/en-us/library/aa822867.aspx for a list of
+// possible values.
+bool DenyFilePermission(const FilePath& path, DWORD permission);
#endif // defined(OS_WIN)
// For testing, make the file unreadable or unwritable.
@@ -70,9 +72,6 @@ class FilePermissionRestorer {
};
#if defined(OS_ANDROID)
-// Register the ContentUriTestUrils JNI bindings.
-bool RegisterContentUriTestUtils(JNIEnv* env);
-
// Insert an image file into the MediaStore, and retrieve the content URI for
// testing purpose.
FilePath InsertImageIntoMediaStore(const FilePath& path);
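To illustrate DenyFilePermission() above, a Windows-only sketch; the path is hypothetical and GENERIC_READ is one of the standard ACCESS_MASK values:

  #if defined(OS_WIN)
  base::FilePath file(FILE_PATH_LITERAL("C:\\tmp\\locked.txt"));
  // Revokes read access for the current user. FilePermissionRestorer (above)
  // can be used to restore the original permissions afterwards.
  bool denied = base::DenyFilePermission(file, GENERIC_READ);
  DCHECK(denied);
  #endif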
diff --git a/base/test/test_io_thread.cc b/base/test/test_io_thread.cc
index 1fa041251c..ce4a8d10de 100644
--- a/base/test/test_io_thread.cc
+++ b/base/test/test_io_thread.cc
@@ -4,19 +4,7 @@
#include "base/test/test_io_thread.h"
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/synchronization/waitable_event.h"
-
-namespace {
-
-void PostTaskAndWaitHelper(base::WaitableEvent* event,
- const base::Closure& task) {
- task.Run();
- event->Signal();
-}
-
-} // namespace
+#include "base/logging.h"
namespace base {
@@ -54,13 +42,4 @@ void TestIOThread::PostTask(const tracked_objects::Location& from_here,
task_runner()->PostTask(from_here, task);
}
-void TestIOThread::PostTaskAndWait(const tracked_objects::Location& from_here,
- const base::Closure& task) {
- base::WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner()->PostTask(from_here,
- base::Bind(&PostTaskAndWaitHelper, &event, task));
- event.Wait();
-}
-
} // namespace base
diff --git a/base/test/test_io_thread.h b/base/test/test_io_thread.h
index c2ed1878d1..5d3885e81c 100644
--- a/base/test/test_io_thread.h
+++ b/base/test/test_io_thread.h
@@ -18,6 +18,13 @@ namespace base {
// Creates and runs an IO thread with a MessageLoop, making the MessageLoop
// accessible from its client. It also provides some idiomatic API like
// PostTask().
+//
+// This API is not thread-safe:
+// - Start()/Stop() should only be called from the main (creation) thread.
+// - PostTask()/message_loop()/task_runner() are also safe to call from the
+// underlying thread itself (to post tasks from other threads, get the
+// task_runner() from the main thread first; it is then safe to pass _it_
+// around).
class TestIOThread {
public:
enum Mode { kAutoStart, kManualStart };
@@ -25,19 +32,14 @@ class TestIOThread {
// Stops the I/O thread if necessary.
~TestIOThread();
- // |Start()|/|Stop()| should only be called from the main (creation) thread.
- // After |Stop()|, |Start()| may be called again to start a new I/O thread.
- // |Stop()| may be called even when the I/O thread is not started.
+ // After Stop(), Start() may be called again to start a new I/O thread.
+ // Stop() may be called even when the I/O thread is not started.
void Start();
void Stop();
// Post |task| to the IO thread.
void PostTask(const tracked_objects::Location& from_here,
const base::Closure& task);
- // Posts |task| to the IO-thread with an WaitableEvent associated blocks on
- // it until the posted |task| is executed, then returns.
- void PostTaskAndWait(const tracked_objects::Location& from_here,
- const base::Closure& task);
base::MessageLoopForIO* message_loop() {
return static_cast<base::MessageLoopForIO*>(io_thread_.message_loop());
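A small usage sketch under the thread-safety notes above; DoIOWork is hypothetical:

  void DoIOWork();  // Hypothetical task body.

  base::TestIOThread io_thread(base::TestIOThread::kAutoStart);
  // Both calls below are made from the main (creation) thread.
  io_thread.PostTask(FROM_HERE, base::Bind(&DoIOWork));
  io_thread.Stop();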
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
new file mode 100644
index 0000000000..f4bd7244b4
--- /dev/null
+++ b/base/test/test_mock_time_task_runner.cc
@@ -0,0 +1,321 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_mock_time_task_runner.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/clock.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+namespace {
+
+// MockTickClock --------------------------------------------------------------
+
+// TickClock that always returns the then-current mock time ticks of
+// |task_runner| as the current time ticks.
+class MockTickClock : public TickClock {
+ public:
+ explicit MockTickClock(
+ scoped_refptr<const TestMockTimeTaskRunner> task_runner);
+
+ // TickClock:
+ TimeTicks NowTicks() override;
+
+ private:
+ scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTickClock);
+};
+
+MockTickClock::MockTickClock(
+ scoped_refptr<const TestMockTimeTaskRunner> task_runner)
+ : task_runner_(task_runner) {
+}
+
+TimeTicks MockTickClock::NowTicks() {
+ return task_runner_->NowTicks();
+}
+
+// MockClock ------------------------------------------------------------------
+
+// Clock that always returns the then-current mock time of |task_runner| as the
+// current time.
+class MockClock : public Clock {
+ public:
+ explicit MockClock(scoped_refptr<const TestMockTimeTaskRunner> task_runner);
+
+ // Clock:
+ Time Now() override;
+
+ private:
+ scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockClock);
+};
+
+MockClock::MockClock(scoped_refptr<const TestMockTimeTaskRunner> task_runner)
+ : task_runner_(task_runner) {
+}
+
+Time MockClock::Now() {
+ return task_runner_->Now();
+}
+
+} // namespace
+
+// TestMockTimeTaskRunner::TestOrderedPendingTask -----------------------------
+
+// Subclass of TestPendingTask which has a strictly monotonically increasing ID
+// for every task, so that tasks posted with the same 'time to run' can be run
+// in the order of being posted.
+struct TestMockTimeTaskRunner::TestOrderedPendingTask
+ : public base::TestPendingTask {
+ TestOrderedPendingTask();
+ TestOrderedPendingTask(const tracked_objects::Location& location,
+ const Closure& task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ size_t ordinal,
+ TestNestability nestability);
+ TestOrderedPendingTask(TestOrderedPendingTask&&);
+ ~TestOrderedPendingTask();
+
+ TestOrderedPendingTask& operator=(TestOrderedPendingTask&&);
+
+ size_t ordinal;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestOrderedPendingTask);
+};
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask()
+ : ordinal(0) {
+}
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
+ TestOrderedPendingTask&&) = default;
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
+ const tracked_objects::Location& location,
+ const Closure& task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ size_t ordinal,
+ TestNestability nestability)
+ : base::TestPendingTask(location, task, post_time, delay, nestability),
+ ordinal(ordinal) {}
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() {
+}
+
+TestMockTimeTaskRunner::TestOrderedPendingTask&
+TestMockTimeTaskRunner::TestOrderedPendingTask::operator=(
+ TestOrderedPendingTask&&) = default;
+
+// TestMockTimeTaskRunner -----------------------------------------------------
+
+// TODO(gab): This should also set the SequenceToken for the current thread.
+// Ref. TestMockTimeTaskRunner::RunsTasksOnCurrentThread().
+TestMockTimeTaskRunner::ScopedContext::ScopedContext(
+ scoped_refptr<TestMockTimeTaskRunner> scope)
+ : on_destroy_(ThreadTaskRunnerHandle::OverrideForTesting(scope)) {
+ scope->RunUntilIdle();
+}
+
+TestMockTimeTaskRunner::ScopedContext::~ScopedContext() = default;
+
+bool TestMockTimeTaskRunner::TemporalOrder::operator()(
+ const TestOrderedPendingTask& first_task,
+ const TestOrderedPendingTask& second_task) const {
+ if (first_task.GetTimeToRun() == second_task.GetTimeToRun())
+ return first_task.ordinal > second_task.ordinal;
+ return first_task.GetTimeToRun() > second_task.GetTimeToRun();
+}
+
+TestMockTimeTaskRunner::TestMockTimeTaskRunner()
+ : now_(Time::UnixEpoch()), next_task_ordinal_(0) {
+}
+
+TestMockTimeTaskRunner::TestMockTimeTaskRunner(Time start_time,
+ TimeTicks start_ticks)
+ : now_(start_time), now_ticks_(start_ticks), next_task_ordinal_(0) {}
+
+TestMockTimeTaskRunner::~TestMockTimeTaskRunner() {
+}
+
+void TestMockTimeTaskRunner::FastForwardBy(TimeDelta delta) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_GE(delta, TimeDelta());
+
+ const TimeTicks original_now_ticks = now_ticks_;
+ ProcessAllTasksNoLaterThan(delta);
+ ForwardClocksUntilTickTime(original_now_ticks + delta);
+}
+
+void TestMockTimeTaskRunner::RunUntilIdle() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ProcessAllTasksNoLaterThan(TimeDelta());
+}
+
+void TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ProcessAllTasksNoLaterThan(TimeDelta::Max());
+}
+
+void TestMockTimeTaskRunner::ClearPendingTasks() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock scoped_lock(tasks_lock_);
+ while (!tasks_.empty())
+ tasks_.pop();
+}
+
+Time TestMockTimeTaskRunner::Now() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return now_;
+}
+
+TimeTicks TestMockTimeTaskRunner::NowTicks() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return now_ticks_;
+}
+
+std::unique_ptr<Clock> TestMockTimeTaskRunner::GetMockClock() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return MakeUnique<MockClock>(this);
+}
+
+std::unique_ptr<TickClock> TestMockTimeTaskRunner::GetMockTickClock() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return MakeUnique<MockTickClock>(this);
+}
+
+std::deque<TestPendingTask> TestMockTimeTaskRunner::TakePendingTasks() {
+ AutoLock scoped_lock(tasks_lock_);
+ std::deque<TestPendingTask> tasks;
+ while (!tasks_.empty()) {
+ // It's safe to remove const and consume |task| here, since |task| is not
+ // used for ordering the item.
+ tasks.push_back(
+ std::move(const_cast<TestOrderedPendingTask&>(tasks_.top())));
+ tasks_.pop();
+ }
+ return tasks;
+}
+
+bool TestMockTimeTaskRunner::HasPendingTask() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return !tasks_.empty();
+}
+
+size_t TestMockTimeTaskRunner::GetPendingTaskCount() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return tasks_.size();
+}
+
+TimeDelta TestMockTimeTaskRunner::NextPendingTaskDelay() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return tasks_.empty() ? TimeDelta::Max()
+ : tasks_.top().GetTimeToRun() - now_ticks_;
+}
+
+// TODO(gab): Combine |thread_checker_| with a SequenceToken to differentiate
+// between tasks running in the scope of this TestMockTimeTaskRunner and other
+// task runners sharing this thread. http://crbug.com/631186
+bool TestMockTimeTaskRunner::RunsTasksOnCurrentThread() const {
+ return thread_checker_.CalledOnValidThread();
+}
+
+bool TestMockTimeTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
+ AutoLock scoped_lock(tasks_lock_);
+ tasks_.push(TestOrderedPendingTask(from_here, task, now_ticks_, delay,
+ next_task_ordinal_++,
+ TestPendingTask::NESTABLE));
+ return true;
+}
+
+bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
+ return PostDelayedTask(from_here, task, delay);
+}
+
+bool TestMockTimeTaskRunner::IsElapsingStopped() {
+ return false;
+}
+
+void TestMockTimeTaskRunner::OnBeforeSelectingTask() {
+ // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::OnAfterTimePassed() {
+ // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::OnAfterTaskRun() {
+ // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::ProcessAllTasksNoLaterThan(TimeDelta max_delta) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_GE(max_delta, TimeDelta());
+
+ // Multiple test task runners can share the same thread for determinism in
+ // unit tests. Make sure this TestMockTimeTaskRunner's tasks run in its scope.
+ ScopedClosureRunner undo_override;
+ if (!ThreadTaskRunnerHandle::IsSet() ||
+ ThreadTaskRunnerHandle::Get() != this) {
+ undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
+ }
+
+ const TimeTicks original_now_ticks = now_ticks_;
+ while (!IsElapsingStopped()) {
+ OnBeforeSelectingTask();
+ TestPendingTask task_info;
+ if (!DequeueNextTask(original_now_ticks, max_delta, &task_info))
+ break;
+ // If tasks were posted with a negative delay, task_info.GetTimeToRun() will
+ // be less than |now_ticks_|. ForwardClocksUntilTickTime() takes care of not
+ // moving the clock backwards in this case.
+ ForwardClocksUntilTickTime(task_info.GetTimeToRun());
+ std::move(task_info.task).Run();
+ OnAfterTaskRun();
+ }
+}
+
+void TestMockTimeTaskRunner::ForwardClocksUntilTickTime(TimeTicks later_ticks) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (later_ticks <= now_ticks_)
+ return;
+
+ now_ += later_ticks - now_ticks_;
+ now_ticks_ = later_ticks;
+ OnAfterTimePassed();
+}
+
+bool TestMockTimeTaskRunner::DequeueNextTask(const TimeTicks& reference,
+ const TimeDelta& max_delta,
+ TestPendingTask* next_task) {
+ AutoLock scoped_lock(tasks_lock_);
+ if (!tasks_.empty() &&
+ (tasks_.top().GetTimeToRun() - reference) <= max_delta) {
+ // It's safe to remove const and consume |task| here, since |task| is not
+ // used for ordering the item.
+ *next_task = std::move(const_cast<TestOrderedPendingTask&>(tasks_.top()));
+ tasks_.pop();
+ return true;
+ }
+ return false;
+}
+
+} // namespace base
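
The const_cast in TakePendingTasks() and DequeueNextTask() above is needed because std::priority_queue::top() only exposes a const reference. A minimal standalone sketch of the same move-out-of-top() technique, with hypothetical Item/ByPriority types, assuming (as the comments note) that the moved-from field takes no part in the heap ordering:

#include <queue>
#include <string>
#include <utility>
#include <vector>

struct Item {
  int priority;         // Used by the comparator; must stay valid.
  std::string payload;  // Not used for ordering, so it is safe to move from.
};

struct ByPriority {
  bool operator()(const Item& a, const Item& b) const {
    return a.priority < b.priority;  // Max-heap keyed on |priority| only.
  }
};

std::vector<std::string> DrainPayloads(
    std::priority_queue<Item, std::vector<Item>, ByPriority>* queue) {
  std::vector<std::string> payloads;
  while (!queue->empty()) {
    // top() is const; casting away const to move is safe here because the
    // moved-from member does not participate in the heap ordering and the
    // element is popped immediately afterwards.
    payloads.push_back(std::move(const_cast<Item&>(queue->top()).payload));
    queue->pop();
  }
  return payloads;
}
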
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
new file mode 100644
index 0000000000..54ebbdb7a8
--- /dev/null
+++ b/base/test/test_mock_time_task_runner.h
@@ -0,0 +1,223 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
+#define BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
+
+#include <stddef.h>
+
+#include <deque>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/callback_helpers.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/test/test_pending_task.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class Clock;
+class TickClock;
+
+// Runs pending tasks in the order of the tasks' post time + delay, and keeps
+// track of a mock (virtual) tick clock time that can be fast-forwarded.
+//
+// TestMockTimeTaskRunner has the following properties:
+//
+// - Methods RunsTasksOnCurrentThread() and Post[Delayed]Task() can be called
+// from any thread, but the rest of the methods must be called on the same
+// thread the TaskRunner was created on.
+// - It allows for reentrancy, in that it handles the running of tasks that in
+// turn call back into it (e.g., to post more tasks).
+// - Tasks are stored in a priority queue, and executed in the increasing
+// order of post time + delay, but ignoring nestability.
+// - It does not check for overflow when doing time arithmetic. A sufficient
+// condition for preventing overflows is to make sure that the sum of all
+// posted task delays and fast-forward increments is still representable by
+// a TimeDelta, and that adding this delta to the starting values of Time
+// and TickTime is still within their respective range.
+// - Tasks aren't guaranteed to be destroyed immediately after they're run.
+//
+// This is a slightly more sophisticated version of TestSimpleTaskRunner, in
+// that it supports running delayed tasks in the correct temporal order.
+class TestMockTimeTaskRunner : public SingleThreadTaskRunner {
+ public:
+ // Everything that is executed in the scope of a ScopedContext will behave as
+ // though it ran under |scope| (i.e. ThreadTaskRunnerHandle,
+ // RunsTasksOnCurrentThread, etc.). This allows the test body to be all in one
+ // block when multiple TestMockTimeTaskRunners share the main thread. For
+ // example:
+ //
+ // class ExampleFixture {
+ // protected:
+ // void DoBarOnFoo() {
+ // DCHECK(foo_task_runner_->RunsTasksOnCurrentThread());
+ // EXPECT_EQ(foo_task_runner_, ThreadTaskRunnerHandle::Get());
+ // DoBar();
+ // }
+ //
+ // // Mock main task runner.
+ // base::MessageLoop message_loop_;
+ // base::ScopedMockTimeMessageLoopTaskRunner main_task_runner_;
+ //
+ // // Mock foo task runner.
+ // scoped_refptr<TestMockTimeTaskRunner> foo_task_runner_ =
+ // new TestMockTimeTaskRunner();
+ // };
+ //
+ // TEST_F(ExampleFixture, DoBarOnFoo) {
+ // DoThingsOnMain();
+ // {
+ // TestMockTimeTaskRunner::ScopedContext scoped_context(
+ // foo_task_runner_.get());
+ // DoBarOnFoo();
+ // }
+ // DoMoreThingsOnMain();
+ // }
+ //
+ class ScopedContext {
+ public:
+ // Note: |scope| is run until idle as part of this constructor to ensure
+ // that anything which runs in the underlying scope runs after any already
+ // pending tasks (the contrary would break the SequencedTaskRunner
+ // contract).
+ explicit ScopedContext(scoped_refptr<TestMockTimeTaskRunner> scope);
+ ~ScopedContext();
+
+ private:
+ ScopedClosureRunner on_destroy_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedContext);
+ };
+
+ // Constructs an instance whose virtual time will start at the Unix epoch, and
+ // whose time ticks will start at zero.
+ TestMockTimeTaskRunner();
+
+ // Constructs an instance starting at the given virtual time and time ticks.
+ TestMockTimeTaskRunner(Time start_time, TimeTicks start_ticks);
+
+ // Fast-forwards virtual time by |delta|, causing all tasks with a remaining
+ // delay less than or equal to |delta| to be executed. |delta| must be
+ // non-negative.
+ void FastForwardBy(TimeDelta delta);
+
+ // Fast-forwards virtual time just until all tasks are executed.
+ void FastForwardUntilNoTasksRemain();
+
+ // Executes all tasks that have no remaining delay. Tasks with a remaining
+ // delay greater than zero will remain enqueued, and no virtual time will
+ // elapse.
+ void RunUntilIdle();
+
+ // Clears the queue of pending tasks without running them.
+ void ClearPendingTasks();
+
+ // Returns the current virtual time (initially starting at the Unix epoch).
+ Time Now() const;
+
+ // Returns the current virtual tick time (initially starting at 0).
+ TimeTicks NowTicks() const;
+
+ // Returns a Clock that uses the virtual time of |this| as its time source.
+ // The returned Clock will hold a reference to |this|.
+ std::unique_ptr<Clock> GetMockClock() const;
+
+ // Returns a TickClock that uses the virtual time ticks of |this| as its tick
+ // source. The returned TickClock will hold a reference to |this|.
+ std::unique_ptr<TickClock> GetMockTickClock() const;
+
+ std::deque<TestPendingTask> TakePendingTasks();
+ bool HasPendingTask() const;
+ size_t GetPendingTaskCount() const;
+ TimeDelta NextPendingTaskDelay() const;
+
+ // SingleThreadTaskRunner:
+ bool RunsTasksOnCurrentThread() const override;
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override;
+
+ protected:
+ ~TestMockTimeTaskRunner() override;
+
+ // Whether the elapsing of virtual time is stopped or not. Subclasses can
+ // override this method to perform early exits from a running task runner.
+ // Defaults to always return false.
+ virtual bool IsElapsingStopped();
+
+ // Called before the next task to run is selected, so that subclasses have a
+ // last chance to make sure all tasks are posted.
+ virtual void OnBeforeSelectingTask();
+
+ // Called after the current mock time has been incremented so that subclasses
+ // can react to the passing of time.
+ virtual void OnAfterTimePassed();
+
+ // Called after each task is run so that subclasses may perform additional
+ // activities, e.g., pump additional task runners.
+ virtual void OnAfterTaskRun();
+
+ private:
+ struct TestOrderedPendingTask;
+
+ // Predicate that defines a strict weak temporal ordering of tasks.
+ class TemporalOrder {
+ public:
+ bool operator()(const TestOrderedPendingTask& first_task,
+ const TestOrderedPendingTask& second_task) const;
+ };
+
+ typedef std::priority_queue<TestOrderedPendingTask,
+ std::vector<TestOrderedPendingTask>,
+ TemporalOrder> TaskPriorityQueue;
+
+ // Core of the implementation for all flavors of fast-forward methods. Given a
+ // non-negative |max_delta|, runs all tasks with a remaining delay less than
+ // or equal to |max_delta|, and moves virtual time forward as needed for each
+ // processed task. Pass in TimeDelta::Max() as |max_delta| to run all tasks.
+ void ProcessAllTasksNoLaterThan(TimeDelta max_delta);
+
+ // Forwards |now_ticks_| until it equals |later_ticks|, and forwards |now_| by
+ // the same amount. Calls OnAfterTimePassed() if |later_ticks| > |now_ticks_|.
+ // Does nothing if |later_ticks| <= |now_ticks_|.
+ void ForwardClocksUntilTickTime(TimeTicks later_ticks);
+
+ // Returns the |next_task| to run if there is any with a running time that is
+ // at most |reference| + |max_delta|. This additional complexity is required
+ // so that |max_delta| == TimeDelta::Max() can be supported.
+ bool DequeueNextTask(const TimeTicks& reference,
+ const TimeDelta& max_delta,
+ TestPendingTask* next_task);
+
+ // Also used for non-DCHECK logic (RunsTasksOnCurrentThread()) and as such
+ // needs to be a ThreadCheckerImpl.
+ ThreadCheckerImpl thread_checker_;
+
+ Time now_;
+ TimeTicks now_ticks_;
+
+ // Temporally ordered heap of pending tasks. Must only be accessed while the
+ // |tasks_lock_| is held.
+ TaskPriorityQueue tasks_;
+
+ // The ordinal to use for the next task. Must only be accessed while the
+ // |tasks_lock_| is held.
+ size_t next_task_ordinal_;
+
+ Lock tasks_lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestMockTimeTaskRunner);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
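
A minimal usage sketch of the class above (not taken from the patch; SetFlag and the local names are illustrative): a delayed task only runs once the mock clock has been fast-forwarded past its deadline, with no real waiting involved.

#include "base/bind.h"
#include "base/location.h"
#include "base/test/test_mock_time_task_runner.h"

namespace {
void SetFlag(bool* flag) { *flag = true; }
}  // namespace

void FastForwardSketch() {
  scoped_refptr<base::TestMockTimeTaskRunner> runner(
      new base::TestMockTimeTaskRunner);
  bool ran = false;
  runner->PostDelayedTask(FROM_HERE, base::Bind(&SetFlag, &ran),
                          base::TimeDelta::FromSeconds(5));
  runner->FastForwardBy(base::TimeDelta::FromSeconds(1));
  // |ran| is still false: only 1s of the 5s delay has elapsed.
  runner->FastForwardBy(base::TimeDelta::FromSeconds(4));
  // |ran| is now true: virtual time reached the task's deadline.
}
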
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 87b107e838..98bc0179b8 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -22,7 +22,9 @@ TestPendingTask::TestPendingTask(
delay(delay),
nestability(nestability) {}
-TestPendingTask::TestPendingTask(const TestPendingTask& other) = default;
+TestPendingTask::TestPendingTask(TestPendingTask&& other) = default;
+
+TestPendingTask& TestPendingTask::operator=(TestPendingTask&& other) = default;
TimeTicks TestPendingTask::GetTimeToRun() const {
return post_time + delay;
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 2dbdb7eecc..42f3f42c7b 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -21,7 +21,7 @@ struct TestPendingTask {
enum TestNestability { NESTABLE, NON_NESTABLE };
TestPendingTask();
- TestPendingTask(const TestPendingTask& other);
+ TestPendingTask(TestPendingTask&& other);
TestPendingTask(const tracked_objects::Location& location,
const Closure& task,
TimeTicks post_time,
@@ -29,6 +29,8 @@ struct TestPendingTask {
TestNestability nestability);
~TestPendingTask();
+ TestPendingTask& operator=(TestPendingTask&& other);
+
// Returns post_time + delay.
TimeTicks GetTimeToRun() const;
@@ -51,7 +53,7 @@ struct TestPendingTask {
bool ShouldRunBefore(const TestPendingTask& other) const;
tracked_objects::Location location;
- Closure task;
+ OnceClosure task;
TimeTicks post_time;
TimeDelta delay;
TestNestability nestability;
@@ -61,6 +63,9 @@ struct TestPendingTask {
void AsValueInto(base::trace_event::TracedValue* state) const;
std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
std::string ToString() const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestPendingTask);
};
// gtest helpers which allow pretty printing of the tasks, very useful in unit
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index cc39fab85a..090a72e96a 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -5,20 +5,20 @@
#include "base/test/test_simple_task_runner.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace base {
-TestSimpleTaskRunner::TestSimpleTaskRunner() {}
+TestSimpleTaskRunner::TestSimpleTaskRunner() = default;
-TestSimpleTaskRunner::~TestSimpleTaskRunner() {
- DCHECK(thread_checker_.CalledOnValidThread());
-}
+TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
bool TestSimpleTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
const Closure& task,
TimeDelta delay) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock auto_lock(lock_);
pending_tasks_.push_back(
TestPendingTask(from_here, task, TimeTicks(), delay,
TestPendingTask::NESTABLE));
@@ -29,48 +29,70 @@ bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
const Closure& task,
TimeDelta delay) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock auto_lock(lock_);
pending_tasks_.push_back(
TestPendingTask(from_here, task, TimeTicks(), delay,
TestPendingTask::NON_NESTABLE));
return true;
}
+// TODO(gab): Use SequenceToken here to differentiate between tasks running in
+// the scope of this TestSimpleTaskRunner and other task runners sharing this
+// thread. http://crbug.com/631186
bool TestSimpleTaskRunner::RunsTasksOnCurrentThread() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return true;
+ return thread_ref_ == PlatformThread::CurrentRef();
}
-const std::deque<TestPendingTask>&
-TestSimpleTaskRunner::GetPendingTasks() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return pending_tasks_;
+std::deque<TestPendingTask> TestSimpleTaskRunner::TakePendingTasks() {
+ AutoLock auto_lock(lock_);
+ return std::move(pending_tasks_);
+}
+
+size_t TestSimpleTaskRunner::NumPendingTasks() const {
+ AutoLock auto_lock(lock_);
+ return pending_tasks_.size();
}
bool TestSimpleTaskRunner::HasPendingTask() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock auto_lock(lock_);
return !pending_tasks_.empty();
}
base::TimeDelta TestSimpleTaskRunner::NextPendingTaskDelay() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock auto_lock(lock_);
return pending_tasks_.front().GetTimeToRun() - base::TimeTicks();
}
+base::TimeDelta TestSimpleTaskRunner::FinalPendingTaskDelay() const {
+ AutoLock auto_lock(lock_);
+ return pending_tasks_.back().GetTimeToRun() - base::TimeTicks();
+}
+
void TestSimpleTaskRunner::ClearPendingTasks() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ AutoLock auto_lock(lock_);
pending_tasks_.clear();
}
void TestSimpleTaskRunner::RunPendingTasks() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(RunsTasksOnCurrentThread());
+
// Swap with a local variable to avoid re-entrancy problems.
std::deque<TestPendingTask> tasks_to_run;
- tasks_to_run.swap(pending_tasks_);
- for (std::deque<TestPendingTask>::iterator it = tasks_to_run.begin();
- it != tasks_to_run.end(); ++it) {
- it->task.Run();
+ {
+ AutoLock auto_lock(lock_);
+ tasks_to_run.swap(pending_tasks_);
}
+
+ // Multiple test task runners can share the same thread for determinism in
+ // unit tests. Make sure this TestSimpleTaskRunner's tasks run in its scope.
+ ScopedClosureRunner undo_override;
+ if (!ThreadTaskRunnerHandle::IsSet() ||
+ ThreadTaskRunnerHandle::Get() != this) {
+ undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
+ }
+
+ for (auto& task : tasks_to_run)
+ std::move(task.task).Run();
}
void TestSimpleTaskRunner::RunUntilIdle() {
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index 338c634c8d..d089ba8a0b 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -10,8 +10,9 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
#include "base/test/test_pending_task.h"
-#include "base/threading/thread_checker.h"
+#include "base/threading/platform_thread.h"
namespace base {
@@ -25,8 +26,6 @@ class TimeDelta;
//
// TestSimpleTaskRunner has the following properties which make it simple:
//
-// - It is non-thread safe; all member functions must be called on
-// the same thread.
// - Tasks are simply stored in a queue in FIFO order, ignoring delay
// and nestability.
// - Tasks aren't guaranteed to be destroyed immediately after
@@ -36,10 +35,6 @@ class TimeDelta;
// handles the running of tasks that in turn call back into itself
// (e.g., to post more tasks).
//
-// If you need more complicated properties, consider using this class
-// as a template for writing a test TaskRunner implementation using
-// TestPendingTask.
-//
// Note that, like any TaskRunner, TestSimpleTaskRunner is
// ref-counted.
class TestSimpleTaskRunner : public SingleThreadTaskRunner {
@@ -56,27 +51,36 @@ class TestSimpleTaskRunner : public SingleThreadTaskRunner {
bool RunsTasksOnCurrentThread() const override;
- const std::deque<TestPendingTask>& GetPendingTasks() const;
+ std::deque<TestPendingTask> TakePendingTasks();
+ size_t NumPendingTasks() const;
bool HasPendingTask() const;
base::TimeDelta NextPendingTaskDelay() const;
+ base::TimeDelta FinalPendingTaskDelay() const;
// Clears the queue of pending tasks without running them.
void ClearPendingTasks();
- // Runs each current pending task in order and clears the queue.
- // Any tasks posted by the tasks are not run.
- virtual void RunPendingTasks();
+ // Runs each current pending task in order and clears the queue. Tasks posted
+ // by the tasks that run within this call do not run within this call. Can
+ // only be called on the thread that created this TestSimpleTaskRunner.
+ void RunPendingTasks();
- // Runs pending tasks until the queue is empty.
+ // Runs pending tasks until the queue is empty. Can only be called on the
+ // thread that created this TestSimpleTaskRunner.
void RunUntilIdle();
protected:
~TestSimpleTaskRunner() override;
+ private:
+ // Thread on which this was instantiated.
+ const PlatformThreadRef thread_ref_ = PlatformThread::CurrentRef();
+
+ // Synchronizes access to |pending_tasks_|.
+ mutable Lock lock_;
+
std::deque<TestPendingTask> pending_tasks_;
- ThreadChecker thread_checker_;
- private:
DISALLOW_COPY_AND_ASSIGN(TestSimpleTaskRunner);
};
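
With GetPendingTasks() removed, callers that used to peek at the queue by const reference now take ownership of it instead. A hedged sketch of the migration (function and local names are illustrative):

#include <deque>

#include "base/test/test_pending_task.h"
#include "base/test/test_simple_task_runner.h"

void InspectPendingWork(base::TestSimpleTaskRunner* runner) {
  // Before: const std::deque<base::TestPendingTask>& tasks =
  //             runner->GetPendingTasks();
  // After: the queue is moved out, so |runner| has no pending tasks left.
  std::deque<base::TestPendingTask> tasks = runner->TakePendingTasks();
  base::TimeDelta total;
  for (const base::TestPendingTask& task : tasks)
    total += task.delay;  // e.g. aggregate the queued delays for a check.
}
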
diff --git a/base/test/test_timeouts.cc b/base/test/test_timeouts.cc
index 55e9a79861..0dc0f49dee 100644
--- a/base/test/test_timeouts.cc
+++ b/base/test/test_timeouts.cc
@@ -46,7 +46,10 @@ void InitializeTimeout(const char* switch_name, int min_value, int* value) {
std::string string_value(base::CommandLine::ForCurrentProcess()->
GetSwitchValueASCII(switch_name));
int timeout;
- base::StringToInt(string_value, &timeout);
+ if (string_value == TestTimeouts::kNoTimeoutSwitchValue)
+ timeout = kAlmostInfiniteTimeoutMs;
+ else
+ base::StringToInt(string_value, &timeout);
*value = std::max(*value, timeout);
}
*value *= kTimeoutMultiplier;
@@ -65,6 +68,9 @@ void InitializeTimeout(const char* switch_name, int* value) {
} // namespace
// static
+constexpr const char TestTimeouts::kNoTimeoutSwitchValue[];
+
+// static
bool TestTimeouts::initialized_ = false;
// The timeout values should increase in the order they appear in this block.
diff --git a/base/test/test_timeouts.h b/base/test/test_timeouts.h
index ddaf05b5e0..9d42eb91fe 100644
--- a/base/test/test_timeouts.h
+++ b/base/test/test_timeouts.h
@@ -13,6 +13,9 @@
// the timeouts for different environments (like Valgrind).
class TestTimeouts {
public:
+ // Argument that can be passed on the command line to indicate "no timeout".
+ static constexpr const char kNoTimeoutSwitchValue[] = "-1";
+
// Initializes the timeouts. Non thread-safe. Should be called exactly once
// by the test suite.
static void Initialize();
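
The new branch in InitializeTimeout() makes "-1" a sentinel meaning "effectively no timeout" rather than a literal parsed value. A rough standalone sketch of that rule (the constant and the fallback parser here are illustrative stand-ins, not the real ones):

#include <cstdlib>
#include <string>

int ParseTimeoutMs(const std::string& switch_value, int current_ms) {
  const int kAlmostInfiniteMs = 100000000;  // Hypothetical stand-in value.
  int timeout;
  if (switch_value == "-1")
    timeout = kAlmostInfiniteMs;
  else
    timeout = atoi(switch_value.c_str());  // Stand-in for base::StringToInt.
  return timeout > current_ms ? timeout : current_ms;  // std::max semantics.
}
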
diff --git a/base/test/trace_event_analyzer.cc b/base/test/trace_event_analyzer.cc
index 55a349acf2..e61337cccb 100644
--- a/base/test/trace_event_analyzer.cc
+++ b/base/test/trace_event_analyzer.cc
@@ -34,8 +34,8 @@ TraceEvent::~TraceEvent() {
TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
bool TraceEvent::SetFromJSON(const base::Value* event_value) {
- if (event_value->GetType() != base::Value::TYPE_DICTIONARY) {
- LOG(ERROR) << "Value must be TYPE_DICTIONARY";
+ if (event_value->GetType() != base::Value::Type::DICTIONARY) {
+ LOG(ERROR) << "Value must be Type::DICTIONARY";
return false;
}
const base::DictionaryValue* dictionary =
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index b4f0950793..ce7bce22a0 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -123,7 +123,7 @@ TEST_F(TraceEventAnalyzerTest, TraceEvent) {
std::unique_ptr<base::Value> arg;
EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
- EXPECT_EQ(base::Value::TYPE_DICTIONARY, arg->GetType());
+ EXPECT_EQ(base::Value::Type::DICTIONARY, arg->GetType());
}
TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.c b/base/third_party/dynamic_annotations/dynamic_annotations.c
new file mode 100644
index 0000000000..4313ecc5be
--- /dev/null
+++ b/base/third_party/dynamic_annotations/dynamic_annotations.c
@@ -0,0 +1,269 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef _MSC_VER
+# include <windows.h>
+#endif
+
+#ifdef __cplusplus
+# error "This file should be built as pure C to avoid name mangling"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+
+#ifdef __GNUC__
+/* valgrind.h uses gcc extensions so it won't build with other compilers */
+# include "base/third_party/valgrind/valgrind.h"
+#endif
+
+/* Compiler-based ThreadSanitizer defines
+ DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+ and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
+/* Each function is empty and called (via a macro) only in debug mode.
+ The arguments are captured by dynamic tools at runtime. */
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
+
+/* Identical code folding (-Wl,--icf=all) countermeasures.
+ This makes all Annotate* functions different, which prevents the linker from
+ folding them. */
+#ifdef __COUNTER__
+#define DYNAMIC_ANNOTATIONS_IMPL \
+ volatile unsigned short lineno = (__LINE__ << 8) + __COUNTER__; (void)lineno;
+#else
+#define DYNAMIC_ANNOTATIONS_IMPL \
+ volatile unsigned short lineno = (__LINE__ << 8); (void)lineno;
+#endif
+
+/* WARNING: always add new annotations to the end of the list.
+ Otherwise, lineno (see above) numbers for different Annotate* functions may
+ conflict. */
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+ const char *file, int line, const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+ const char *file, int line, const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+ const char *file, int line, const volatile void *lock, long is_w)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+ const char *file, int line, const volatile void *lock, long is_w)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+ const char *file, int line, const volatile void *barrier, long count,
+ long reinitialization_allowed)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+ const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+ const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+ const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+ const char *file, int line, const volatile void *cv,
+ const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+ const char *file, int line, const volatile void *cv)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+ const char *file, int line, const volatile void *cv)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+ const char *file, int line, const volatile void *obj)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+ const char *file, int line, const volatile void *obj)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+ const char *file, int line, const volatile void *address, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+ const char *file, int line, const volatile void *address, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+ const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+ const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+ const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+ const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+ const char *file, int line, const volatile void *mem, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+ const char *file, int line, const volatile void *mem, long size,
+ const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+ const char *file, int line, const volatile void *mu)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+ const char *file, int line, const volatile void *mu)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+ const char *file, int line, const volatile void *arg)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+ const char *file, int line, const char *name)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+ const char *file, int line, int enable)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+ const char *file, int line, const volatile void *arg)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+ const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 \
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
+static int GetRunningOnValgrind(void) {
+#ifdef RUNNING_ON_VALGRIND
+ if (RUNNING_ON_VALGRIND) return 1;
+#endif
+
+#ifndef _MSC_VER
+ char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
+ if (running_on_valgrind_str) {
+ return strcmp(running_on_valgrind_str, "0") != 0;
+ }
+#else
+ /* Visual Studio issues warnings if we use getenv,
+ * so we use GetEnvironmentVariableA instead.
+ */
+ char value[100] = "1";
+ int res = GetEnvironmentVariableA("RUNNING_ON_VALGRIND",
+ value, sizeof(value));
+ /* value will remain "1" if res == 0 or res >= sizeof(value). The latter
+ * can happen only if the given value is long, in which case it can't be "0".
+ */
+ if (res > 0 && strcmp(value, "0") != 0)
+ return 1;
+#endif
+ return 0;
+}
+
+/* See the comments in dynamic_annotations.h */
+int RunningOnValgrind(void) {
+ static volatile int running_on_valgrind = -1;
+ /* C doesn't have thread-safe initialization of statics, and we
+ don't want to depend on pthread_once here, so hack it. */
+ int local_running_on_valgrind = running_on_valgrind;
+ if (local_running_on_valgrind == -1)
+ running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
+ return local_running_on_valgrind;
+}
+
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
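
RunningOnValgrind() above caches its answer with a deliberately racy lazy initialization: several threads may call GetRunningOnValgrind() concurrently, but all of them compute and store the same value, so the race is benign. A C++ sketch of the same idiom, assuming Compute() is idempotent (modern code would prefer std::atomic or a once-flag):

// Racy-but-benign lazy init: no locking, tolerable only because every racer
// writes the identical value.
static int Compute() { return 42; }  // Hypothetical, idempotent.

int CachedValue() {
  static volatile int cached = -1;  // -1 means "not yet computed".
  int local = cached;
  if (local == -1)
    cached = local = Compute();  // Racing threads all store the same result.
  return local;
}
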
diff --git a/base/threading/non_thread_safe.h b/base/threading/non_thread_safe.h
index d41c08608c..64ae8e4da9 100644
--- a/base/threading/non_thread_safe.h
+++ b/base/threading/non_thread_safe.h
@@ -10,14 +10,7 @@
// There is a specific macro to do it: NON_EXPORTED_BASE(), defined in
// compiler_specific.h
#include "base/compiler_specific.h"
-
-// See comment at top of thread_checker.h
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
-#define ENABLE_NON_THREAD_SAFE 1
-#else
-#define ENABLE_NON_THREAD_SAFE 0
-#endif
-
+#include "base/logging.h"
#include "base/threading/non_thread_safe_impl.h"
namespace base {
@@ -58,13 +51,11 @@ class NonThreadSafeDoNothing {
// to have a base::ThreadChecker as a member, rather than inherit from
// NonThreadSafe. For more details about when to choose one over the other, see
// the documentation for base::ThreadChecker.
-#if ENABLE_NON_THREAD_SAFE
+#if DCHECK_IS_ON()
typedef NonThreadSafeImpl NonThreadSafe;
#else
typedef NonThreadSafeDoNothing NonThreadSafe;
-#endif // ENABLE_NON_THREAD_SAFE
-
-#undef ENABLE_NON_THREAD_SAFE
+#endif // DCHECK_IS_ON()
} // namespace base
diff --git a/base/threading/non_thread_safe_unittest.cc b/base/threading/non_thread_safe_unittest.cc
index d523fc55b1..5752d5f2b3 100644
--- a/base/threading/non_thread_safe_unittest.cc
+++ b/base/threading/non_thread_safe_unittest.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/test/gtest_util.h"
#include "base/threading/simple_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -106,8 +107,6 @@ TEST(NonThreadSafeTest, DetachThenDestructOnDifferentThread) {
delete_on_thread.Join();
}
-#if GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
-
void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
new NonThreadSafeClass);
@@ -120,17 +119,15 @@ void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
call_on_thread.Join();
}
-#if ENABLE_NON_THREAD_SAFE
+#if DCHECK_IS_ON()
TEST(NonThreadSafeDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
- ASSERT_DEATH({
- NonThreadSafeClass::MethodOnDifferentThreadImpl();
- }, "");
+ ASSERT_DCHECK_DEATH({ NonThreadSafeClass::MethodOnDifferentThreadImpl(); });
}
#else
TEST(NonThreadSafeTest, MethodAllowedOnDifferentThreadInRelease) {
NonThreadSafeClass::MethodOnDifferentThreadImpl();
}
-#endif // ENABLE_NON_THREAD_SAFE
+#endif // DCHECK_IS_ON()
void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
@@ -145,21 +142,15 @@ void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
delete_on_thread.Join();
}
-#if ENABLE_NON_THREAD_SAFE
+#if DCHECK_IS_ON()
TEST(NonThreadSafeDeathTest, DestructorNotAllowedOnDifferentThreadInDebug) {
- ASSERT_DEATH({
- NonThreadSafeClass::DestructorOnDifferentThreadImpl();
- }, "");
+ ASSERT_DCHECK_DEATH(
+ { NonThreadSafeClass::DestructorOnDifferentThreadImpl(); });
}
#else
TEST(NonThreadSafeTest, DestructorAllowedOnDifferentThreadInRelease) {
NonThreadSafeClass::DestructorOnDifferentThreadImpl();
}
-#endif // ENABLE_NON_THREAD_SAFE
-
-#endif // GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
-
-// Just in case we ever get lumped together with other compilation units.
-#undef ENABLE_NON_THREAD_SAFE
+#endif // DCHECK_IS_ON()
} // namespace base
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index 9b217a9c65..8c0d8e4432 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -18,6 +18,8 @@
#if defined(OS_WIN)
#include <windows.h>
+#elif defined(OS_MACOSX)
+#include <mach/mach_types.h>
#elif defined(OS_POSIX)
#include <pthread.h>
#include <unistd.h>
@@ -28,6 +30,8 @@ namespace base {
// Used for logging. Always an integer value.
#if defined(OS_WIN)
typedef DWORD PlatformThreadId;
+#elif defined(OS_MACOSX)
+typedef mach_port_t PlatformThreadId;
#elif defined(OS_POSIX)
typedef pid_t PlatformThreadId;
#endif
@@ -59,6 +63,8 @@ class PlatformThreadRef {
return id_ == other.id_;
}
+ bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
+
bool is_null() const {
return id_ == 0;
}
@@ -175,6 +181,12 @@ class BASE_EXPORT PlatformThread {
// PlatformThreadHandle.
static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
+ // CreateNonJoinableWithPriority() does the same thing as CreateNonJoinable()
+ // except the priority of the thread is set based on |priority|.
+ static bool CreateNonJoinableWithPriority(size_t stack_size,
+ Delegate* delegate,
+ ThreadPriority priority);
+
// Joins with a thread created via the Create function. This function blocks
// the caller until the designated thread exits. This will invalidate
// |thread_handle|.
@@ -184,6 +196,10 @@ class BASE_EXPORT PlatformThread {
// and |thread_handle| is invalidated after this call.
static void Detach(PlatformThreadHandle thread_handle);
+ // Returns true if SetCurrentThreadPriority() can be used to increase the
+ // priority of the current thread.
+ static bool CanIncreaseCurrentThreadPriority();
+
// Toggles the current thread's priority at runtime. A thread may not be able
// to raise its priority back up after lowering it if the process does not
// have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
@@ -195,6 +211,20 @@ class BASE_EXPORT PlatformThread {
static ThreadPriority GetCurrentThreadPriority();
+#if defined(OS_LINUX)
+ // Toggles a specific thread's priority at runtime. This can be used to
+ // change the priority of a thread in a different process and will fail
+ // if the calling process does not have proper permissions. The
+ // SetCurrentThreadPriority() function above is preferred for security
+ // reasons, but on platforms where sandboxed processes are not allowed to
+ // change priority, this function exists to allow a non-sandboxed process
+ // to change the priority of sandboxed threads for improved performance.
+ // Warning: Don't use this for a main thread because that will change the
+ // whole thread group's (i.e. process) priority.
+ static void SetThreadPriority(PlatformThreadId thread_id,
+ ThreadPriority priority);
+#endif
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
};
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index ab7c97ef51..474410f18a 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -8,8 +8,10 @@
#include <sched.h>
#include <stddef.h>
+#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "base/threading/platform_thread_internal_posix.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/tracked_objects.h"
@@ -18,11 +20,68 @@
#if !defined(OS_NACL)
#include <pthread.h>
#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#endif
namespace base {
+namespace {
+#if !defined(OS_NACL)
+const FilePath::CharType kCgroupDirectory[] =
+ FILE_PATH_LITERAL("/sys/fs/cgroup");
+
+FilePath ThreadPriorityToCgroupDirectory(const FilePath& cgroup_filepath,
+ ThreadPriority priority) {
+ switch (priority) {
+ case ThreadPriority::NORMAL:
+ return cgroup_filepath;
+ case ThreadPriority::BACKGROUND:
+ return cgroup_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
+ case ThreadPriority::DISPLAY:
+ case ThreadPriority::REALTIME_AUDIO:
+ return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
+ }
+ NOTREACHED();
+ return FilePath();
+}
+
+void SetThreadCgroup(PlatformThreadId thread_id,
+ const FilePath& cgroup_directory) {
+ FilePath tasks_filepath = cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
+ std::string tid = IntToString(thread_id);
+ int bytes_written = WriteFile(tasks_filepath, tid.c_str(), tid.size());
+ if (bytes_written != static_cast<int>(tid.size())) {
+ DVLOG(1) << "Failed to add " << tid << " to " << tasks_filepath.value();
+ }
+}
+
+void SetThreadCgroupForThreadPriority(PlatformThreadId thread_id,
+ const FilePath& cgroup_filepath,
+ ThreadPriority priority) {
+ // Append "chrome" suffix.
+ FilePath cgroup_directory = ThreadPriorityToCgroupDirectory(
+ cgroup_filepath.Append(FILE_PATH_LITERAL("chrome")), priority);
+
+ // Silently ignore request if cgroup directory doesn't exist.
+ if (!DirectoryExists(cgroup_directory))
+ return;
+
+ SetThreadCgroup(thread_id, cgroup_directory);
+}
+
+void SetThreadCgroupsForThreadPriority(PlatformThreadId thread_id,
+ ThreadPriority priority) {
+ FilePath cgroup_filepath(kCgroupDirectory);
+ SetThreadCgroupForThreadPriority(
+ thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset")), priority);
+ SetThreadCgroupForThreadPriority(
+ thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("schedtune")),
+ priority);
+}
+#endif
+} // namespace
namespace internal {
@@ -41,6 +100,7 @@ const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
#if !defined(OS_NACL)
+ SetThreadCgroupsForThreadPriority(PlatformThread::CurrentId(), priority);
return priority == ThreadPriority::REALTIME_AUDIO &&
pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
#else
@@ -90,6 +150,25 @@ void PlatformThread::SetName(const std::string& name) {
#endif // !defined(OS_NACL)
}
+#if !defined(OS_NACL)
+// static
+void PlatformThread::SetThreadPriority(PlatformThreadId thread_id,
+ ThreadPriority priority) {
+ // Changing the priority of the process's main thread is not permitted for
+ // security reasons; this interface is restricted to changing the priority
+ // of non-main threads only.
+ CHECK_NE(thread_id, getpid());
+
+ SetThreadCgroupsForThreadPriority(thread_id, priority);
+
+ const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
+ if (setpriority(PRIO_PROCESS, thread_id, nice_setting)) {
+ DVPLOG(1) << "Failed to set nice value of thread (" << thread_id << ") to "
+ << nice_setting;
+ }
+}
+#endif // !defined(OS_NACL)
+
void InitThreading() {}
void TerminateOnThread() {}
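
SetThreadCgroup() above migrates a thread between the cpuset/schedtune groups by writing its TID into the cgroup's "tasks" file. A standalone sketch of that mechanism using plain streams instead of base::WriteFile (the function name is illustrative):

#include <fstream>
#include <string>

bool MoveThreadToCgroup(int tid, const std::string& cgroup_dir) {
  // Writing a TID into <cgroup>/tasks reassigns that single thread; the
  // patch silently skips this step when the directory does not exist.
  std::ofstream tasks(cgroup_dir + "/tasks");
  if (!tasks.is_open())
    return false;
  tasks << tid;
  return tasks.good();
}
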
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
index 51f3621af2..e743044ec1 100644
--- a/base/threading/platform_thread_mac.mm
+++ b/base/threading/platform_thread_mac.mm
@@ -162,6 +162,11 @@ void SetPriorityRealtimeAudio(mach_port_t mach_thread_id) {
} // anonymous namespace
// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+ return true;
+}
+
+// static
void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
// Convert from pthread_t to mach thread identifier.
mach_port_t mach_thread_id =
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index 2321b3cd49..9a6a2bb999 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -11,9 +11,12 @@
#include <stdint.h>
#include <sys/resource.h>
#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
#include <memory>
+#include "base/debug/activity_tracker.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/threading/platform_thread_internal_posix.h"
@@ -23,8 +26,6 @@
#if defined(OS_LINUX)
#include <sys/syscall.h>
-#elif defined(OS_ANDROID)
-#include <sys/types.h>
#endif
namespace base {
@@ -187,21 +188,32 @@ const char* PlatformThread::GetName() {
bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
PlatformThreadHandle* thread_handle,
ThreadPriority priority) {
- return CreateThread(stack_size, true, // joinable thread
- delegate, thread_handle, priority);
+ return CreateThread(stack_size, true /* joinable thread */, delegate,
+ thread_handle, priority);
}
// static
bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+ return CreateNonJoinableWithPriority(stack_size, delegate,
+ ThreadPriority::NORMAL);
+}
+
+// static
+bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
+ Delegate* delegate,
+ ThreadPriority priority) {
PlatformThreadHandle unused;
bool result = CreateThread(stack_size, false /* non-joinable thread */,
- delegate, &unused, ThreadPriority::NORMAL);
+ delegate, &unused, priority);
return result;
}
// static
void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
+
// Joining another thread may block the current thread for a long time, since
// the thread referred to by |thread_handle| may still be running long-lived /
// blocking tasks.
@@ -218,6 +230,18 @@ void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
#if !defined(OS_MACOSX)
// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+#if defined(OS_NACL)
+ return false;
+#else
+ // Only root can raise thread priority in a POSIX environment. On Linux,
+ // users with the CAP_SYS_NICE capability can also raise thread priority,
+ // but libcap.so would be needed to check for it.
+ return geteuid() == 0;
+#endif // defined(OS_NACL)
+}
+
+// static
void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
#if defined(OS_NACL)
NOTIMPLEMENTED();
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 2d99ed8750..0febf8ba9b 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -12,8 +12,6 @@
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_POSIX)
-#include <sys/types.h>
-#include <unistd.h>
#include "base/threading/platform_thread_internal_posix.h"
#elif defined(OS_WIN)
#include <windows.h>
@@ -235,17 +233,6 @@ const ThreadPriority kThreadPriorityTestValues[] = {
ThreadPriority::NORMAL,
ThreadPriority::BACKGROUND};
-bool IsBumpingPriorityAllowed() {
-#if defined(OS_POSIX)
- // Only root can raise thread priority on POSIX environment. On Linux, users
- // who have CAP_SYS_NICE permission also can raise the thread priority, but
- // libcap.so would be needed to check the capability.
- return geteuid() == 0;
-#else
- return true;
-#endif
-}
-
class ThreadPriorityTestThread : public FunctionTestThread {
public:
explicit ThreadPriorityTestThread(ThreadPriority priority)
@@ -273,8 +260,9 @@ class ThreadPriorityTestThread : public FunctionTestThread {
// Test changing a created thread's priority (which has different semantics on
// some platforms).
TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
- const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
- if (bumping_priority_allowed) {
+ const bool increase_priority_allowed =
+ PlatformThread::CanIncreaseCurrentThreadPriority();
+ if (increase_priority_allowed) {
// Bump the priority in order to verify that new threads are started with
// normal priority.
PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
@@ -282,7 +270,7 @@ TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
// Toggle each supported priority on the thread and confirm it affects it.
for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
- if (!bumping_priority_allowed &&
+ if (!increase_priority_allowed &&
kThreadPriorityTestValues[i] >
PlatformThread::GetCurrentThreadPriority()) {
continue;
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index c906866cfb..d16f8bd225 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -4,42 +4,46 @@
#include "base/threading/post_task_and_reply_impl.h"
+#include <utility>
+
#include "base/bind.h"
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace {
-// This relay class remembers the MessageLoop that it was created on, and
-// ensures that both the |task| and |reply| Closures are deleted on this same
-// thread. Also, |task| is guaranteed to be deleted before |reply| is run or
-// deleted.
+// This relay class remembers the sequence that it was created on, and ensures
+// that both the |task| and |reply| Closures are deleted on this same sequence.
+// Also, |task| is guaranteed to be deleted before |reply| is run or deleted.
//
-// If this is not possible because the originating MessageLoop is no longer
-// available, the the |task| and |reply| Closures are leaked. Leaking is
-// considered preferable to having a thread-safetey violations caused by
-// invoking the Closure destructor on the wrong thread.
+// If RunReplyAndSelfDestruct() doesn't run because the originating execution
+// context is no longer available, then the |task| and |reply| Closures are
+// leaked. Leaking is considered preferable to having thread-safety
+// violations caused by invoking the Closure destructor on the wrong sequence.
class PostTaskAndReplyRelay {
public:
PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply)
- : from_here_(from_here),
- origin_task_runner_(ThreadTaskRunnerHandle::Get()) {
- task_ = task;
- reply_ = reply;
- }
+ Closure task,
+ Closure reply)
+ : sequence_checker_(),
+ from_here_(from_here),
+ origin_task_runner_(SequencedTaskRunnerHandle::Get()),
+ reply_(std::move(reply)),
+ task_(std::move(task)) {}
~PostTaskAndReplyRelay() {
- DCHECK(origin_task_runner_->BelongsToCurrentThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
task_.Reset();
reply_.Reset();
}
- void Run() {
+ void RunTaskAndPostReply() {
task_.Run();
origin_task_runner_->PostTask(
from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
@@ -48,7 +52,7 @@ class PostTaskAndReplyRelay {
private:
void RunReplyAndSelfDestruct() {
- DCHECK(origin_task_runner_->BelongsToCurrentThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
// Force |task_| to be released before |reply_| is to ensure that no one
// accidentally depends on |task_| keeping one of its arguments alive while
@@ -61,8 +65,9 @@ class PostTaskAndReplyRelay {
delete this;
}
- tracked_objects::Location from_here_;
- scoped_refptr<SingleThreadTaskRunner> origin_task_runner_;
+ const SequenceChecker sequence_checker_;
+ const tracked_objects::Location from_here_;
+ const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
Closure reply_;
Closure task_;
};
@@ -73,14 +78,19 @@ namespace internal {
bool PostTaskAndReplyImpl::PostTaskAndReply(
const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply) {
- // TODO(tzik): Use DCHECK here once the crash is gone. http://crbug.com/541319
- CHECK(!task.is_null()) << from_here.ToString();
- CHECK(!reply.is_null()) << from_here.ToString();
+ Closure task,
+ Closure reply) {
+ DCHECK(!task.is_null()) << from_here.ToString();
+ DCHECK(!reply.is_null()) << from_here.ToString();
PostTaskAndReplyRelay* relay =
- new PostTaskAndReplyRelay(from_here, task, reply);
- if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::Run,
+ new PostTaskAndReplyRelay(from_here, std::move(task), std::move(reply));
+ // PostTaskAndReplyRelay self-destructs after executing |reply|. On the flip
+ // side though, it is intentionally leaked if the |task| doesn't complete
+ // before the origin sequence stops executing tasks. Annotate |relay| as leaky
+ // to avoid having to suppress every callsite which happens to flakily trigger
+ // this race.
+ ANNOTATE_LEAKING_OBJECT_PTR(relay);
+ if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::RunTaskAndPostReply,
Unretained(relay)))) {
delete relay;
return false;
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index d21ab78de8..696b668a4c 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -8,30 +8,29 @@
#ifndef BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
#define BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
-#include "base/callback_forward.h"
+#include "base/base_export.h"
+#include "base/callback.h"
#include "base/location.h"
namespace base {
namespace internal {
-// Inherit from this in a class that implements PostTask appropriately
-// for sending to a destination thread.
+// Inherit from this in a class that implements PostTask to send a task to a
+// custom execution context.
//
-// Note that 'reply' will always get posted back to your current
-// MessageLoop.
-//
-// If you're looking for a concrete implementation of
-// PostTaskAndReply, you probably want base::SingleThreadTaskRunner, or you
-// may want base::WorkerPool.
-class PostTaskAndReplyImpl {
+// If you're looking for a concrete implementation of PostTaskAndReply, you
+// probably want base::TaskRunner, or you may want base::WorkerPool.
+class BASE_EXPORT PostTaskAndReplyImpl {
public:
virtual ~PostTaskAndReplyImpl() = default;
- // Implementation for TaskRunner::PostTaskAndReply and
- // WorkerPool::PostTaskAndReply.
+ // Posts |task| by calling PostTask(). On completion, |reply| is posted to the
+ // sequence or thread that called this. Can only be called when
+ // SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
+ // to be deleted on the sequence or thread that called this.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply);
+ Closure task,
+ Closure reply);
private:
virtual bool PostTask(const tracked_objects::Location& from_here,
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
index 88b36a8d64..90f68b33ab 100644
--- a/base/threading/sequenced_task_runner_handle.cc
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -16,39 +16,56 @@ namespace base {
namespace {
-base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+LazyInstance<ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+ // Return the registered SingleThreadTaskRunner, if any. This must be at the
+ // top so that a SingleThreadTaskRunner has priority over a
+ // SequencedTaskRunner (RLZ registers both on the same thread despite that
+ // being prevented by DCHECKs).
+ // TODO(fdoray): Move this to the bottom once RLZ stops registering a
+ // SingleThreadTaskRunner and a SequencedTaskRunner on the same thread.
+ // https://crbug.com/618530#c14
+ if (ThreadTaskRunnerHandle::IsSet()) {
+ // Various modes of setting SequencedTaskRunnerHandle don't combine.
+ DCHECK(!lazy_tls_ptr.Pointer()->Get());
+ DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
+
+ return ThreadTaskRunnerHandle::Get();
+ }
+
// Return the registered SequencedTaskRunner, if any.
const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
if (handle) {
// Various modes of setting SequencedTaskRunnerHandle don't combine.
- DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
- DCHECK(!SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread());
- return handle->task_runner_;
- }
+ DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
- // Return the SequencedTaskRunner obtained from SequencedWorkerPool, if any.
- scoped_refptr<base::SequencedTaskRunner> task_runner =
- SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
- if (task_runner) {
- DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
- return task_runner;
+ return handle->task_runner_;
}
- // Return the SingleThreadTaskRunner for the current thread otherwise.
- return base::ThreadTaskRunnerHandle::Get();
+ // If we are on a worker thread of a SequencedWorkerPool that is running a
+ // sequenced task, return a SequencedTaskRunner for it.
+ scoped_refptr<SequencedWorkerPool> pool =
+ SequencedWorkerPool::GetWorkerPoolForCurrentThread();
+ DCHECK(pool);
+ SequencedWorkerPool::SequenceToken sequence_token =
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread();
+ DCHECK(sequence_token.IsValid());
+ scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
+ pool->GetSequencedTaskRunner(sequence_token));
+ DCHECK(sequenced_task_runner->RunsTasksOnCurrentThread());
+ return sequenced_task_runner;
}
// static
bool SequencedTaskRunnerHandle::IsSet() {
return lazy_tls_ptr.Pointer()->Get() ||
- SequencedWorkerPool::GetWorkerPoolForCurrentThread() ||
- base::ThreadTaskRunnerHandle::IsSet();
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid() ||
+ ThreadTaskRunnerHandle::IsSet();
}
SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
index e6dec1e9f8..b7f4bae8aa 100644
--- a/base/threading/sequenced_task_runner_handle.h
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -25,8 +25,9 @@ class BASE_EXPORT SequencedTaskRunnerHandle {
// instantiating a SequencedTaskRunnerHandle.
// b) The current thread has a ThreadTaskRunnerHandle (which includes any
// thread that has a MessageLoop associated with it), or
- // c) The current thread is a worker thread belonging to a
- // SequencedWorkerPool.
+ // c) The current thread is a worker thread belonging to a SequencedWorkerPool
+ // *and* is currently running a sequenced task (note: not supporting
+ // unsequenced tasks is intentional: https://crbug.com/618043#c4).
static bool IsSet();
// Binds |task_runner| to the current thread.
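
A short sketch of the typical consumer of this handle (names illustrative): code that may run either on a MessageLoop thread or on a SequencedWorkerPool sequence can post a follow-up to "wherever it is currently running" without knowing which of the cases above applies.

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/threading/sequenced_task_runner_handle.h"

void FollowUp() {}

void PostFollowUpToCurrentSequence() {
  DCHECK(base::SequencedTaskRunnerHandle::IsSet());
  base::SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                   base::Bind(&FollowUp));
}
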
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index 57961b5cd5..ce594cd7fb 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -10,6 +10,7 @@
#include <map>
#include <memory>
#include <set>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -17,6 +18,7 @@
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/critical_closure.h"
+#include "base/debug/dump_without_crashing.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -25,15 +27,21 @@
#include "base/strings/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
+// Don't enable the redirect to TaskScheduler on Arc++ to avoid pulling in a
+// bunch of dependencies. Some code below is also #ifdef'ed out accordingly.
+#if 0
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#endif
#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_restrictions.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"
+#include "base/tracking_info.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -43,13 +51,36 @@
#endif
#if !defined(OS_NACL)
-#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#endif
namespace base {
namespace {
+// An enum representing the state of all pools. A non-test process should only
+// ever transition from POST_TASK_DISABLED to one of the active states. A test
+// process may transition from one of the active states to POST_TASK_DISABLED
+// when DisableForProcessForTesting() is called.
+//
+// External memory synchronization is required to call a method that reads
+// |g_all_pools_state| after calling a method that modifies it.
+//
+// TODO(gab): Remove this if http://crbug.com/622400 fails (SequencedWorkerPool
+// will be phased out completely otherwise).
+enum class AllPoolsState {
+ POST_TASK_DISABLED,
+ USE_WORKER_POOL,
+ REDIRECTED_TO_TASK_SCHEDULER,
+};
+
+// TODO(fdoray): Change the initial state to POST_TASK_DISABLED. It is initially
+// USE_WORKER_POOL to avoid a revert of the CL that adds
+// debug::DumpWithoutCrashing() in case of waterfall failures.
+AllPoolsState g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
+
+TaskPriority g_max_task_priority = TaskPriority::HIGHEST;
+
struct SequencedTask : public TrackingInfo {
SequencedTask()
: sequence_token_id(0),
@@ -92,6 +123,14 @@ struct SequencedTaskLessThan {
}
};
+// Create a process-wide unique ID to represent this task in trace events. This
+// will be mangled with a Process ID hash to reduce the likelihood of colliding
+// with MessageLoop pointers on other processes.
+uint64_t GetTaskTraceID(const SequencedTask& task, void* pool) {
+ return (static_cast<uint64_t>(task.trace_id) << 32) |
+ static_cast<uint64_t>(reinterpret_cast<intptr_t>(pool));
+}
+
// SequencedWorkerPoolTaskRunner ---------------------------------------------
// A TaskRunner which posts tasks to a SequencedWorkerPool with a
// fixed ShutdownBehavior.
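As a worked illustration of the packing in GetTaskTraceID() above (all values are made up): the 32-bit trace ID occupies the high half of the result and the pool pointer the low bits, so equal trace IDs from different pools still yield distinct trace events. On a 64-bit build, pointer bits above bit 31 can overlap the trace-ID bits; that is acceptable here because the goal is only to reduce collisions, not eliminate them.

  uint64_t high = static_cast<uint64_t>(5) << 32;  // trace_id 5 -> 0x0000000500000000
  uint64_t low = 0x7f00b000;                       // hypothetical pool address
  uint64_t id = high | low;                        // 0x000000057f00b000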
@@ -142,14 +181,17 @@ bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
return pool_->RunsTasksOnCurrentThread();
}
-// SequencedWorkerPoolSequencedTaskRunner ------------------------------------
+} // namespace
+
+// SequencedWorkerPool::PoolSequencedTaskRunner ------------------------------
// A SequencedTaskRunner which posts tasks to a SequencedWorkerPool with a
// fixed sequence token.
//
// Note that this class is RefCountedThreadSafe (inherited from TaskRunner).
-class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
+class SequencedWorkerPool::PoolSequencedTaskRunner
+ : public SequencedTaskRunner {
public:
- SequencedWorkerPoolSequencedTaskRunner(
+ PoolSequencedTaskRunner(
scoped_refptr<SequencedWorkerPool> pool,
SequencedWorkerPool::SequenceToken token,
SequencedWorkerPool::WorkerShutdown shutdown_behavior);
@@ -166,7 +208,7 @@ class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
TimeDelta delay) override;
private:
- ~SequencedWorkerPoolSequencedTaskRunner() override;
+ ~PoolSequencedTaskRunner() override;
const scoped_refptr<SequencedWorkerPool> pool_;
@@ -174,25 +216,25 @@ class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
const SequencedWorkerPool::WorkerShutdown shutdown_behavior_;
- DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPoolSequencedTaskRunner);
+ DISALLOW_COPY_AND_ASSIGN(PoolSequencedTaskRunner);
};
-SequencedWorkerPoolSequencedTaskRunner::SequencedWorkerPoolSequencedTaskRunner(
- scoped_refptr<SequencedWorkerPool> pool,
- SequencedWorkerPool::SequenceToken token,
- SequencedWorkerPool::WorkerShutdown shutdown_behavior)
+SequencedWorkerPool::PoolSequencedTaskRunner::
+ PoolSequencedTaskRunner(
+ scoped_refptr<SequencedWorkerPool> pool,
+ SequencedWorkerPool::SequenceToken token,
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior)
: pool_(std::move(pool)),
token_(token),
shutdown_behavior_(shutdown_behavior) {}
-SequencedWorkerPoolSequencedTaskRunner::
-~SequencedWorkerPoolSequencedTaskRunner() {
-}
+SequencedWorkerPool::PoolSequencedTaskRunner::
+ ~PoolSequencedTaskRunner() = default;
-bool SequencedWorkerPoolSequencedTaskRunner::PostDelayedTask(
- const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::
+ PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
token_, from_here, task, shutdown_behavior_);
@@ -200,29 +242,20 @@ bool SequencedWorkerPoolSequencedTaskRunner::PostDelayedTask(
return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
}
-bool SequencedWorkerPoolSequencedTaskRunner::RunsTasksOnCurrentThread() const {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::
+ RunsTasksOnCurrentThread() const {
return pool_->IsRunningSequenceOnCurrentThread(token_);
}
-bool SequencedWorkerPoolSequencedTaskRunner::PostNonNestableDelayedTask(
- const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::
+ PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
// There's no way to run nested tasks, so simply forward to
// PostDelayedTask.
return PostDelayedTask(from_here, task, delay);
}
-// Create a process-wide unique ID to represent this task in trace events. This
-// will be mangled with a Process ID hash to reduce the likelihood of colliding
-// with MessageLoop pointers on other processes.
-uint64_t GetTaskTraceID(const SequencedTask& task, void* pool) {
- return (static_cast<uint64_t>(task.trace_id) << 32) |
- static_cast<uint64_t>(reinterpret_cast<intptr_t>(pool));
-}
-
-} // namespace
-
// Worker ---------------------------------------------------------------------
class SequencedWorkerPool::Worker : public SimpleThread {
@@ -247,6 +280,14 @@ class SequencedWorkerPool::Worker : public SimpleThread {
is_processing_task_ = true;
task_sequence_token_ = token;
task_shutdown_behavior_ = shutdown_behavior;
+
+ // It is dangerous for tasks with CONTINUE_ON_SHUTDOWN to access a class
+ // that implements a non-leaky base::Singleton because they are generally
+ // destroyed before the process terminates via an AtExitManager
+ // registration. This will trigger a DCHECK to warn of such cases. See the
+ // comment about CONTINUE_ON_SHUTDOWN for more details.
+ ThreadRestrictions::SetSingletonAllowed(task_shutdown_behavior_ !=
+ CONTINUE_ON_SHUTDOWN);
}
// Indicates that the task has finished running.
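The SetSingletonAllowed() call above exists because of a shutdown ordering hazard; a hedged sketch of the bug class it catches (Config and its methods are hypothetical):

  class Config {
   public:
    static Config* GetInstance() {
      // Non-leaky: destroyed via AtExitManager before process exit.
      return base::Singleton<Config>::get();
    }
    void DoSomething() {}
  };

  // Posted with CONTINUE_ON_SHUTDOWN, this may still be running after the
  // AtExitManager has destroyed the Config instance, dereferencing freed
  // memory. The DCHECK armed by SetSingletonAllowed(false) surfaces this at
  // the singleton access instead of as a silent use-after-free.
  void RacyTask() {
    Config::GetInstance()->DoSomething();
  }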
@@ -292,8 +333,10 @@ class SequencedWorkerPool::Inner {
public:
// Take a raw pointer to |worker| to avoid cycles (since we're owned
// by it).
- Inner(SequencedWorkerPool* worker_pool, size_t max_threads,
+ Inner(SequencedWorkerPool* worker_pool,
+ size_t max_threads,
const std::string& thread_name_prefix,
+ base::TaskPriority task_priority,
TestingObserver* observer);
~Inner();
@@ -316,11 +359,6 @@ class SequencedWorkerPool::Inner {
bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
- bool IsRunningSequence(SequenceToken sequence_token) const;
-
- void SetRunningTaskInfoForCurrentThread(SequenceToken sequence_token,
- WorkerShutdown shutdown_behavior);
-
void CleanupForTesting();
void SignalHasWorkForTesting();
@@ -349,6 +387,25 @@ class SequencedWorkerPool::Inner {
CLEANUP_DONE,
};
+ // Clears ScheduledTasks in |tasks_to_delete| while ensuring that
+ // |this_worker| has the desired task info context during ~ScheduledTask() to
+ // allow sequence-checking.
+ void DeleteWithoutLock(std::vector<SequencedTask>* tasks_to_delete,
+ Worker* this_worker);
+
+ // Helper used by PostTask() to complete the work when redirection is on.
+ // Returns true if the task may run at some point in the future and false if
+ // it will definitely not run.
+ // Coalesce upon resolution of http://crbug.com/622400.
+ bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
+ const TimeDelta& delay);
+
+ // Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
+ // and |traits|.
+ scoped_refptr<TaskRunner> GetTaskSchedulerTaskRunner(
+ int sequence_token_id,
+ const TaskTraits& traits);
+
// Called from within the lock, this converts the given token name into a
// token ID, creating a new one if necessary.
int LockedGetNamedTokenID(const std::string& name);
@@ -373,7 +430,7 @@ class SequencedWorkerPool::Inner {
// See the implementation for a more detailed description.
GetWorkStatus GetWork(SequencedTask* task,
TimeDelta* wait_time,
- std::vector<Closure>* delete_these_outside_lock);
+ std::vector<SequencedTask>* delete_these_outside_lock);
void HandleCleanup();
@@ -397,7 +454,7 @@ class SequencedWorkerPool::Inner {
// 0 or more. The caller should then call FinishStartingAdditionalThread to
// complete initialization once the lock is released.
//
- // If another thread is not necessary, returne 0;
+  // If another thread is not necessary, returns 0.
//
// See the implementation for more.
int PrepareToStartAdditionalThreadIfHelpful();
@@ -497,6 +554,27 @@ class SequencedWorkerPool::Inner {
TestingObserver* const testing_observer_;
+ // Members below are used for the experimental redirection to TaskScheduler.
+ // TODO(gab): Remove these if http://crbug.com/622400 fails
+ // (SequencedWorkerPool will be phased out completely otherwise).
+
+ // The TaskPriority to be used for SequencedWorkerPool tasks redirected to the
+ // TaskScheduler as an experiment (unused otherwise).
+ const base::TaskPriority task_priority_;
+
+ // A map of SequenceToken IDs to TaskScheduler TaskRunners used to redirect
+ // sequenced tasks to the TaskScheduler.
+ std::unordered_map<int, scoped_refptr<TaskRunner>> sequenced_task_runner_map_;
+
+ // TaskScheduler TaskRunners to redirect unsequenced tasks to the
+ // TaskScheduler. Indexed by TaskShutdownBehavior.
+ scoped_refptr<TaskRunner> unsequenced_task_runners_[3];
+
+ // A dummy TaskRunner obtained from TaskScheduler with the same TaskTraits as
+ // used by this SequencedWorkerPool to query for RunsTasksOnCurrentThread().
+ // Mutable so it can be lazily instantiated from RunsTasksOnCurrentThread().
+ mutable scoped_refptr<TaskRunner> runs_tasks_on_verifier_;
+
DISALLOW_COPY_AND_ASSIGN(Inner);
};
@@ -510,6 +588,7 @@ SequencedWorkerPool::Worker::Worker(
worker_pool_(std::move(worker_pool)),
task_shutdown_behavior_(BLOCK_SHUTDOWN),
is_processing_task_(false) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
Start();
}
@@ -517,6 +596,8 @@ SequencedWorkerPool::Worker::~Worker() {
}
void SequencedWorkerPool::Worker::Run() {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
#if defined(OS_WIN)
win::ScopedCOMInitializer com_initializer;
#endif
@@ -552,11 +633,11 @@ LazyInstance<ThreadLocalPointer<SequencedWorkerPool::Worker>>::Leaky
// Inner definitions ---------------------------------------------------------
-SequencedWorkerPool::Inner::Inner(
- SequencedWorkerPool* worker_pool,
- size_t max_threads,
- const std::string& thread_name_prefix,
- TestingObserver* observer)
+SequencedWorkerPool::Inner::Inner(SequencedWorkerPool* worker_pool,
+ size_t max_threads,
+ const std::string& thread_name_prefix,
+ base::TaskPriority task_priority,
+ TestingObserver* observer)
: worker_pool_(worker_pool),
lock_(),
has_work_cv_(&lock_),
@@ -574,7 +655,13 @@ SequencedWorkerPool::Inner::Inner(
cleanup_state_(CLEANUP_DONE),
cleanup_idlers_(0),
cleanup_cv_(&lock_),
- testing_observer_(observer) {}
+ testing_observer_(observer),
+ task_priority_(static_cast<int>(task_priority) <=
+ static_cast<int>(g_max_task_priority)
+ ? task_priority
+ : g_max_task_priority) {
+ DCHECK_GT(max_threads_, 1U);
+}
SequencedWorkerPool::Inner::~Inner() {
// You must call Shutdown() before destroying the pool.
@@ -611,6 +698,13 @@ bool SequencedWorkerPool::Inner::PostTask(
const tracked_objects::Location& from_here,
const Closure& task,
TimeDelta delay) {
+ // TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
+ // waterfall. https://crbug.com/622400
+ // DCHECK_NE(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
+ if (g_all_pools_state == AllPoolsState::POST_TASK_DISABLED)
+ debug::DumpWithoutCrashing();
+
DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
SequencedTask sequenced(from_here);
sequenced.sequence_token_id = sequence_token.id_;
@@ -624,6 +718,7 @@ bool SequencedWorkerPool::Inner::PostTask(
int create_thread_id = 0;
{
AutoLock lock(lock_);
+
if (shutdown_called_) {
// Don't allow a new task to be posted if it doesn't block shutdown.
if (shutdown_behavior != BLOCK_SHUTDOWN)
@@ -659,64 +754,178 @@ bool SequencedWorkerPool::Inner::PostTask(
if (optional_token_name)
sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name);
- pending_tasks_.insert(sequenced);
- if (shutdown_behavior == BLOCK_SHUTDOWN)
- blocking_shutdown_pending_task_count_++;
+  // See the comment at the top of the file for why this is not compiled on Arc++.
+#if 0
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (!PostTaskToTaskScheduler(sequenced, delay))
+ return false;
+ } else {
+#endif
+ pending_tasks_.insert(sequenced);
- create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
+ if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
+ blocking_shutdown_pending_task_count_++;
+
+ create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
+ }
+#if 0
}
+#endif
- // Actually start the additional thread or signal an existing one now that
- // we're outside the lock.
- if (create_thread_id)
- FinishStartingAdditionalThread(create_thread_id);
- else
- SignalHasWork();
+ // Use != REDIRECTED_TO_TASK_SCHEDULER instead of == USE_WORKER_POOL to ensure
+ // correct behavior if a task is posted to a SequencedWorkerPool before
+ // Enable(WithRedirectionToTaskScheduler)ForProcess() in a non-DCHECK build.
+ if (g_all_pools_state != AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ // Actually start the additional thread or signal an existing one outside
+ // the lock.
+ if (create_thread_id)
+ FinishStartingAdditionalThread(create_thread_id);
+ else
+ SignalHasWork();
+ }
+
+#if DCHECK_IS_ON()
+ {
+ AutoLock lock_for_dcheck(lock_);
+    // Some variables are exposed in both modes for convenience, but each is
+    // only really intended for one of them at runtime; confirm exclusive
+    // usage here.
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ DCHECK(pending_tasks_.empty());
+ DCHECK_EQ(0, create_thread_id);
+ } else {
+ DCHECK(sequenced_task_runner_map_.empty());
+ }
+ }
+#endif // DCHECK_IS_ON()
return true;
}
-bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
- AutoLock lock(lock_);
- return ContainsKey(threads_, PlatformThread::CurrentId());
+bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
+ const SequencedTask& sequenced,
+ const TimeDelta& delay) {
+#if 1
+ NOTREACHED();
+ ALLOW_UNUSED_PARAM(sequenced);
+ ALLOW_UNUSED_PARAM(delay);
+ return false;
+#else
+ DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
+
+ lock_.AssertAcquired();
+
+ // Confirm that the TaskScheduler's shutdown behaviors use the same
+ // underlying values as SequencedWorkerPool.
+ static_assert(
+ static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) ==
+ static_cast<int>(CONTINUE_ON_SHUTDOWN),
+ "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
+ "CONTINUE_ON_SHUTDOWN.");
+ static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) ==
+ static_cast<int>(SKIP_ON_SHUTDOWN),
+ "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
+ "SKIP_ON_SHUTDOWN.");
+ static_assert(static_cast<int>(TaskShutdownBehavior::BLOCK_SHUTDOWN) ==
+ static_cast<int>(BLOCK_SHUTDOWN),
+ "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
+ "BLOCK_SHUTDOWN.");
+
+ const TaskShutdownBehavior task_shutdown_behavior =
+ static_cast<TaskShutdownBehavior>(sequenced.shutdown_behavior);
+ const TaskTraits traits = TaskTraits()
+ .MayBlock()
+ .WithBaseSyncPrimitives()
+ .WithPriority(task_priority_)
+ .WithShutdownBehavior(task_shutdown_behavior);
+ return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
+ ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
+#endif
}
-bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
- SequenceToken sequence_token) const {
+scoped_refptr<TaskRunner>
+SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner(
+ int sequence_token_id,
+ const TaskTraits& traits) {
+#if 1
+ NOTREACHED();
+ ALLOW_UNUSED_PARAM(sequence_token_id);
+ ALLOW_UNUSED_PARAM(traits);
+ return scoped_refptr<TaskRunner>();
+#else
+ DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
+
+ lock_.AssertAcquired();
+
+ static_assert(
+ static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == 0,
+ "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN must be equal to 0 to be "
+ "used as an index in |unsequenced_task_runners_|.");
+ static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) == 1,
+ "TaskShutdownBehavior::SKIP_ON_SHUTDOWN must be equal to 1 to "
+ "be used as an index in |unsequenced_task_runners_|.");
+ static_assert(static_cast<int>(TaskShutdownBehavior::BLOCK_SHUTDOWN) == 2,
+ "TaskShutdownBehavior::BLOCK_SHUTDOWN must be equal to 2 to be "
+ "used as an index in |unsequenced_task_runners_|.");
+ static_assert(arraysize(unsequenced_task_runners_) == 3,
+ "The size of |unsequenced_task_runners_| doesn't match the "
+ "number of shutdown behaviors.");
+
+ scoped_refptr<TaskRunner>& task_runner =
+ sequence_token_id ? sequenced_task_runner_map_[sequence_token_id]
+ : unsequenced_task_runners_[static_cast<int>(
+ traits.shutdown_behavior())];
+
+ // TODO(fdoray): DCHECK that all tasks posted to the same sequence have the
+ // same shutdown behavior.
+
+ if (!task_runner) {
+ task_runner = sequence_token_id
+ ? CreateSequencedTaskRunnerWithTraits(traits)
+ : CreateTaskRunnerWithTraits(traits);
+ }
+
+ return task_runner;
+#endif
+}
+
+bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
AutoLock lock(lock_);
- ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
- if (found == threads_.end())
- return false;
- return found->second->is_processing_task() &&
- sequence_token.Equals(found->second->task_sequence_token());
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+#if 0
+ if (!runs_tasks_on_verifier_) {
+ runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits(
+ TaskTraits().MayBlock().WithBaseSyncPrimitives().WithPriority(
+ task_priority_));
+ }
+#endif
+ return runs_tasks_on_verifier_->RunsTasksOnCurrentThread();
+ } else {
+ return ContainsKey(threads_, PlatformThread::CurrentId());
+ }
}
-bool SequencedWorkerPool::Inner::IsRunningSequence(
+bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
SequenceToken sequence_token) const {
DCHECK(sequence_token.IsValid());
- AutoLock lock(lock_);
- return !IsSequenceTokenRunnable(sequence_token.id_);
-}
-void SequencedWorkerPool::Inner::SetRunningTaskInfoForCurrentThread(
- SequenceToken sequence_token,
- WorkerShutdown shutdown_behavior) {
AutoLock lock(lock_);
- ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
- DCHECK(found != threads_.end());
- DCHECK(found->second->is_processing_task());
- DCHECK(!found->second->task_sequence_token().IsValid());
- found->second->set_running_task_info(sequence_token, shutdown_behavior);
- // Mark the sequence token as in use.
- bool success = current_sequences_.insert(sequence_token.id_).second;
- DCHECK(success);
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ const auto sequenced_task_runner_it =
+ sequenced_task_runner_map_.find(sequence_token.id_);
+ return sequenced_task_runner_it != sequenced_task_runner_map_.end() &&
+ sequenced_task_runner_it->second->RunsTasksOnCurrentThread();
+ } else {
+ ThreadMap::const_iterator found =
+ threads_.find(PlatformThread::CurrentId());
+ return found != threads_.end() && found->second->is_processing_task() &&
+ sequence_token.Equals(found->second->task_sequence_token());
+ }
}
// See https://code.google.com/p/chromium/issues/detail?id=168415
void SequencedWorkerPool::Inner::CleanupForTesting() {
- DCHECK(!RunsTasksOnCurrentThread());
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ DCHECK_NE(g_all_pools_state, AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER);
AutoLock lock(lock_);
CHECK_EQ(CLEANUP_DONE, cleanup_state_);
if (shutdown_called_)
@@ -744,8 +953,12 @@ void SequencedWorkerPool::Inner::Shutdown(
if (shutdown_called_)
return;
shutdown_called_ = true;
+
max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown;
+ if (g_all_pools_state != AllPoolsState::USE_WORKER_POOL)
+ return;
+
// Tickle the threads. This will wake up a waiting one so it will know that
// it can exit, which in turn will wake up any other waiting ones.
SignalHasWork();
@@ -783,6 +996,7 @@ bool SequencedWorkerPool::Inner::IsShutdownInProgress() {
}
void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
{
AutoLock lock(lock_);
DCHECK(thread_being_created_);
@@ -801,18 +1015,15 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// See GetWork for what delete_these_outside_lock is doing.
SequencedTask task;
TimeDelta wait_time;
- std::vector<Closure> delete_these_outside_lock;
+ std::vector<SequencedTask> delete_these_outside_lock;
GetWorkStatus status =
GetWork(&task, &wait_time, &delete_these_outside_lock);
if (status == GET_WORK_FOUND) {
- TRACE_EVENT_WITH_FLOW2(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- "SequencedWorkerPool::Inner::ThreadLoop",
+ TRACE_TASK_EXECUTION("SequencedWorkerPool::Inner::ThreadLoop", task);
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ "SequencedWorkerPool::Inner::PostTask",
TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))),
- TRACE_EVENT_FLAG_FLOW_IN,
- "src_file", task.posted_from.file_name(),
- "src_func", task.posted_from.function_name());
- TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
- task.posted_from.file_name());
+ TRACE_EVENT_FLAG_FLOW_IN);
int new_thread_id = WillRunWorkerTask(task);
{
AutoUnlock unlock(lock_);
@@ -821,7 +1032,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// already get a signal for each new task, but it doesn't
// hurt.)
SignalHasWork();
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
// Complete thread creation outside the lock if necessary.
if (new_thread_id)
@@ -838,11 +1049,6 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
task, stopwatch);
- // Update the sequence token in case it has been set from within the
- // task, so it can be removed from the set of currently running
- // sequences in DidRunWorkerTask() below.
- task.sequence_token_id = this_worker->task_sequence_token().id_;
-
// Make sure our task is erased outside the lock for the
// same reason we do this with delete_these_outside_lock.
// Also, do it before calling reset_running_task_info() so
@@ -857,7 +1063,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
switch (status) {
case GET_WORK_WAIT: {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
}
break;
case GET_WORK_NOT_FOUND:
@@ -879,7 +1085,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// help this case.
if (shutdown_called_ && blocking_shutdown_pending_task_count_ == 0) {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
break;
}
@@ -887,7 +1093,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// deletion must happen outside of the lock.
if (delete_these_outside_lock.size()) {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
// Since the lock has been released, |status| may no longer be
// accurate. It might read GET_WORK_WAIT even if there are tasks
@@ -910,6 +1116,9 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
}
waiting_thread_count_--;
}
+ // |delete_these_outside_lock| should have been cleared via
+ // DeleteWithoutLock() above already.
+ DCHECK(delete_these_outside_lock.empty());
}
} // Release lock_.
@@ -921,7 +1130,22 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
can_shutdown_cv_.Signal();
}
+void SequencedWorkerPool::Inner::DeleteWithoutLock(
+ std::vector<SequencedTask>* tasks_to_delete,
+ Worker* this_worker) {
+ while (!tasks_to_delete->empty()) {
+ const SequencedTask& deleted_task = tasks_to_delete->back();
+ this_worker->set_running_task_info(
+ SequenceToken(deleted_task.sequence_token_id),
+ deleted_task.shutdown_behavior);
+ tasks_to_delete->pop_back();
+ }
+ this_worker->reset_running_task_info();
+}
+
void SequencedWorkerPool::Inner::HandleCleanup() {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
lock_.AssertAcquired();
if (cleanup_state_ == CLEANUP_DONE)
return;
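A hedged sketch of why DeleteWithoutLock() above restores task-info context before destroying each task (the state class is hypothetical): dropping an unrun SequencedTask destroys its bound arguments, and those destructors may legitimately assert sequence affinity.

  class SequenceAffineState
      : public base::RefCountedThreadSafe<SequenceAffineState> {
   public:
    explicit SequenceAffineState(
        scoped_refptr<base::SequencedTaskRunner> task_runner)
        : task_runner_(std::move(task_runner)) {}

   private:
    friend class base::RefCountedThreadSafe<SequenceAffineState>;
    ~SequenceAffineState() {
      // Runs not only after a task executes, but also when a pending task is
      // discarded during cleanup/shutdown; either way it must observe the
      // right sequence, which DeleteWithoutLock() arranges.
      DCHECK(task_runner_->RunsTasksOnCurrentThread());
    }
    scoped_refptr<base::SequencedTaskRunner> task_runner_;
  };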
@@ -986,7 +1210,9 @@ int64_t SequencedWorkerPool::Inner::LockedGetNextSequenceTaskNumber() {
SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
SequencedTask* task,
TimeDelta* wait_time,
- std::vector<Closure>* delete_these_outside_lock) {
+ std::vector<SequencedTask>* delete_these_outside_lock) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
lock_.AssertAcquired();
// Find the next task with a sequence token that's not currently in use.
@@ -1030,18 +1256,17 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
// shutdown. Delete it and get more work.
//
// Note that we do not want to delete unrunnable tasks. Deleting a task
- // can have side effects (like freeing some objects) and deleting a
- // task that's supposed to run after one that's currently running could
- // cause an obscure crash.
+ // can have side effects (like freeing some objects) and deleting a task
+ // that's supposed to run after one that's currently running could cause
+ // an obscure crash.
//
// We really want to delete these tasks outside the lock in case the
- // closures are holding refs to objects that want to post work from
- // their destructorss (which would deadlock). The closures are
- // internally refcounted, so we just need to keep a copy of them alive
- // until the lock is exited. The calling code can just clear() the
- // vector they passed to us once the lock is exited to make this
- // happen.
- delete_these_outside_lock->push_back(i->task);
+ // closures are holding refs to objects that want to post work from their
+ // destructors (which would deadlock). The closures are internally
+ // refcounted, so we just need to keep a copy of them alive until the lock
+ // is exited. The calling code can just clear() the vector they passed to
+ // us once the lock is exited to make this happen.
+ delete_these_outside_lock->push_back(*i);
pending_tasks_.erase(i++);
continue;
}
@@ -1052,7 +1277,7 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
status = GET_WORK_WAIT;
if (cleanup_state_ == CLEANUP_RUNNING) {
// Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
- delete_these_outside_lock->push_back(i->task);
+ delete_these_outside_lock->push_back(*i);
pending_tasks_.erase(i);
}
break;
@@ -1073,6 +1298,8 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
}
int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
lock_.AssertAcquired();
// Mark the task's sequence number as in use.
@@ -1104,6 +1331,8 @@ int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
}
void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
lock_.AssertAcquired();
if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN) {
@@ -1117,6 +1346,8 @@ void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
int sequence_token_id) const {
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
+
lock_.AssertAcquired();
return !sequence_token_id ||
current_sequences_.find(sequence_token_id) ==
@@ -1124,6 +1355,8 @@ bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
}
int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
+
lock_.AssertAcquired();
// How thread creation works:
//
@@ -1174,6 +1407,8 @@ int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
void SequencedWorkerPool::Inner::FinishStartingAdditionalThread(
int thread_number) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
+
// Called outside of the lock.
DCHECK_GT(thread_number, 0);
@@ -1183,6 +1418,8 @@ void SequencedWorkerPool::Inner::FinishStartingAdditionalThread(
}
void SequencedWorkerPool::Inner::SignalHasWork() {
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
+
has_work_cv_.Signal();
if (testing_observer_) {
testing_observer_->OnHasWork();
@@ -1190,6 +1427,7 @@ void SequencedWorkerPool::Inner::SignalHasWork() {
}
bool SequencedWorkerPool::Inner::CanShutdown() const {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
// See PrepareToStartAdditionalThreadIfHelpful for how thread creation works.
return !thread_being_created_ &&
@@ -1227,44 +1465,61 @@ SequencedWorkerPool::GetWorkerPoolForCurrentThread() {
}
// static
-scoped_refptr<SequencedTaskRunner>
-SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread() {
- Worker* worker = Worker::GetForCurrentThread();
+void SequencedWorkerPool::EnableForProcess() {
+ // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() in case of
+ // waterfall failures.
+ // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
+ g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
+}
- // If there is no worker, this thread is not a worker thread. Otherwise, it is
- // currently running a task (sequenced or unsequenced).
- if (!worker)
- return nullptr;
+// static
+void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
+ TaskPriority max_task_priority) {
+#if 1
+ NOTREACHED();
+ ALLOW_UNUSED_PARAM(max_task_priority);
+#else
+ // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() in case of
+ // waterfall failures.
+ // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
+ DCHECK(TaskScheduler::GetInstance());
+ g_all_pools_state = AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER;
+ g_max_task_priority = max_task_priority;
+#endif
+}
- scoped_refptr<SequencedWorkerPool> pool = worker->worker_pool();
- SequenceToken sequence_token = worker->task_sequence_token();
- WorkerShutdown shutdown_behavior = worker->task_shutdown_behavior();
- if (!sequence_token.IsValid()) {
- // Create a new sequence token and bind this thread to it, to make sure that
- // a task posted to the SequencedTaskRunner we are going to return is not
- // immediately going to run on a different thread.
- sequence_token = Inner::GetSequenceToken();
- pool->inner_->SetRunningTaskInfoForCurrentThread(sequence_token,
- shutdown_behavior);
- }
+// static
+void SequencedWorkerPool::DisableForProcessForTesting() {
+ g_all_pools_state = AllPoolsState::POST_TASK_DISABLED;
+}
- DCHECK(pool->IsRunningSequenceOnCurrentThread(sequence_token));
- return new SequencedWorkerPoolSequencedTaskRunner(
- std::move(pool), sequence_token, shutdown_behavior);
+// static
+bool SequencedWorkerPool::IsEnabled() {
+ return g_all_pools_state != AllPoolsState::POST_TASK_DISABLED;
}
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
- const std::string& thread_name_prefix)
- : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
- inner_(new Inner(this, max_threads, thread_name_prefix, NULL)) {
-}
+ const std::string& thread_name_prefix,
+ base::TaskPriority task_priority)
+ : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
+ inner_(new Inner(this,
+ max_threads,
+ thread_name_prefix,
+ task_priority,
+ NULL)) {}
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
const std::string& thread_name_prefix,
+ base::TaskPriority task_priority,
TestingObserver* observer)
- : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
- inner_(new Inner(this, max_threads, thread_name_prefix, observer)) {
-}
+ : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
+ inner_(new Inner(this,
+ max_threads,
+ thread_name_prefix,
+ task_priority,
+ observer)) {}
SequencedWorkerPool::~SequencedWorkerPool() {}
@@ -1295,7 +1550,7 @@ scoped_refptr<SequencedTaskRunner> SequencedWorkerPool::GetSequencedTaskRunner(
scoped_refptr<SequencedTaskRunner>
SequencedWorkerPool::GetSequencedTaskRunnerWithShutdownBehavior(
SequenceToken token, WorkerShutdown shutdown_behavior) {
- return new SequencedWorkerPoolSequencedTaskRunner(
+ return new PoolSequencedTaskRunner(
this, token, shutdown_behavior);
}
@@ -1378,18 +1633,19 @@ bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
return inner_->RunsTasksOnCurrentThread();
}
-bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
- SequenceToken sequence_token) const {
- return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
-}
-
-bool SequencedWorkerPool::IsRunningSequence(
- SequenceToken sequence_token) const {
- return inner_->IsRunningSequence(sequence_token);
-}
-
void SequencedWorkerPool::FlushForTesting() {
- inner_->CleanupForTesting();
+ DCHECK(!RunsTasksOnCurrentThread());
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+#if 1
+ NOTREACHED();
+#else
+ // TODO(gab): Remove this if http://crbug.com/622400 fails.
+ TaskScheduler::GetInstance()->FlushForTesting();
+#endif
+ } else {
+ inner_->CleanupForTesting();
+ }
}
void SequencedWorkerPool::SignalHasWorkForTesting() {
@@ -1397,7 +1653,7 @@ void SequencedWorkerPool::SignalHasWorkForTesting() {
}
void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) {
- DCHECK(constructor_task_runner_->BelongsToCurrentThread());
+ DCHECK(constructor_task_runner_->RunsTasksOnCurrentThread());
inner_->Shutdown(max_new_blocking_tasks_after_shutdown);
}
@@ -1405,4 +1661,9 @@ bool SequencedWorkerPool::IsShutdownInProgress() {
return inner_->IsShutdownInProgress();
}
+bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
+ SequenceToken sequence_token) const {
+ return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
+}
+
} // namespace base
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index cbec39561a..0d42de9138 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -13,10 +13,11 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
+#include "base/task_scheduler/task_traits.h"
namespace tracked_objects {
class Location;
@@ -24,12 +25,10 @@ class Location;
namespace base {
-class SingleThreadTaskRunner;
+class SequencedTaskRunner;
template <class T> class DeleteHelper;
-class SequencedTaskRunner;
-
// A worker thread pool that enforces ordering between sets of tasks. It also
// allows you to specify what should happen to your tasks on shutdown.
//
@@ -47,8 +46,7 @@ class SequencedTaskRunner;
// destruction will be visible to T2.
//
// Example:
-// SequencedWorkerPool::SequenceToken token =
-// SequencedWorkerPool::GetSequenceToken();
+// SequencedWorkerPool::SequenceToken token = pool.GetSequenceToken();
// pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
// FROM_HERE, base::Bind(...));
// pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
@@ -61,6 +59,10 @@ class SequencedTaskRunner;
// These will be executed in an unspecified order. The order of execution
// between tasks with different sequence tokens is also unspecified.
//
+// You must call EnableForProcess() or
+// EnableWithRedirectionToTaskSchedulerForProcess() before starting to post
+// tasks to a process' SequencedWorkerPools.
+//
// This class may be leaked on shutdown to facilitate fast shutdown. The
// expected usage, however, is to call Shutdown(), which correctly accounts
// for CONTINUE_ON_SHUTDOWN behavior and is required for BLOCK_SHUTDOWN
@@ -164,36 +166,65 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// an unsequenced task, returns an invalid SequenceToken.
static SequenceToken GetSequenceTokenForCurrentThread();
- // Gets a SequencedTaskRunner for the current thread. If the current thread is
- // running an unsequenced task, a new SequenceToken will be generated and set,
- // so that the returned SequencedTaskRunner is guaranteed to run tasks after
- // the current task has finished running.
- static scoped_refptr<SequencedTaskRunner>
- GetSequencedTaskRunnerForCurrentThread();
+ // Returns the SequencedWorkerPool that owns this thread, or null if the
+ // current thread is not a SequencedWorkerPool worker thread.
+ //
+ // Always returns nullptr when SequencedWorkerPool is redirected to
+ // TaskScheduler.
+ //
+  // DEPRECATED. Use SequencedTaskRunnerHandle::Get() instead. Consequently,
+  // the only remaining use case is in sequenced_task_runner_handle.cc, which
+  // implements that; it will soon be removed along with SequencedWorkerPool:
+ // http://crbug.com/622400.
+ static scoped_refptr<SequencedWorkerPool> GetWorkerPoolForCurrentThread();
// Returns a unique token that can be used to sequence tasks posted to
// PostSequencedWorkerTask(). Valid tokens are always nonzero.
- // TODO(bauerb): Rename this to better differentiate from
- // GetSequenceTokenForCurrentThread().
static SequenceToken GetSequenceToken();
- // Returns the SequencedWorkerPool that owns this thread, or null if the
- // current thread is not a SequencedWorkerPool worker thread.
- static scoped_refptr<SequencedWorkerPool> GetWorkerPoolForCurrentThread();
+ // Enables posting tasks to this process' SequencedWorkerPools. Cannot be
+ // called if already enabled. This is not thread-safe; proper synchronization
+ // is required to use any SequencedWorkerPool method after calling this.
+ static void EnableForProcess();
+
+ // Same as EnableForProcess(), but tasks are redirected to the registered
+  // TaskScheduler. The TaskPriority of redirected tasks is capped at
+  // |max_task_priority|. There must be a registered TaskScheduler when this
+  // is called.
+ // TODO(gab): Remove this if http://crbug.com/622400 fails
+ // (SequencedWorkerPool will be phased out completely otherwise).
+ static void EnableWithRedirectionToTaskSchedulerForProcess(
+ TaskPriority max_task_priority = TaskPriority::HIGHEST);
+
+ // Disables posting tasks to this process' SequencedWorkerPools. Calling this
+ // while there are active SequencedWorkerPools is not supported. This is not
+ // thread-safe; proper synchronization is required to use any
+ // SequencedWorkerPool method after calling this.
+ static void DisableForProcessForTesting();
+
+ // Returns true if posting tasks to this process' SequencedWorkerPool is
+ // enabled (with or without redirection to TaskScheduler).
+ static bool IsEnabled();
// When constructing a SequencedWorkerPool, there must be a
// SequencedTaskRunnerHandle on the current thread unless you plan to
// deliberately leak it.
- // Pass the maximum number of threads (they will be lazily created as needed)
- // and a prefix for the thread name to aid in debugging.
+  // Constructs a SequencedWorkerPool which will lazily create up to
+  // |max_threads| threads, named with |thread_name_prefix| to aid debugging.
+  // |max_threads| must be greater than 1. |task_priority| is used as a hint
+  // to base::TaskScheduler for an experiment in which all SequencedWorkerPool
+  // tasks are redirected to it in processes where a base::TaskScheduler has
+  // been instantiated.
SequencedWorkerPool(size_t max_threads,
- const std::string& thread_name_prefix);
+ const std::string& thread_name_prefix,
+ base::TaskPriority task_priority);
// Like above, but with |observer| for testing. Does not take ownership of
// |observer|.
SequencedWorkerPool(size_t max_threads,
const std::string& thread_name_prefix,
+ base::TaskPriority task_priority,
TestingObserver* observer);
// Returns the sequence token associated with the given name. Calling this
@@ -207,7 +238,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// delay are posted with SKIP_ON_SHUTDOWN behavior and tasks with zero delay
// are posted with BLOCK_SHUTDOWN behavior.
scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunner(
- SequenceToken token);
+ SequenceToken token) WARN_UNUSED_RESULT;
// Returns a SequencedTaskRunner wrapper which posts to this
// SequencedWorkerPool using the given sequence token. Tasks with nonzero
@@ -215,14 +246,14 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// are posted with the given shutdown behavior.
scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunnerWithShutdownBehavior(
SequenceToken token,
- WorkerShutdown shutdown_behavior);
+ WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
// Returns a TaskRunner wrapper which posts to this SequencedWorkerPool using
// the given shutdown behavior. Tasks with nonzero delay are posted with
// SKIP_ON_SHUTDOWN behavior and tasks with zero delay are posted with the
// given shutdown behavior.
scoped_refptr<TaskRunner> GetTaskRunnerWithShutdownBehavior(
- WorkerShutdown shutdown_behavior);
+ WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
// Posts the given task for execution in the worker pool. Tasks posted with
// this function will execute in an unspecified order on a background thread.
@@ -316,23 +347,21 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
- // Returns true if the current thread is processing a task with the given
- // sequence_token.
- bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
-
- // Returns true if any thread is currently processing a task with the given
- // sequence token. Should only be called with a valid sequence token.
- bool IsRunningSequence(SequenceToken sequence_token) const;
-
// Blocks until all pending tasks are complete. This should only be called in
// unit tests when you want to validate something that should have happened.
- // This will not flush delayed tasks; delayed tasks get deleted.
+ // Does not wait for delayed tasks. If redirection to TaskScheduler is
+ // disabled, delayed tasks are deleted. If redirection to TaskScheduler is
+ // enabled, this will wait for all tasks posted to TaskScheduler (not just
+ // tasks posted to this SequencedWorkerPool).
//
// Note that calling this will not prevent other threads from posting work to
// the queue while the calling thread is waiting on Flush(). In this case,
// Flush will return only when there's no more work in the queue. Normally,
// this doesn't come up since in a test, all the work is being posted from
// the main thread.
+ //
+ // TODO(gab): Remove mentions of TaskScheduler in this comment if
+ // http://crbug.com/622400 fails.
void FlushForTesting();
// Spuriously signal that there is work to be done.
@@ -368,9 +397,14 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
friend class DeleteHelper<SequencedWorkerPool>;
class Inner;
+ class PoolSequencedTaskRunner;
class Worker;
- const scoped_refptr<SingleThreadTaskRunner> constructor_task_runner_;
+ // Returns true if the current thread is processing a task with the given
+ // sequence_token.
+ bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
+
+ const scoped_refptr<SequencedTaskRunner> constructor_task_runner_;
// Avoid pulling in too many headers by putting (almost) everything
// into |inner_|.
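Taken together, a minimal sketch of post-CL usage of this class; Step1/Step2 are placeholders and the priority value is an arbitrary choice:

  // Once per process, before posting to any SequencedWorkerPool:
  base::SequencedWorkerPool::EnableForProcess();

  // Requires a SequencedTaskRunnerHandle on the constructing thread.
  scoped_refptr<base::SequencedWorkerPool> pool(new base::SequencedWorkerPool(
      4, "Example", base::TaskPriority::USER_VISIBLE));

  base::SequencedWorkerPool::SequenceToken token = pool->GetSequenceToken();
  scoped_refptr<base::SequencedTaskRunner> runner =
      pool->GetSequencedTaskRunner(token);
  runner->PostTask(FROM_HERE, base::Bind(&Step1));
  runner->PostTask(FROM_HERE, base::Bind(&Step2));  // runs after Step1

  pool->Shutdown();  // must be called from the constructing sequence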
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
index 6c64a17d6a..9eb443afab 100644
--- a/base/threading/simple_thread.cc
+++ b/base/threading/simple_thread.cc
@@ -12,62 +12,55 @@
namespace base {
SimpleThread::SimpleThread(const std::string& name_prefix)
- : name_prefix_(name_prefix),
- name_(name_prefix),
- thread_(),
- event_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- tid_(0),
- joined_(false) {}
+ : SimpleThread(name_prefix, Options()) {}
SimpleThread::SimpleThread(const std::string& name_prefix,
const Options& options)
: name_prefix_(name_prefix),
- name_(name_prefix),
options_(options),
- thread_(),
event_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
- tid_(0),
- joined_(false) {}
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
SimpleThread::~SimpleThread() {
DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
- DCHECK(HasBeenJoined()) << "SimpleThread destroyed without being Join()ed.";
+ DCHECK(!options_.joinable || HasBeenJoined())
+ << "Joinable SimpleThread destroyed without being Join()ed.";
}
void SimpleThread::Start() {
DCHECK(!HasBeenStarted()) << "Tried to Start a thread multiple times.";
- bool success;
- if (options_.priority() == ThreadPriority::NORMAL) {
- success = PlatformThread::Create(options_.stack_size(), this, &thread_);
- } else {
- success = PlatformThread::CreateWithPriority(options_.stack_size(), this,
- &thread_, options_.priority());
- }
+ bool success =
+ options_.joinable
+ ? PlatformThread::CreateWithPriority(options_.stack_size, this,
+ &thread_, options_.priority)
+ : PlatformThread::CreateNonJoinableWithPriority(
+ options_.stack_size, this, options_.priority);
DCHECK(success);
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ ThreadRestrictions::ScopedAllowWait allow_wait;
event_.Wait(); // Wait for the thread to complete initialization.
}
void SimpleThread::Join() {
+ DCHECK(options_.joinable) << "A non-joinable thread can't be joined.";
DCHECK(HasBeenStarted()) << "Tried to Join a never-started thread.";
DCHECK(!HasBeenJoined()) << "Tried to Join a thread multiple times.";
PlatformThread::Join(thread_);
+ thread_ = PlatformThreadHandle();
joined_ = true;
}
bool SimpleThread::HasBeenStarted() {
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ ThreadRestrictions::ScopedAllowWait allow_wait;
return event_.IsSignaled();
}
void SimpleThread::ThreadMain() {
tid_ = PlatformThread::CurrentId();
// Construct our full name of the form "name_prefix_/TID".
- name_.push_back('/');
- name_.append(IntToString(tid_));
- PlatformThread::SetName(name_);
+ std::string name(name_prefix_);
+ name.push_back('/');
+ name.append(IntToString(tid_));
+ PlatformThread::SetName(name);
// We've initialized our new thread, signal that we're done to Start().
event_.Signal();
@@ -77,24 +70,26 @@ void SimpleThread::ThreadMain() {
DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
const std::string& name_prefix)
- : SimpleThread(name_prefix),
- delegate_(delegate) {
-}
+ : DelegateSimpleThread(delegate, name_prefix, Options()) {}
DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
const std::string& name_prefix,
const Options& options)
: SimpleThread(name_prefix, options),
delegate_(delegate) {
+ DCHECK(delegate_);
}
-DelegateSimpleThread::~DelegateSimpleThread() {
-}
+DelegateSimpleThread::~DelegateSimpleThread() = default;
void DelegateSimpleThread::Run() {
DCHECK(delegate_) << "Tried to call Run without a delegate (called twice?)";
- delegate_->Run();
- delegate_ = NULL;
+
+ // Non-joinable DelegateSimpleThreads are allowed to be deleted during Run().
+ // Member state must not be accessed after invoking Run().
+ Delegate* delegate = delegate_;
+ delegate_ = nullptr;
+ delegate->Run();
}
DelegateSimpleThreadPool::DelegateSimpleThreadPool(
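The reordering in DelegateSimpleThread::Run() above (clear |delegate_|, then invoke it) is what makes self-managing delegates on non-joinable threads safe; a hedged sketch, where FireAndForgetWork is hypothetical:

  class FireAndForgetWork : public base::DelegateSimpleThread::Delegate {
   public:
    void Run() override {
      DoExpensiveWork();
      // Safe: DelegateSimpleThread cleared |delegate_| before calling us and
      // never touches it again, so nothing else references this object.
      delete this;
    }

   private:
    void DoExpensiveWork() {}
  };

Note the header's constraint still applies: the non-joinable thread object itself must stay alive until Run() has been entered.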
diff --git a/base/threading/simple_thread.h b/base/threading/simple_thread.h
index 3deeb1018c..f9f5e91045 100644
--- a/base/threading/simple_thread.h
+++ b/base/threading/simple_thread.h
@@ -48,6 +48,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
@@ -58,25 +59,26 @@ namespace base {
// virtual Run method, or you can use the DelegateSimpleThread interface.
class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
public:
- class BASE_EXPORT Options {
+ struct BASE_EXPORT Options {
public:
- Options() : stack_size_(0), priority_(ThreadPriority::NORMAL) {}
- explicit Options(ThreadPriority priority)
- : stack_size_(0), priority_(priority) {}
- ~Options() {}
+ Options() = default;
+ explicit Options(ThreadPriority priority_in) : priority(priority_in) {}
+ ~Options() = default;
- // We use the standard compiler-supplied copy constructor.
+ // Allow copies.
+ Options(const Options& other) = default;
+ Options& operator=(const Options& other) = default;
// A custom stack size, or 0 for the system default.
- void set_stack_size(size_t size) { stack_size_ = size; }
- size_t stack_size() const { return stack_size_; }
-
- // A custom thread priority.
- void set_priority(ThreadPriority priority) { priority_ = priority; }
- ThreadPriority priority() const { return priority_; }
- private:
- size_t stack_size_;
- ThreadPriority priority_;
+ size_t stack_size = 0;
+
+ ThreadPriority priority = ThreadPriority::NORMAL;
+
+    // If false, the underlying thread's PlatformThreadHandle is not kept
+    // around; as such, the SimpleThread instance is not Join()able and must
+    // not be deleted before Run() is invoked. After that, it is up to the
+    // subclass to determine when it is safe to delete itself.
+ bool joinable = true;
};
// Create a SimpleThread. |options| should be used to manage any specific
@@ -94,19 +96,13 @@ class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
// Subclasses should override the Run method.
virtual void Run() = 0;
- // Return the thread name prefix, or "unnamed" if none was supplied.
- std::string name_prefix() { return name_prefix_; }
-
- // Return the completed name including TID, only valid after Start().
- std::string name() { return name_; }
-
// Return the thread id, only valid after Start().
PlatformThreadId tid() { return tid_; }
// Return True if Start() has ever been called.
bool HasBeenStarted();
- // Return True if Join() has evern been called.
+ // Return True if Join() has ever been called.
bool HasBeenJoined() { return joined_; }
// Overridden from PlatformThread::Delegate:
@@ -116,18 +112,24 @@ class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
const std::string name_prefix_;
std::string name_;
const Options options_;
- PlatformThreadHandle thread_; // PlatformThread handle, invalid after Join!
+ PlatformThreadHandle thread_; // PlatformThread handle, reset after Join.
WaitableEvent event_; // Signaled if Start() was ever called.
- PlatformThreadId tid_; // The backing thread's id.
- bool joined_; // True if Join has been called.
+ PlatformThreadId tid_ = kInvalidThreadId; // The backing thread's id.
+ bool joined_ = false; // True if Join has been called.
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleThread);
};
+// A SimpleThread which delegates Run() to its Delegate. Non-joinable
+// DelegateSimpleThreads are safe to delete once Run() has been invoked, and
+// from this class's point of view their Delegates are too (though
+// implementations must of course ensure that Run() does not use the
+// Delegate's member state after its deletion).
class BASE_EXPORT DelegateSimpleThread : public SimpleThread {
public:
class BASE_EXPORT Delegate {
public:
- Delegate() { }
- virtual ~Delegate() { }
+ virtual ~Delegate() = default;
virtual void Run() = 0;
};
@@ -142,6 +144,8 @@ class BASE_EXPORT DelegateSimpleThread : public SimpleThread {
private:
Delegate* delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThread);
};
// DelegateSimpleThreadPool allows you to start up a fixed number of threads,
@@ -186,6 +190,8 @@ class BASE_EXPORT DelegateSimpleThreadPool
std::queue<Delegate*> delegates_;
base::Lock lock_; // Locks delegates_
WaitableEvent dry_; // Not signaled when there is no work to do.
+
+ DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThreadPool);
};
} // namespace base
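A brief sketch of the new aggregate-style Options in use; the values are arbitrary and MyRunner stands in for any DelegateSimpleThread::Delegate implementation:

  base::SimpleThread::Options options;
  options.stack_size = 64 * 1024;  // bytes; 0 means the system default
  options.priority = base::ThreadPriority::BACKGROUND;
  // options.joinable = false;     // for the detached variant described above

  MyRunner runner;  // hypothetical Delegate
  base::DelegateSimpleThread thread(&runner, "example_prefix", options);
  thread.Start();
  thread.Join();  // required before destruction while |joinable| is true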
diff --git a/base/threading/simple_thread_unittest.cc b/base/threading/simple_thread_unittest.cc
index 14dd4591f1..0e52500c52 100644
--- a/base/threading/simple_thread_unittest.cc
+++ b/base/threading/simple_thread_unittest.cc
@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "base/atomic_sequence_num.h"
+#include "base/memory/ptr_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
#include "base/threading/simple_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -17,11 +21,49 @@ class SetIntRunner : public DelegateSimpleThread::Delegate {
SetIntRunner(int* ptr, int val) : ptr_(ptr), val_(val) { }
~SetIntRunner() override {}
+ private:
void Run() override { *ptr_ = val_; }
- private:
int* ptr_;
int val_;
+
+ DISALLOW_COPY_AND_ASSIGN(SetIntRunner);
+};
+
+// Signals |started_| when Run() is invoked and waits until |released_| is
+// signaled to return, signaling |done_| before doing so. Useful for tests that
+// care to control Run()'s flow.
+class ControlledRunner : public DelegateSimpleThread::Delegate {
+ public:
+ ControlledRunner()
+ : started_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ released_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ done_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ ~ControlledRunner() override { ReleaseAndWaitUntilDone(); }
+
+ void WaitUntilStarted() { started_.Wait(); }
+
+ void ReleaseAndWaitUntilDone() {
+ released_.Signal();
+ done_.Wait();
+ }
+
+ private:
+ void Run() override {
+ started_.Signal();
+ released_.Wait();
+ done_.Signal();
+ }
+
+ WaitableEvent started_;
+ WaitableEvent released_;
+ WaitableEvent done_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlledRunner);
};
class WaitEventRunner : public DelegateSimpleThread::Delegate {
@@ -29,22 +71,28 @@ class WaitEventRunner : public DelegateSimpleThread::Delegate {
explicit WaitEventRunner(WaitableEvent* event) : event_(event) { }
~WaitEventRunner() override {}
+ private:
void Run() override {
EXPECT_FALSE(event_->IsSignaled());
event_->Signal();
EXPECT_TRUE(event_->IsSignaled());
}
- private:
+
WaitableEvent* event_;
+
+ DISALLOW_COPY_AND_ASSIGN(WaitEventRunner);
};
class SeqRunner : public DelegateSimpleThread::Delegate {
public:
explicit SeqRunner(AtomicSequenceNumber* seq) : seq_(seq) { }
- void Run() override { seq_->GetNext(); }
private:
+ void Run() override { seq_->GetNext(); }
+
AtomicSequenceNumber* seq_;
+
+ DISALLOW_COPY_AND_ASSIGN(SeqRunner);
};
// We count up on a sequence number, firing on the event when we've hit our
@@ -56,6 +104,7 @@ class VerifyPoolRunner : public DelegateSimpleThread::Delegate {
int total, WaitableEvent* event)
: seq_(seq), total_(total), event_(event) { }
+ private:
void Run() override {
if (seq_->GetNext() == total_) {
event_->Signal();
@@ -64,10 +113,11 @@ class VerifyPoolRunner : public DelegateSimpleThread::Delegate {
}
}
- private:
AtomicSequenceNumber* seq_;
int total_;
WaitableEvent* event_;
+
+ DISALLOW_COPY_AND_ASSIGN(VerifyPoolRunner);
};
} // namespace
@@ -108,29 +158,44 @@ TEST(SimpleThreadTest, WaitForEvent) {
thread.Join();
}
-TEST(SimpleThreadTest, NamedWithOptions) {
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+TEST(SimpleThreadTest, NonJoinableStartAndDieOnJoin) {
+ ControlledRunner runner;
- WaitEventRunner runner(&event);
SimpleThread::Options options;
- DelegateSimpleThread thread(&runner, "event_waiter", options);
- EXPECT_EQ(thread.name_prefix(), "event_waiter");
- EXPECT_FALSE(event.IsSignaled());
+ options.joinable = false;
+ DelegateSimpleThread thread(&runner, "non_joinable", options);
+ EXPECT_FALSE(thread.HasBeenStarted());
thread.Start();
- EXPECT_EQ(thread.name_prefix(), "event_waiter");
- EXPECT_EQ(thread.name(),
- std::string("event_waiter/") + IntToString(thread.tid()));
- event.Wait();
+ EXPECT_TRUE(thread.HasBeenStarted());
- EXPECT_TRUE(event.IsSignaled());
- thread.Join();
+ // Note: this is not quite the same as |thread.HasBeenStarted()| which
+ // represents ThreadMain() getting ready to invoke Run() whereas
+ // |runner.WaitUntilStarted()| ensures Run() was actually invoked.
+ runner.WaitUntilStarted();
+
+ EXPECT_FALSE(thread.HasBeenJoined());
+ EXPECT_DCHECK_DEATH({ thread.Join(); });
+}
+
+TEST(SimpleThreadTest, NonJoinableInactiveDelegateDestructionIsOkay) {
+ std::unique_ptr<ControlledRunner> runner(new ControlledRunner);
+
+ SimpleThread::Options options;
+ options.joinable = false;
+ std::unique_ptr<DelegateSimpleThread> thread(
+ new DelegateSimpleThread(runner.get(), "non_joinable", options));
+
+ thread->Start();
+ runner->WaitUntilStarted();
+
+ // Deleting a non-joinable SimpleThread after Run() was invoked is okay.
+ thread.reset();
- // We keep the name and tid, even after the thread is gone.
- EXPECT_EQ(thread.name_prefix(), "event_waiter");
- EXPECT_EQ(thread.name(),
- std::string("event_waiter/") + IntToString(thread.tid()));
+ runner->WaitUntilStarted();
+ runner->ReleaseAndWaitUntilDone();
+ // It should be safe to destroy a Delegate after its Run() method completed.
+ runner.reset();
}
TEST(SimpleThreadTest, ThreadPool) {
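
Editor's note: the two new tests above reduce to the following usage pattern for non-joinable SimpleThreads. A minimal sketch, assuming a Delegate like the ControlledRunner above (|MyRunner| is illustrative, not from this change):

  MyRunner runner;  // any DelegateSimpleThread::Delegate that outlives Run()
  SimpleThread::Options options;
  options.joinable = false;
  DelegateSimpleThread thread(&runner, "background", options);
  thread.Start();
  // Never Join() here: joining a non-joinable SimpleThread DCHECKs (see
  // NonJoinableStartAndDieOnJoin). Destroying |thread| after Run() has
  // started is fine, per NonJoinableInactiveDelegateDestructionIsOkay.
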
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index 9cdc6912ea..c30320f0dc 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -5,8 +5,10 @@
#include "base/threading/thread.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_id_name_manager.h"
@@ -14,6 +16,10 @@
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
+#if defined(OS_POSIX) && !defined(OS_NACL)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"
#endif
@@ -26,53 +32,31 @@ namespace {
// because its Stop method was called. This allows us to catch cases where
// MessageLoop::QuitWhenIdle() is called directly, which is unexpected when
// using a Thread to set up and run a MessageLoop.
-base::LazyInstance<base::ThreadLocalBoolean> lazy_tls_bool =
+base::LazyInstance<base::ThreadLocalBoolean>::Leaky lazy_tls_bool =
LAZY_INSTANCE_INITIALIZER;
} // namespace
-// This is used to trigger the message loop to exit.
-void ThreadQuitHelper() {
- MessageLoop::current()->QuitWhenIdle();
- Thread::SetThreadWasQuitProperly(true);
-}
-
-Thread::Options::Options()
- : message_loop_type(MessageLoop::TYPE_DEFAULT),
- timer_slack(TIMER_SLACK_NONE),
- stack_size(0),
- priority(ThreadPriority::NORMAL) {
-}
+Thread::Options::Options() = default;
-Thread::Options::Options(MessageLoop::Type type,
- size_t size)
- : message_loop_type(type),
- timer_slack(TIMER_SLACK_NONE),
- stack_size(size),
- priority(ThreadPriority::NORMAL) {
-}
+Thread::Options::Options(MessageLoop::Type type, size_t size)
+ : message_loop_type(type), stack_size(size) {}
Thread::Options::Options(const Options& other) = default;
-Thread::Options::~Options() {
-}
+Thread::Options::~Options() = default;
Thread::Thread(const std::string& name)
- :
-#if defined(OS_WIN)
- com_status_(NONE),
-#endif
- stopping_(false),
- running_(false),
- thread_(0),
- id_(kInvalidThreadId),
- id_event_(WaitableEvent::ResetPolicy::MANUAL,
+ : id_event_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED),
- message_loop_(nullptr),
- message_loop_timer_slack_(TIMER_SLACK_NONE),
name_(name),
start_event_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED) {
+ // Only bind the sequence on Start(): the state is constant between
+ // construction and Start() and it's thus valid for Start() to be called on
+ // another sequence as long as every other operation is then performed on that
+ // sequence.
+ owning_sequence_checker_.DetachFromSequence();
}
Thread::~Thread() {
@@ -80,6 +64,8 @@ Thread::~Thread() {
}
bool Thread::Start() {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+
Options options;
#if defined(OS_WIN)
if (com_status_ == STA)
@@ -89,7 +75,11 @@ bool Thread::Start() {
}
bool Thread::StartWithOptions(const Options& options) {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
DCHECK(!message_loop_);
+ DCHECK(!IsRunning());
+ DCHECK(!stopping_) << "Starting a non-joinable thread a second time? That's "
+ << "not allowed!";
#if defined(OS_WIN)
DCHECK((com_status_ != STA) ||
(options.message_loop_type == MessageLoop::TYPE_UI));
@@ -106,32 +96,41 @@ bool Thread::StartWithOptions(const Options& options) {
type = MessageLoop::TYPE_CUSTOM;
message_loop_timer_slack_ = options.timer_slack;
- std::unique_ptr<MessageLoop> message_loop =
+ std::unique_ptr<MessageLoop> message_loop_owned =
MessageLoop::CreateUnbound(type, options.message_pump_factory);
- message_loop_ = message_loop.get();
+ message_loop_ = message_loop_owned.get();
start_event_.Reset();
- // Hold the thread_lock_ while starting a new thread, so that we can make sure
- // that thread_ is populated before the newly created thread accesses it.
+ // Hold |thread_lock_| while starting the new thread to synchronize with
+ // Stop() while it's not guaranteed to be sequenced (until crbug/629139 is
+ // fixed).
{
AutoLock lock(thread_lock_);
- if (!PlatformThread::CreateWithPriority(options.stack_size, this, &thread_,
- options.priority)) {
+ bool success =
+ options.joinable
+ ? PlatformThread::CreateWithPriority(options.stack_size, this,
+ &thread_, options.priority)
+ : PlatformThread::CreateNonJoinableWithPriority(
+ options.stack_size, this, options.priority);
+ if (!success) {
DLOG(ERROR) << "failed to create thread";
message_loop_ = nullptr;
return false;
}
}
- // The ownership of message_loop is managemed by the newly created thread
+ joinable_ = options.joinable;
+
+ // The ownership of |message_loop_| is managed by the newly created thread
// within the ThreadMain.
- ignore_result(message_loop.release());
+ ignore_result(message_loop_owned.release());
DCHECK(message_loop_);
return true;
}
bool Thread::StartAndWaitForTesting() {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
bool result = Start();
if (!result)
return false;
@@ -140,6 +139,7 @@ bool Thread::StartAndWaitForTesting() {
}
bool Thread::WaitUntilThreadStarted() const {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
if (!message_loop_)
return false;
base::ThreadRestrictions::ScopedAllowWait allow_wait;
@@ -147,37 +147,74 @@ bool Thread::WaitUntilThreadStarted() const {
return true;
}
+void Thread::FlushForTesting() {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+ if (!message_loop_)
+ return;
+
+ WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(FROM_HERE,
+ Bind(&WaitableEvent::Signal, Unretained(&done)));
+ done.Wait();
+}
+
void Thread::Stop() {
+ DCHECK(joinable_);
+
+ // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+ // enable this check, until then synchronization with Start() via
+ // |thread_lock_| is required...
+ // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
AutoLock lock(thread_lock_);
- if (thread_.is_null())
- return;
StopSoon();
+ // Can't join if |thread_| is either already gone or non-joinable.
+ if (thread_.is_null())
+ return;
+
// Wait for the thread to exit.
//
- // TODO(darin): Unfortunately, we need to keep message_loop_ around until
+ // TODO(darin): Unfortunately, we need to keep |message_loop_| around until
// the thread exits. Some consumers are abusing the API. Make them stop.
//
PlatformThread::Join(thread_);
thread_ = base::PlatformThreadHandle();
- // The thread should nullify message_loop_ on exit.
+ // The thread should nullify |message_loop_| on exit (note: Join() adds an
+ // implicit memory barrier and no lock is thus required for this check).
DCHECK(!message_loop_);
stopping_ = false;
}
void Thread::StopSoon() {
- // We should only be called on the same thread that started us.
-
- DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+ // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+ // enable this check.
+ // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
if (stopping_ || !message_loop_)
return;
stopping_ = true;
- task_runner()->PostTask(FROM_HERE, base::Bind(&ThreadQuitHelper));
+
+ if (using_external_message_loop_) {
+ // Setting |stopping_| to true above is sufficient for this thread to be
+ // considered "stopped": it never set its |running_| bit, as it has no
+ // ThreadMain of its own.
+ DCHECK(!IsRunning());
+ message_loop_ = nullptr;
+ return;
+ }
+
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&Thread::ThreadQuitHelper, Unretained(this)));
+}
+
+void Thread::DetachFromSequence() {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+ owning_sequence_checker_.DetachFromSequence();
}
PlatformThreadId Thread::GetThreadId() const {
@@ -188,26 +225,36 @@ PlatformThreadId Thread::GetThreadId() const {
}
bool Thread::IsRunning() const {
- // If the thread's already started (i.e. message_loop_ is non-null) and
- // not yet requested to stop (i.e. stopping_ is false) we can just return
- // true. (Note that stopping_ is touched only on the same thread that
- // starts / started the new thread so we need no locking here.)
+ // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+ // enable this check.
+ // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+
+ // If the thread's already started (i.e. |message_loop_| is non-null) and not
+ // yet requested to stop (i.e. |stopping_| is false) we can just return true.
+ // (Note that |stopping_| is touched only on the same sequence that starts /
+ // started the new thread so we need no locking here.)
if (message_loop_ && !stopping_)
return true;
- // Otherwise check the running_ flag, which is set to true by the new thread
+ // Otherwise check the |running_| flag, which is set to true by the new thread
// only while it is inside Run().
AutoLock lock(running_lock_);
return running_;
}
-void Thread::Run(MessageLoop*) {
- RunLoop().Run();
+void Thread::Run(RunLoop* run_loop) {
+ // Overridable protected method to be called from our |thread_| only.
+ DCHECK(id_event_.IsSignaled());
+ DCHECK_EQ(id_, PlatformThread::CurrentId());
+
+ run_loop->Run();
}
+// static
void Thread::SetThreadWasQuitProperly(bool flag) {
lazy_tls_bool.Pointer()->Set(flag);
}
+// static
bool Thread::GetThreadWasQuitProperly() {
bool quit_properly = true;
#ifndef NDEBUG
@@ -216,9 +263,27 @@ bool Thread::GetThreadWasQuitProperly() {
return quit_properly;
}
+void Thread::SetMessageLoop(MessageLoop* message_loop) {
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+ DCHECK(message_loop);
+
+ // Setting |message_loop_| should suffice for this thread to be considered
+ // as "running", until Stop() is invoked.
+ DCHECK(!IsRunning());
+ message_loop_ = message_loop;
+ DCHECK(IsRunning());
+
+ using_external_message_loop_ = true;
+}
+
void Thread::ThreadMain() {
// First, make GetThreadId() available to avoid deadlocks. It could be called
// any place in the following thread initialization code.
+ DCHECK(!id_event_.IsSignaled());
+ // Note: this read of |id_| while |id_event_| isn't signaled is exceptionally
+ // okay because ThreadMain has a happens-after relationship with the other
+ // write in StartWithOptions().
+ DCHECK_EQ(kInvalidThreadId, id_);
id_ = PlatformThread::CurrentId();
DCHECK_NE(kInvalidThreadId, id_);
id_event_.Signal();
@@ -226,12 +291,22 @@ void Thread::ThreadMain() {
// Complete the initialization of our Thread object.
PlatformThread::SetName(name_.c_str());
- // Lazily initialize the message_loop so that it can run on this thread.
+ // Lazily initialize the |message_loop| so that it can run on this thread.
DCHECK(message_loop_);
std::unique_ptr<MessageLoop> message_loop(message_loop_);
message_loop_->BindToCurrentThread();
message_loop_->SetTimerSlack(message_loop_timer_slack_);
+#if defined(OS_POSIX) && !defined(OS_NACL)
+ // Allow threads running a MessageLoopForIO to use FileDescriptorWatcher API.
+ std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher;
+ if (MessageLoopForIO::IsCurrent()) {
+ DCHECK_EQ(message_loop_, MessageLoopForIO::current());
+ file_descriptor_watcher.reset(
+ new FileDescriptorWatcher(MessageLoopForIO::current()));
+ }
+#endif
+
#if defined(OS_WIN)
std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
if (com_status_ != NONE) {
@@ -251,7 +326,9 @@ void Thread::ThreadMain() {
start_event_.Signal();
- Run(message_loop_);
+ RunLoop run_loop;
+ run_loop_ = &run_loop;
+ Run(run_loop_);
{
AutoLock lock(running_lock_);
@@ -266,15 +343,22 @@ void Thread::ThreadMain() {
#endif
if (message_loop->type() != MessageLoop::TYPE_CUSTOM) {
- // Assert that MessageLoop::QuitWhenIdle was called by ThreadQuitHelper.
- // Don't check for custom message pumps, because their shutdown might not
- // allow this.
+ // Assert that RunLoop::QuitWhenIdle was called by ThreadQuitHelper. Don't
+ // check for custom message pumps, because their shutdown might not allow
+ // this.
DCHECK(GetThreadWasQuitProperly());
}
// We can't receive messages anymore.
// (The message loop is destructed at the end of this block)
message_loop_ = nullptr;
+ run_loop_ = nullptr;
+}
+
+void Thread::ThreadQuitHelper() {
+ DCHECK(run_loop_);
+ run_loop_->QuitWhenIdle();
+ SetThreadWasQuitProperly(true);
}
} // namespace base
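
Editor's note: the new FlushForTesting() lets tests drain previously posted tasks without stopping the thread. A hedged sketch of its use (Increment() and |counter| are hypothetical):

  int counter = 0;
  Thread worker("worker");
  worker.Start();
  worker.task_runner()->PostTask(
      FROM_HERE, Bind(&Increment, Unretained(&counter)));
  worker.FlushForTesting();  // blocks until Increment() has run
  EXPECT_EQ(1, counter);
  worker.Stop();
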
diff --git a/base/threading/thread.h b/base/threading/thread.h
index c9a77d7323..01f7d8e250 100644
--- a/base/threading/thread.h
+++ b/base/threading/thread.h
@@ -15,7 +15,9 @@
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/timer_slack.h"
+#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
+#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
@@ -24,6 +26,7 @@
namespace base {
class MessagePump;
+class RunLoop;
// A simple thread abstraction that establishes a MessageLoop on a new thread.
// The consumer uses the MessageLoop of the thread to cause code to execute on
@@ -38,6 +41,18 @@ class MessagePump;
// (1) Thread::CleanUp()
// (2) MessageLoop::~MessageLoop
// (3.b) MessageLoop::DestructionObserver::WillDestroyCurrentMessageLoop
+//
+// This API is not thread-safe: unless indicated otherwise its methods are only
+// valid from the owning sequence (which is the one from which Start() is
+// invoked -- should it differ from the one on which it was constructed).
+//
+// Sometimes it's useful to kick things off on the initial sequence (e.g.
+// construction, Start(), task_runner()), but to then hand the Thread over to a
+// pool of users for the last one of them to destroy it when done. For that use
+// case, Thread::DetachFromSequence() allows the owning sequence to give up
+// ownership. The caller is then responsible to ensure a happens-after
+// relationship between the DetachFromSequence() call and the next use of that
+// Thread object (including ~Thread()).
class BASE_EXPORT Thread : PlatformThread::Delegate {
public:
struct BASE_EXPORT Options {
@@ -50,10 +65,10 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Specifies the type of message loop that will be allocated on the thread.
// This is ignored if message_pump_factory.is_null() is false.
- MessageLoop::Type message_loop_type;
+ MessageLoop::Type message_loop_type = MessageLoop::TYPE_DEFAULT;
// Specifies timer slack for thread message loop.
- TimerSlack timer_slack;
+ TimerSlack timer_slack = TIMER_SLACK_NONE;
// Used to create the MessagePump for the MessageLoop. The callback is Run()
// on the thread. If message_pump_factory.is_null(), then a MessagePump
@@ -64,10 +79,18 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Specifies the maximum stack size that the thread is allowed to use.
// This does not necessarily correspond to the thread's initial stack size.
// A value of 0 indicates that the default maximum should be used.
- size_t stack_size;
+ size_t stack_size = 0;
// Specifies the initial thread priority.
- ThreadPriority priority;
+ ThreadPriority priority = ThreadPriority::NORMAL;
+
+ // If false, the thread will not be joined on destruction. This is intended
+ // for threads that want TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN
+ // semantics. Non-joinable threads can't be joined (must be leaked and
+ // can't be destroyed or Stop()'ed).
+ // TODO(gab): allow non-joinable instances to be deleted without causing
+ // use-after-frees (proposal @ https://crbug.com/629139#c14)
+ bool joinable = true;
};
// Constructor.
@@ -125,12 +148,19 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// carefully for production code.
bool WaitUntilThreadStarted() const;
- // Signals the thread to exit and returns once the thread has exited. After
- // this method returns, the Thread object is completely reset and may be used
- // as if it were newly constructed (i.e., Start may be called again).
+ // Blocks until all tasks previously posted to this thread have been executed.
+ void FlushForTesting();
+
+ // Signals the thread to exit and returns once the thread has exited. The
+ // Thread object is completely reset and may be used as if it were newly
+ // constructed (i.e., Start may be called again). Can only be called if
+ // |joinable_|.
//
// Stop may be called multiple times and is simply ignored if the thread is
- // already stopped.
+ // already stopped or currently stopping.
+ //
+ // Start/Stop are not thread-safe and callers that desire to invoke them from
+ // different threads must ensure mutual exclusion.
//
// NOTE: If you are a consumer of Thread, it is not necessary to call this
// before deleting your Thread objects, as the destructor will do it.
@@ -145,11 +175,17 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// deadlock on Windows with printer worker thread. In any other case, Stop()
// should be used.
//
- // StopSoon should not be called multiple times as it is risky to do so. It
- // could cause a timing issue in message_loop() access. Call Stop() to reset
- // the thread object once it is known that the thread has quit.
+ // Call Stop() to reset the thread object once it is known that the thread has
+ // quit.
void StopSoon();
+ // Detaches the owning sequence, indicating that the next call to this API
+ // (including ~Thread()) can happen from a different sequence (to which it
+ // will be rebound). This call itself must happen on the current owning
+ // sequence and the caller must ensure the next API call has a happens-after
+ // relationship with this one.
+ void DetachFromSequence();
+
// Returns the message loop for this thread. Use the MessageLoop's
// PostTask methods to execute code on the thread. This only returns
// non-null after a successful call to Start. After Stop has been called,
@@ -158,29 +194,52 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// NOTE: You must not call this MessageLoop's Quit method directly. Use
// the Thread's Stop method instead.
//
- MessageLoop* message_loop() const { return message_loop_; }
+ // In addition to this Thread's owning sequence, this can also safely be
+ // called from the underlying thread itself.
+ MessageLoop* message_loop() const {
+ // This class doesn't provide synchronization around |message_loop_| and as
+ // such only the owner should access it (and the underlying thread which
+ // never sees it before it's set). In practice, many callers are coming from
+ // unrelated threads but provide their own implicit (e.g. memory barriers
+ // from task posting) or explicit (e.g. locks) synchronization making the
+ // access of |message_loop_| safe... Changing all of those callers is
+ // unfeasible; instead verify that they can reliably see
+ // |message_loop_ != nullptr| without synchronization as a proof that their
+ // external synchronization catches the unsynchronized effects of Start().
+ // TODO(gab): Despite all of the above this test has to be disabled for now
+ // per crbug.com/629139#c6.
+ // DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
+ // (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
+ // message_loop_);
+ return message_loop_;
+ }
// Returns a TaskRunner for this thread. Use the TaskRunner's PostTask
// methods to execute code on the thread. Returns nullptr if the thread is not
// running (e.g. before Start or after Stop have been called). Callers can
// hold on to this even after the thread is gone; in this situation, attempts
// to PostTask() will fail.
+ //
+ // In addition to this Thread's owning sequence, this can also safely be
+ // called from the underlying thread itself.
scoped_refptr<SingleThreadTaskRunner> task_runner() const {
+ // Refer to the DCHECK and comment inside |message_loop()|.
+ DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
+ (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
+ message_loop_);
return message_loop_ ? message_loop_->task_runner() : nullptr;
}
// Returns the name of this thread (for display in debugger too).
const std::string& thread_name() const { return name_; }
- // The native thread handle.
- PlatformThreadHandle thread_handle() { return thread_; }
-
// Returns the thread ID. Should not be called before the first Start*()
// call. Keeps on returning the same ID even after a Stop() call. The next
// Start*() call renews the ID.
//
// WARNING: This function will block if the thread hasn't started yet.
//
+ // This method is thread-safe.
PlatformThreadId GetThreadId() const;
// Returns true if the thread has been started, and not yet stopped.
@@ -190,8 +249,8 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Called just prior to starting the message loop
virtual void Init() {}
- // Called to start the message loop
- virtual void Run(MessageLoop* message_loop);
+ // Called to start the run loop
+ virtual void Run(RunLoop* run_loop);
// Called just after the message loop ends
virtual void CleanUp() {}
@@ -199,8 +258,11 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
static void SetThreadWasQuitProperly(bool flag);
static bool GetThreadWasQuitProperly();
- void set_message_loop(MessageLoop* message_loop) {
- message_loop_ = message_loop;
+ // Bind this Thread to an existing MessageLoop instead of starting a new one.
+ void SetMessageLoop(MessageLoop* message_loop);
+
+ bool using_external_message_loop() const {
+ return using_external_message_loop_;
}
private:
@@ -215,19 +277,25 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// PlatformThread::Delegate methods:
void ThreadMain() override;
+ void ThreadQuitHelper();
+
#if defined(OS_WIN)
// Whether this thread needs to initialize COM, and if so, in what mode.
- ComStatus com_status_;
+ ComStatus com_status_ = NONE;
#endif
+ // Mirrors the Options::joinable field used to start this thread. Verified
+ // on Stop() -- non-joinable threads can't be joined (must be leaked).
+ bool joinable_ = true;
+
// If true, we're in the middle of stopping, and shouldn't access
// |message_loop_|. It may be non-null and invalid.
// Should only be written on the thread that created this thread; reads from
// other threads may observe stale data.
- bool stopping_;
+ bool stopping_ = false;
// True while inside of Run().
- bool running_;
+ bool running_ = false;
mutable base::Lock running_lock_; // Protects |running_|.
// The thread's handle.
@@ -235,24 +303,35 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
mutable base::Lock thread_lock_; // Protects |thread_|.
// The thread's id once it has started.
- PlatformThreadId id_;
- mutable WaitableEvent id_event_; // Protects |id_|.
-
- // The thread's message loop. Valid only while the thread is alive. Set
- // by the created thread.
- MessageLoop* message_loop_;
+ PlatformThreadId id_ = kInvalidThreadId;
+ // Protects |id_| which must only be read while it's signaled.
+ mutable WaitableEvent id_event_;
+
+ // The thread's MessageLoop and RunLoop. Valid only while the thread is alive.
+ // Set by the created thread.
+ MessageLoop* message_loop_ = nullptr;
+ RunLoop* run_loop_ = nullptr;
+
+ // True only if |message_loop_| was externally provided by |SetMessageLoop()|,
+ // in which case this Thread has no underlying |thread_| and should merely
+ // drop |message_loop_| on Stop(). In that event, this remains true after
+ // Stop() was invoked so that subclasses can use this state to build their own
+ // cleanup logic as required.
+ bool using_external_message_loop_ = false;
// Stores Options::timer_slack_ until the message loop has been bound to
// a thread.
- TimerSlack message_loop_timer_slack_;
+ TimerSlack message_loop_timer_slack_ = TIMER_SLACK_NONE;
// The name of the thread. Used for debugging purposes.
- std::string name_;
+ const std::string name_;
// Signaled when the created thread gets ready to use the message loop.
mutable WaitableEvent start_event_;
- friend void ThreadQuitHelper();
+ // This class is not thread-safe, use this to verify access from the owning
+ // sequence of the Thread.
+ SequenceChecker owning_sequence_checker_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
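
Editor's note: the ownership hand-off described in the class comment above looks roughly like this in practice (PassToPool() is hypothetical):

  std::unique_ptr<Thread> thread(new Thread("shared"));
  thread->Start();               // still on the owning sequence
  thread->DetachFromSequence();  // explicitly give up ownership
  // Whoever touches |thread| next (including deleting it) becomes the new
  // owner; the caller must guarantee a happens-after relationship between
  // this call and that next use.
  PassToPool(std::move(thread));
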
diff --git a/base/threading/thread_checker.h b/base/threading/thread_checker.h
index 1d970f093e..1d4eb1c7b0 100644
--- a/base/threading/thread_checker.h
+++ b/base/threading/thread_checker.h
@@ -8,16 +8,6 @@
#include "base/logging.h"
#include "base/threading/thread_checker_impl.h"
-// Apart from debug builds, we also enable the thread checker in
-// builds with DCHECK_ALWAYS_ON so that trybots and waterfall bots
-// with this define will get the same level of thread checking as
-// debug bots.
-#if DCHECK_IS_ON()
-#define ENABLE_THREAD_CHECKER 1
-#else
-#define ENABLE_THREAD_CHECKER 0
-#endif
-
namespace base {
// Do nothing implementation, for use in release mode.
@@ -63,16 +53,20 @@ class ThreadCheckerDoNothing {
// ThreadChecker thread_checker_;
// }
//
+// Note that, when enabled, CalledOnValidThread() returns false when called from
+// tasks posted to SingleThreadTaskRunners bound to different sequences, even if
+// the tasks happen to run on the same thread (e.g. two independent TaskRunners
+// with ExecutionMode::SINGLE_THREADED on the TaskScheduler that happen to share
+// a thread).
+//
// In Release mode, CalledOnValidThread will always return true.
-#if ENABLE_THREAD_CHECKER
+#if DCHECK_IS_ON()
class ThreadChecker : public ThreadCheckerImpl {
};
#else
class ThreadChecker : public ThreadCheckerDoNothing {
};
-#endif // ENABLE_THREAD_CHECKER
-
-#undef ENABLE_THREAD_CHECKER
+#endif // DCHECK_IS_ON()
} // namespace base
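
Editor's note: for reference, the pattern the class comment alludes to, with the new sequence caveat in mind (MyCounter is illustrative):

  class MyCounter {
   public:
    void Increment() {
      // Fails a DCHECK when called from another thread, or from a task
      // bound to a different sequence even if it happens to run on this
      // thread.
      DCHECK(thread_checker_.CalledOnValidThread());
      ++count_;
    }

   private:
    int count_ = 0;
    base::ThreadChecker thread_checker_;
  };
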
diff --git a/base/threading/thread_checker_impl.cc b/base/threading/thread_checker_impl.cc
index eb87bae772..d5ccbdb943 100644
--- a/base/threading/thread_checker_impl.cc
+++ b/base/threading/thread_checker_impl.cc
@@ -4,31 +4,54 @@
#include "base/threading/thread_checker_impl.h"
+#include "base/threading/thread_task_runner_handle.h"
+
namespace base {
-ThreadCheckerImpl::ThreadCheckerImpl()
- : valid_thread_id_() {
- EnsureThreadIdAssigned();
+ThreadCheckerImpl::ThreadCheckerImpl() {
+ AutoLock auto_lock(lock_);
+ EnsureAssigned();
}
-ThreadCheckerImpl::~ThreadCheckerImpl() {}
+ThreadCheckerImpl::~ThreadCheckerImpl() = default;
bool ThreadCheckerImpl::CalledOnValidThread() const {
- EnsureThreadIdAssigned();
AutoLock auto_lock(lock_);
- return valid_thread_id_ == PlatformThread::CurrentRef();
+ EnsureAssigned();
+
+ // Always return true when called from the task from which this
+ // ThreadCheckerImpl was assigned to a thread.
+ if (task_token_ == TaskToken::GetForCurrentThread())
+ return true;
+
+ // If this ThreadCheckerImpl is bound to a valid SequenceToken, it must be
+ // equal to the current SequenceToken and there must be a registered
+ // ThreadTaskRunnerHandle. Otherwise, the fact that the current task runs on
+ // the thread to which this ThreadCheckerImpl is bound is fortuitous.
+ if (sequence_token_.IsValid() &&
+ (sequence_token_ != SequenceToken::GetForCurrentThread() ||
+ !ThreadTaskRunnerHandle::IsSet())) {
+ return false;
+ }
+
+ return thread_id_ == PlatformThread::CurrentRef();
}
void ThreadCheckerImpl::DetachFromThread() {
AutoLock auto_lock(lock_);
- valid_thread_id_ = PlatformThreadRef();
+ thread_id_ = PlatformThreadRef();
+ task_token_ = TaskToken();
+ sequence_token_ = SequenceToken();
}
-void ThreadCheckerImpl::EnsureThreadIdAssigned() const {
- AutoLock auto_lock(lock_);
- if (valid_thread_id_.is_null()) {
- valid_thread_id_ = PlatformThread::CurrentRef();
- }
+void ThreadCheckerImpl::EnsureAssigned() const {
+ lock_.AssertAcquired();
+ if (!thread_id_.is_null())
+ return;
+
+ thread_id_ = PlatformThread::CurrentRef();
+ task_token_ = TaskToken::GetForCurrentThread();
+ sequence_token_ = SequenceToken::GetForCurrentThread();
}
} // namespace base
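
Editor's note: a short sketch of the detach-then-rebind behavior the code above implements:

  base::ThreadCheckerImpl checker;  // EnsureAssigned(): bound to thread A
  checker.DetachFromThread();       // thread, task and sequence tokens cleared
  // On thread B, the first CalledOnValidThread() re-runs EnsureAssigned()
  // and rebinds the checker to thread B; thread A now gets false.
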
diff --git a/base/threading/thread_checker_impl.h b/base/threading/thread_checker_impl.h
index c92e143db0..13193d1299 100644
--- a/base/threading/thread_checker_impl.h
+++ b/base/threading/thread_checker_impl.h
@@ -7,17 +7,18 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/sequence_token.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
namespace base {
-// Real implementation of ThreadChecker, for use in debug mode, or
-// for temporary use in release mode (e.g. to CHECK on a threading issue
-// seen only in the wild).
+// Real implementation of ThreadChecker, for use in debug mode, or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
//
-// Note: You should almost always use the ThreadChecker class to get the
-// right version for your build configuration.
+// Note: You should almost always use the ThreadChecker class to get the right
+// version for your build configuration.
class BASE_EXPORT ThreadCheckerImpl {
public:
ThreadCheckerImpl();
@@ -31,12 +32,29 @@ class BASE_EXPORT ThreadCheckerImpl {
void DetachFromThread();
private:
- void EnsureThreadIdAssigned() const;
+ void EnsureAssigned() const;
+ // Members are mutable so that CalledOnValidThread() can set them.
+
+ // Synchronizes access to all members.
mutable base::Lock lock_;
- // This is mutable so that CalledOnValidThread can set it.
- // It's guarded by |lock_|.
- mutable PlatformThreadRef valid_thread_id_;
+
+ // Thread on which CalledOnValidThread() may return true.
+ mutable PlatformThreadRef thread_id_;
+
+ // TaskToken for which CalledOnValidThread() always returns true. This allows
+ // CalledOnValidThread() to return true when called multiple times from the
+ // same task, even if it's not running in a single-threaded context itself
+ // (allowing usage of ThreadChecker/NonThreadSafe objects on the stack in the
+ // scope of one-off tasks). Note: CalledOnValidThread() may return true even
+ // if the current TaskToken is not equal to this.
+ mutable TaskToken task_token_;
+
+ // SequenceToken for which CalledOnValidThread() may return true. Used to
+ // ensure that CalledOnValidThread() doesn't return true for TaskScheduler
+ // tasks that happen to run on the same thread but weren't posted to the same
+ // SingleThreadTaskRunner.
+ mutable SequenceToken sequence_token_;
};
} // namespace base
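
Editor's note: the |task_token_| member exists for the one-off-task case described in its comment; roughly:

  void OneOffTask() {
    // Bound to the current task's TaskToken at construction.
    base::ThreadCheckerImpl checker;
    DCHECK(checker.CalledOnValidThread());  // true anywhere in this task,
    // even on a TaskScheduler thread with no single-thread guarantee.
  }
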
diff --git a/base/threading/thread_checker_unittest.cc b/base/threading/thread_checker_unittest.cc
index bc5b1e473a..96455e66c7 100644
--- a/base/threading/thread_checker_unittest.cc
+++ b/base/threading/thread_checker_unittest.cc
@@ -2,180 +2,194 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/threading/thread_checker.h"
-
#include <memory>
-#include "base/logging.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_token.h"
+#include "base/test/test_simple_task_runner.h"
#include "base/threading/simple_thread.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "testing/gtest/include/gtest/gtest.h"
-// Duplicated from base/threading/thread_checker.h so that we can be
-// good citizens there and undef the macro.
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-#define ENABLE_THREAD_CHECKER 1
-#else
-#define ENABLE_THREAD_CHECKER 0
-#endif
-
namespace base {
-
namespace {
-// Simple class to exercise the basics of ThreadChecker.
-// Both the destructor and DoStuff should verify that they were
-// called on the same thread as the constructor.
-class ThreadCheckerClass : public ThreadChecker {
+// A thread that runs a callback.
+class RunCallbackThread : public SimpleThread {
public:
- ThreadCheckerClass() {}
-
- // Verifies that it was called on the same thread as the constructor.
- void DoStuff() {
- DCHECK(CalledOnValidThread());
- }
-
- void DetachFromThread() {
- ThreadChecker::DetachFromThread();
- }
-
- static void MethodOnDifferentThreadImpl();
- static void DetachThenCallFromDifferentThreadImpl();
+ explicit RunCallbackThread(const Closure& callback)
+ : SimpleThread("RunCallbackThread"), callback_(callback) {}
private:
- DISALLOW_COPY_AND_ASSIGN(ThreadCheckerClass);
-};
+ // SimpleThread:
+ void Run() override { callback_.Run(); }
-// Calls ThreadCheckerClass::DoStuff on another thread.
-class CallDoStuffOnThread : public base::SimpleThread {
- public:
- explicit CallDoStuffOnThread(ThreadCheckerClass* thread_checker_class)
- : SimpleThread("call_do_stuff_on_thread"),
- thread_checker_class_(thread_checker_class) {
- }
+ const Closure callback_;
- void Run() override { thread_checker_class_->DoStuff(); }
+ DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
+};
- private:
- ThreadCheckerClass* thread_checker_class_;
+// Runs a callback on a new thread synchronously.
+void RunCallbackOnNewThreadSynchronously(const Closure& callback) {
+ RunCallbackThread run_callback_thread(callback);
+ run_callback_thread.Start();
+ run_callback_thread.Join();
+}
- DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread);
-};
+void ExpectCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
+ ASSERT_TRUE(thread_checker);
-// Deletes ThreadCheckerClass on a different thread.
-class DeleteThreadCheckerClassOnThread : public base::SimpleThread {
- public:
- explicit DeleteThreadCheckerClassOnThread(
- ThreadCheckerClass* thread_checker_class)
- : SimpleThread("delete_thread_checker_class_on_thread"),
- thread_checker_class_(thread_checker_class) {
- }
+ // This should bind |thread_checker| to the current thread if it wasn't
+ // already bound to a thread.
+ EXPECT_TRUE(thread_checker->CalledOnValidThread());
- void Run() override { thread_checker_class_.reset(); }
+ // Since |thread_checker| is now bound to the current thread, another call to
+ // CalledOnValidThread() should return true.
+ EXPECT_TRUE(thread_checker->CalledOnValidThread());
+}
- private:
- std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
+void ExpectNotCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
+ ASSERT_TRUE(thread_checker);
+ EXPECT_FALSE(thread_checker->CalledOnValidThread());
+}
- DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
-};
+void ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle(
+ ThreadCheckerImpl* thread_checker,
+ SequenceToken sequence_token) {
+ ThreadTaskRunnerHandle thread_task_runner_handle(
+ make_scoped_refptr(new TestSimpleTaskRunner));
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ ExpectNotCalledOnValidThread(thread_checker);
+}
} // namespace
-TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
- std::unique_ptr<ThreadCheckerClass> thread_checker_class(
- new ThreadCheckerClass);
+TEST(ThreadCheckerTest, AllowedSameThreadNoSequenceToken) {
+ ThreadCheckerImpl thread_checker;
+ EXPECT_TRUE(thread_checker.CalledOnValidThread());
+}
- // Verify that DoStuff doesn't assert.
- thread_checker_class->DoStuff();
+TEST(ThreadCheckerTest,
+ AllowedSameThreadAndSequenceDifferentTasksWithThreadTaskRunnerHandle) {
+ ThreadTaskRunnerHandle thread_task_runner_handle(
+ make_scoped_refptr(new TestSimpleTaskRunner));
- // Verify that the destructor doesn't assert.
- thread_checker_class.reset();
-}
+ std::unique_ptr<ThreadCheckerImpl> thread_checker;
+ const SequenceToken sequence_token = SequenceToken::Create();
-TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
- std::unique_ptr<ThreadCheckerClass> thread_checker_class(
- new ThreadCheckerClass);
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ thread_checker.reset(new ThreadCheckerImpl);
+ }
- // Verify that the destructor doesn't assert
- // when called on a different thread.
- DeleteThreadCheckerClassOnThread delete_on_thread(
- thread_checker_class.release());
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ EXPECT_TRUE(thread_checker->CalledOnValidThread());
+ }
+}
- delete_on_thread.Start();
- delete_on_thread.Join();
+TEST(ThreadCheckerTest,
+ AllowedSameThreadSequenceAndTaskNoThreadTaskRunnerHandle) {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ ThreadCheckerImpl thread_checker;
+ EXPECT_TRUE(thread_checker.CalledOnValidThread());
}
-TEST(ThreadCheckerTest, DetachFromThread) {
- std::unique_ptr<ThreadCheckerClass> thread_checker_class(
- new ThreadCheckerClass);
+TEST(ThreadCheckerTest,
+ DisallowedSameThreadAndSequenceDifferentTasksNoThreadTaskRunnerHandle) {
+ std::unique_ptr<ThreadCheckerImpl> thread_checker;
- // Verify that DoStuff doesn't assert when called on a different thread after
- // a call to DetachFromThread.
- thread_checker_class->DetachFromThread();
- CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ thread_checker.reset(new ThreadCheckerImpl);
+ }
- call_on_thread.Start();
- call_on_thread.Join();
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ EXPECT_FALSE(thread_checker->CalledOnValidThread());
+ }
}
-#if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerTest, DisallowedDifferentThreadsNoSequenceToken) {
+ ThreadCheckerImpl thread_checker;
+ RunCallbackOnNewThreadSynchronously(
+ Bind(&ExpectNotCalledOnValidThread, Unretained(&thread_checker)));
+}
-void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
- std::unique_ptr<ThreadCheckerClass> thread_checker_class(
- new ThreadCheckerClass);
+TEST(ThreadCheckerTest, DisallowedDifferentThreadsSameSequence) {
+ ThreadTaskRunnerHandle thread_task_runner_handle(
+ make_scoped_refptr(new TestSimpleTaskRunner));
+ const SequenceToken sequence_token(SequenceToken::Create());
- // DoStuff should assert in debug builds only when called on a
- // different thread.
- CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ ThreadCheckerImpl thread_checker;
+ EXPECT_TRUE(thread_checker.CalledOnValidThread());
- call_on_thread.Start();
- call_on_thread.Join();
+ RunCallbackOnNewThreadSynchronously(Bind(
+ &ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle,
+ Unretained(&thread_checker), sequence_token));
}
-#if ENABLE_THREAD_CHECKER
-TEST(ThreadCheckerDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
- ASSERT_DEATH({
- ThreadCheckerClass::MethodOnDifferentThreadImpl();
- }, "");
-}
-#else
-TEST(ThreadCheckerTest, MethodAllowedOnDifferentThreadInRelease) {
- ThreadCheckerClass::MethodOnDifferentThreadImpl();
-}
-#endif // ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerTest, DisallowedSameThreadDifferentSequence) {
+ std::unique_ptr<ThreadCheckerImpl> thread_checker;
-void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
- std::unique_ptr<ThreadCheckerClass> thread_checker_class(
- new ThreadCheckerClass);
+ ThreadTaskRunnerHandle thread_task_runner_handle(
+ make_scoped_refptr(new TestSimpleTaskRunner));
- // DoStuff doesn't assert when called on a different thread
- // after a call to DetachFromThread.
- thread_checker_class->DetachFromThread();
- CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+ {
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ thread_checker.reset(new ThreadCheckerImpl);
+ }
- call_on_thread.Start();
- call_on_thread.Join();
+ {
+ // Different SequenceToken.
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ EXPECT_FALSE(thread_checker->CalledOnValidThread());
+ }
- // DoStuff should assert in debug builds only after moving to
- // another thread.
- thread_checker_class->DoStuff();
+ // No SequenceToken.
+ EXPECT_FALSE(thread_checker->CalledOnValidThread());
}
-#if ENABLE_THREAD_CHECKER
-TEST(ThreadCheckerDeathTest, DetachFromThreadInDebug) {
- ASSERT_DEATH({
- ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
- }, "");
-}
-#else
-TEST(ThreadCheckerTest, DetachFromThreadInRelease) {
- ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+TEST(ThreadCheckerTest, DetachFromThread) {
+ ThreadCheckerImpl thread_checker;
+ thread_checker.DetachFromThread();
+
+ // Verify that CalledOnValidThread() returns true when called on a different
+ // thread after a call to DetachFromThread().
+ RunCallbackOnNewThreadSynchronously(
+ Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+
+ EXPECT_FALSE(thread_checker.CalledOnValidThread());
}
-#endif // ENABLE_THREAD_CHECKER
-#endif // GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerTest, DetachFromThreadWithSequenceToken) {
+ ThreadTaskRunnerHandle thread_task_runner_handle(
+ make_scoped_refptr(new TestSimpleTaskRunner));
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+ ThreadCheckerImpl thread_checker;
+ thread_checker.DetachFromThread();
-// Just in case we ever get lumped together with other compilation units.
-#undef ENABLE_THREAD_CHECKER
+ // Verify that CalledOnValidThread() returns true when called on a different
+ // thread after a call to DetachFromThread().
+ RunCallbackOnNewThreadSynchronously(
+ Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+
+ EXPECT_FALSE(thread_checker.CalledOnValidThread());
+}
} // namespace base
diff --git a/base/threading/thread_local.h b/base/threading/thread_local.h
index f40420cd2f..cad9add3a9 100644
--- a/base/threading/thread_local.h
+++ b/base/threading/thread_local.h
@@ -2,35 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// WARNING: Thread local storage is a bit tricky to get right. Please make
-// sure that this is really the proper solution for what you're trying to
-// achieve. Don't prematurely optimize, most likely you can just use a Lock.
+// WARNING: Thread local storage is a bit tricky to get right. Please make sure
+// that this is really the proper solution for what you're trying to achieve.
+// Don't prematurely optimize, most likely you can just use a Lock.
//
-// These classes implement a wrapper around the platform's TLS storage
-// mechanism. On construction, they will allocate a TLS slot, and free the
-// TLS slot on destruction. No memory management (creation or destruction) is
-// handled. This means for uses of ThreadLocalPointer, you must correctly
-// manage the memory yourself, these classes will not destroy the pointer for
-// you. There are no at-thread-exit actions taken by these classes.
+// These classes implement a wrapper around ThreadLocalStorage::Slot. On
+// construction, they will allocate a TLS slot, and free the TLS slot on
+// destruction. No memory management (creation or destruction) is handled. This
+// means for uses of ThreadLocalPointer, you must correctly manage the memory
+// yourself, these classes will not destroy the pointer for you. There are no
+// at-thread-exit actions taken by these classes.
//
-// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
-// destruction, so memory management must be handled elsewhere. The first call
-// to Get() on a thread will return NULL. You can update the pointer with a
-// call to Set().
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a call
+// to Set().
//
-// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
// been set otherwise with Set().
//
-// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
-// once it has been created. If you want to dynamically create an instance,
-// you must of course properly deal with safety and race conditions. This
-// means a function-level static initializer is generally inappropiate.
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance, you
+// must of course properly deal with safety and race conditions. This means a
+// function-level static initializer is generally inappropriate.
//
-// In Android, the system TLS is limited, the implementation is backed with
-// ThreadLocalStorage.
+// On Android, the system TLS is limited.
//
// Example usage:
-// // My class is logically attached to a single thread. We cache a pointer
+// // My class is logically attached to a single thread. We cache a pointer
// // on the thread it was created on, so we can implement current().
// MyClass::MyClass() {
// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
@@ -51,76 +50,42 @@
#ifndef BASE_THREADING_THREAD_LOCAL_H_
#define BASE_THREADING_THREAD_LOCAL_H_
-#include "base/base_export.h"
#include "base/macros.h"
#include "base/threading/thread_local_storage.h"
-#include "build/build_config.h"
-
-#if defined(OS_POSIX)
-#include <pthread.h>
-#endif
namespace base {
-namespace internal {
-
-// Helper functions that abstract the cross-platform APIs. Do not use directly.
-struct BASE_EXPORT ThreadLocalPlatform {
-#if defined(OS_WIN)
- typedef unsigned long SlotType;
-#elif defined(OS_ANDROID)
- typedef ThreadLocalStorage::StaticSlot SlotType;
-#elif defined(OS_POSIX)
- typedef pthread_key_t SlotType;
-#endif
-
- static void AllocateSlot(SlotType* slot);
- static void FreeSlot(SlotType slot);
- static void* GetValueFromSlot(SlotType slot);
- static void SetValueInSlot(SlotType slot, void* value);
-};
-
-} // namespace internal
template <typename Type>
class ThreadLocalPointer {
public:
- ThreadLocalPointer() : slot_() {
- internal::ThreadLocalPlatform::AllocateSlot(&slot_);
- }
-
- ~ThreadLocalPointer() {
- internal::ThreadLocalPlatform::FreeSlot(slot_);
- }
+ ThreadLocalPointer() = default;
+ ~ThreadLocalPointer() = default;
Type* Get() {
- return static_cast<Type*>(
- internal::ThreadLocalPlatform::GetValueFromSlot(slot_));
+ return static_cast<Type*>(slot_.Get());
}
void Set(Type* ptr) {
- internal::ThreadLocalPlatform::SetValueInSlot(
- slot_, const_cast<void*>(static_cast<const void*>(ptr)));
+ slot_.Set(const_cast<void*>(static_cast<const void*>(ptr)));
}
private:
- typedef internal::ThreadLocalPlatform::SlotType SlotType;
-
- SlotType slot_;
+ ThreadLocalStorage::Slot slot_;
DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
};
class ThreadLocalBoolean {
public:
- ThreadLocalBoolean() {}
- ~ThreadLocalBoolean() {}
+ ThreadLocalBoolean() = default;
+ ~ThreadLocalBoolean() = default;
bool Get() {
- return tlp_.Get() != NULL;
+ return tlp_.Get() != nullptr;
}
void Set(bool val) {
- tlp_.Set(val ? this : NULL);
+ tlp_.Set(val ? this : nullptr);
}
private:
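
Editor's note: these wrappers are typically combined with LazyInstance, as thread.cc does above with |lazy_tls_bool|; condensed:

  base::LazyInstance<base::ThreadLocalBoolean>::Leaky g_quit_properly =
      LAZY_INSTANCE_INITIALIZER;

  void SetQuitProperly(bool flag) { g_quit_properly.Pointer()->Set(flag); }
  bool GetQuitProperly() {
    return g_quit_properly.Pointer()->Get();  // false if never Set()
  }
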
diff --git a/base/threading/thread_local_posix.cc b/base/threading/thread_local_posix.cc
deleted file mode 100644
index 8bc46ad190..0000000000
--- a/base/threading/thread_local_posix.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/threading/thread_local.h"
-
-#include <pthread.h>
-
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if !defined(OS_ANDROID)
-
-namespace base {
-namespace internal {
-
-// static
-void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
- int error = pthread_key_create(slot, NULL);
- CHECK_EQ(error, 0);
-}
-
-// static
-void ThreadLocalPlatform::FreeSlot(SlotType slot) {
- int error = pthread_key_delete(slot);
- DCHECK_EQ(0, error);
-}
-
-// static
-void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
- return pthread_getspecific(slot);
-}
-
-// static
-void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
- int error = pthread_setspecific(slot, value);
- DCHECK_EQ(error, 0);
-}
-
-} // namespace internal
-} // namespace base
-
-#endif // !defined(OS_ANDROID)
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
index a7eb527888..48c1dd58c2 100644
--- a/base/threading/thread_local_storage.cc
+++ b/base/threading/thread_local_storage.cc
@@ -6,10 +6,57 @@
#include "base/atomicops.h"
#include "base/logging.h"
+#include "base/synchronization/lock.h"
#include "build/build_config.h"
using base::internal::PlatformThreadLocalStorage;
+// Chrome Thread Local Storage (TLS)
+//
+// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
+// and allows us to control the slot limits instead of being at the mercy of the
+// platform. To do this, Chrome TLS replicates an array commonly found in the OS
+// thread metadata.
+//
+// Overview:
+//
+//   OS TLS Slots       Per-Thread                 Per-Process Global
+//     ...
+//     []             Chrome TLS Array           Chrome TLS Metadata
+//     [] ----------> [][][][][ ][][][][]        [][][][][ ][][][][]
+//     []                      |                          |
+//     ...                     V                          V
+//                      Metadata Version         Slot Information
+//                         Your Data!
+//
+// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
+// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
+// array matches the length of the per-process global metadata array.
+//
+// A per-process global TLS metadata array tracks information about each item in
+// the per-thread array:
+// * Status: Tracks if the slot is allocated or free to assign.
+// * Destructor: An optional destructor to call on thread destruction for that
+// specific slot.
+// * Version: Tracks the current version of the TLS slot. Each TLS slot
+// allocation is associated with a unique version number.
+//
+// Most OS TLS APIs guarantee that a newly allocated TLS slot is
+// initialized to 0 for all threads. The Chrome TLS system provides
+// this guarantee by tracking the version for each TLS slot here
+// on each per-thread Chrome TLS array entry. Threads that access
+// a slot with a mismatched version will receive 0 as their value.
+// The metadata version is incremented when the client frees a
+// slot. The per-thread metadata version is updated when a client
+// writes to the slot. This scheme allows for constant time
+// invalidation and avoids the need to iterate through each Chrome
+// TLS array to mark the slot as zero.
+//
+// Just like an OS TLS API, clients of the Chrome TLS are responsible for
+// managing any necessary lifetime of the data in their slots. The only
+// convenience provided is automatic destruction when a thread ends. If a client
+// frees a slot, that client is responsible for destroying the data in the slot.
+
namespace {
// In order to make TLS destructors work, we need to keep around a function
// pointer to the destructor for each slot. We keep this array of pointers in a
@@ -18,37 +65,42 @@ namespace {
// hold a pointer to a per-thread array (table) of slots that we allocate to
// Chromium consumers.
-// g_native_tls_key is the one native TLS that we use. It stores our table.
+// g_native_tls_key is the one native TLS that we use. It stores our table.
base::subtle::Atomic32 g_native_tls_key =
PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
-// g_last_used_tls_key is the high-water-mark of allocated thread local storage.
-// Each allocation is an index into our g_tls_destructors[]. Each such index is
-// assigned to the instance variable slot_ in a ThreadLocalStorage::Slot
-// instance. We reserve the value slot_ == 0 to indicate that the corresponding
-// instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called,
-// etc.). This reserved use of 0 is then stated as the initial value of
-// g_last_used_tls_key, so that the first issued index will be 1.
-base::subtle::Atomic32 g_last_used_tls_key = 0;
+// The maximum number of slots in our thread local storage stack.
+constexpr int kThreadLocalStorageSize = 256;
+constexpr int kInvalidSlotValue = -1;
+
+enum TlsStatus {
+ FREE,
+ IN_USE,
+};
+
+struct TlsMetadata {
+ TlsStatus status;
+ base::ThreadLocalStorage::TLSDestructorFunc destructor;
+ uint32_t version;
+};
-// The maximum number of 'slots' in our thread local storage stack.
-const int kThreadLocalStorageSize = 256;
+struct TlsVectorEntry {
+ void* data;
+ uint32_t version;
+};
+
+// This lock isn't needed until after we've constructed the per-thread TLS
+// vector, so it's safe to use.
+base::Lock* GetTLSMetadataLock() {
+ static auto* lock = new base::Lock();
+ return lock;
+}
+TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
+size_t g_last_assigned_slot = 0;
// The maximum number of times to try to clear slots by calling destructors.
// Use pthread naming convention for clarity.
-const int kMaxDestructorIterations = kThreadLocalStorageSize;
-
-// An array of destructor function pointers for the slots. If a slot has a
-// destructor, it will be stored in its corresponding entry in this array.
-// The elements are volatile to ensure that when the compiler reads the value
-// to potentially call the destructor, it does so once, and that value is tested
-// for null-ness and then used. Yes, that would be a weird de-optimization,
-// but I can imagine some register machines where it was just as easy to
-// re-fetch an array element, and I want to be sure a call to free the key
-// (i.e., null out the destructor entry) that happens on a separate thread can't
-// hurt the racy calls to the destructors on another thread.
-volatile base::ThreadLocalStorage::TLSDestructorFunc
- g_tls_destructors[kThreadLocalStorageSize];
+constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
// This function is called to initialize our entire Chromium TLS system.
// It may be called very early, and we need to complete most all of the setup
@@ -56,7 +108,7 @@ volatile base::ThreadLocalStorage::TLSDestructorFunc
// recursively depend on this initialization.
// As a result, we use Atomics, and avoid anything (like a singleton) that might
// require memory allocations.
-void** ConstructTlsVector() {
+TlsVectorEntry* ConstructTlsVector() {
PlatformThreadLocalStorage::TLSKey key =
base::subtle::NoBarrier_Load(&g_native_tls_key);
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
@@ -73,8 +125,8 @@ void** ConstructTlsVector() {
key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
PlatformThreadLocalStorage::FreeTLS(tmp);
}
- // Atomically test-and-set the tls_key. If the key is
- // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+ // Atomically test-and-set the tls_key. If the key is
+ // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
// another thread already did our dirty work.
if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
static_cast<PlatformThreadLocalStorage::TLSKey>(
@@ -90,39 +142,38 @@ void** ConstructTlsVector() {
}
CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
- // Some allocators, such as TCMalloc, make use of thread local storage.
- // As a result, any attempt to call new (or malloc) will lazily cause such a
- // system to initialize, which will include registering for a TLS key. If we
- // are not careful here, then that request to create a key will call new back,
- // and we'll have an infinite loop. We avoid that as follows:
- // Use a stack allocated vector, so that we don't have dependence on our
- // allocator until our service is in place. (i.e., don't even call new until
- // after we're setup)
- void* stack_allocated_tls_data[kThreadLocalStorageSize];
+ // Some allocators, such as TCMalloc, make use of thread local storage. As a
+ // result, any attempt to call new (or malloc) will lazily cause such a system
+ // to initialize, which will include registering for a TLS key. If we are not
+ // careful here, then that request to create a key will call new back, and
+ // we'll have an infinite loop. We avoid that as follows: Use a stack
+ // allocated vector, so that we don't have dependence on our allocator until
+ // our service is in place. (i.e., don't even call new until after we're
+ // set up.)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
// Ensure that any re-entrant calls change the temp version.
PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
// Allocate an array to store our data.
- void** tls_data = new void*[kThreadLocalStorageSize];
+ TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
return tls_data;
}
-void OnThreadExitInternal(void* value) {
- DCHECK(value);
- void** tls_data = static_cast<void**>(value);
- // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+void OnThreadExitInternal(TlsVectorEntry* tls_data) {
+ DCHECK(tls_data);
+ // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
// terminates, one of the destructor calls we make may be to shut down an
- // allocator. We have to be careful that after we've shutdown all of the
- // known destructors (perchance including an allocator), that we don't call
- // the allocator and cause it to resurrect itself (with no possibly destructor
- // call to follow). We handle this problem as follows:
- // Switch to using a stack allocated vector, so that we don't have dependence
- // on our allocator after we have called all g_tls_destructors. (i.e., don't
- // even call delete[] after we're done with destructors.)
- void* stack_allocated_tls_data[kThreadLocalStorageSize];
+ // allocator. We have to be careful that after we've shutdown all of the known
+ // destructors (perchance including an allocator), that we don't call the
+ // allocator and cause it to resurrect itself (with no possible destructor
+ // call to follow). We handle this problem as follows: Switch to using a stack
+ // allocated vector, so that we don't have dependence on our allocator after
+ // we have called all g_tls_metadata destructors. (i.e., don't even call
+ // delete[] after we're done with destructors.)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
// Ensure that any re-entrant calls change the temp version.
PlatformThreadLocalStorage::TLSKey key =
@@ -130,32 +181,38 @@ void OnThreadExitInternal(void* value) {
PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
delete[] tls_data; // Our last dependence on an allocator.
+ // Snapshot the TLS metadata so we don't have to lock on every access.
+ TlsMetadata tls_metadata[kThreadLocalStorageSize];
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
+ }
+
int remaining_attempts = kMaxDestructorIterations;
bool need_to_scan_destructors = true;
while (need_to_scan_destructors) {
need_to_scan_destructors = false;
// Try to destroy the first-created-slot (which is slot 1) in our last
- // destructor call. That user was able to function, and define a slot with
+ // destructor call. That user was able to function, and define a slot with
// no other services running, so perhaps it is a basic service (like an
- // allocator) and should also be destroyed last. If we get the order wrong,
- // then we'll itterate several more times, so it is really not that
- // critical (but it might help).
- base::subtle::Atomic32 last_used_tls_key =
- base::subtle::NoBarrier_Load(&g_last_used_tls_key);
- for (int slot = last_used_tls_key; slot > 0; --slot) {
- void* tls_value = stack_allocated_tls_data[slot];
- if (tls_value == NULL)
+ // allocator) and should also be destroyed last. If we get the order wrong,
+ // then we'll iterate several more times, so it is really not that critical
+ // (but it might help).
+ for (int slot = 0; slot < kThreadLocalStorageSize; ++slot) {
+ void* tls_value = stack_allocated_tls_data[slot].data;
+ if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
+ stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
continue;
base::ThreadLocalStorage::TLSDestructorFunc destructor =
- g_tls_destructors[slot];
- if (destructor == NULL)
+ tls_metadata[slot].destructor;
+ if (!destructor)
continue;
- stack_allocated_tls_data[slot] = NULL; // pre-clear the slot.
+ stack_allocated_tls_data[slot].data = nullptr; // pre-clear the slot.
destructor(tls_value);
- // Any destructor might have called a different service, which then set
- // a different slot to a non-NULL value. Hence we need to check
- // the whole vector again. This is a pthread standard.
+ // Any destructor might have called a different service, which then set a
+ // different slot to a non-null value. Hence we need to check the whole
+ // vector again. This is a pthread standard.
need_to_scan_destructors = true;
}
if (--remaining_attempts <= 0) {
@@ -165,7 +222,7 @@ void OnThreadExitInternal(void* value) {
}
// Remove our stack allocated vector.
- PlatformThreadLocalStorage::SetTLSValue(key, NULL);
+ PlatformThreadLocalStorage::SetTLSValue(key, nullptr);
}
} // namespace
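The destructor pass above follows the pthread model: destroying one slot's value may repopulate another slot, so the whole vector must be rescanned until a pass makes no destructor calls, bounded by kMaxDestructorIterations. A minimal standalone sketch of that rescan-until-stable loop, with hypothetical types standing in for the real slot bookkeeping:

#include <array>

using Destructor = void (*)(void*);

struct SlotData {
  void* data = nullptr;
  Destructor destructor = nullptr;
};

constexpr int kSlots = 8;

void RunDestructorsOnThreadExit(std::array<SlotData, kSlots>& slots) {
  int remaining_attempts = kSlots;  // Mirrors kMaxDestructorIterations.
  bool need_to_scan = true;
  while (need_to_scan && remaining_attempts-- > 0) {
    need_to_scan = false;
    for (SlotData& slot : slots) {
      if (!slot.data || !slot.destructor)
        continue;
      void* value = slot.data;
      slot.data = nullptr;  // Pre-clear so a re-entrant Get() sees null.
      slot.destructor(value);
      // The destructor may have set another slot; rescan the whole vector.
      need_to_scan = true;
    }
  }
}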
@@ -184,69 +241,107 @@ void PlatformThreadLocalStorage::OnThreadExit() {
// Maybe we have never initialized TLS for this thread.
if (!tls_data)
return;
- OnThreadExitInternal(tls_data);
+ OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
}
#elif defined(OS_POSIX)
void PlatformThreadLocalStorage::OnThreadExit(void* value) {
- OnThreadExitInternal(value);
+ OnThreadExitInternal(static_cast<TlsVectorEntry*>(value));
}
#endif // defined(OS_WIN)
} // namespace internal
-ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
- slot_ = 0;
- base::subtle::Release_Store(&initialized_, 0);
- Initialize(destructor);
-}
-
void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
PlatformThreadLocalStorage::TLSKey key =
base::subtle::NoBarrier_Load(&g_native_tls_key);
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
- !PlatformThreadLocalStorage::GetTLSValue(key))
+ !PlatformThreadLocalStorage::GetTLSValue(key)) {
ConstructTlsVector();
+ }
// Grab a new slot.
- slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1);
- DCHECK_GT(slot_, 0);
+ slot_ = kInvalidSlotValue;
+ version_ = 0;
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ for (int i = 0; i < kThreadLocalStorageSize; ++i) {
+ // Tracking the last assigned slot is an attempt to find the next
+ // available slot within one iteration. Under normal usage, slots remain
+ // in use for the lifetime of the process (otherwise, before slots were
+ // reclaimed, we would have run out of them). This makes it highly likely the
+ // next slot is going to be a free slot.
+ size_t slot_candidate =
+ (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
+ if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
+ g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
+ g_tls_metadata[slot_candidate].destructor = destructor;
+ g_last_assigned_slot = slot_candidate;
+ slot_ = slot_candidate;
+ version_ = g_tls_metadata[slot_candidate].version;
+ break;
+ }
+ }
+ }
+ CHECK_NE(slot_, kInvalidSlotValue);
CHECK_LT(slot_, kThreadLocalStorageSize);
// Setup our destructor.
- g_tls_destructors[slot_] = destructor;
base::subtle::Release_Store(&initialized_, 1);
}
void ThreadLocalStorage::StaticSlot::Free() {
- // At this time, we don't reclaim old indices for TLS slots.
- // So all we need to do is wipe the destructor.
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- g_tls_destructors[slot_] = NULL;
- slot_ = 0;
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ g_tls_metadata[slot_].status = TlsStatus::FREE;
+ g_tls_metadata[slot_].destructor = nullptr;
+ ++(g_tls_metadata[slot_].version);
+ }
+ slot_ = kInvalidSlotValue;
base::subtle::Release_Store(&initialized_, 0);
}
void* ThreadLocalStorage::StaticSlot::Get() const {
- void** tls_data = static_cast<void**>(
+ TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
if (!tls_data)
tls_data = ConstructTlsVector();
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- return tls_data[slot_];
+ // A version mismatch means this slot was previously freed.
+ if (tls_data[slot_].version != version_)
+ return nullptr;
+ return tls_data[slot_].data;
}
void ThreadLocalStorage::StaticSlot::Set(void* value) {
- void** tls_data = static_cast<void**>(
+ TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
if (!tls_data)
tls_data = ConstructTlsVector();
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- tls_data[slot_] = value;
+ tls_data[slot_].data = value;
+ tls_data[slot_].version = version_;
+}
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
+ tls_slot_.Initialize(destructor);
+}
+
+ThreadLocalStorage::Slot::~Slot() {
+ tls_slot_.Free();
+}
+
+void* ThreadLocalStorage::Slot::Get() const {
+ return tls_slot_.Get();
+}
+
+void ThreadLocalStorage::Slot::Set(void* value) {
+ tls_slot_.Set(value);
}
} // namespace base
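The core of the new reclamation scheme is the version pairing: Free() bumps the slot's version in g_tls_metadata, so entries written by other threads under the old version read back as null rather than leaking stale data into a reassigned slot. A condensed, hypothetical sketch of that idea, with the locking and per-thread plumbing stripped out:

#include <cstdint>
#include <vector>

struct Metadata { bool in_use = false; uint32_t version = 0; };
struct Entry { void* data = nullptr; uint32_t version = 0; };
struct Slot { int index = 0; uint32_t version = 0; };  // Captured at Initialize().

void* Get(const std::vector<Entry>& tls, const Slot& slot) {
  const Entry& e = tls[slot.index];
  // A version mismatch means the slot was freed (and possibly reassigned)
  // since this thread last wrote it, so its stale data must read as null.
  return e.version == slot.version ? e.data : nullptr;
}

void Free(std::vector<Metadata>& metadata, Slot& slot) {
  metadata[slot.index].in_use = false;
  ++metadata[slot.index].version;  // Invalidates stale entries on all threads.
}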
diff --git a/base/threading/thread_local_storage.h b/base/threading/thread_local_storage.h
index 0c7a692a66..5e70410af9 100644
--- a/base/threading/thread_local_storage.h
+++ b/base/threading/thread_local_storage.h
@@ -5,6 +5,8 @@
#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#include <stdint.h>
+
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
@@ -20,9 +22,12 @@ namespace base {
namespace internal {
-// WARNING: You should *NOT* be using this class directly.
-// PlatformThreadLocalStorage is low-level abstraction to the OS's TLS
-// interface, you should instead be using ThreadLocalStorage::StaticSlot/Slot.
+// WARNING: You should *NOT* use this class directly.
+// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
+// interface. Instead, you should use one of the following:
+// * ThreadLocalBoolean (from thread_local.h) for booleans.
+// * ThreadLocalPointer (from thread_local.h) for pointers.
+// * ThreadLocalStorage::StaticSlot/Slot for more direct control of the slot.
class BASE_EXPORT PlatformThreadLocalStorage {
public:
@@ -89,7 +94,7 @@ class BASE_EXPORT ThreadLocalStorage {
// initialization, as base's LINKER_INITIALIZED requires a constructor and on
// some compilers (notably gcc 4.4) this still ends up needing runtime
// initialization.
- #define TLS_INITIALIZER {false, 0}
+#define TLS_INITIALIZER {false, 0, 0}
// A key representing one value stored in TLS.
// Initialize like
@@ -123,18 +128,25 @@ class BASE_EXPORT ThreadLocalStorage {
// The internals of this struct should be considered private.
base::subtle::Atomic32 initialized_;
int slot_;
+ uint32_t version_;
};
// A convenience wrapper around StaticSlot with a constructor. Can be used
// as a member variable.
- class BASE_EXPORT Slot : public StaticSlot {
+ class BASE_EXPORT Slot {
public:
- // Calls StaticSlot::Initialize().
explicit Slot(TLSDestructorFunc destructor = NULL);
+ ~Slot();
+
+ // Get the thread-local value stored in this slot.
+ // Values are guaranteed to initially be zero.
+ void* Get() const;
+
+ // Set the slot's thread-local value to |value|.
+ void Set(void* value);
private:
- using StaticSlot::initialized_;
- using StaticSlot::slot_;
+ StaticSlot tls_slot_;
DISALLOW_COPY_AND_ASSIGN(Slot);
};
diff --git a/base/threading/thread_local_storage_unittest.cc b/base/threading/thread_local_storage_unittest.cc
index 322524b10e..335252b18e 100644
--- a/base/threading/thread_local_storage_unittest.cc
+++ b/base/threading/thread_local_storage_unittest.cc
@@ -127,4 +127,14 @@ TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
tls_slot.Free(); // Stop doing callbacks to cleanup threads.
}
+TEST(ThreadLocalStorageTest, TLSReclaim) {
+ // Creates and destroys many TLS slots and ensures they are all
+ // zero-initialized.
+ for (int i = 0; i < 1000; ++i) {
+ ThreadLocalStorage::Slot slot(nullptr);
+ EXPECT_EQ(nullptr, slot.Get());
+ slot.Set(reinterpret_cast<void*>(0xBAADF00D));
+ EXPECT_EQ(reinterpret_cast<void*>(0xBAADF00D), slot.Get());
+ }
+}
+
} // namespace base
diff --git a/base/threading/thread_restrictions.cc b/base/threading/thread_restrictions.cc
index 00306c5ae7..8dd7743332 100644
--- a/base/threading/thread_restrictions.cc
+++ b/base/threading/thread_restrictions.cc
@@ -4,7 +4,7 @@
#include "base/threading/thread_restrictions.h"
-#if ENABLE_THREAD_RESTRICTIONS
+#if DCHECK_IS_ON()
#include "base/lazy_instance.h"
#include "base/logging.h"
@@ -35,7 +35,7 @@ bool ThreadRestrictions::SetIOAllowed(bool allowed) {
// static
void ThreadRestrictions::AssertIOAllowed() {
if (g_io_disallowed.Get().Get()) {
- LOG(FATAL) <<
+ NOTREACHED() <<
"Function marked as IO-only was called from a thread that "
"disallows IO! If this thread really should be allowed to "
"make IO calls, adjust the call to "
@@ -54,10 +54,14 @@ bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
// static
void ThreadRestrictions::AssertSingletonAllowed() {
if (g_singleton_disallowed.Get().Get()) {
- LOG(FATAL) << "LazyInstance/Singleton is not allowed to be used on this "
- << "thread. Most likely it's because this thread is not "
- << "joinable, so AtExitManager may have deleted the object "
- << "on shutdown, leading to a potential shutdown crash.";
+ NOTREACHED() << "LazyInstance/Singleton is not allowed to be used on this "
+ << "thread. Most likely it's because this thread is not "
+ << "joinable (or the current task is running with "
+ << "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN semantics), so "
+ << "AtExitManager may have deleted the object on shutdown, "
+ << "leading to a potential shutdown crash. If you need to use "
+ << "the object from this context, it'll have to be updated to "
+ << "use Leaky traits.";
}
}
@@ -69,8 +73,8 @@ void ThreadRestrictions::DisallowWaiting() {
// static
void ThreadRestrictions::AssertWaitAllowed() {
if (g_wait_disallowed.Get().Get()) {
- LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent "
- << "jank and deadlock.";
+ NOTREACHED() << "Waiting is not allowed to be used on this thread to "
+ << "prevent jank and deadlock.";
}
}
@@ -82,4 +86,4 @@ bool ThreadRestrictions::SetWaitAllowed(bool allowed) {
} // namespace base
-#endif // ENABLE_THREAD_RESTRICTIONS
+#endif // DCHECK_IS_ON()
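Replacing the bespoke ENABLE_THREAD_RESTRICTIONS macro with DCHECK_IS_ON() ties the restriction machinery to the same switch as DCHECKs themselves (base/logging.h, which the header now includes, provides the macro). Schematically, the gating pattern looks like this sketch (hypothetical class name, not the actual header):

class ThreadRestrictionsSketch {
 public:
#if DCHECK_IS_ON()
  // Real implementations, backed by thread-local booleans.
  static bool SetIOAllowed(bool allowed);
  static void AssertIOAllowed();
#else
  // Other builds: these inline to no-ops and cost nothing.
  static bool SetIOAllowed(bool) { return true; }
  static void AssertIOAllowed() {}
#endif
};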
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index 4212a4b6eb..a86dd452b8 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -6,15 +6,9 @@
#define BASE_THREADING_THREAD_RESTRICTIONS_H_
#include "base/base_export.h"
+#include "base/logging.h"
#include "base/macros.h"
-// See comment at top of thread_checker.h
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
-#define ENABLE_THREAD_RESTRICTIONS 1
-#else
-#define ENABLE_THREAD_RESTRICTIONS 0
-#endif
-
class BrowserProcessImpl;
class HistogramSynchronizer;
class NativeBackendKWallet;
@@ -57,10 +51,10 @@ namespace gpu {
class GpuChannelHost;
}
namespace mojo {
-namespace common {
-class MessagePumpMojo;
-}
class SyncCallRestrictions;
+namespace edk {
+class ScopedIPCSupport;
+}
}
namespace ui {
class CommandBufferClientImpl;
@@ -92,6 +86,10 @@ namespace android {
class JavaHandlerThread;
}
+namespace internal {
+class TaskTracker;
+}
+
class SequencedWorkerPool;
class SimpleThread;
class Thread;
@@ -137,21 +135,7 @@ class BASE_EXPORT ThreadRestrictions {
DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
};
- // Constructing a ScopedAllowSingleton temporarily allows accessing for the
- // current thread. Doing this is almost always incorrect.
- class BASE_EXPORT ScopedAllowSingleton {
- public:
- ScopedAllowSingleton() { previous_value_ = SetSingletonAllowed(true); }
- ~ScopedAllowSingleton() { SetSingletonAllowed(previous_value_); }
- private:
- // Whether singleton use is allowed when the ScopedAllowSingleton was
- // constructed.
- bool previous_value_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedAllowSingleton);
- };
-
-#if ENABLE_THREAD_RESTRICTIONS
+#if DCHECK_IS_ON()
// Set whether the current thread is allowed to make IO calls.
// Threads start out in the *allowed* state.
// Returns the previous value.
@@ -197,6 +181,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class content::ScopedAllowWaitForAndroidLayoutTests;
friend class content::ScopedAllowWaitForDebugURL;
friend class ::HistogramSynchronizer;
+ friend class internal::TaskTracker;
friend class ::ScopedAllowWaitForLegacyWebViewApi;
friend class cc::CompletionEvent;
friend class cc::SingleThreadTaskGraphRunner;
@@ -210,8 +195,8 @@ class BASE_EXPORT ThreadRestrictions {
friend class ThreadTestHelper;
friend class PlatformThread;
friend class android::JavaHandlerThread;
- friend class mojo::common::MessagePumpMojo;
friend class mojo::SyncCallRestrictions;
+ friend class mojo::edk::ScopedIPCSupport;
friend class ui::CommandBufferClientImpl;
friend class ui::CommandBufferLocal;
friend class ui::GpuState;
@@ -240,7 +225,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class views::ScreenMus;
// END USAGE THAT NEEDS TO BE FIXED.
-#if ENABLE_THREAD_RESTRICTIONS
+#if DCHECK_IS_ON()
static bool SetWaitAllowed(bool allowed);
#else
static bool SetWaitAllowed(bool) { return true; }
diff --git a/base/threading/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
index 190e18ffc6..00deaa4e20 100644
--- a/base/threading/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -6,8 +6,10 @@
#include <utility>
+#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_local.h"
@@ -32,6 +34,50 @@ bool ThreadTaskRunnerHandle::IsSet() {
return !!lazy_tls_ptr.Pointer()->Get();
}
+// static
+ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
+ scoped_refptr<SingleThreadTaskRunner> overriding_task_runner) {
+ // OverrideForTesting() is not compatible with a SequencedTaskRunnerHandle
+ // being set (but SequencedTaskRunnerHandle::IsSet() includes
+ // ThreadTaskRunnerHandle::IsSet() so that's discounted as the only valid
+ // excuse for it to be true). Sadly this means that tests that merely need a
+ // SequencedTaskRunnerHandle on their main thread can be forced to use a
+ // ThreadTaskRunnerHandle if they're also using test task runners (that
+ // OverrideForTesting() when running their tasks from said main thread). To
+ // solve this: sequenced_task_runner_handle.cc and thread_task_runner_handle.cc
+ // would have to be merged into a single impl file and share TLS state. This
+ // was deemed unnecessary for now as most tests should use higher level
+ // constructs and not have to instantiate task runner handles on their own.
+ DCHECK(!SequencedTaskRunnerHandle::IsSet() || IsSet());
+
+ if (!IsSet()) {
+ std::unique_ptr<ThreadTaskRunnerHandle> top_level_ttrh =
+ MakeUnique<ThreadTaskRunnerHandle>(std::move(overriding_task_runner));
+ return ScopedClosureRunner(base::Bind(
+ [](std::unique_ptr<ThreadTaskRunnerHandle>) {},
+ base::Passed(&top_level_ttrh)));
+ }
+
+ ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
+ // Swap the two (and below bind |overriding_task_runner|, which is now the
+ // previous one, as the |task_runner_to_restore|).
+ ttrh->task_runner_.swap(overriding_task_runner);
+
+ return ScopedClosureRunner(base::Bind(
+ [](scoped_refptr<SingleThreadTaskRunner> task_runner_to_restore,
+ SingleThreadTaskRunner* expected_task_runner_before_restore) {
+ ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
+
+ DCHECK_EQ(expected_task_runner_before_restore, ttrh->task_runner_.get())
+ << "Nested overrides must expire their ScopedClosureRunners "
+ "in LIFO order.";
+
+ ttrh->task_runner_.swap(task_runner_to_restore);
+ },
+ base::Passed(&overriding_task_runner),
+ base::Unretained(ttrh->task_runner_.get())));
+}
+
ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
scoped_refptr<SingleThreadTaskRunner> task_runner)
: task_runner_(std::move(task_runner)) {
diff --git a/base/threading/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
index c8e58935f0..7ae85e6dcf 100644
--- a/base/threading/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -6,6 +6,7 @@
#define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
#include "base/base_export.h"
+#include "base/callback_helpers.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
@@ -26,6 +27,17 @@ class BASE_EXPORT ThreadTaskRunnerHandle {
// the current thread.
static bool IsSet();
+ // Overrides ThreadTaskRunnerHandle::Get()'s |task_runner_| to point at
+ // |overriding_task_runner| until the returned ScopedClosureRunner goes out of
+ // scope (instantiates a ThreadTaskRunnerHandle for that scope if |!IsSet()|).
+ // Nested overrides are allowed but callers must ensure the
+ // ScopedClosureRunners expire in LIFO (stack) order. Note: nesting
+ // ThreadTaskRunnerHandles isn't generally desired but it's useful in unit
+ // tests where multiple task runners can share the main thread for simplicity
+ // and determinism.
+ static ScopedClosureRunner OverrideForTesting(
+ scoped_refptr<SingleThreadTaskRunner> overriding_task_runner);
+
// Binds |task_runner| to the current thread. |task_runner| must belong
// to the current thread for this to succeed.
explicit ThreadTaskRunnerHandle(
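A hypothetical test body showing the intended LIFO discipline for the returned ScopedClosureRunners (from base/callback_helpers.h, now included above); |mock_runner_a| and |mock_runner_b| are assumed scoped_refptr<SingleThreadTaskRunner> test doubles:

{
  base::ScopedClosureRunner undo_a =
      base::ThreadTaskRunnerHandle::OverrideForTesting(mock_runner_a);
  {
    base::ScopedClosureRunner undo_b =
        base::ThreadTaskRunnerHandle::OverrideForTesting(mock_runner_b);
    // ThreadTaskRunnerHandle::Get() now returns |mock_runner_b|.
  }  // |undo_b| expires first (LIFO), restoring |mock_runner_a|.
}  // |undo_a| expires, restoring the original handle, if any.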
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index b0fd26521a..af8347432b 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -5,13 +5,22 @@
#include "base/threading/thread.h"
#include <stddef.h>
+#include <stdint.h>
#include <vector>
#include "base/bind.h"
-#include "base/location.h"
+#include "base/debug/leak_annotations.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
@@ -38,8 +47,11 @@ class SleepInsideInitThread : public Thread {
init_called_ = true;
}
bool InitCalled() { return init_called_; }
+
private:
bool init_called_;
+
+ DISALLOW_COPY_AND_ASSIGN(SleepInsideInitThread);
};
enum ThreadEvent {
@@ -76,6 +88,8 @@ class CaptureToEventList : public Thread {
private:
EventList* event_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(CaptureToEventList);
};
// Observer that writes a value into |event_list| when a message loop has been
@@ -96,6 +110,8 @@ class CapturingDestructionObserver
private:
EventList* event_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(CapturingDestructionObserver);
};
// Task that adds a destruction observer to the current message loop.
@@ -115,59 +131,79 @@ void ReturnThreadId(base::Thread* thread,
} // namespace
-TEST_F(ThreadTest, Restart) {
- Thread a("Restart");
- a.Stop();
- EXPECT_FALSE(a.message_loop());
- EXPECT_FALSE(a.IsRunning());
- EXPECT_TRUE(a.Start());
- EXPECT_TRUE(a.message_loop());
- EXPECT_TRUE(a.IsRunning());
- a.Stop();
- EXPECT_FALSE(a.message_loop());
- EXPECT_FALSE(a.IsRunning());
- EXPECT_TRUE(a.Start());
- EXPECT_TRUE(a.message_loop());
- EXPECT_TRUE(a.IsRunning());
- a.Stop();
- EXPECT_FALSE(a.message_loop());
- EXPECT_FALSE(a.IsRunning());
- a.Stop();
- EXPECT_FALSE(a.message_loop());
- EXPECT_FALSE(a.IsRunning());
-}
-
TEST_F(ThreadTest, StartWithOptions_StackSize) {
Thread a("StartWithStackSize");
// Ensure that the thread can work with only 12 kb and still process a
- // message.
+ // message. At the same time, we should scale with the bitness of the system,
+ // where 12 kb is definitely not enough.
+ // 12 kb = 3072 slots on a 32-bit system, so we'll scale based on that.
Thread::Options options;
-#if defined(ADDRESS_SANITIZER)
- // ASan bloats the stack variables and overflows the 12 kb stack.
- options.stack_size = 24*1024;
+#if defined(ADDRESS_SANITIZER) || !defined(NDEBUG)
+ // ASan bloats the stack variables and overflows the 3072-slot stack. Some
+ // debug builds also grow the stack too much.
+ options.stack_size = 2 * 3072 * sizeof(uintptr_t);
#else
- options.stack_size = 12*1024;
+ options.stack_size = 3072 * sizeof(uintptr_t);
#endif
EXPECT_TRUE(a.StartWithOptions(options));
EXPECT_TRUE(a.message_loop());
EXPECT_TRUE(a.IsRunning());
- bool was_invoked = false;
- a.task_runner()->PostTask(FROM_HERE, base::Bind(&ToggleValue, &was_invoked));
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ a.task_runner()->PostTask(FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&event)));
+ event.Wait();
+}
- // wait for the task to run (we could use a kernel event here
- // instead to avoid busy waiting, but this is sufficient for
- // testing purposes).
- for (int i = 100; i >= 0 && !was_invoked; --i) {
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
- }
- EXPECT_TRUE(was_invoked);
+// Intentional test-only race for otherwise untestable code, won't fix.
+// https://crbug.com/634383
+#if !defined(THREAD_SANITIZER)
+TEST_F(ThreadTest, StartWithOptions_NonJoinable) {
+ Thread* a = new Thread("StartNonJoinable");
+ // Non-joinable threads have to be leaked for now (see
+ // Thread::Options::joinable for details).
+ ANNOTATE_LEAKING_OBJECT_PTR(a);
+
+ Thread::Options options;
+ options.joinable = false;
+ EXPECT_TRUE(a->StartWithOptions(options));
+ EXPECT_TRUE(a->message_loop());
+ EXPECT_TRUE(a->IsRunning());
+
+ // Without this call this test is racy. The above IsRunning() succeeds because
+ // of an early-return condition that holds between Start() and StopSoon();
+ // after invoking StopSoon() below, that early-return condition is no longer
+ // satisfied and the real |is_running_| bit has to be checked, which could
+ // still be false if the message loop hasn't started for real in practice.
+ // This is only a requirement for this test because the non-joinable property
+ // forces it to use StopSoon() and not wait for a complete Stop().
+ EXPECT_TRUE(a->WaitUntilThreadStarted());
+
+ // Make the thread block until |block_event| is signaled.
+ base::WaitableEvent block_event(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ a->task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::WaitableEvent::Wait, base::Unretained(&block_event)));
+
+ a->StopSoon();
+ EXPECT_TRUE(a->IsRunning());
+
+ // Unblock the task and give a bit of extra time to unwind QuitWhenIdle().
+ block_event.Signal();
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+
+ // The thread should now have stopped on its own.
+ EXPECT_FALSE(a->IsRunning());
}
+#endif
-TEST_F(ThreadTest, TwoTasks) {
+TEST_F(ThreadTest, TwoTasksOnJoinableThread) {
bool was_invoked = false;
{
- Thread a("TwoTasks");
+ Thread a("TwoTasksOnJoinableThread");
EXPECT_TRUE(a.Start());
EXPECT_TRUE(a.message_loop());
@@ -184,18 +220,164 @@ TEST_F(ThreadTest, TwoTasks) {
EXPECT_TRUE(was_invoked);
}
+TEST_F(ThreadTest, DestroyWhileRunningIsSafe) {
+ Thread a("DestroyWhileRunningIsSafe");
+ EXPECT_TRUE(a.Start());
+ EXPECT_TRUE(a.WaitUntilThreadStarted());
+}
+
+// TODO(gab): Enable this test when destroying a non-joinable Thread instance
+// is supported (proposal @ https://crbug.com/629139#c14).
+TEST_F(ThreadTest, DISABLED_DestroyWhileRunningNonJoinableIsSafe) {
+ {
+ Thread a("DestroyWhileRunningNonJoinableIsSafe");
+ Thread::Options options;
+ options.joinable = false;
+ EXPECT_TRUE(a.StartWithOptions(options));
+ EXPECT_TRUE(a.WaitUntilThreadStarted());
+ }
+
+ // Attempt to catch any use-after-frees from the non-joinable thread within
+ // the scope of this test.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+}
+
TEST_F(ThreadTest, StopSoon) {
Thread a("StopSoon");
EXPECT_TRUE(a.Start());
EXPECT_TRUE(a.message_loop());
EXPECT_TRUE(a.IsRunning());
a.StopSoon();
+ a.Stop();
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+}
+
+TEST_F(ThreadTest, StopTwiceNop) {
+ Thread a("StopTwiceNop");
+ EXPECT_TRUE(a.Start());
+ EXPECT_TRUE(a.message_loop());
+ EXPECT_TRUE(a.IsRunning());
+ a.StopSoon();
+ // Calling StopSoon() a second time should be a nop.
a.StopSoon();
a.Stop();
+ // Same with Stop().
+ a.Stop();
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+ // Calling them when not running should also nop.
+ a.StopSoon();
+ a.Stop();
+}
+
+// TODO(gab): Enable this test in conjunction with re-enabling the sequence
+// check in Thread::Stop() as part of http://crbug.com/629139.
+TEST_F(ThreadTest, DISABLED_StopOnNonOwningThreadIsDeath) {
+ Thread a("StopOnNonOwningThreadDeath");
+ EXPECT_TRUE(a.StartAndWaitForTesting());
+
+ Thread b("NonOwningThread");
+ b.Start();
+ EXPECT_DCHECK_DEATH({
+ // Stopping |a| on |b| isn't allowed.
+ b.task_runner()->PostTask(FROM_HERE,
+ base::Bind(&Thread::Stop, base::Unretained(&a)));
+ // Block here so the DCHECK on |b| always happens in this scope.
+ base::PlatformThread::Sleep(base::TimeDelta::Max());
+ });
+}
+
+TEST_F(ThreadTest, TransferOwnershipAndStop) {
+ std::unique_ptr<Thread> a =
+ base::MakeUnique<Thread>("TransferOwnershipAndStop");
+ EXPECT_TRUE(a->StartAndWaitForTesting());
+ EXPECT_TRUE(a->IsRunning());
+
+ Thread b("TakingOwnershipThread");
+ b.Start();
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ // a->DetachFromSequence() should allow |b| to use |a|'s Thread API.
+ a->DetachFromSequence();
+ b.task_runner()->PostTask(
+ FROM_HERE, base::Bind(
+ [](std::unique_ptr<Thread> thread_to_stop,
+ base::WaitableEvent* event_to_signal) -> void {
+ thread_to_stop->Stop();
+ event_to_signal->Signal();
+ },
+ base::Passed(&a), base::Unretained(&event)));
+
+ event.Wait();
+}
+
+TEST_F(ThreadTest, StartTwice) {
+ Thread a("StartTwice");
+
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+
+ EXPECT_TRUE(a.Start());
+ EXPECT_TRUE(a.message_loop());
+ EXPECT_TRUE(a.IsRunning());
+
+ a.Stop();
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+
+ EXPECT_TRUE(a.Start());
+ EXPECT_TRUE(a.message_loop());
+ EXPECT_TRUE(a.IsRunning());
+
+ a.Stop();
EXPECT_FALSE(a.message_loop());
EXPECT_FALSE(a.IsRunning());
}
+// Intentional test-only race for otherwise untestable code, won't fix.
+// https://crbug.com/634383
+#if !defined(THREAD_SANITIZER)
+TEST_F(ThreadTest, StartTwiceNonJoinableNotAllowed) {
+ LOG(ERROR) << __FUNCTION__;
+ Thread* a = new Thread("StartTwiceNonJoinable");
+ // Non-joinable threads have to be leaked for now (see
+ // Thread::Options::joinable for details).
+ ANNOTATE_LEAKING_OBJECT_PTR(a);
+
+ Thread::Options options;
+ options.joinable = false;
+ EXPECT_TRUE(a->StartWithOptions(options));
+ EXPECT_TRUE(a->message_loop());
+ EXPECT_TRUE(a->IsRunning());
+
+ // Signaled when last task on |a| is processed.
+ base::WaitableEvent last_task_event(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ a->task_runner()->PostTask(FROM_HERE,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&last_task_event)));
+
+ // StopSoon() is non-blocking, Yield() to |a|, wait for last task to be
+ // processed and a little more for QuitWhenIdle() to unwind before considering
+ // the thread "stopped".
+ a->StopSoon();
+ base::PlatformThread::YieldCurrentThread();
+ last_task_event.Wait();
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+
+ // This test assumes that the above was sufficient to let the thread fully
+ // stop.
+ ASSERT_FALSE(a->IsRunning());
+
+ // Restarting it should not be allowed.
+ EXPECT_DCHECK_DEATH(a->Start());
+}
+#endif
+
TEST_F(ThreadTest, ThreadName) {
Thread a("ThreadName");
EXPECT_TRUE(a.Start());
@@ -297,3 +479,91 @@ TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
EXPECT_TRUE(a.WaitUntilThreadStarted());
EXPECT_TRUE(a.WaitUntilThreadStarted());
}
+
+TEST_F(ThreadTest, FlushForTesting) {
+ Thread a("FlushForTesting");
+
+ // Flushing a non-running thread should be a no-op.
+ a.FlushForTesting();
+
+ ASSERT_TRUE(a.Start());
+
+ // Flushing a thread with no tasks shouldn't block.
+ a.FlushForTesting();
+
+ constexpr base::TimeDelta kSleepPerTestTask =
+ base::TimeDelta::FromMilliseconds(50);
+ constexpr size_t kNumSleepTasks = 5;
+
+ const base::TimeTicks ticks_before_post = base::TimeTicks::Now();
+
+ for (size_t i = 0; i < kNumSleepTasks; ++i) {
+ a.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&base::PlatformThread::Sleep, kSleepPerTestTask));
+ }
+
+ // All tasks should have executed, as reflected by the elapsed time.
+ a.FlushForTesting();
+ EXPECT_GE(base::TimeTicks::Now() - ticks_before_post,
+ kNumSleepTasks * kSleepPerTestTask);
+
+ a.Stop();
+
+ // Flushing a stopped thread should be a no-op.
+ a.FlushForTesting();
+}
+
+namespace {
+
+// A Thread which uses a MessageLoop on the stack. It won't start a real
+// underlying thread (instead its messages can be processed by a RunLoop on the
+// stack).
+class ExternalMessageLoopThread : public Thread {
+ public:
+ ExternalMessageLoopThread() : Thread("ExternalMessageLoopThread") {}
+
+ ~ExternalMessageLoopThread() override { Stop(); }
+
+ void InstallMessageLoop() { SetMessageLoop(&external_message_loop_); }
+
+ void VerifyUsingExternalMessageLoop(
+ bool expected_using_external_message_loop) {
+ EXPECT_EQ(expected_using_external_message_loop,
+ using_external_message_loop());
+ }
+
+ private:
+ base::MessageLoop external_message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalMessageLoopThread);
+};
+
+} // namespace
+
+TEST_F(ThreadTest, ExternalMessageLoop) {
+ ExternalMessageLoopThread a;
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(false);
+
+ a.InstallMessageLoop();
+ EXPECT_TRUE(a.message_loop());
+ EXPECT_TRUE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(true);
+
+ bool ran = false;
+ a.task_runner()->PostTask(
+ FROM_HERE, base::Bind([](bool* toggled) { *toggled = true; }, &ran));
+ base::RunLoop().RunUntilIdle();
+ EXPECT_TRUE(ran);
+
+ a.Stop();
+ EXPECT_FALSE(a.message_loop());
+ EXPECT_FALSE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(true);
+
+ // Confirm that running any remaining tasks posted from Stop() goes smoothly
+ // (e.g. https://codereview.chromium.org/2135413003/#ps300001 crashed if
+ // StopSoon() posted Thread::ThreadQuitHelper() while |run_loop_| was null).
+ base::RunLoop().RunUntilIdle();
+}
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index 0b7bf8eca1..d47037d79a 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -4,9 +4,11 @@
#include "base/threading/worker_pool.h"
+#include <utility>
+
#include "base/bind.h"
#include "base/compiler_specific.h"
-#include "base/lazy_instance.h"
+#include "base/debug/leak_annotations.h"
#include "base/macros.h"
#include "base/task_runner.h"
#include "base/threading/post_task_and_reply_impl.h"
@@ -97,27 +99,27 @@ struct TaskRunnerHolder {
scoped_refptr<TaskRunner> taskrunners_[2];
};
-base::LazyInstance<TaskRunnerHolder>::Leaky
- g_taskrunners = LAZY_INSTANCE_INITIALIZER;
-
} // namespace
bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply,
+ Closure task,
+ Closure reply,
bool task_is_slow) {
// Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
// do about them because WorkerPool doesn't have a flushing API.
// http://crbug.com/248513
// http://crbug.com/290897
- return PostTaskAndReplyWorkerPool(task_is_slow).PostTaskAndReply(
- from_here, task, reply);
+ // Note: this annotation does not cover tasks posted through a TaskRunner.
+ ANNOTATE_SCOPED_MEMORY_LEAK;
+ return PostTaskAndReplyWorkerPool(task_is_slow)
+ .PostTaskAndReply(from_here, std::move(task), std::move(reply));
}
// static
const scoped_refptr<TaskRunner>&
WorkerPool::GetTaskRunner(bool tasks_are_slow) {
- return g_taskrunners.Get().taskrunners_[tasks_are_slow];
+ static auto* task_runner_holder = new TaskRunnerHolder();
+ return task_runner_holder->taskrunners_[tasks_are_slow];
}
} // namespace base
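Dropping the LazyInstance here in favor of an intentionally leaked function-local static relies on C++11 magic statics for thread-safe one-time initialization while sidestepping destructor ordering at shutdown. A minimal sketch of the pattern, with a hypothetical holder type:

#include <string>

struct Holder {
  std::string values[2];
};

Holder& GetHolder() {
  // Constructed once, thread-safely, on first use; deliberately never
  // deleted, so no destructor can run during process shutdown.
  static Holder* holder = new Holder();
  return *holder;
}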
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index a52a41428b..865948e437 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -6,11 +6,9 @@
#define BASE_THREADING_WORKER_POOL_H_
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/memory/ref_counted.h"
-class Task;
-
namespace tracked_objects {
class Location;
} // namespace tracked_objects
@@ -40,8 +38,8 @@ class BASE_EXPORT WorkerPool {
// for |task| is a worker thread and you can specify |task_is_slow| just
// like you can for PostTask above.
static bool PostTaskAndReply(const tracked_objects::Location& from_here,
- const Closure& task,
- const Closure& reply,
+ Closure task,
+ Closure reply,
bool task_is_slow);
// Return true if the current thread is one that this WorkerPool runs tasks
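Taking the Closure parameters by value (instead of const reference) lets callers choose between copying and moving, and the implementation then std::move()s them onward without an extra copy. A self-contained sketch of that convention, using std::function as a stand-in for base::Closure:

#include <functional>
#include <utility>

using Closure = std::function<void()>;

void PostTask(Closure task) {        // By value: the caller picks copy or move.
  Closure stored = std::move(task);  // Forwarded without another copy.
  stored();
}

int main() {
  Closure cb = [] {};
  PostTask(cb);             // Copies; |cb| remains usable.
  PostTask(std::move(cb));  // Moves; avoids the copy entirely.
}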
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 6b4c42f601..0e19a1a0fe 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -30,10 +30,21 @@ base::LazyInstance<ThreadLocalBoolean>::Leaky
const int kIdleSecondsBeforeExit = 10 * 60;
+#if defined(OS_MACOSX)
+// On Mac OS X a background thread's default stack size is 512 kB. We need at
+// least 1 MB for compilation tasks in V8, so increase this default.
+const int kStackSize = 1 * 1024 * 1024;
+#else
+const int kStackSize = 0;
+#endif
+
class WorkerPoolImpl {
public:
WorkerPoolImpl();
- ~WorkerPoolImpl();
+
+ // WorkerPoolImpl is only instantiated as a leaky LazyInstance, so the
+ // destructor is never called.
+ ~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
const base::Closure& task,
@@ -47,17 +58,13 @@ WorkerPoolImpl::WorkerPoolImpl()
: pool_(new base::PosixDynamicThreadPool("WorkerPool",
kIdleSecondsBeforeExit)) {}
-WorkerPoolImpl::~WorkerPoolImpl() {
- pool_->Terminate();
-}
-
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
const base::Closure& task,
bool /*task_is_slow*/) {
pool_->PostTask(from_here, task);
}
-base::LazyInstance<WorkerPoolImpl> g_lazy_worker_pool =
+base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
LAZY_INSTANCE_INITIALIZER;
class WorkerThread : public PlatformThread::Delegate {
@@ -90,7 +97,7 @@ void WorkerThread::ThreadMain() {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- pending_task.task.Run();
+ std::move(pending_task.task).Run();
stopwatch.Stop();
tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
@@ -121,23 +128,13 @@ PosixDynamicThreadPool::PosixDynamicThreadPool(const std::string& name_prefix,
: name_prefix_(name_prefix),
idle_seconds_before_exit_(idle_seconds_before_exit),
pending_tasks_available_cv_(&lock_),
- num_idle_threads_(0),
- terminated_(false) {}
+ num_idle_threads_(0) {}
PosixDynamicThreadPool::~PosixDynamicThreadPool() {
while (!pending_tasks_.empty())
pending_tasks_.pop();
}
-void PosixDynamicThreadPool::Terminate() {
- {
- AutoLock locked(lock_);
- DCHECK(!terminated_) << "Thread pool is already terminated.";
- terminated_ = true;
- }
- pending_tasks_available_cv_.Broadcast();
-}
-
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
const base::Closure& task) {
@@ -147,8 +144,6 @@ void PosixDynamicThreadPool::PostTask(
void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
AutoLock locked(lock_);
- DCHECK(!terminated_)
- << "This thread pool is already terminated. Do not post new tasks.";
pending_tasks_.push(std::move(*pending_task));
@@ -159,16 +154,13 @@ void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
// The new PlatformThread will take ownership of the WorkerThread object,
// which will delete itself on exit.
WorkerThread* worker = new WorkerThread(name_prefix_, this);
- PlatformThread::CreateNonJoinable(0, worker);
+ PlatformThread::CreateNonJoinable(kStackSize, worker);
}
}
PendingTask PosixDynamicThreadPool::WaitForTask() {
AutoLock locked(lock_);
- if (terminated_)
- return PendingTask(FROM_HERE, base::Closure());
-
if (pending_tasks_.empty()) { // No work available, wait for work.
num_idle_threads_++;
if (num_idle_threads_cv_.get())
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index 628e2b6420..d65ae8f8cf 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -38,8 +38,6 @@
#include "base/threading/platform_thread.h"
#include "base/tracked_objects.h"
-class Task;
-
namespace base {
class BASE_EXPORT PosixDynamicThreadPool
@@ -52,10 +50,6 @@ class BASE_EXPORT PosixDynamicThreadPool
PosixDynamicThreadPool(const std::string& name_prefix,
int idle_seconds_before_exit);
- // Indicates that the thread pool is going away. Stops handing out tasks to
- // worker threads. Wakes up all the idle threads to let them exit.
- void Terminate();
-
// Adds |task| to the thread pool.
void PostTask(const tracked_objects::Location& from_here,
const Closure& task);
@@ -85,7 +79,6 @@ class BASE_EXPORT PosixDynamicThreadPool
ConditionVariable pending_tasks_available_cv_;
int num_idle_threads_;
TaskQueue pending_tasks_;
- bool terminated_;
// Only used for tests to ensure correct thread ordering. It will always be
// NULL in non-test code.
std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
diff --git a/base/threading/worker_pool_posix_unittest.cc b/base/threading/worker_pool_posix_unittest.cc
index 6cefeed34e..b4e8b58520 100644
--- a/base/threading/worker_pool_posix_unittest.cc
+++ b/base/threading/worker_pool_posix_unittest.cc
@@ -103,12 +103,6 @@ class PosixDynamicThreadPoolTest : public testing::Test {
peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
}
- void TearDown() override {
- // Wake up the idle threads so they can terminate.
- if (pool_.get())
- pool_->Terminate();
- }
-
void WaitForTasksToStart(int num_tasks) {
base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
while (num_waiting_to_start_ < num_tasks) {
diff --git a/base/time/time.cc b/base/time/time.cc
index 3670f55758..d1c6a4783c 100644
--- a/base/time/time.cc
+++ b/base/time/time.cc
@@ -21,11 +21,6 @@ namespace base {
// TimeDelta ------------------------------------------------------------------
-// static
-TimeDelta TimeDelta::Max() {
- return TimeDelta(std::numeric_limits<int64_t>::max());
-}
-
int TimeDelta::InDays() const {
if (is_max()) {
// Preserve max to prevent overflow.
@@ -104,33 +99,29 @@ namespace time_internal {
int64_t SaturatedAdd(TimeDelta delta, int64_t value) {
CheckedNumeric<int64_t> rv(delta.delta_);
rv += value;
- return FromCheckedNumeric(rv);
+ if (rv.IsValid())
+ return rv.ValueOrDie();
+ // Positive RHS overflows. Negative RHS underflows.
+ if (value < 0)
+ return -std::numeric_limits<int64_t>::max();
+ return std::numeric_limits<int64_t>::max();
}
int64_t SaturatedSub(TimeDelta delta, int64_t value) {
CheckedNumeric<int64_t> rv(delta.delta_);
rv -= value;
- return FromCheckedNumeric(rv);
-}
-
-int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value) {
- if (value.IsValid())
- return value.ValueUnsafe();
-
- // We could return max/min but we don't really expose what the maximum delta
- // is. Instead, return max/(-max), which is something that clients can reason
- // about.
- // TODO(rvargas) crbug.com/332611: don't use internal values.
- int64_t limit = std::numeric_limits<int64_t>::max();
- if (value.validity() == internal::RANGE_UNDERFLOW)
- limit = -limit;
- return value.ValueOrDefault(limit);
+ if (rv.IsValid())
+ return rv.ValueOrDie();
+ // Negative RHS overflows. Positive RHS underflows.
+ if (value < 0)
+ return std::numeric_limits<int64_t>::max();
+ return -std::numeric_limits<int64_t>::max();
}
} // namespace time_internal
std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
- return os << time_delta.InSecondsF() << "s";
+ return os << time_delta.InSecondsF() << " s";
}
// Time -----------------------------------------------------------------------
@@ -207,6 +198,11 @@ double Time::ToJsTime() const {
kMicrosecondsPerMillisecond);
}
+Time Time::FromJavaTime(int64_t ms_since_epoch) {
+ return base::Time::UnixEpoch() +
+ base::TimeDelta::FromMilliseconds(ms_since_epoch);
+}
+
int64_t Time::ToJavaTime() const {
if (is_null()) {
// Preserve 0 so the invalid result doesn't depend on the platform.
@@ -234,7 +230,12 @@ Time Time::LocalMidnight() const {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
- return FromLocalExploded(exploded);
+ Time out_time;
+ if (FromLocalExploded(exploded, &out_time))
+ return out_time;
+ // This function must not fail.
+ NOTREACHED();
+ return Time();
}
// static
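The rewritten SaturatedAdd()/SaturatedSub() clamp to +max on overflow and to -max on underflow (min is avoided so negation stays safe) instead of routing through the removed FromCheckedNumeric() helper. A standalone sketch of the same clamping rule, assuming GCC/Clang's __builtin_add_overflow:

#include <cstdint>
#include <limits>

int64_t SaturatedAdd(int64_t delta, int64_t value) {
  int64_t result;
  if (!__builtin_add_overflow(delta, value, &result))
    return result;  // In range: use the exact sum.
  // Only a positive RHS can overflow; only a negative RHS can underflow.
  return value < 0 ? -std::numeric_limits<int64_t>::max()
                   : std::numeric_limits<int64_t>::max();
}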
diff --git a/base/time/time.h b/base/time/time.h
index efece969b0..ff8bdde3dc 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -21,9 +21,11 @@
// ThreadTicks will "stand still" whenever the thread has been de-scheduled by
// the operating system.
//
-// All time classes are copyable, assignable, and occupy 64-bits per
-// instance. Thus, they can be efficiently passed by-value (as opposed to
-// by-reference).
+// All time classes are copyable, assignable, and occupy 64-bits per instance.
+// As a result, prefer passing them by value:
+// void MyFunction(TimeDelta arg);
+// If circumstances require, you may also pass by const reference:
+// void MyFunction(const TimeDelta& arg); // Not preferred.
//
// Definitions of operator<< are provided to make these types work with
// DCHECK_EQ() and other log macros. For human-readable formatting, see
@@ -57,6 +59,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
@@ -93,10 +96,6 @@ namespace time_internal {
BASE_EXPORT int64_t SaturatedAdd(TimeDelta delta, int64_t value);
BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
-// Clamp |value| on overflow and underflow conditions. The int64_t argument and
-// return value are in terms of a microsecond timebase.
-BASE_EXPORT int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value);
-
} // namespace time_internal
// TimeDelta ------------------------------------------------------------------
@@ -115,6 +114,9 @@ class BASE_EXPORT TimeDelta {
static constexpr TimeDelta FromSecondsD(double secs);
static constexpr TimeDelta FromMillisecondsD(double ms);
static constexpr TimeDelta FromMicroseconds(int64_t us);
+#if defined(OS_POSIX)
+ static TimeDelta FromTimeSpec(const timespec& ts);
+#endif
#if defined(OS_WIN)
static TimeDelta FromQPCValue(LONGLONG qpc_value);
#endif
@@ -128,7 +130,7 @@ class BASE_EXPORT TimeDelta {
// Returns the maximum time delta, which should be greater than any reasonable
// time delta we might compare it to. Adding the maximum time delta to, or
// subtracting it from, a time or another time delta has an undefined result.
- static TimeDelta Max();
+ static constexpr TimeDelta Max();
// Returns the internal numeric value of the TimeDelta object. Please don't
// use this and do arithmetic on it, as it is more error prone than using the
@@ -200,13 +202,24 @@ class BASE_EXPORT TimeDelta {
TimeDelta operator*(T a) const {
CheckedNumeric<int64_t> rv(delta_);
rv *= a;
- return TimeDelta(time_internal::FromCheckedNumeric(rv));
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ if ((delta_ < 0) ^ (a < 0))
+ return TimeDelta(-std::numeric_limits<int64_t>::max());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
}
template<typename T>
TimeDelta operator/(T a) const {
CheckedNumeric<int64_t> rv(delta_);
rv /= a;
- return TimeDelta(time_internal::FromCheckedNumeric(rv));
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ // Special case to catch divide by zero.
+ if ((delta_ < 0) ^ (a <= 0))
+ return TimeDelta(-std::numeric_limits<int64_t>::max());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
}
template<typename T>
TimeDelta& operator*=(T a) {
@@ -242,6 +255,11 @@ class BASE_EXPORT TimeDelta {
return delta_ >= other.delta_;
}
+#if defined(OS_WIN)
+ // This works around crbug.com/635974
+ constexpr TimeDelta(const TimeDelta& other) : delta_(other.delta_) {}
+#endif
+
private:
friend int64_t time_internal::SaturatedAdd(TimeDelta delta, int64_t value);
friend int64_t time_internal::SaturatedSub(TimeDelta delta, int64_t value);
@@ -452,8 +470,6 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static Time NowFromSystemTime();
// Converts to/from time_t in UTC and a Time class.
- // TODO(brettw) this should be removed once everybody starts using the |Time|
- // class.
static Time FromTimeT(time_t tt);
time_t ToTimeT() const;
@@ -479,8 +495,9 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static Time FromJsTime(double ms_since_epoch);
double ToJsTime() const;
- // Converts to Java convention for times, a number of
+ // Converts to/from Java convention for times, a number of
// milliseconds since the epoch.
+ static Time FromJavaTime(int64_t ms_since_epoch);
int64_t ToJavaTime() const;
#if defined(OS_POSIX)
@@ -521,23 +538,8 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
#endif
// Converts an exploded structure representing either the local time or UTC
- // into a Time class.
- // TODO(maksims): Get rid of these in favor of the methods below when
- // all the callers stop using these ones.
- static Time FromUTCExploded(const Exploded& exploded) {
- base::Time time;
- ignore_result(FromUTCExploded(exploded, &time));
- return time;
- }
- static Time FromLocalExploded(const Exploded& exploded) {
- base::Time time;
- ignore_result(FromLocalExploded(exploded, &time));
- return time;
- }
-
- // Converts an exploded structure representing either the local time or UTC
// into a Time class. Returns false on a failure when, for example, a day of
- // month is set to 31 on a 28-30 day month.
+ // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
static bool FromUTCExploded(const Exploded& exploded,
Time* time) WARN_UNUSED_RESULT {
return FromExploded(false, exploded, time);
@@ -555,10 +557,12 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// specified in RFC822) is treated as if the timezone is not specified.
// TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
// a new time converter class.
- static bool FromString(const char* time_string, Time* parsed_time) {
+ static bool FromString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
return FromStringInternal(time_string, true, parsed_time);
}
- static bool FromUTCString(const char* time_string, Time* parsed_time) {
+ static bool FromUTCString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
return FromStringInternal(time_string, false, parsed_time);
}
@@ -601,10 +605,11 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// timezone is not specified.
static bool FromStringInternal(const char* time_string,
bool is_local,
- Time* parsed_time);
+ Time* parsed_time) WARN_UNUSED_RESULT;
// Comparison does not consider |day_of_week| when doing the operation.
- static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
+ static bool ExplodedMostlyEquals(const Exploded& lhs,
+ const Exploded& rhs) WARN_UNUSED_RESULT;
};
// static
@@ -654,6 +659,11 @@ constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
}
// static
+constexpr TimeDelta TimeDelta::Max() {
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
constexpr TimeDelta TimeDelta::FromDouble(double value) {
// TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
// the Min() behavior.
@@ -711,7 +721,14 @@ class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
// Now() will return high resolution values. Note that, on systems where the
// high resolution clock works but is deemed inefficient, the low resolution
// clock will be used instead.
- static bool IsHighResolution();
+ static bool IsHighResolution() WARN_UNUSED_RESULT;
+
+ // Returns true if TimeTicks is consistent across processes, meaning that
+ // timestamps taken on different processes can be safely compared with one
+ // another. (Note that, even on platforms where this returns true, time values
+ // from different threads that are within one tick of each other must be
+ // considered to have an ambiguous ordering.)
+ static bool IsConsistentAcrossProcesses() WARN_UNUSED_RESULT;
#if defined(OS_WIN)
// Translates an absolute QPC timestamp into a TimeTicks value. The returned
@@ -720,6 +737,10 @@ class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
static TimeTicks FromQPCValue(LONGLONG qpc_value);
#endif
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
// Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
// Time and TimeTicks respond differently to user-set time and NTP
// adjustments, this number is only an estimate. Nevertheless, this can be
@@ -768,7 +789,7 @@ class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
}
// Returns true if ThreadTicks::Now() is supported on this system.
- static bool IsSupported() {
+ static bool IsSupported() WARN_UNUSED_RESULT {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
(defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID)
return true;
@@ -819,7 +840,7 @@ class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
// allow testing.
static double TSCTicksPerSecond();
- static bool IsSupportedWin();
+ static bool IsSupportedWin() WARN_UNUSED_RESULT;
static void WaitUntilInitializedWin();
#endif
};
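Because the saturating operator* above clamps matched-sign overflow to the maximum representable value, and Max() is now constexpr with that same value, overflowing arithmetic lands exactly on Max() rather than wrapping. A hypothetical illustration:

#include <limits>

base::TimeDelta one_second = base::TimeDelta::FromSeconds(1);
base::TimeDelta product = one_second * std::numeric_limits<int64_t>::max();
// Matched signs overflow toward +max, which is precisely TimeDelta::Max():
CHECK(product == base::TimeDelta::Max());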
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index 373ec3a3bc..c75423df9c 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -25,22 +25,8 @@
namespace {
-int64_t ComputeCurrentTicks() {
-#if defined(OS_IOS)
- // On iOS mach_absolute_time stops while the device is sleeping. Instead use
- // now - KERN_BOOTTIME to get a time difference that is not impacted by clock
- // changes. KERN_BOOTTIME will be updated by the system whenever the system
- // clock change.
- struct timeval boottime;
- int mib[2] = {CTL_KERN, KERN_BOOTTIME};
- size_t size = sizeof(boottime);
- int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
- DCHECK_EQ(KERN_SUCCESS, kr);
- base::TimeDelta time_difference = base::Time::Now() -
- (base::Time::FromTimeT(boottime.tv_sec) +
- base::TimeDelta::FromMicroseconds(boottime.tv_usec));
- return time_difference.InMicroseconds();
-#else
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+int64_t MachAbsoluteTimeToTicks(uint64_t mach_absolute_time) {
static mach_timebase_info_data_t timebase_info;
if (timebase_info.denom == 0) {
// Zero-initialization of statics guarantees that denom will be 0 before
@@ -52,14 +38,10 @@ int64_t ComputeCurrentTicks() {
MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
}
- // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
- // with less precision (such as TickCount) just call through to
- // mach_absolute_time.
-
// timebase_info converts absolute time tick units into nanoseconds. Convert
// to microseconds up front to stave off overflows.
- base::CheckedNumeric<uint64_t> result(
- mach_absolute_time() / base::Time::kNanosecondsPerMicrosecond);
+ base::CheckedNumeric<uint64_t> result(mach_absolute_time /
+ base::Time::kNanosecondsPerMicrosecond);
result *= timebase_info.numer;
result /= timebase_info.denom;
@@ -67,6 +49,29 @@ int64_t ComputeCurrentTicks() {
// With numer and denom = 1 (the expected case), the 64-bit absolute time
// reported in nanoseconds is enough to last nearly 585 years.
return base::checked_cast<int64_t>(result.ValueOrDie());
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+int64_t ComputeCurrentTicks() {
+#if defined(OS_IOS)
+ // On iOS mach_absolute_time stops while the device is sleeping. Instead use
+ // now - KERN_BOOTTIME to get a time difference that is not impacted by clock
+ // changes. KERN_BOOTTIME will be updated by the system whenever the system
+ // clock changes.
+ struct timeval boottime;
+ int mib[2] = {CTL_KERN, KERN_BOOTTIME};
+ size_t size = sizeof(boottime);
+ int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
+ DCHECK_EQ(KERN_SUCCESS, kr);
+ base::TimeDelta time_difference =
+ base::Time::Now() - (base::Time::FromTimeT(boottime.tv_sec) +
+ base::TimeDelta::FromMicroseconds(boottime.tv_usec));
+ return time_difference.InMicroseconds();
+#else
+ // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
+ // with less precision (such as TickCount) just call through to
+ // mach_absolute_time.
+ return MachAbsoluteTimeToTicks(mach_absolute_time());
#endif // defined(OS_IOS)
}
@@ -185,9 +190,18 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
exploded.millisecond);
CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
- base::Time converted_time =
- Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
- kWindowsEpochDeltaMicroseconds);
+ // CFAbsoluteTime is a typedef of double. Convert |seconds| to
+ // microseconds and then cast to int64_t. If the result cannot be
+ // represented as an int64_t, fail to avoid overflow.
+ double microseconds =
+ (seconds * kMicrosecondsPerSecond) + kWindowsEpochDeltaMicroseconds;
+ if (microseconds > std::numeric_limits<int64_t>::max() ||
+ microseconds < std::numeric_limits<int64_t>::min()) {
+ *time = Time(0);
+ return false;
+ }
+
+ base::Time converted_time = Time(static_cast<int64_t>(microseconds));
// If |exploded.day_of_month| is set to 31
// on a 28-30 day month, it will return the first day of the next month.
@@ -259,6 +273,18 @@ bool TimeTicks::IsHighResolution() {
}
// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+ return true;
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// static
+TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
+ return TimeTicks(MachAbsoluteTimeToTicks(mach_absolute_time));
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// static
TimeTicks::Clock TimeTicks::GetClock() {
#if defined(OS_IOS)
return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
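
Splitting MachAbsoluteTimeToTicks() out of ComputeCurrentTicks() is what enables the new TimeTicks::FromMachAbsoluteTime(): a raw timestamp captured outside of base can be mapped into the same clock domain as TimeTicks::Now(). A hedged sketch (macOS only; the function name is illustrative):

    #include <mach/mach_time.h>

    #include "base/time/time.h"

    // Converts a raw mach_absolute_time() sample (e.g. one received over IPC
    // from another process) into a TimeTicks comparable with Now().
    base::TimeTicks TicksFromRawMachTime(uint64_t raw_mach_time) {
      // Applies the same mach_timebase_info scaling that Now() uses.
      return base::TimeTicks::FromMachAbsoluteTime(raw_mach_time);
    }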
diff --git a/base/time/time_posix.cc b/base/time/time_posix.cc
index 495e249f00..2cceb0c610 100644
--- a/base/time/time_posix.cc
+++ b/base/time/time_posix.cc
@@ -16,6 +16,7 @@
#include <ostream>
#include "base/logging.h"
+#include "base/numerics/safe_math.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
@@ -25,7 +26,6 @@
#endif
#if !defined(OS_MACOSX)
-#include "base/lazy_instance.h"
#include "base/synchronization/lock.h"
#endif
@@ -34,8 +34,10 @@ namespace {
#if !defined(OS_MACOSX)
// This prevents a crash on traversing the environment global and looking up
// the 'TZ' variable in libc. See: crbug.com/390567.
-base::LazyInstance<base::Lock>::Leaky
- g_sys_time_to_time_struct_lock = LAZY_INSTANCE_INITIALIZER;
+base::Lock* GetSysTimeToTimeStructLock() {
+ static auto* lock = new base::Lock();
+ return lock;
+}
// Define a system-specific SysTime that wraps either a time_t or
// a time64_t depending on the host system, and the associated conversions.
@@ -44,7 +46,7 @@ base::LazyInstance<base::Lock>::Leaky
typedef time64_t SysTime;
SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
- base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
return mktime64(timestruct);
else
@@ -52,7 +54,7 @@ SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
}
void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
- base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
localtime64_r(&t, timestruct);
else
@@ -63,7 +65,7 @@ void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
typedef time_t SysTime;
SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
- base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
return mktime(timestruct);
else
@@ -71,7 +73,7 @@ SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
}
void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
- base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
localtime_r(&t, timestruct);
else
@@ -80,10 +82,19 @@ void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
#endif // OS_ANDROID
int64_t ConvertTimespecToMicros(const struct timespec& ts) {
- base::CheckedNumeric<int64_t> result(ts.tv_sec);
- result *= base::Time::kMicrosecondsPerSecond;
- result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
- return result.ValueOrDie();
+ // On 32-bit systems, the calculation cannot overflow int64_t.
+ // 2**32 * 1000000 + 2**64 / 1000 < 2**63
+ if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
+ int64_t result = ts.tv_sec;
+ result *= base::Time::kMicrosecondsPerSecond;
+ result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+ return result;
+ } else {
+ base::CheckedNumeric<int64_t> result(ts.tv_sec);
+ result *= base::Time::kMicrosecondsPerSecond;
+ result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+ return result.ValueOrDie();
+ }
}
// Helper function to get results from clock_gettime() and convert to a
@@ -110,6 +121,12 @@ int64_t ClockNow(clockid_t clk_id) {
namespace base {
+// static
+TimeDelta TimeDelta::FromTimeSpec(const timespec& ts) {
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
struct timespec TimeDelta::ToTimeSpec() const {
int64_t microseconds = InMicroseconds();
time_t seconds = 0;
@@ -212,22 +229,30 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
// static
bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+ CheckedNumeric<int> month = exploded.month;
+ month--;
+ CheckedNumeric<int> year = exploded.year;
+ year -= 1900;
+ if (!month.IsValid() || !year.IsValid()) {
+ *time = Time(0);
+ return false;
+ }
+
struct tm timestruct;
- timestruct.tm_sec = exploded.second;
- timestruct.tm_min = exploded.minute;
- timestruct.tm_hour = exploded.hour;
- timestruct.tm_mday = exploded.day_of_month;
- timestruct.tm_mon = exploded.month - 1;
- timestruct.tm_year = exploded.year - 1900;
- timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
- timestruct.tm_yday = 0; // mktime/timegm ignore this
- timestruct.tm_isdst = -1; // attempt to figure it out
+ timestruct.tm_sec = exploded.second;
+ timestruct.tm_min = exploded.minute;
+ timestruct.tm_hour = exploded.hour;
+ timestruct.tm_mday = exploded.day_of_month;
+ timestruct.tm_mon = month.ValueOrDie();
+ timestruct.tm_year = year.ValueOrDie();
+ timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
+ timestruct.tm_yday = 0; // mktime/timegm ignore this
+ timestruct.tm_isdst = -1; // attempt to figure it out
#if !defined(OS_NACL) && !defined(OS_SOLARIS)
- timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
- timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
#endif
- int64_t milliseconds;
SysTime seconds;
// Certain exploded dates do not really exist due to daylight saving times,
@@ -265,6 +290,7 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
// return is the best that can be done here. It's not ideal, but it's better
// than failing here or ignoring the overflow case and treating each time
// overflow as one second prior to the epoch.
+ int64_t milliseconds = 0;
if (seconds == -1 &&
(exploded.year < 1969 || exploded.year > 1970)) {
// If exploded.year is 1969 or 1970, take -1 as correct, with the
@@ -297,13 +323,25 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
milliseconds += (kMillisecondsPerSecond - 1);
}
} else {
- milliseconds = seconds * kMillisecondsPerSecond + exploded.millisecond;
+ base::CheckedNumeric<int64_t> checked_millis = seconds;
+ checked_millis *= kMillisecondsPerSecond;
+ checked_millis += exploded.millisecond;
+ if (!checked_millis.IsValid()) {
+ *time = base::Time(0);
+ return false;
+ }
+ milliseconds = checked_millis.ValueOrDie();
}
- // Adjust from Unix (1970) to Windows (1601) epoch.
- base::Time converted_time =
- Time((milliseconds * kMicrosecondsPerMillisecond) +
- kWindowsEpochDeltaMicroseconds);
+ // Adjust from Unix (1970) to Windows (1601) epoch avoiding overflows.
+ base::CheckedNumeric<int64_t> checked_microseconds_win_epoch = milliseconds;
+ checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
+ checked_microseconds_win_epoch += kWindowsEpochDeltaMicroseconds;
+ if (!checked_microseconds_win_epoch.IsValid()) {
+ *time = base::Time(0);
+ return false;
+ }
+ base::Time converted_time(checked_microseconds_win_epoch.ValueOrDie());
// If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
// return the first day of the next month. Thus round-trip the time and
@@ -340,6 +378,11 @@ bool TimeTicks::IsHighResolution() {
}
// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+ return true;
+}
+
+// static
ThreadTicks ThreadTicks::Now() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
defined(OS_ANDROID)
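
TimeDelta::FromTimeSpec() added above is the inverse of the existing ToTimeSpec(). A short sketch of the round trip that the updated unit test below verifies (POSIX only; the function name is illustrative):

    #include <time.h>

    #include "base/logging.h"
    #include "base/time/time.h"

    void TimeSpecRoundTrip() {
      base::TimeDelta delta = base::TimeDelta::FromMicroseconds(1500001);
      // ToTimeSpec() yields tv_sec = 1, tv_nsec = 500001000.
      struct timespec ts = delta.ToTimeSpec();
      // FromTimeSpec() computes tv_sec * 1e6 + tv_nsec / 1e3 microseconds,
      // so the round trip is exact at microsecond granularity.
      CHECK(delta == base::TimeDelta::FromTimeSpec(ts));
    }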
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 4f47d56522..8906c3bee1 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -54,6 +54,16 @@ TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
{{2016, 10, 0, 25, 7, 47, 234, 0}, false},
// Milliseconds are too large
{{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+ // Test overflow. The Exploded fields are in range, but the conversion
+ // overflows and results in Time(0).
+ {{9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+ // Underflow will fail as well.
+ {{-9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+ // Test integer overflow and underflow cases for the values themselves.
+ {{std::numeric_limits<int>::min(), 1, 0, 1, 1, 1, 0, 0}, true},
+ {{std::numeric_limits<int>::max(), 1, 0, 1, 1, 1, 0, 0}, true},
+ {{2016, std::numeric_limits<int>::min(), 0, 1, 1, 1, 0, 0}, false},
+ {{2016, std::numeric_limits<int>::max(), 0, 1, 1, 1, 0, 0}, false},
};
for (const auto& test : kDateTestData) {
@@ -806,22 +816,29 @@ TEST(TimeDelta, FromAndIn) {
#if defined(OS_POSIX)
TEST(TimeDelta, TimeSpecConversion) {
- struct timespec result = TimeDelta::FromSeconds(0).ToTimeSpec();
+ TimeDelta delta = TimeDelta::FromSeconds(0);
+ struct timespec result = delta.ToTimeSpec();
EXPECT_EQ(result.tv_sec, 0);
EXPECT_EQ(result.tv_nsec, 0);
+ EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
- result = TimeDelta::FromSeconds(1).ToTimeSpec();
+ delta = TimeDelta::FromSeconds(1);
+ result = delta.ToTimeSpec();
EXPECT_EQ(result.tv_sec, 1);
EXPECT_EQ(result.tv_nsec, 0);
+ EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
- result = TimeDelta::FromMicroseconds(1).ToTimeSpec();
+ delta = TimeDelta::FromMicroseconds(1);
+ result = delta.ToTimeSpec();
EXPECT_EQ(result.tv_sec, 0);
EXPECT_EQ(result.tv_nsec, 1000);
+ EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
- result = TimeDelta::FromMicroseconds(
- Time::kMicrosecondsPerSecond + 1).ToTimeSpec();
+ delta = TimeDelta::FromMicroseconds(Time::kMicrosecondsPerSecond + 1);
+ result = delta.ToTimeSpec();
EXPECT_EQ(result.tv_sec, 1);
EXPECT_EQ(result.tv_nsec, 1000);
+ EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
}
#endif // OS_POSIX
@@ -1099,17 +1116,17 @@ TEST(TimeDeltaLogging, DCheckEqCompiles) {
TEST(TimeDeltaLogging, EmptyIsZero) {
TimeDelta zero;
- EXPECT_EQ("0s", AnyToString(zero));
+ EXPECT_EQ("0 s", AnyToString(zero));
}
TEST(TimeDeltaLogging, FiveHundredMs) {
TimeDelta five_hundred_ms = TimeDelta::FromMilliseconds(500);
- EXPECT_EQ("0.5s", AnyToString(five_hundred_ms));
+ EXPECT_EQ("0.5 s", AnyToString(five_hundred_ms));
}
TEST(TimeDeltaLogging, MinusTenSeconds) {
TimeDelta minus_ten_seconds = TimeDelta::FromSeconds(-10);
- EXPECT_EQ("-10s", AnyToString(minus_ten_seconds));
+ EXPECT_EQ("-10 s", AnyToString(minus_ten_seconds));
}
TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
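
The new overflow rows in the FromExplodedOutOfBoundsTime table exercise the CheckedNumeric hardening added to the platform FromExploded() implementations above. A sketch of the resulting behavior, assuming the bool-returning FromUTCExploded() wrapper (field values borrowed from the test table; the function name is illustrative):

    #include "base/logging.h"
    #include "base/time/time.h"

    void ExplodedOverflowFailsCleanly() {
      base::Time::Exploded huge = {};
      huge.year = 9840633;  // Fields are individually in range, but the year
      huge.month = 1;       // is astronomically far in the future...
      huge.day_of_month = 1;
      huge.hour = 1;
      huge.minute = 1;
      base::Time out;
      // ...so the microsecond arithmetic overflows int64_t; instead of
      // wrapping, conversion now fails and |out| is set to Time(0).
      bool ok = base::Time::FromUTCExploded(huge, &out);
      DCHECK(!ok);
      DCHECK(out.is_null());
    }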
diff --git a/base/timer/timer.cc b/base/timer/timer.cc
index e554905fff..6ec18f1814 100644
--- a/base/timer/timer.cc
+++ b/base/timer/timer.cc
@@ -6,11 +6,15 @@
#include <stddef.h>
+#include <utility>
+
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
namespace base {
@@ -60,26 +64,36 @@ class BaseTimerTaskInternal {
};
Timer::Timer(bool retain_user_task, bool is_repeating)
- : scheduled_task_(NULL),
+ : Timer(retain_user_task, is_repeating, nullptr) {}
+
+Timer::Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock)
+ : scheduled_task_(nullptr),
thread_id_(0),
is_repeating_(is_repeating),
retain_user_task_(retain_user_task),
- is_running_(false) {
-}
+ tick_clock_(tick_clock),
+ is_running_(false) {}
Timer::Timer(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating)
- : scheduled_task_(NULL),
+ : Timer(posted_from, delay, user_task, is_repeating, nullptr) {}
+
+Timer::Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ TickClock* tick_clock)
+ : scheduled_task_(nullptr),
posted_from_(posted_from),
delay_(delay),
user_task_(user_task),
thread_id_(0),
is_repeating_(is_repeating),
retain_user_task_(true),
- is_running_(false) {
-}
+ tick_clock_(tick_clock),
+ is_running_(false) {}
Timer::~Timer() {
StopAndAbandon();
@@ -123,7 +137,7 @@ void Timer::Reset() {
// Set the new desired_run_time_.
if (delay_ > TimeDelta::FromMicroseconds(0))
- desired_run_time_ = TimeTicks::Now() + delay_;
+ desired_run_time_ = Now() + delay_;
else
desired_run_time_ = TimeTicks();
@@ -139,6 +153,10 @@ void Timer::Reset() {
PostNewScheduledTask(delay_);
}
+TimeTicks Timer::Now() const {
+ return tick_clock_ ? tick_clock_->NowTicks() : TimeTicks::Now();
+}
+
void Timer::SetTaskInfo(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task) {
@@ -155,7 +173,7 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
GetTaskRunner()->PostDelayedTask(posted_from_,
base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)),
delay);
- scheduled_run_time_ = desired_run_time_ = TimeTicks::Now() + delay;
+ scheduled_run_time_ = desired_run_time_ = Now() + delay;
} else {
GetTaskRunner()->PostTask(posted_from_,
base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)));
@@ -163,10 +181,8 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
}
// Remember the thread ID that posts the first task -- this will be verified
// later when the task is abandoned to detect misuse from multiple threads.
- if (!thread_id_) {
- DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ if (!thread_id_)
thread_id_ = static_cast<int>(PlatformThread::CurrentId());
- }
}
scoped_refptr<SingleThreadTaskRunner> Timer::GetTaskRunner() {
@@ -189,9 +205,9 @@ void Timer::RunScheduledTask() {
// First check if we need to delay the task because of a new target time.
if (desired_run_time_ > scheduled_run_time_) {
- // TimeTicks::Now() can be expensive, so only call it if we know the user
- // has changed the desired_run_time_.
- TimeTicks now = TimeTicks::Now();
+ // Now() can be expensive, so only call it if we know the user has changed
+ // the desired_run_time_.
+ TimeTicks now = Now();
// Task runner may have called us late anyway, so only post a continuation
// task if the desired_run_time_ is in the future.
if (desired_run_time_ > now) {
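
The Now() indirection added to timer.cc is what makes the tick clock injectable: every spot that previously called TimeTicks::Now() now consults |tick_clock_| when one was supplied. A hedged sketch of the new three-argument constructor (DoNothing and the enclosing function are hypothetical; a MessageLoop is assumed to be running on the calling thread so Start() can post its task):

    #include "base/bind.h"
    #include "base/time/tick_clock.h"
    #include "base/timer/timer.h"

    void DoNothing() {}  // Hypothetical user task.

    void StartTimerWithInjectedClock(base::TickClock* mock_clock) {
      // |mock_clock| may be null, in which case TimeTicks::Now() is used.
      base::Timer timer(true /* retain_user_task */, false /* is_repeating */,
                        mock_clock);
      timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1),
                  base::Bind(&DoNothing));
      // desired_run_time_ is now computed from mock_clock->NowTicks()
      // rather than TimeTicks::Now().
    }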
diff --git a/base/timer/timer.h b/base/timer/timer.h
index 661829b513..8aac279def 100644
--- a/base/timer/timer.h
+++ b/base/timer/timer.h
@@ -49,6 +49,8 @@
// because they're flaky on the buildbot, but when you run them locally you
// should be able to tell the difference.
+#include <memory>
+
#include "base/base_export.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -61,6 +63,7 @@ namespace base {
class BaseTimerTaskInternal;
class SingleThreadTaskRunner;
+class TickClock;
//-----------------------------------------------------------------------------
// This class wraps MessageLoop::PostDelayedTask to manage delayed and repeating
@@ -71,14 +74,23 @@ class BASE_EXPORT Timer {
public:
// Construct a timer in repeating or one-shot mode. Start or SetTaskInfo must
// be called later to set task info. |retain_user_task| determines whether the
- // user_task is retained or reset when it runs or stops.
+ // user_task is retained or reset when it runs or stops. If |tick_clock| is
+ // provided, it is used instead of TimeTicks::Now() to get TimeTicks when
+ // scheduling tasks.
Timer(bool retain_user_task, bool is_repeating);
+ Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock);
- // Construct a timer with retained task info.
+ // Construct a timer with retained task info. If |tick_clock| is provided, it
+ // is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
Timer(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating);
+ Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ TickClock* tick_clock);
virtual ~Timer();
@@ -111,6 +123,9 @@ class BASE_EXPORT Timer {
const TimeTicks& desired_run_time() const { return desired_run_time_; }
protected:
+ // Returns the current tick count.
+ TimeTicks Now() const;
+
// Used to initiate a new delayed task. This has the side-effect of disabling
// scheduled_task_ if it is non-null.
void SetTaskInfo(const tracked_objects::Location& posted_from,
@@ -148,8 +163,10 @@ class BASE_EXPORT Timer {
// Stop running task (if any) and abandon scheduled task (if any).
void StopAndAbandon() {
- Stop();
AbandonScheduledTask();
+
+ Stop();
+ // No more member accesses here: |this| could be deleted at this point.
}
// When non-NULL, the scheduled_task_ is waiting in the MessageLoop to call
@@ -191,6 +208,9 @@ class BASE_EXPORT Timer {
// If true, hold on to the user_task_ closure object for reuse.
const bool retain_user_task_;
+ // The tick clock used to calculate the run time for scheduled tasks.
+ TickClock* const tick_clock_;
+
// If true, user_task_ is scheduled to run sometime in the future.
bool is_running_;
@@ -210,8 +230,8 @@ class BaseTimerMethodPointer : public Timer {
using Timer::Start;
enum RepeatMode { ONE_SHOT, REPEATING };
- BaseTimerMethodPointer(RepeatMode mode)
- : Timer(mode == REPEATING, mode == REPEATING) {}
+ BaseTimerMethodPointer(RepeatMode mode, TickClock* tick_clock)
+ : Timer(mode == REPEATING, mode == REPEATING, tick_clock) {}
// Start the timer to run at the given |delay| from now. If the timer is
// already running, it will be replaced to call a task formed from
@@ -230,14 +250,18 @@ class BaseTimerMethodPointer : public Timer {
// A simple, one-shot timer. See usage notes at the top of the file.
class OneShotTimer : public BaseTimerMethodPointer {
public:
- OneShotTimer() : BaseTimerMethodPointer(ONE_SHOT) {}
+ OneShotTimer() : OneShotTimer(nullptr) {}
+ explicit OneShotTimer(TickClock* tick_clock)
+ : BaseTimerMethodPointer(ONE_SHOT, tick_clock) {}
};
//-----------------------------------------------------------------------------
// A simple, repeating timer. See usage notes at the top of the file.
class RepeatingTimer : public BaseTimerMethodPointer {
public:
- RepeatingTimer() : BaseTimerMethodPointer(REPEATING) {}
+ RepeatingTimer() : RepeatingTimer(nullptr) {}
+ explicit RepeatingTimer(TickClock* tick_clock)
+ : BaseTimerMethodPointer(REPEATING, tick_clock) {}
};
//-----------------------------------------------------------------------------
@@ -258,10 +282,19 @@ class DelayTimer : protected Timer {
TimeDelta delay,
Receiver* receiver,
void (Receiver::*method)())
+ : DelayTimer(posted_from, delay, receiver, method, nullptr) {}
+
+ template <class Receiver>
+ DelayTimer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ Receiver* receiver,
+ void (Receiver::*method)(),
+ TickClock* tick_clock)
: Timer(posted_from,
delay,
base::Bind(method, base::Unretained(receiver)),
- false) {}
+ false,
+ tick_clock) {}
void Reset() override;
};
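
DelayTimer's new five-argument constructor completes the injection story for the "fire only after inactivity" pattern. A minimal sketch under the same assumptions (the Loader class is hypothetical):

    #include "base/time/tick_clock.h"
    #include "base/timer/timer.h"

    class Loader {
     public:
      explicit Loader(base::TickClock* tick_clock)
          : idle_timer_(FROM_HERE, base::TimeDelta::FromSeconds(1), this,
                        &Loader::OnIdle, tick_clock) {}

      // Each call postpones OnIdle() by another second.
      void OnActivity() { idle_timer_.Reset(); }

     private:
      void OnIdle() { /* One second passed with no OnActivity() calls. */ }

      base::DelayTimer idle_timer_;
    };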
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
index 6fcd25b93a..69338eb211 100644
--- a/base/timer/timer_unittest.cc
+++ b/base/timer/timer_unittest.cc
@@ -8,189 +8,274 @@
#include <memory>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
-#include "base/test/test_simple_task_runner.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/sequenced_worker_pool_owner.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-using base::TimeDelta;
-using base::SingleThreadTaskRunner;
+namespace base {
namespace {
// The message loops on which each timer should be tested.
-const base::MessageLoop::Type testing_message_loops[] = {
- base::MessageLoop::TYPE_DEFAULT,
- base::MessageLoop::TYPE_IO,
+const MessageLoop::Type testing_message_loops[] = {
+ MessageLoop::TYPE_DEFAULT, MessageLoop::TYPE_IO,
#if !defined(OS_IOS) // iOS does not allow direct running of the UI loop.
- base::MessageLoop::TYPE_UI,
+ MessageLoop::TYPE_UI,
#endif
};
const int kNumTestingMessageLoops = arraysize(testing_message_loops);
-class OneShotTimerTester {
+class Receiver {
public:
- explicit OneShotTimerTester(bool* did_run, unsigned milliseconds = 10)
- : did_run_(did_run),
- delay_ms_(milliseconds),
- quit_message_loop_(true) {
- }
+ Receiver() : count_(0) {}
+ void OnCalled() { count_++; }
+ bool WasCalled() { return count_ > 0; }
+ int TimesCalled() { return count_; }
+
+ private:
+ int count_;
+};
+
+// A basic helper class that can start a one-shot timer and signal a
+// WaitableEvent when this timer fires.
+class OneShotTimerTesterBase {
+ public:
+ // |did_run|, if provided, will be signaled when Run() fires.
+ explicit OneShotTimerTesterBase(
+ WaitableEvent* did_run = nullptr,
+ const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+ : did_run_(did_run), delay_(delay) {}
+
+ virtual ~OneShotTimerTesterBase() = default;
void Start() {
- timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(delay_ms_), this,
- &OneShotTimerTester::Run);
+ started_time_ = TimeTicks::Now();
+ timer_->Start(FROM_HERE, delay_, this, &OneShotTimerTesterBase::Run);
}
- void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
- quit_message_loop_ = false;
- timer_.SetTaskRunner(task_runner);
- }
+ bool IsRunning() { return timer_->IsRunning(); }
- private:
- void Run() {
- *did_run_ = true;
- if (quit_message_loop_) {
- base::MessageLoop::current()->QuitWhenIdle();
+ TimeTicks started_time() const { return started_time_; }
+ TimeDelta delay() const { return delay_; }
+
+ protected:
+ virtual void Run() {
+ if (did_run_) {
+ EXPECT_FALSE(did_run_->IsSignaled());
+ did_run_->Signal();
}
}
- bool* did_run_;
- base::OneShotTimer timer_;
- const unsigned delay_ms_;
- bool quit_message_loop_;
+ std::unique_ptr<OneShotTimer> timer_ = MakeUnique<OneShotTimer>();
+
+ private:
+ WaitableEvent* const did_run_;
+ const TimeDelta delay_;
+ TimeTicks started_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotTimerTesterBase);
};
-class OneShotSelfDeletingTimerTester {
+// Extends functionality of OneShotTimerTesterBase with the abilities to wait
+// until the timer fires and to change task runner for the timer.
+class OneShotTimerTester : public OneShotTimerTesterBase {
public:
- explicit OneShotSelfDeletingTimerTester(bool* did_run)
- : did_run_(did_run), timer_(new base::OneShotTimer()) {}
+ // |did_run|, if provided, will be signaled when Run() fires.
+ explicit OneShotTimerTester(
+ WaitableEvent* did_run = nullptr,
+ const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+ : OneShotTimerTesterBase(did_run, delay),
+ quit_closure_(run_loop_.QuitClosure()) {}
- void Start() {
- timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(10), this,
- &OneShotSelfDeletingTimerTester::Run);
+ ~OneShotTimerTester() override = default;
+
+ void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ timer_->SetTaskRunner(std::move(task_runner));
+
+ // Run() will be invoked on |task_runner| but |run_loop_|'s QuitClosure
+ // needs to run on this thread (where the MessageLoop lives).
+ quit_closure_ =
+ Bind(IgnoreResult(&SingleThreadTaskRunner::PostTask),
+ ThreadTaskRunnerHandle::Get(), FROM_HERE, run_loop_.QuitClosure());
}
+ // Blocks until Run() executes and confirms that Run() didn't fire before
+ // |delay_| expired.
+ void WaitAndConfirmTimerFiredAfterDelay() {
+ run_loop_.Run();
+
+ EXPECT_NE(TimeTicks(), started_time());
+ EXPECT_GE(TimeTicks::Now() - started_time(), delay());
+ }
+
+ protected:
+ // Overridable method to do things on Run() before signaling events/closures
+ // managed by this helper.
+ virtual void OnRun() {}
+
private:
- void Run() {
- *did_run_ = true;
- timer_.reset();
- base::MessageLoop::current()->QuitWhenIdle();
+ void Run() override {
+ OnRun();
+ OneShotTimerTesterBase::Run();
+ quit_closure_.Run();
}
- bool* did_run_;
- std::unique_ptr<base::OneShotTimer> timer_;
+ RunLoop run_loop_;
+ Closure quit_closure_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotTimerTester);
+};
+
+class OneShotSelfDeletingTimerTester : public OneShotTimerTester {
+ protected:
+ void OnRun() override { timer_.reset(); }
};
+constexpr int kNumRepeats = 10;
+
class RepeatingTimerTester {
public:
- explicit RepeatingTimerTester(bool* did_run, const TimeDelta& delay)
- : did_run_(did_run), counter_(10), delay_(delay) {
- }
+ explicit RepeatingTimerTester(WaitableEvent* did_run, const TimeDelta& delay)
+ : counter_(kNumRepeats),
+ quit_closure_(run_loop_.QuitClosure()),
+ did_run_(did_run),
+ delay_(delay) {}
void Start() {
+ started_time_ = TimeTicks::Now();
timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
}
+ void WaitAndConfirmTimerFiredRepeatedlyAfterDelay() {
+ run_loop_.Run();
+
+ EXPECT_NE(TimeTicks(), started_time_);
+ EXPECT_GE(TimeTicks::Now() - started_time_, kNumRepeats * delay_);
+ }
+
private:
void Run() {
if (--counter_ == 0) {
- *did_run_ = true;
+ if (did_run_) {
+ EXPECT_FALSE(did_run_->IsSignaled());
+ did_run_->Signal();
+ }
timer_.Stop();
- base::MessageLoop::current()->QuitWhenIdle();
+ quit_closure_.Run();
}
}
- bool* did_run_;
+ RepeatingTimer timer_;
int counter_;
- TimeDelta delay_;
- base::RepeatingTimer timer_;
+
+ RunLoop run_loop_;
+ Closure quit_closure_;
+ WaitableEvent* const did_run_;
+
+ const TimeDelta delay_;
+ TimeTicks started_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(RepeatingTimerTester);
};
-void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+// Basic test with same setup as RunTest_OneShotTimers_Cancel below to confirm
+// that |did_run_a| would be signaled in that test if it wasn't for the
+// deletion.
+void RunTest_OneShotTimers(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- OneShotTimerTester f(&did_run);
- f.Start();
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ OneShotTimerTester a(&did_run_a);
+ a.Start();
- base::RunLoop().Run();
+ OneShotTimerTester b;
+ b.Start();
- EXPECT_TRUE(did_run);
+ b.WaitAndConfirmTimerFiredAfterDelay();
+
+ EXPECT_TRUE(did_run_a.IsSignaled());
}
-void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_OneShotTimers_Cancel(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run_a = false;
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
// This should run before the timer expires.
- base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+ SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
- bool did_run_b = false;
- OneShotTimerTester b(&did_run_b);
+ OneShotTimerTester b;
b.Start();
- base::RunLoop().Run();
+ b.WaitAndConfirmTimerFiredAfterDelay();
- EXPECT_FALSE(did_run_a);
- EXPECT_TRUE(did_run_b);
+ EXPECT_FALSE(did_run_a.IsSignaled());
}
-void RunTest_OneShotSelfDeletingTimer(
- base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_OneShotSelfDeletingTimer(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- OneShotSelfDeletingTimerTester f(&did_run);
+ OneShotSelfDeletingTimerTester f;
f.Start();
-
- base::RunLoop().Run();
-
- EXPECT_TRUE(did_run);
+ f.WaitAndConfirmTimerFiredAfterDelay();
}
-void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer(MessageLoop::Type message_loop_type,
const TimeDelta& delay) {
- base::MessageLoop loop(message_loop_type);
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- RepeatingTimerTester f(&did_run, delay);
+ RepeatingTimerTester f(nullptr, delay);
f.Start();
-
- base::RunLoop().Run();
-
- EXPECT_TRUE(did_run);
+ f.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
}
-void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer_Cancel(MessageLoop::Type message_loop_type,
const TimeDelta& delay) {
- base::MessageLoop loop(message_loop_type);
+ MessageLoop loop(message_loop_type);
- bool did_run_a = false;
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
// This should run before the timer expires.
- base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+ SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
- bool did_run_b = false;
- RepeatingTimerTester b(&did_run_b, delay);
+ RepeatingTimerTester b(nullptr, delay);
b.Start();
- base::RunLoop().Run();
+ b.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
- EXPECT_FALSE(did_run_a);
- EXPECT_TRUE(did_run_b);
+ // |a| should not have fired despite |b| starting after it on the same
+ // sequence and being complete by now.
+ EXPECT_FALSE(did_run_a.IsSignaled());
}
class DelayTimerTarget {
@@ -206,40 +291,38 @@ class DelayTimerTarget {
bool signaled_ = false;
};
-void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_NoCall(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
- bool did_run = false;
- OneShotTimerTester tester(&did_run);
+ OneShotTimerTester tester;
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_FALSE(target.signaled());
}
-void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_OneCall(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
- bool did_run = false;
- OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
+ OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(100));
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_TRUE(target.signaled());
}
struct ResetHelper {
- ResetHelper(base::DelayTimer* timer, DelayTimerTarget* target)
+ ResetHelper(DelayTimer* timer, DelayTimerTarget* target)
: timer_(timer), target_(target) {}
void Reset() {
@@ -248,31 +331,30 @@ struct ResetHelper {
}
private:
- base::DelayTimer* const timer_;
+ DelayTimer* const timer_;
DelayTimerTarget* const target_;
};
-void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_Reset(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
ResetHelper reset_helper(&timer, &target);
- base::OneShotTimer timers[20];
+ OneShotTimer timers[20];
for (size_t i = 0; i < arraysize(timers); ++i) {
timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
&reset_helper, &ResetHelper::Reset);
}
- bool did_run = false;
- OneShotTimerTester tester(&did_run, 300);
+ OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(300));
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_TRUE(target.signaled());
}
@@ -284,21 +366,20 @@ class DelayTimerFatalTarget {
}
};
-
-void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_Deleted(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
DelayTimerFatalTarget target;
{
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
- &DelayTimerFatalTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerFatalTarget::Signal);
timer.Reset();
}
// When the timer is deleted, the DelayTimerFatalTarget should never be
// called.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
}
} // namespace
@@ -307,15 +388,15 @@ void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
// Each test is run against each type of MessageLoop. That way we are sure
// that timers work properly in all configurations.
-TEST(TimerTest, OneShotTimer) {
+TEST(TimerTest, OneShotTimers) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
- RunTest_OneShotTimer(testing_message_loops[i]);
+ RunTest_OneShotTimers(testing_message_loops[i]);
}
}
-TEST(TimerTest, OneShotTimer_Cancel) {
+TEST(TimerTest, OneShotTimers_Cancel) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
- RunTest_OneShotTimer_Cancel(testing_message_loops[i]);
+ RunTest_OneShotTimers_Cancel(testing_message_loops[i]);
}
}
@@ -328,17 +409,42 @@ TEST(TimerTest, OneShotSelfDeletingTimer) {
}
TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
- scoped_refptr<base::TestSimpleTaskRunner> task_runner =
- new base::TestSimpleTaskRunner();
+ // A MessageLoop is required for the timer events on the other thread to
+ // communicate back to the Timer under test.
+ MessageLoop loop;
+
+ Thread other_thread("OneShotTimer_CustomTaskRunner");
+ other_thread.Start();
- bool did_run = false;
+ WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
OneShotTimerTester f(&did_run);
- f.SetTaskRunner(task_runner);
+ f.SetTaskRunner(other_thread.task_runner());
f.Start();
+ EXPECT_TRUE(f.IsRunning());
+
+ f.WaitAndConfirmTimerFiredAfterDelay();
+ EXPECT_TRUE(did_run.IsSignaled());
+
+ // |f| should already have communicated back to this |loop| before invoking
+ // Run() and as such this thread should already be aware that |f| is no longer
+ // running.
+ EXPECT_TRUE(loop.IsIdleForTesting());
+ EXPECT_FALSE(f.IsRunning());
+}
- EXPECT_FALSE(did_run);
- task_runner->RunUntilIdle();
- EXPECT_TRUE(did_run);
+TEST(TimerTest, OneShotTimerWithTickClock) {
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ OneShotTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+ Bind(&Receiver::OnCalled, Unretained(&receiver)));
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
+ EXPECT_TRUE(receiver.WasCalled());
}
TEST(TimerTest, RepeatingTimer) {
@@ -369,6 +475,22 @@ TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
}
}
+TEST(TimerTest, RepeatingTimerWithTickClock) {
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ const int expected_times_called = 10;
+ RepeatingTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+ Bind(&Receiver::OnCalled, Unretained(&receiver)));
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(expected_times_called));
+ timer.Stop();
+ EXPECT_EQ(expected_times_called, receiver.TimesCalled());
+}
+
TEST(TimerTest, DelayTimer_NoCall) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
RunTest_DelayTimer_NoCall(testing_message_loops[i]);
@@ -394,25 +516,89 @@ TEST(TimerTest, DelayTimer_Deleted) {
}
}
+TEST(TimerTest, DelayTimerWithTickClock) {
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
+ &Receiver::OnCalled, tick_clock.get());
+ task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
+ EXPECT_FALSE(receiver.WasCalled());
+ timer.Reset();
+ task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
+ EXPECT_FALSE(receiver.WasCalled());
+ timer.Reset();
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
+ EXPECT_TRUE(receiver.WasCalled());
+}
+
TEST(TimerTest, MessageLoopShutdown) {
// This test is designed to verify that shutdown of the
// message loop does not cause crashes if there were pending
// timers not yet fired. It may only trigger exceptions
// if debug heap checking is enabled.
- bool did_run = false;
+ WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
{
- OneShotTimerTester a(&did_run);
- OneShotTimerTester b(&did_run);
- OneShotTimerTester c(&did_run);
- OneShotTimerTester d(&did_run);
+ OneShotTimerTesterBase a(&did_run);
+ OneShotTimerTesterBase b(&did_run);
+ OneShotTimerTesterBase c(&did_run);
+ OneShotTimerTesterBase d(&did_run);
{
- base::MessageLoop loop;
+ MessageLoop loop;
a.Start();
b.Start();
} // MessageLoop destructs by falling out of scope.
} // OneShotTimers destruct. SHOULD NOT CRASH, of course.
- EXPECT_FALSE(did_run);
+ EXPECT_FALSE(did_run.IsSignaled());
+}
+
+// Ref counted class which owns a Timer. The class passes a reference to itself
+// via the |user_task| parameter in Timer::Start(). |Timer::user_task_| might
+// end up holding the last reference to the class.
+class OneShotSelfOwningTimerTester
+ : public RefCounted<OneShotSelfOwningTimerTester> {
+ public:
+ OneShotSelfOwningTimerTester() = default;
+
+ void StartTimer() {
+ // Start the timer with a long delay in order to test the timer getting
+ // destroyed while a timer task is still pending.
+ timer_.Start(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&OneShotSelfOwningTimerTester::Run, this));
+ }
+
+ private:
+ friend class RefCounted<OneShotSelfOwningTimerTester>;
+ ~OneShotSelfOwningTimerTester() = default;
+
+ void Run() {
+ ADD_FAILURE() << "Timer unexpectedly fired.";
+ }
+
+ OneShotTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotSelfOwningTimerTester);
+};
+
+TEST(TimerTest, MessageLoopShutdownSelfOwningTimer) {
+ // This test verifies that shutdown of the message loop does not cause crashes
+ // if there is a pending timer not yet fired and |Timer::user_task_| owns the
+ // timer. The test may only trigger exceptions if debug heap checking is
+ // enabled.
+
+ MessageLoop loop;
+ scoped_refptr<OneShotSelfOwningTimerTester> tester =
+ new OneShotSelfOwningTimerTester();
+
+ std::move(tester)->StartTimer();
+ // |Timer::user_task_| owns sole reference to |tester|.
+
+ // MessageLoop destructs by falling out of scope. SHOULD NOT CRASH.
}
void TimerTestCallback() {
@@ -420,11 +606,10 @@ void TimerTestCallback() {
TEST(TimerTest, NonRepeatIsRunning) {
{
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
timer.Stop();
EXPECT_FALSE(timer.IsRunning());
@@ -432,11 +617,10 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
{
- base::Timer timer(true, false);
- base::MessageLoop loop;
+ Timer timer(true, false);
+ MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
timer.Stop();
EXPECT_FALSE(timer.IsRunning());
@@ -447,12 +631,11 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
TEST(TimerTest, NonRepeatMessageLoopDeath) {
- base::Timer timer(false, false);
+ Timer timer(false, false);
{
- base::MessageLoop loop;
+ MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
}
EXPECT_FALSE(timer.IsRunning());
@@ -460,9 +643,9 @@ TEST(TimerTest, NonRepeatMessageLoopDeath) {
}
TEST(TimerTest, RetainRepeatIsRunning) {
- base::MessageLoop loop;
- base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback), true);
+ MessageLoop loop;
+ Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+ true);
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -473,9 +656,9 @@ TEST(TimerTest, RetainRepeatIsRunning) {
}
TEST(TimerTest, RetainNonRepeatIsRunning) {
- base::MessageLoop loop;
- base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback), false);
+ MessageLoop loop;
+ Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+ false);
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -497,25 +680,27 @@ void ClearAllCallbackHappened() {
void SetCallbackHappened1() {
g_callback_happened1 = true;
- base::MessageLoop::current()->QuitWhenIdle();
+ MessageLoop::current()->QuitWhenIdle();
}
void SetCallbackHappened2() {
g_callback_happened2 = true;
- base::MessageLoop::current()->QuitWhenIdle();
+ MessageLoop::current()->QuitWhenIdle();
}
+} // namespace
+
TEST(TimerTest, ContinuationStopStart) {
{
ClearAllCallbackHappened();
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
- base::Bind(&SetCallbackHappened1));
+ Bind(&SetCallbackHappened1));
timer.Stop();
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
- base::Bind(&SetCallbackHappened2));
- base::RunLoop().Run();
+ Bind(&SetCallbackHappened2));
+ RunLoop().Run();
EXPECT_FALSE(g_callback_happened1);
EXPECT_TRUE(g_callback_happened2);
}
@@ -524,16 +709,16 @@ TEST(TimerTest, ContinuationStopStart) {
TEST(TimerTest, ContinuationReset) {
{
ClearAllCallbackHappened();
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
- base::Bind(&SetCallbackHappened1));
+ Bind(&SetCallbackHappened1));
timer.Reset();
// Since Reset happened before task ran, the user_task must not be cleared:
ASSERT_FALSE(timer.user_task().is_null());
- base::RunLoop().Run();
+ RunLoop().Run();
EXPECT_TRUE(g_callback_happened1);
}
}
-} // namespace
+} // namespace base
diff --git a/base/trace_event/category_registry.cc b/base/trace_event/category_registry.cc
new file mode 100644
index 0000000000..e7c14606d6
--- /dev/null
+++ b/base/trace_event/category_registry.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/category_registry.h"
+
+#include <string.h>
+
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/trace_event/trace_category.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+constexpr size_t kMaxCategories = 200;
+const int kNumBuiltinCategories = 4;
+
+// |g_categories| might end up creating dynamic initializers if it is not POD.
+static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
+
+// These entries must be kept consistent with the kCategory* consts below.
+TraceCategory g_categories[kMaxCategories] = {
+ {0, 0, "tracing categories exhausted; must increase kMaxCategories"},
+ {0, 0, "tracing already shutdown"}, // See kCategoryAlreadyShutdown below.
+ {0, 0, "__metadata"}, // See kCategoryMetadata below.
+ {0, 0, "toplevel"}, // Warmup the toplevel category.
+};
+
+base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
+
+bool IsValidCategoryPtr(const TraceCategory* category) {
+ // If any of these are hit, something has cached a corrupt category pointer.
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
+ return ptr % sizeof(void*) == 0 &&
+ ptr >= reinterpret_cast<uintptr_t>(&g_categories[0]) &&
+ ptr <= reinterpret_cast<uintptr_t>(&g_categories[kMaxCategories - 1]);
+}
+
+} // namespace
+
+// static
+TraceCategory* const CategoryRegistry::kCategoryExhausted = &g_categories[0];
+TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
+ &g_categories[1];
+TraceCategory* const CategoryRegistry::kCategoryMetadata = &g_categories[2];
+
+// static
+void CategoryRegistry::Initialize() {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ for (size_t i = 0; i < kMaxCategories; ++i) {
+ ANNOTATE_BENIGN_RACE(g_categories[i].state_ptr(),
+ "trace_event category enabled");
+ // If this DCHECK is hit in a test, it means that ResetForTesting() was not
+ // called and the category state leaked between test fixtures.
+ DCHECK(!g_categories[i].is_enabled());
+ }
+}
+
+// static
+void CategoryRegistry::ResetForTesting() {
+ // reset_for_testing() clears only the enabled state and filters. The
+ // categories themselves cannot be cleared because the static pointers
+ // injected by the macros still point to them and cannot be reset.
+ for (size_t i = 0; i < kMaxCategories; ++i)
+ g_categories[i].reset_for_testing();
+}
+
+// static
+TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
+ DCHECK(!strchr(category_name, '"'))
+ << "Category names may not contain double quote";
+
+ // |g_categories| is append-only; avoid taking a lock on the fast path.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_categories[i].name(), category_name) == 0) {
+ return &g_categories[i];
+ }
+ }
+ return nullptr;
+}
+
+bool CategoryRegistry::GetOrCreateCategoryLocked(
+ const char* category_name,
+ CategoryInitializerFn category_initializer_fn,
+ TraceCategory** category) {
+ // This is the slow path: the lock is not held on the fast path
+ // (GetCategoryByName), so more than one thread could have reached here
+ // trying to add the same category.
+ *category = GetCategoryByName(category_name);
+ if (*category)
+ return false;
+
+ // Create a new category.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ if (category_index >= kMaxCategories) {
+ NOTREACHED() << "must increase kMaxCategories";
+ *category = kCategoryExhausted;
+ return false;
+ }
+
+ // TODO(primiano): this strdup should be removed. The only documented reason
+ // for it was TraceWatchEvent, which is gone. However, something might have
+ // ended up relying on this. Needs some auditing before removal.
+ const char* category_name_copy = strdup(category_name);
+ ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
+
+ *category = &g_categories[category_index];
+ DCHECK(!(*category)->is_valid());
+ DCHECK(!(*category)->is_enabled());
+ (*category)->set_name(category_name_copy);
+ category_initializer_fn(*category);
+
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ return true;
+}
+
+// static
+const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
+ const uint8_t* category_state) {
+ const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
+ DCHECK(IsValidCategoryPtr(category));
+ return category;
+}
+
+// static
+bool CategoryRegistry::IsBuiltinCategory(const TraceCategory* category) {
+ DCHECK(IsValidCategoryPtr(category));
+ return category < &g_categories[kNumBuiltinCategories];
+}
+
+// static
+CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
+ // The |g_categories| array is append-only. We only have to guarantee that
+ // we never return an index to a category that is still being initialized
+ // by GetOrCreateCategoryLocked().
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ return CategoryRegistry::Range(&g_categories[0],
+ &g_categories[category_index]);
+}
+
+} // namespace trace_event
+} // namespace base
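
The registry above is a single-writer/multi-reader structure: GetCategoryByName() only Acquire_Loads the published index, while GetOrCreateCategoryLocked() fully initializes a slot before Release_Storing the new index, and leaves serializing concurrent creators to the caller. A hedged sketch of how a friend class such as TraceLog might combine the two paths (the |lock| parameter and InitCategory are hypothetical, and since GetOrCreateCategoryLocked() is private this assumes friend access):

    #include "base/synchronization/lock.h"
    #include "base/trace_event/category_registry.h"
    #include "base/trace_event/trace_category.h"

    namespace base {
    namespace trace_event {

    void InitCategory(TraceCategory* category) {}  // Hypothetical initializer.

    TraceCategory* GetOrCreate(const char* name, Lock* lock) {
      // Lock-free fast path: most lookups hit an existing entry.
      TraceCategory* category = CategoryRegistry::GetCategoryByName(name);
      if (category)
        return category;
      // Slow path: the contract requires the caller to serialize creators.
      AutoLock auto_lock(*lock);
      CategoryRegistry::GetOrCreateCategoryLocked(name, &InitCategory,
                                                  &category);
      return category;
    }

    }  // namespace trace_event
    }  // namespace base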
diff --git a/base/trace_event/category_registry.h b/base/trace_event/category_registry.h
new file mode 100644
index 0000000000..9c08efa3e1
--- /dev/null
+++ b/base/trace_event/category_registry.h
@@ -0,0 +1,93 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+struct TraceCategory;
+class TraceCategoryTest;
+class TraceLog;
+
+// Allows fast and thread-safe access to the state of all tracing categories.
+// All the methods in this class can be concurrently called on multiple threads,
+// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
+// The reason this is a fully static class with global state is to allow known
+// categories to be statically defined as global linker-initialized structs,
+// without requiring static initializers.
+class BASE_EXPORT CategoryRegistry {
+ public:
+ // Allows for-each iterations over a slice of the categories array.
+ class Range {
+ public:
+ Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
+ DCHECK_LE(begin, end);
+ }
+ TraceCategory* begin() const { return begin_; }
+ TraceCategory* end() const { return end_; }
+
+ private:
+ TraceCategory* const begin_;
+ TraceCategory* const end_;
+ };
+
+ // Known categories.
+ static TraceCategory* const kCategoryExhausted;
+ static TraceCategory* const kCategoryMetadata;
+ static TraceCategory* const kCategoryAlreadyShutdown;
+
+ // Returns a category entry from the Category.state_ptr() pointer.
+ // TODO(primiano): trace macros should just keep a pointer to the entire
+ // TraceCategory, not just the enabled state pointer. That would remove the
+ // need for this function and make everything cleaner at no extra cost (as
+ // long as the |state_| is the first field of the struct, which can be
+ // guaranteed via static_assert, see TraceCategory ctor).
+ static const TraceCategory* GetCategoryByStatePtr(
+ const uint8_t* category_state);
+
+ // Returns a category from its name, or nullptr if not found.
+ // The returned pointer is an indefinitely lived pointer to the
+ // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
+ // pointer and use it for checks in their fast paths.
+ static TraceCategory* GetCategoryByName(const char* category_name);
+
+ static bool IsBuiltinCategory(const TraceCategory*);
+
+ private:
+ friend class TraceCategoryTest;
+ friend class TraceLog;
+ using CategoryInitializerFn = void (*)(TraceCategory*);
+
+ // Only for debugging/testing purposes, is a no-op on release builds.
+ static void Initialize();
+
+ // Resets the state of all categories, to clear up the state between tests.
+ static void ResetForTesting();
+
+ // Used to get/create a category in the slow path. If the category already
+ // exists, this has the same effect as GetCategoryByName and returns false.
+ // If not, a new category is created and the CategoryInitializerFn is invoked
+ // before returning true. The caller must guarantee serialization: either
+ // call this method from a single thread or hold a lock when calling it.
+ static bool GetOrCreateCategoryLocked(const char* category_name,
+ CategoryInitializerFn,
+ TraceCategory**);
+
+ // Allows iterating over the valid categories in a for-each loop.
+ // This includes builtin categories such as __metadata.
+ static Range GetAllCategories();
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
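
Because Range exposes begin()/end(), GetAllCategories() composes directly with range-based for. A short sketch (again assuming friend access, since GetAllCategories() is private; the function name is illustrative):

    #include "base/trace_event/category_registry.h"
    #include "base/trace_event/trace_category.h"

    // Counts the currently enabled categories, builtins included.
    size_t CountEnabledCategories() {
      size_t enabled = 0;
      for (const base::trace_event::TraceCategory& category :
           base::trace_event::CategoryRegistry::GetAllCategories()) {
        if (category.is_enabled())
          ++enabled;
      }
      return enabled;
    }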
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index 0a04d62710..bb6fa1b82b 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -223,49 +223,6 @@
flow_flags, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
- TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
- TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#endif
-
// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -297,20 +254,10 @@
#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
- TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
-
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -395,10 +342,15 @@
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
arg2_name, arg2_val)
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_MARK(category_group, name) \
@@ -406,8 +358,8 @@
TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_COPY)
// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
@@ -544,6 +496,12 @@
TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+
// ASYNC_STEP_* APIs should be only used by legacy code. New code should
// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
// event.
@@ -612,6 +570,13 @@
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -701,6 +666,13 @@
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -760,16 +732,19 @@
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with one associated argument. If the category is not enabled, then this
-// does nothing.
+// with zero, one or two associated arguments. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
arg1_name, arg1_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
category_group, name, id, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with 2 associated arguments. If the category is not enabled, then this
-// does nothing.
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
@@ -944,48 +919,58 @@
#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
- issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
- "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+ issue_end_ts, TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts)
// Macros to track the life time and value of arbitrary client objects.
// See also TraceTrackableObject.
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
snapshot) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+ id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
- category_group, name, id, timestamp, snapshot) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
- TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "snapshot", snapshot)
#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
// Records entering and leaving trace event contexts. |category_group| and
// |name| specify the context category and type. |context| is a
// snapshotted context object id.
-#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
- TRACE_ID_DONT_MANGLE(context))
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
+
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_LINK_IDS(
+// "category", "name",
+// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+ INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
@@ -1052,11 +1037,13 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
@@ -1067,6 +1054,8 @@
#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
+#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
#define TRACE_EVENT_FLAG_SCOPE_MASK \
(static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
diff --git a/base/trace_event/etw_manifest/etw_manifest.gyp b/base/trace_event/etw_manifest/etw_manifest.gyp
deleted file mode 100644
index b2f0eb8ea1..0000000000
--- a/base/trace_event/etw_manifest/etw_manifest.gyp
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'targets': [
- {
- # GN version: //base/trace_event/etw_manifest/BUILD.gn
- 'target_name': 'etw_manifest',
- 'type': 'none',
- 'toolsets': ['host', 'target'],
- 'hard_dependency': 1,
- 'conditions': [
- ['OS=="win"', {
- 'sources': [
- 'chrome_events_win.man',
- ],
- 'variables': {
- 'man_output_dir': '<(SHARED_INTERMEDIATE_DIR)/base/trace_event/etw_manifest',
- },
- 'rules': [{
- # Rule to run the message compiler.
- 'rule_name': 'message_compiler',
- 'extension': 'man',
- 'outputs': [
- '<(man_output_dir)/chrome_events_win.h',
- '<(man_output_dir)/chrome_events_win.rc',
- ],
- 'action': [
- 'mc.exe',
- '-h', '<(man_output_dir)',
- '-r', '<(man_output_dir)/.',
- '-um',
- '<(RULE_INPUT_PATH)',
- ],
- 'message': 'Running message compiler on <(RULE_INPUT_PATH)',
- }],
- }],
- ],
- }
- ]
-}
diff --git a/base/trace_event/event_name_filter.cc b/base/trace_event/event_name_filter.cc
new file mode 100644
index 0000000000..8d0058c147
--- /dev/null
+++ b/base/trace_event/event_name_filter.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+const char EventNameFilter::kName[] = "event_whitelist_predicate";
+
+EventNameFilter::EventNameFilter(
+ std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
+ : event_names_whitelist_(std::move(event_names_whitelist)) {}
+
+EventNameFilter::~EventNameFilter() {}
+
+bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ return event_names_whitelist_->count(trace_event.name()) != 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/event_name_filter.h b/base/trace_event/event_name_filter.h
new file mode 100644
index 0000000000..19333b3e03
--- /dev/null
+++ b/base/trace_event/event_name_filter.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// Filters trace events by checking the full name against a whitelist.
+// The current implementation is deliberately simple: it uses a hash table,
+// which requires a char* to std::string conversion on every lookup. It could
+// be smarter and use a bloom filter or a trie. However, today this is used
+// too rarely to justify that cost.
+class BASE_EXPORT EventNameFilter : public TraceEventFilter {
+ public:
+ using EventNamesWhitelist = std::unordered_set<std::string>;
+ static const char kName[];
+
+ EventNameFilter(std::unique_ptr<EventNamesWhitelist>);
+ ~EventNameFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent&) const override;
+
+ private:
+ std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
+
+ DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
diff --git a/base/trace_event/event_name_filter_unittest.cc b/base/trace_event/event_name_filter_unittest.cc
new file mode 100644
index 0000000000..0bc2a4dafc
--- /dev/null
+++ b/base/trace_event/event_name_filter_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+const TraceEvent& MakeTraceEvent(const char* name) {
+ static TraceEvent event;
+ event.Reset();
+ event.Initialize(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
+ 0, nullptr, nullptr, nullptr, nullptr, 0);
+ return event;
+}
+
+TEST(TraceEventNameFilterTest, Whitelist) {
+ auto empty_whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ auto filter = MakeUnique<EventNameFilter>(std::move(empty_whitelist));
+
+ // No events should be filtered if the whitelist is empty.
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+
+ auto whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ whitelist->insert("foo");
+ whitelist->insert("bar");
+ filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("fooz")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("afoo")));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("bar")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foobar")));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index 31f311a918..b47dc16edd 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -29,7 +29,6 @@ const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
reinterpret_cast<AllocationContextTracker*>(-1);
-const char kTracingOverhead[] = "tracing_overhead";
ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
@@ -108,17 +107,17 @@ void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
}
void AllocationContextTracker::PushPseudoStackFrame(
- const char* trace_event_name) {
+ AllocationContextTracker::PseudoStackFrame stack_frame) {
// Impose a limit on the height to verify that every push is popped, because
// in practice the pseudo stack never grows higher than ~20 frames.
if (pseudo_stack_.size() < kMaxStackDepth)
- pseudo_stack_.push_back(trace_event_name);
+ pseudo_stack_.push_back(stack_frame);
else
NOTREACHED();
}
void AllocationContextTracker::PopPseudoStackFrame(
- const char* trace_event_name) {
+ AllocationContextTracker::PseudoStackFrame stack_frame) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the frame was never pushed, so it is possible that pop is called
// on an empty stack.
@@ -128,8 +127,10 @@ void AllocationContextTracker::PopPseudoStackFrame(
// Assert that pushes and pops are nested correctly. This DCHECK can be
// hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
// without a corresponding TRACE_EVENT_BEGIN).
- DCHECK_EQ(trace_event_name, pseudo_stack_.back())
- << "Encountered an unmatched TRACE_EVENT_END";
+ DCHECK(stack_frame == pseudo_stack_.back())
+ << "Encountered an unmatched TRACE_EVENT_END: "
+ << stack_frame.trace_event_name
+ << " vs event in stack: " << pseudo_stack_.back().trace_event_name;
pseudo_stack_.pop_back();
}
@@ -155,21 +156,15 @@ void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
}
// static
-AllocationContext AllocationContextTracker::GetContextSnapshot() {
- AllocationContext ctx;
-
- if (ignore_scope_depth_) {
- ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
- ctx.type_name = kTracingOverhead;
- ctx.backtrace.frame_count = 1;
- return ctx;
- }
+bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
+ if (ignore_scope_depth_)
+ return false;
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto* backtrace = std::begin(ctx.backtrace.frames);
- auto* backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx->backtrace.frames);
+ auto* backtrace_end = std::end(ctx->backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
@@ -193,11 +188,12 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
}
case CaptureMode::PSEUDO_STACK:
{
- for (const char* event_name: pseudo_stack_) {
+ for (const PseudoStackFrame& stack_frame : pseudo_stack_) {
if (backtrace == backtrace_end) {
break;
}
- *backtrace++ = StackFrame::FromTraceEventName(event_name);
+ *backtrace++ =
+ StackFrame::FromTraceEventName(stack_frame.trace_event_name);
}
break;
}
@@ -222,24 +218,32 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
// Copy frames backwards
size_t backtrace_capacity = backtrace_end - backtrace;
- size_t top_frame_index = (backtrace_capacity >= frame_count) ?
- 0 :
- frame_count - backtrace_capacity;
- for (size_t i = frame_count; i > top_frame_index;) {
- const void* frame = frames[--i];
+ int32_t top_frame_index = (backtrace_capacity >= frame_count)
+ ? 0
+ : frame_count - backtrace_capacity;
+ for (int32_t i = frame_count - 1; i >= top_frame_index; --i) {
+ const void* frame = frames[i];
*backtrace++ = StackFrame::FromProgramCounter(frame);
}
break;
}
}
- ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+ ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
// TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
// (component name) in the heap profiler and not piggy back on the type name.
- ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
+ if (!task_contexts_.empty()) {
+ ctx->type_name = task_contexts_.back();
+ } else if (!pseudo_stack_.empty()) {
+ // If task context was unavailable, then the category names are taken from
+ // trace events.
+ ctx->type_name = pseudo_stack_.back().trace_event_category;
+ } else {
+ ctx->type_name = nullptr;
+ }
- return ctx;
+ return true;
}
} // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index 454200c474..4f2a8c9502 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -10,7 +10,6 @@
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/debug/stack_trace.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
@@ -30,6 +29,17 @@ class BASE_EXPORT AllocationContextTracker {
NATIVE_STACK // GetContextSnapshot() returns native (real) stack trace
};
+ // Stack frame constructed from trace events in codebase.
+ struct BASE_EXPORT PseudoStackFrame {
+ const char* trace_event_category;
+ const char* trace_event_name;
+
+ bool operator==(const PseudoStackFrame& other) const {
+ return trace_event_category == other.trace_event_category &&
+ trace_event_name == other.trace_event_name;
+ }
+ };
+
// Globally sets capturing mode.
// TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
static void SetCaptureMode(CaptureMode mode);
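Note that PseudoStackFrame::operator== above compares the two char* members by pointer, not by string content. That is deliberate: trace macros pass string literals with stable addresses, so pointer comparison is cheap and sufficient. A hedged illustration (the frames only compare equal because they reuse the same pointers):

    const char* category = "Testing";  // literals with stable addresses
    const char* name = "Cupcake";
    AllocationContextTracker::PseudoStackFrame a = {category, name};
    AllocationContextTracker::PseudoStackFrame b = {category, name};
    // a == b holds: same pointers. Frames built from byte-wise equal but
    // distinct strings would not compare equal, which is why events with
    // copied names are skipped by the heap profiler filter in this patch.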
@@ -60,8 +70,8 @@ class BASE_EXPORT AllocationContextTracker {
static void SetCurrentThreadName(const char* name);
// Starts and ends a new ignore scope between which the allocations are
- // ignored in the heap profiler. A dummy context that short circuits to
- // "tracing_overhead" is returned for these allocations.
+ // ignored by the heap profiler. GetContextSnapshot() returns false when
+ // allocations are ignored.
void begin_ignore_scope() { ignore_scope_depth_++; }
void end_ignore_scope() {
if (ignore_scope_depth_)
@@ -69,18 +79,19 @@ class BASE_EXPORT AllocationContextTracker {
}
// Pushes a frame onto the thread-local pseudo stack.
- void PushPseudoStackFrame(const char* trace_event_name);
+ void PushPseudoStackFrame(PseudoStackFrame stack_frame);
// Pops a frame from the thread-local pseudo stack.
- void PopPseudoStackFrame(const char* trace_event_name);
+ void PopPseudoStackFrame(PseudoStackFrame stack_frame);
// Push and pop current task's context. A stack is used to support nested
// tasks and the top of the stack will be used in allocation context.
void PushCurrentTaskContext(const char* context);
void PopCurrentTaskContext(const char* context);
- // Returns a snapshot of the current thread-local context.
- AllocationContext GetContextSnapshot();
+ // Fills a snapshot of the current thread-local context. Returns false,
+ // without filling the snapshot, if allocations are being ignored.
+ bool GetContextSnapshot(AllocationContext* snapshot);
~AllocationContextTracker();
@@ -90,7 +101,7 @@ class BASE_EXPORT AllocationContextTracker {
static subtle::Atomic32 capture_mode_;
// The pseudo stack where frames are |TRACE_EVENT| names.
- std::vector<const char*> pseudo_stack_;
+ std::vector<PseudoStackFrame> pseudo_stack_;
// The thread name is used as the first entry in the pseudo stack.
const char* thread_name_;
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 3064a6a711..577f50043d 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -11,6 +11,7 @@
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -26,13 +27,25 @@ const char kEclair[] = "Eclair";
const char kFroyo[] = "Froyo";
const char kGingerbread[] = "Gingerbread";
+const char kFilteringTraceConfig[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"excluded_categories\": [],"
+ " \"filter_args\": {},"
+ " \"filter_predicate\": \"heap_profiler_predicate\","
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
// Asserts that the fixed-size array |expected_backtrace| matches the backtrace
// in |AllocationContextTracker::GetContextSnapshot|.
template <size_t N>
void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
auto* actual = std::begin(ctx.backtrace.frames);
auto* actual_bottom = actual + ctx.backtrace.frame_count;
@@ -52,9 +65,9 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
void AssertBacktraceContainsOnlyThreadName() {
StackFrame t = StackFrame::FromThreadName(kThreadName);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(1u, ctx.backtrace.frame_count);
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -63,17 +76,19 @@ void AssertBacktraceContainsOnlyThreadName() {
class AllocationContextTrackerTest : public testing::Test {
public:
void SetUp() override {
- TraceConfig config("");
- TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+ // Enabling the memory-infra category sets the default memory dump config,
+ // which includes filters for capturing the pseudo stack.
+ TraceConfig config(kFilteringTraceConfig);
+ TraceLog::GetInstance()->SetEnabled(config, TraceLog::FILTERING_MODE);
AllocationContextTracker::SetCurrentThreadName(kThreadName);
}
void TearDown() override {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::DISABLED);
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
}
};
@@ -106,6 +121,12 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
AssertBacktraceEquals(frame_ce);
}
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
+ StackFrame frame_cc[] = {t, c, c};
+ AssertBacktraceEquals(frame_cc);
+ }
+
AssertBacktraceEquals(frame_c);
}
@@ -222,9 +243,9 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
{
TRACE_EVENT0("Testing", kGingerbread);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
// The pseudo stack relies on pointer equality, not deep string comparisons.
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -233,38 +254,54 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
}
{
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(t, ctx.backtrace.frames[0]);
ASSERT_EQ(c, ctx.backtrace.frames[1]);
ASSERT_EQ(f, ctx.backtrace.frames[11]);
}
}
-TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
+TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
const char kContext1[] = "context1";
const char kContext2[] = "context2";
{
// The context from the scoped task event should be used as type name.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
- AllocationContext ctx1 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
ASSERT_EQ(kContext1, ctx1.type_name);
// In case of nested events, the last event's context should be used.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
- AllocationContext ctx2 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
ASSERT_EQ(kContext2, ctx2.type_name);
}
+ {
+ // The type should be the category name of the last seen trace event.
+ TRACE_EVENT0("Testing", kCupcake);
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
+ ASSERT_EQ("Testing", std::string(ctx1.type_name));
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
+ ASSERT_EQ(TRACE_DISABLED_BY_DEFAULT("Testing"),
+ std::string(ctx2.type_name));
+ }
+
// Type should be nullptr without task event.
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_FALSE(ctx.type_name);
}
@@ -272,13 +309,9 @@ TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
TRACE_EVENT0("Testing", kCupcake);
TRACE_EVENT0("Testing", kDonut);
HEAP_PROFILER_SCOPED_IGNORE;
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
- const StringPiece kTracingOverhead("tracing_overhead");
- ASSERT_EQ(kTracingOverhead,
- static_cast<const char*>(ctx.backtrace.frames[0].value));
- ASSERT_EQ(1u, ctx.backtrace.frame_count);
+ AllocationContext ctx;
+ ASSERT_FALSE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
}
} // namespace trace_event
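The recurring pattern in this test, and in the callers updated later in this patch, is to snapshot into a caller-owned struct and bail out when allocations are being ignored. A sketch of the idiom:

    base::trace_event::AllocationContext ctx;
    auto* tracker = base::trace_event::AllocationContextTracker::
        GetInstanceForCurrentThread();
    if (!tracker || !tracker->GetContextSnapshot(&ctx))
      return;  // Allocations are being ignored; record nothing.
    // ... use ctx.backtrace and ctx.type_name ...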
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 2c2cd378bb..63d40611a6 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -60,12 +60,12 @@ size_t AllocationRegister::AddressHasher::operator () (
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-world data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
- // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
- // buckets. Microbenchmarks show that this simple scheme outperforms fancy
- // hashes like Murmur3 by 20 to 40 percent.
+ // |shift|, 15 yields good results for both 2^18 and 2^19 bucket sizes.
+ // Microbenchmarks show that this simple scheme outperforms fancy hashes like
+ // Murmur3 by 20 to 40 percent.
const uintptr_t key = reinterpret_cast<uintptr_t>(address);
const uintptr_t a = 131101;
- const uintptr_t shift = 14;
+ const uintptr_t shift = 15;
const uintptr_t h = (key * a) >> shift;
return h;
}
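The scheme above is easy to reproduce in isolation. A standalone sketch (constants copied from the hunk; the final modulo is an assumption about how FixedHashMap maps the hash onto its bucket array):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Multiplicative hashing per the comment above: |a| is the first prime
    // after 2^17 and |shift| drops the low, alignment-dominated bits.
    uintptr_t HashAddress(const void* address) {
      const uintptr_t key = reinterpret_cast<uintptr_t>(address);
      const uintptr_t a = 131101;
      const uintptr_t shift = 15;
      return (key * a) >> shift;
    }

    int main() {
      int probe = 0;
      const uintptr_t kBuckets = uintptr_t{1} << 19;  // desktop bucket count
      printf("bucket=%zu\n",
             static_cast<size_t>(HashAddress(&probe) % kBuckets));
      return 0;
    }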
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index 86e2721c56..d6a02faeae 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -16,6 +16,7 @@
#include "base/process/process_metrics.h"
#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "build/build_config.h"
namespace base {
namespace trace_event {
@@ -45,8 +46,7 @@ class FixedHashMap {
using KVPair = std::pair<const Key, Value>;
// For implementation simplicity API uses integer index instead
- // of iterators. Most operations (except FindValidIndex) on KVIndex
- // are O(1).
+ // of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
@@ -199,7 +199,9 @@ class FixedHashMap {
// the simplest solution is to just allocate a humongous chunk of address
// space.
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ CHECK_LT(next_unused_cell_, num_cells_ + 1)
+ << "Allocation Register hash table has too little capacity. Increase "
+ "the capacity to run heap profiler in large sessions.";
return &cells_[idx];
}
@@ -300,15 +302,25 @@ class BASE_EXPORT AllocationRegister {
private:
friend AllocationRegisterTest;
- // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
- // hashing and should be changed together with AddressHasher.
+// Expect a lower number of allocations on mobile platforms. The load factor
+// (capacity / bucket count) is kept below 10 for optimal hashing. The
+// number of buckets should be changed together with AddressHasher.
+#if defined(OS_ANDROID) || defined(OS_IOS)
static const size_t kAllocationBuckets = 1 << 18;
static const size_t kAllocationCapacity = 1500000;
-
- // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
- // needing to tweak BacktraceHasher implementation.
- static const size_t kBacktraceBuckets = 1 << 15;
- static const size_t kBacktraceCapacity = kBacktraceBuckets;
+#else
+ static const size_t kAllocationBuckets = 1 << 19;
+ static const size_t kAllocationCapacity = 5000000;
+#endif
+
+ // 2^16 works well with BacktraceHasher. When increasing this number make
+ // sure BacktraceHasher still produces low number of collisions.
+ static const size_t kBacktraceBuckets = 1 << 16;
+#if defined(OS_ANDROID)
+ static const size_t kBacktraceCapacity = 32000; // 22K was observed
+#else
+ static const size_t kBacktraceCapacity = 55000; // 45K was observed on Linux
+#endif
struct BacktraceHasher {
size_t operator () (const Backtrace& backtrace) const;
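As a quick check on the load-factor comment above, the new constants stay under the stated limit of 10: desktop is 5,000,000 / 2^19 ≈ 9.5 and mobile is 1,500,000 / 2^18 ≈ 5.7. The invariant could be encoded like this (illustrative only, not part of the patch):

    // Load factor = capacity / bucket count; the comment requires < 10.
    static_assert(5000000 / (1 << 19) < 10, "desktop load factor too high");
    static_assert(1500000 / (1 << 18) < 10, "mobile load factor too high");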
diff --git a/base/trace_event/heap_profiler_event_filter.cc b/base/trace_event/heap_profiler_event_filter.cc
new file mode 100644
index 0000000000..6c91c91b13
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.cc
@@ -0,0 +1,67 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_event_filter.h"
+
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+inline bool IsPseudoStackEnabled() {
+ return AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK;
+}
+
+inline AllocationContextTracker* GetThreadLocalTracker() {
+ return AllocationContextTracker::GetInstanceForCurrentThread();
+}
+
+} // namespace
+
+// static
+const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
+
+HeapProfilerEventFilter::HeapProfilerEventFilter() {}
+HeapProfilerEventFilter::~HeapProfilerEventFilter() {}
+
+bool HeapProfilerEventFilter::FilterTraceEvent(
+ const TraceEvent& trace_event) const {
+ if (!IsPseudoStackEnabled())
+ return true;
+
+ // TODO(primiano): Add support for events with copied name crbug.com/581079.
+ if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
+ return true;
+
+ const auto* category = CategoryRegistry::GetCategoryByStatePtr(
+ trace_event.category_group_enabled());
+ AllocationContextTracker::PseudoStackFrame frame = {category->name(),
+ trace_event.name()};
+ if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
+ trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
+ GetThreadLocalTracker()->PushPseudoStackFrame(frame);
+ } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
+ // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
+ GetThreadLocalTracker()->PopPseudoStackFrame(frame);
+ }
+ // Do not filter out any events; always return true. TraceLog adds the
+ // event only if it is enabled for recording.
+ return true;
+}
+
+void HeapProfilerEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {
+ if (IsPseudoStackEnabled())
+ GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/heap_profiler_event_filter.h b/base/trace_event/heap_profiler_event_filter.h
new file mode 100644
index 0000000000..47368a1b07
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.h
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// This filter unconditionally accepts all events and pushes/pops them from the
+// thread-local AllocationContextTracker instance as they are seen.
+// This is used to cheaply construct the heap profiler pseudo stack without
+// having to actually record all events.
+class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
+ public:
+ static const char kName[];
+
+ HeapProfilerEventFilter();
+ ~HeapProfilerEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name,
+ const char* event_name) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
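This filter is activated through the trace config rather than by direct construction; the unittest earlier in this patch shows the pattern, sketched here for reference (kFilteringTraceConfig is the JSON from that test, whose filter_predicate names "heap_profiler_predicate", i.e. HeapProfilerEventFilter::kName):

    // Enable filtering mode with a config whose filter_predicate matches
    // HeapProfilerEventFilter::kName.
    base::trace_event::TraceConfig config(kFilteringTraceConfig);
    base::trace_event::TraceLog::GetInstance()->SetEnabled(
        config, base::trace_event::TraceLog::FILTERING_MODE);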
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index 1bf06dbd97..8043fff995 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -314,8 +314,7 @@ std::unique_ptr<TracedValue> ExportHeapDump(
internal::HeapDumpWriter writer(
session_state.stack_frame_deduplicator(),
session_state.type_name_deduplicator(),
- session_state.memory_dump_config().heap_profiler_options
- .breakdown_threshold_bytes);
+ session_state.heap_profiler_breakdown_threshold_bytes());
return Serialize(writer.Summarize(metrics_by_context));
}
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index 49a235051c..fc5da0d1dd 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -11,6 +11,7 @@
#include <utility>
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -23,6 +24,10 @@ StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
StackFrameDeduplicator::FrameNode::~FrameNode() {}
+size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
+ return base::trace_event::EstimateMemoryUsage(children);
+}
+
StackFrameDeduplicator::StackFrameDeduplicator() {}
StackFrameDeduplicator::~StackFrameDeduplicator() {}
@@ -116,19 +121,10 @@ void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The sizes here are only estimates; they fail to take into account the
- // overhead of the tree nodes for the map, but as an estimate this should be
- // fine.
- size_t maps_size = roots_.size() * sizeof(std::pair<StackFrame, int>);
- size_t frames_allocated = frames_.capacity() * sizeof(FrameNode);
- size_t frames_resident = frames_.size() * sizeof(FrameNode);
-
- for (const FrameNode& node : frames_)
- maps_size += node.children.size() * sizeof(std::pair<StackFrame, int>);
-
+ size_t memory_usage =
+ EstimateMemoryUsage(frames_) + EstimateMemoryUsage(roots_);
overhead->Add("StackFrameDeduplicator",
- sizeof(StackFrameDeduplicator) + maps_size + frames_allocated,
- sizeof(StackFrameDeduplicator) + maps_size + frames_resident);
+ sizeof(StackFrameDeduplicator) + memory_usage);
}
} // namespace trace_event
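The EstimateMemoryUsage(children) call relies on the convention in base/trace_event/memory_usage_estimator.h: a type opts in by exposing an EstimateMemoryUsage() method, and containers holding such a type then become measurable recursively. A minimal sketch of that convention (inferred from the usage above, not a verbatim API listing):

    #include <map>
    #include "base/trace_event/memory_usage_estimator.h"

    struct Node {
      std::map<int, int> children;
      // Opt-in hook: the free-function EstimateMemoryUsage() overloads for
      // containers call this to include per-element heap costs.
      size_t EstimateMemoryUsage() const {
        return base::trace_event::EstimateMemoryUsage(children);
      }
    };
    // base::trace_event::EstimateMemoryUsage(std::vector<Node>(...)) would
    // then recursively include each Node's children map, as FrameNode does.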
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 4932534e1d..66d430f2ee 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -34,6 +34,8 @@ class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
FrameNode(const FrameNode& other);
~FrameNode();
+ size_t EstimateMemoryUsage() const;
+
StackFrame frame;
// The index of the parent stack frame in |frames_|, or -1 if there is no
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
index 055f86abf0..a6dab51ad2 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -10,7 +10,10 @@
#include <utility>
#include "base/json/string_escape.h"
+#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_memory_overhead.h"
namespace base {
@@ -18,16 +21,24 @@ namespace trace_event {
namespace {
-// Extract directory name if |type_name| was file name. Otherwise, return
-// |type_name|.
-StringPiece ExtractDirNameFromFileName(const char* type_name) {
+// If |type_name| is a file name, extract the directory name. Otherwise, if
+// |type_name| is a category name, disambiguate multiple categories and remove
+// the "disabled-by-default" prefix if present.
+StringPiece ExtractCategoryFromTypeName(const char* type_name) {
StringPiece result(type_name);
size_t last_seperator = result.find_last_of("\\/");
// If |type_name| was not a file path, the separator will not be found, so
// the whole type name is returned.
- if (last_seperator == StringPiece::npos)
+ if (last_seperator == StringPiece::npos) {
+ // Use the first category name if it contains ",".
+ size_t first_comma_position = result.find(',');
+ if (first_comma_position != StringPiece::npos)
+ result = result.substr(0, first_comma_position);
+ if (result.starts_with(TRACE_DISABLED_BY_DEFAULT("")))
+ result.remove_prefix(sizeof(TRACE_DISABLED_BY_DEFAULT("")) - 1);
return result;
+ }
// Remove the file name from the path.
result.remove_suffix(result.length() - last_seperator);
@@ -82,7 +93,7 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
// TODO(ssid): crbug.com/594803 the type name is misused for file name in
// some cases.
- StringPiece type_info = ExtractDirNameFromFileName(it->first);
+ StringPiece type_info = ExtractCategoryFromTypeName(it->first);
// |EscapeJSONString| appends, it does not overwrite |buffer|.
bool put_in_quotes = true;
@@ -95,12 +106,9 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The size here is only an estimate; it fails to take into account the size
- // of the tree nodes for the map, but as an estimate this should be fine.
- size_t map_size = type_ids_.size() * sizeof(std::pair<const char*, int>);
-
+ size_t memory_usage = EstimateMemoryUsage(type_ids_);
overhead->Add("TypeNameDeduplicator",
- sizeof(TypeNameDeduplicator) + map_size);
+ sizeof(TypeNameDeduplicator) + memory_usage);
}
} // namespace trace_event
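Illustrative expectations for ExtractCategoryFromTypeName, covering the three cases its comment describes (inputs are made up; TRACE_DISABLED_BY_DEFAULT(name) expands to a "disabled-by-default-" prefixed literal):

    //   "src/foo/bar.cc"              -> "src/foo"   (file path: keep dir name)
    //   "cat1,cat2"                   -> "cat1"      (first of several categories)
    //   "disabled-by-default-Testing" -> "Testing"   (prefix stripped)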
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index c3d3258651..3565b8b95b 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -9,6 +9,7 @@
#include "base/allocator/allocator_extension.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
+#include "base/debug/profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_allocation_register.h"
@@ -22,26 +23,32 @@
#else
#include <malloc.h>
#endif
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
namespace base {
namespace trace_event {
-#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
namespace {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
using allocator::AllocatorDispatch;
-void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_function(next, size);
+ void* ptr = next->alloc_function(next, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+void* HookZeroInitAlloc(const AllocatorDispatch* self,
+ size_t n,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_zero_initialized_function(next, n, size);
+ void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
return ptr;
@@ -49,41 +56,127 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
void* HookllocAligned(const AllocatorDispatch* self,
size_t alignment,
- size_t size) {
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_aligned_function(next, alignment, size);
+ void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+void* HookRealloc(const AllocatorDispatch* self,
+ void* address,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->realloc_function(next, address, size);
+ void* ptr = next->realloc_function(next, address, size, context);
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
if (size > 0) // realloc(size == 0) means free().
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void HookFree(const AllocatorDispatch* self, void* address) {
+void HookFree(const AllocatorDispatch* self, void* address, void* context) {
if (address)
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
const AllocatorDispatch* const next = self->next;
- next->free_function(next, address);
+ next->free_function(next, address, context);
+}
+
+size_t HookGetSizeEstimate(const AllocatorDispatch* self,
+ void* address,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ return next->get_size_estimate_function(next, address, context);
+}
+
+unsigned HookBatchMalloc(const AllocatorDispatch* self,
+ size_t size,
+ void** results,
+ unsigned num_requested,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ unsigned count =
+ next->batch_malloc_function(next, size, results, num_requested, context);
+ for (unsigned i = 0; i < count; ++i) {
+ MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
+ }
+ return count;
+}
+
+void HookBatchFree(const AllocatorDispatch* self,
+ void** to_be_freed,
+ unsigned num_to_be_freed,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ for (unsigned i = 0; i < num_to_be_freed; ++i) {
+ MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
+ }
+ next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
+}
+
+void HookFreeDefiniteSize(const AllocatorDispatch* self,
+ void* ptr,
+ size_t size,
+ void* context) {
+ if (ptr)
+ MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
+ const AllocatorDispatch* const next = self->next;
+ next->free_definite_size_function(next, ptr, size, context);
}
AllocatorDispatch g_allocator_hooks = {
- &HookAlloc, /* alloc_function */
- &HookZeroInitAlloc, /* alloc_zero_initialized_function */
- &HookllocAligned, /* alloc_aligned_function */
- &HookRealloc, /* realloc_function */
- &HookFree, /* free_function */
- nullptr, /* next */
+ &HookAlloc, /* alloc_function */
+ &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+ &HookllocAligned, /* alloc_aligned_function */
+ &HookRealloc, /* realloc_function */
+ &HookFree, /* free_function */
+ &HookGetSizeEstimate, /* get_size_estimate_function */
+ &HookBatchMalloc, /* batch_malloc_function */
+ &HookBatchFree, /* batch_free_function */
+ &HookFreeDefiniteSize, /* free_definite_size_function */
+ nullptr, /* next */
};
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+#if defined(OS_WIN)
+// A structure containing some information about a given heap.
+struct WinHeapInfo {
+ size_t committed_size;
+ size_t uncommitted_size;
+ size_t allocated_size;
+ size_t block_count;
+};
+
+// NOTE: crbug.com/665516
+// Unfortunately, there is no safe way to collect information from secondary
+// heaps due to the limitations and racy nature of this part of the WinAPI.
+void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
+#if defined(SYZYASAN)
+ if (base::debug::IsBinaryInstrumented())
+ return;
+#endif
+
+ // Iterate through whichever heap our CRT is using.
+ HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
+ ::HeapLock(crt_heap);
+ PROCESS_HEAP_ENTRY heap_entry;
+ heap_entry.lpData = nullptr;
+ // Walk over all the entries in the main heap.
+ while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
+ if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
+ crt_heap_info->allocated_size += heap_entry.cbData;
+ crt_heap_info->block_count++;
+ } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
+ crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
+ crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
+ }
+ }
+ CHECK(::HeapUnlock(crt_heap) == TRUE);
+}
+#endif // defined(OS_WIN)
} // namespace
-#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
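For context, a sketch of how the g_allocator_hooks chain above is expected to be installed (InsertAllocatorDispatch is assumed from base/allocator/allocator_shim.h, which this file includes; each hook forwards to self->next, so the default allocator still does the real work):

    #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
    // Hypothetical wiring: after this call every malloc/free passes through
    // HookAlloc/HookFree, which record the allocation in the dump provider.
    base::allocator::InsertAllocatorDispatch(&g_allocator_hooks);
    #endif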
@@ -106,6 +199,7 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
size_t total_virtual_size = 0;
size_t resident_size = 0;
size_t allocated_objects_size = 0;
+ size_t allocated_objects_count = 0;
#if defined(USE_TCMALLOC)
bool res =
allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
@@ -117,18 +211,35 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
&allocated_objects_size);
DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
- malloc_statistics_t stats;
- memset(&stats, 0, sizeof(stats));
+ malloc_statistics_t stats = {0};
malloc_zone_statistics(nullptr, &stats);
total_virtual_size = stats.size_allocated;
allocated_objects_size = stats.size_in_use;
- // The resident size is approximated to the max size in use, which would count
- // the total size of all regions other than the free bytes at the end of each
- // region. In each allocation region the allocations are rounded off to a
- // fixed quantum, so the excess region will not be resident.
- // See crrev.com/1531463004 for detailed explanation.
- resident_size = stats.max_size_in_use;
+ // Resident size is approximated pretty well by stats.max_size_in_use.
+ // However, on macOS, freed blocks are both resident and reusable, which is
+ // semantically equivalent to deallocated. The implementation of libmalloc
+ // will also only hold a fixed number of freed regions before actually
+ // starting to deallocate them, so stats.max_size_in_use is also not
+ // representative of the peak size. As a result, stats.max_size_in_use is
+ // typically somewhere between actually resident [non-reusable] pages, and
+ // peak size. This is not very useful, so we just use stats.size_in_use for
+ // resident_size, even though it's an underestimate and fails to account for
+ // fragmentation. See
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
+ resident_size = stats.size_in_use;
+#elif defined(OS_WIN)
+ WinHeapInfo main_heap_info = {};
+ WinHeapMemoryDumpImpl(&main_heap_info);
+ total_virtual_size =
+ main_heap_info.committed_size + main_heap_info.uncommitted_size;
+ // Resident size is approximated with committed heap size. Note that it is
+ // possible to do this with better accuracy on Windows by intersecting the
+ // working set with the virtual memory ranges occupied by the heap. It's not
+ // clear that this is worth it, as it's fairly expensive to do.
+ resident_size = main_heap_info.committed_size;
+ allocated_objects_size = main_heap_info.allocated_size;
+ allocated_objects_count = main_heap_info.block_count;
#else
struct mallinfo info = mallinfo();
DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
@@ -138,6 +249,8 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
// |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
total_virtual_size = info.arena + info.hblkhd;
resident_size = info.uordblks;
+
+ // Total allocated space is given by |uordblks|.
allocated_objects_size = info.uordblks;
#endif
@@ -147,13 +260,17 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, resident_size);
- // Total allocated space is given by |uordblks|.
MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
allocated_objects_size);
+ if (allocated_objects_count != 0) {
+ inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects,
+ allocated_objects_count);
+ }
- if (resident_size - allocated_objects_size > 0) {
+ if (resident_size > allocated_objects_size) {
// Explicitly specify why is extra memory resident. In tcmalloc it accounts
// for free lists and caches. In mac and ios it accounts for the
// fragmentation and metadata.
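The rewritten condition in this hunk is a correctness fix, not a style change: both counters are size_t, so the old form resident_size - allocated_objects_size > 0 wrapped around whenever the allocated size exceeded the resident size, making the test true for any unequal pair. A minimal demonstration with illustrative values:

    #include <cassert>
    #include <cstddef>

    int main() {
      std::size_t resident_size = 100;
      std::size_t allocated_objects_size = 150;
      // Old check: the unsigned subtraction wraps to a huge value, so it passes.
      assert(resident_size - allocated_objects_size > 0);
      // New check: correctly false when no extra memory is resident.
      assert(!(resident_size > allocated_objects_size));
      return 0;
    }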
@@ -233,7 +350,10 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
- AllocationContext context = tracker->GetContextSnapshot();
+
+ AllocationContext context;
+ if (!tracker->GetContextSnapshot(&context))
+ return;
AutoLock lock(allocation_register_lock_);
if (!allocation_register_)
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 4746cf5896..384033c9b8 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -15,7 +15,7 @@
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
-#if defined(OS_LINUX) || defined(OS_ANDROID) || \
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
#define MALLOC_MEMORY_TRACING_SUPPORTED
#endif
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 7d1023606b..c781f071bb 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -19,7 +19,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class ProcessMemoryDump;
class TracedValue;
@@ -70,11 +69,6 @@ class BASE_EXPORT MemoryAllocatorDump {
// Called at trace generation time to populate the TracedValue.
void AsValueInto(TracedValue* value) const;
- // Get the ProcessMemoryDump instance that owns this.
- ProcessMemoryDump* process_memory_dump() const {
- return process_memory_dump_;
- }
-
// Use enum Flags to set values.
void set_flags(int flags) { flags_ |= flags; }
void clear_flags(int flags) { flags_ &= ~flags; }
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index eed070a782..5a54a773c5 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -7,21 +7,26 @@
#include <algorithm>
#include <utility>
+#include "base/allocator/features.h"
#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
@@ -33,10 +38,6 @@
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif
-#if defined(OS_WIN)
-#include "base/trace_event/winheap_dump_provider_win.h"
-#endif
-
namespace base {
namespace trace_event {
@@ -49,6 +50,31 @@ const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;
+// The list of names of dump providers that are blacklisted from the strict
+// thread affinity check on unregistration. These providers could potentially
+// cause crashes on build bots if they do not unregister on the right thread.
+// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
+// blacklist, crbug.com/643438.
+const char* const kStrictThreadCheckBlacklist[] = {
+ "ClientDiscardableSharedMemoryManager",
+ "ContextProviderCommandBuffer",
+ "DiscardableSharedMemoryManager",
+ "FontCaches",
+ "GpuMemoryBufferVideoFramePool",
+ "IndexedDBBackingStore",
+ "Sql",
+ "ThreadLocalEventBuffer",
+ "TraceLog",
+ "URLRequestContext",
+ "VpxVideoDecoder",
+ "cc::SoftwareImageDecodeCache",
+ "cc::StagingBufferPool",
+ "gpu::BufferManager",
+ "gpu::MappedMemoryManager",
+ "gpu::RenderbufferManager",
+ "BlacklistTestDumpProvider" // for testing
+};
+
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -110,8 +136,6 @@ const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
MallocDumpProvider::kAllocatedObjects;
-#elif defined(OS_WIN)
- WinHeapDumpProvider::kAllocatedObjects;
#else
nullptr;
#endif
@@ -142,6 +166,9 @@ MemoryDumpManager::MemoryDumpManager()
// At this point the command line may not be initialized but we try to
// enable the heap profiler to capture allocations as soon as possible.
EnableHeapProfilingIfNeeded();
+
+ strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
+ std::end(kStrictThreadCheckBlacklist));
}
MemoryDumpManager::~MemoryDumpManager() {
@@ -162,18 +189,20 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
if (profiling_mode == "") {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
- }
- else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
#if HAVE_TRACE_STACK_FRAME_POINTERS && \
(BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+ } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
// We need frame pointers for native tracing to work, and they are
// enabled in profiling and debug builds.
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::NATIVE_STACK);
-#else
- CHECK(false) << "'" << profiling_mode << "' mode for "
- << switches::kEnableHeapProfiling << " flag is not supported "
- << "for this platform / build type.";
+#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
+ // Enable heap tracking, which in turn enables capture of heap usage
+ // tracking in tracked_objects.cc.
+ if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+ base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
#endif
} else {
CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
@@ -206,14 +235,33 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
nullptr);
#endif
-#if defined(OS_WIN)
- RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
-#endif
+ TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
+
+ // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
+ // more allocations (crbug.com/625170).
+ if (AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
+ !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
+ // Create trace config with heap profiling filter.
+ TraceConfig::EventFilterConfig heap_profiler_filter_config(
+ HeapProfilerEventFilter::kName);
+ heap_profiler_filter_config.AddIncludedCategory("*");
+ heap_profiler_filter_config.AddIncludedCategory(
+ MemoryDumpManager::kTraceCategory);
+ TraceConfig::EventFilters filters;
+ filters.push_back(heap_profiler_filter_config);
+ TraceConfig filtering_trace_config;
+ filtering_trace_config.SetEventFilters(filters);
+
+ TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
+ TraceLog::FILTERING_MODE);
+ }
// If tracing was enabled before initializing MemoryDumpManager, we missed the
// OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ // IsEnabled is called before adding the observer to avoid calling
+ // OnTraceLogEnabled twice.
bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
TraceLog::GetInstance()->AddEnabledStateObserver(this);
if (is_tracing_already_enabled)
OnTraceLogEnabled();
@@ -262,6 +310,11 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
whitelisted_for_background_mode);
+ if (options.is_fast_polling_supported) {
+ DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
+ "polling must NOT be thread bound.";
+ }
+
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -269,6 +322,15 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
// path for RenderThreadImpl::Init().
if (already_registered)
return;
+
+ // The list of polling MDPs is populated in OnTraceLogEnabled(). This code
+ // deals with the case of an MDP capable of fast polling that is registered
+ // after OnTraceLogEnabled().
+ if (options.is_fast_polling_supported && dump_thread_) {
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
+ Unretained(this), mdpinfo));
+ }
}
if (heap_profiling_enabled_)
@@ -307,9 +369,18 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
// removed from |pending_dump_providers|.
+ // - When the provider is removed from |dump_providers_for_polling_|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
- } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
+ subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ // If the dump provider's name is on |strict_thread_check_blacklist_|, the
+ // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
+ // fired even when tracing is not enabled (stricter).
+ // TODO(ssid): Remove this condition after removing all the dump providers
+ // in the blacklist and the buildbots are no longer flakily hitting the
+ // DCHECK, crbug.com/643438.
+
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
// - The MDP has specified a sequenced task runner affinity AND the
@@ -325,6 +396,13 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
+ if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
+ DCHECK(take_mdp_ownership_and_delete_async);
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
+ Unretained(this), *mdp_iter));
+ }
+
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -334,6 +412,28 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
+void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.insert(mdpinfo);
+
+ // Notify that polling is ready when the first polling-supported provider is
+ // registered. This handles the case where OnTraceLogEnabled() did not notify
+ // readiness because no polling-supported MDP had been registered yet.
+ if (dump_providers_for_polling_.size() == 1)
+ dump_scheduler_->NotifyPollingSupported();
+}
+
+void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.erase(mdpinfo);
+ DCHECK(!dump_providers_for_polling_.empty())
+ << "All polling MDPs cannot be unregistered.";
+}
+
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
@@ -413,8 +513,10 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
// with disallowed modes. If |session_state_| is null then tracing is
// disabled.
CHECK(!session_state_ ||
- session_state_->memory_dump_config().allowed_dump_modes.count(
- args.level_of_detail));
+ session_state_->IsDumpModeAllowed(args.level_of_detail));
+
+ if (dump_scheduler_)
+ dump_scheduler_->NotifyDumpTriggered();
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -570,6 +672,16 @@ void MemoryDumpManager::InvokeOnMemoryDump(
TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
"dump_provider.name", mdpinfo->name);
+ // A stack-allocated string with the dump provider name is useful for
+ // debugging crashes that happen while invoking a dump after a
+ // |dump_provider| was not unregistered in a safe way.
+ // TODO(ssid): Remove this after fixing crbug.com/643438.
+ char provider_name_for_debugging[16];
+ strncpy(provider_name_for_debugging, mdpinfo->name,
+ sizeof(provider_name_for_debugging) - 1);
+ provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
+ base::debug::Alias(provider_name_for_debugging);
+
// Pid of the target process being dumped. Often kNullProcessId (= current
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
@@ -587,6 +699,28 @@ void MemoryDumpManager::InvokeOnMemoryDump(
SetupNextMemoryDump(std::move(pmd_async_state));
}
+bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
+#if DCHECK_IS_ON()
+ {
+ AutoLock lock(lock_);
+ if (dump_thread_)
+ DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
+ }
+#endif
+ if (dump_providers_for_polling_.empty())
+ return false;
+
+ *memory_total = 0;
+ // Note that we call PollFastMemoryTotal() even if the dump provider is
+ // disabled (unregistered). This is to avoid taking the lock while polling.
+ for (const auto& mdpinfo : dump_providers_for_polling_) {
+ uint64_t value = 0;
+ mdpinfo->dump_provider->PollFastMemoryTotal(&value);
+ *memory_total += value;
+ }
+ return true;
+}
+
// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
@@ -663,11 +797,15 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- const TraceConfig trace_config =
+ const TraceConfig& trace_config =
TraceLog::GetInstance()->GetCurrentTraceConfig();
+ const TraceConfig::MemoryDumpConfig& memory_dump_config =
+ trace_config.memory_dump_config();
scoped_refptr<MemoryDumpSessionState> session_state =
new MemoryDumpSessionState;
- session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
+ session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
+ session_state->set_heap_profiler_breakdown_threshold_bytes(
+ memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
// deduplicator will be in use. Add metadata events to write the frames
@@ -681,14 +819,26 @@ void MemoryDumpManager::OnTraceLogEnabled() {
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
- WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state, &MemoryDumpSessionState::type_name_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::type_name_deduplicator));
+ }
+
+ std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
+ new MemoryDumpScheduler(this, dump_thread->task_runner()));
+ DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+ trigger.min_time_between_dumps_ms);
}
{
@@ -699,48 +849,65 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
+ dump_scheduler_ = std::move(dump_scheduler);
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
+ dump_providers_for_polling_.clear();
+ for (const auto& mdpinfo : dump_providers_) {
+ if (mdpinfo->options.is_fast_polling_supported)
+ dump_providers_for_polling_.insert(mdpinfo);
}
+ // Notify that polling is supported only if some polling-supported provider
+ // was registered; otherwise RegisterPollingMDPOnDumpThread() will notify
+ // when the first polling MDP registers.
+ if (!dump_providers_for_polling_.empty())
+ dump_scheduler_->NotifyPollingSupported();
+
+ // Only coordinator process triggers periodic global memory dumps.
+ if (is_coordinator_)
+ dump_scheduler_->NotifyPeriodicTriggerSupported();
}
- // Enable periodic dumps if necessary.
- periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
// There might be a memory dump in progress while this happens. Therefore,
// ensure that the MDM state which depends on the tracing enabled / disabled
// state is always accessed by the dumping methods holding the |lock_|.
+ if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
+ return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
+ std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
+ scheduler = std::move(dump_scheduler_);
}
+ scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
- periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
+
+ // |dump_providers_for_polling_| must be cleared only after the dump thread is
+ // stopped (polling tasks are done).
+ {
+ AutoLock lock(lock_);
+ for (const auto& mdpinfo : dump_providers_for_polling_)
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+ dump_providers_for_polling_.clear();
+ }
}
bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
AutoLock lock(lock_);
if (!session_state_)
return false;
- return session_state_->memory_dump_config().allowed_dump_modes.count(
- dump_mode) != 0;
+ return session_state_->IsDumpModeAllowed(dump_mode);
}
uint64_t MemoryDumpManager::GetTracingProcessId() const {
@@ -806,78 +973,5 @@ ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
return iter->second.get();
}
-MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
-
-MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
- Stop();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
- if (triggers_list.empty())
- return;
-
- // At the moment the periodic support is limited to at most one periodic
- // trigger per dump mode. All intervals should be an integer multiple of the
- // smallest interval specified.
- periodic_dumps_count_ = 0;
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t light_dump_period_ms = 0;
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 3u);
- auto* mdm = MemoryDumpManager::GetInstance();
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK_NE(0u, config.periodic_interval_ms);
- switch (config.level_of_detail) {
- case MemoryDumpLevelOfDetail::BACKGROUND:
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
- break;
- case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, light_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
- light_dump_period_ms = config.periodic_interval_ms;
- break;
- case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, heavy_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
- heavy_dump_period_ms = config.periodic_interval_ms;
- break;
- }
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
- }
-
- DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
- light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
-
- timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
- base::Unretained(this)));
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
- if (IsRunning()) {
- timer_.Stop();
- }
-}
-
-bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
- return timer_.IsRunning();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_dumps_count_;
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 06b772c6e4..92cc2f401b 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -18,7 +18,6 @@
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
-#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
@@ -33,6 +32,7 @@ namespace trace_event {
class MemoryDumpManagerDelegate;
class MemoryDumpProvider;
class MemoryDumpSessionState;
+class MemoryDumpScheduler;
// This is the interface exposed to the rest of the codebase to deal with
// memory tracing. The main entry point for clients is represented by
@@ -94,7 +94,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// This method takes ownership of the dump provider and guarantees that:
// - The |mdp| will be deleted at some point in the near future.
// - Its deletion will not happen concurrently with the OnMemoryDump() call.
- // Note that OnMemoryDump() calls can still happen after this method returns.
+ // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
+ // after this method returns.
void UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp);
@@ -116,6 +117,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
+ // Enable heap profiling if kEnableHeapProfiling is specified.
+ void EnableHeapProfilingIfNeeded();
+
// Returns true if the dump mode is allowed for current tracing session.
bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
@@ -151,6 +155,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
+ friend class MemoryDumpScheduler;
// Descriptor used to hold information about registered MDPs.
// Some important considerations about lifetime of this object:
@@ -273,31 +278,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
- // Sets up periodic memory dump timers to start global dump requests based on
- // the dump triggers from trace config.
- class BASE_EXPORT PeriodicGlobalDumpTimer {
- public:
- PeriodicGlobalDumpTimer();
- ~PeriodicGlobalDumpTimer();
-
- void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
- triggers_list);
- void Stop();
-
- bool IsRunning();
-
- private:
- // Periodically called by the timer.
- void RequestPeriodicGlobalDump();
-
- RepeatingTimer timer_;
- uint32_t periodic_dumps_count_;
- uint32_t light_dump_rate_;
- uint32_t heavy_dump_rate_;
-
- DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
- };
-
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
@@ -308,9 +288,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
static void FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
- // Enable heap profiling if kEnableHeapProfiling is specified.
- void EnableHeapProfilingIfNeeded();
-
// Internal, used only by MemoryDumpManagerDelegate.
// Creates a memory dump for the current process and appends it to the trace.
// |callback| will be invoked asynchronously upon completion on the same
@@ -329,6 +306,14 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// runner.
void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+ // Records a quick total memory usage in |memory_total|. This is used to track
+ // and detect peaks in the memory usage of the process without having to
+ // record all data from dump providers. The value is approximate, trading
+ // accuracy for speed, and is not consistent with the rest of the memory-infra
+ // metrics. Must be called on the dump thread.
+ // Returns true if |memory_total| was updated by polling at least one MDP.
+ bool PollFastMemoryTotal(uint64_t* memory_total);
+
// Helper for the RegisterDumpProvider* functions.
void RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
@@ -340,13 +325,29 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
+ // Adds / removes provider that supports polling to
+ // |dump_providers_for_polling_|.
+ void RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+ void UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+
// An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
+ // A copy of the mdpinfo list for providers that support polling. It must be
+ // accessed only on the dump thread, if a dump thread exists.
+ MemoryDumpProviderInfo::OrderedSet dump_providers_for_polling_;
+
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
+ // The list of names of dump providers that are blacklisted from the strict
+ // thread affinity check on unregistration.
+ std::unordered_set<StringPiece, StringPieceHash>
+ strict_thread_check_blacklist_;
+
MemoryDumpManagerDelegate* delegate_; // Not owned.
// When true, this instance is in charge of coordinating periodic dumps.
@@ -360,8 +361,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// dump_providers_enabled_ list) when tracing is not enabled.
subtle::AtomicWord memory_tracing_enabled_;
- // For time-triggered periodic dumps.
- PeriodicGlobalDumpTimer periodic_dump_timer_;
+ // For triggering memory dumps.
+ std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index d14093cbcc..51d41943fb 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -16,13 +16,16 @@
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/sequenced_worker_pool_owner.h"
#include "base/test/test_io_thread.h"
#include "base/test/trace_event_analyzer.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
@@ -70,8 +73,10 @@ void RegisterDumpProvider(
mdm->set_dumper_registrations_ignored_for_testing(true);
}
-void RegisterDumpProvider(MemoryDumpProvider* mdp) {
- RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
+void RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
}
void RegisterDumpProviderWithSequencedTaskRunner(
@@ -94,6 +99,20 @@ void OnTraceDataCollected(Closure quit_closure,
quit_closure.Run();
}
+// Posts |task| to |task_runner| and blocks until it is executed.
+void PostTaskAndWait(const tracked_objects::Location& from_here,
+ SequencedTaskRunner* task_runner,
+ const base::Closure& task) {
+ base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(from_here, task);
+ task_runner->PostTask(
+ FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
+ // The SequencedTaskRunner guarantees that |event| will only be signaled after
+ // |task| is executed.
+ event.Wait();
+}
+
} // namespace
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
@@ -124,6 +143,8 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
MOCK_METHOD0(Destructor, void());
MOCK_METHOD2(OnMemoryDump,
bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
+ MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
+ MOCK_METHOD0(SuspendFastMemoryPolling, void());
MockMemoryDumpProvider() : enable_mock_destructor(false) {
ON_CALL(*this, OnMemoryDump(_, _))
@@ -135,6 +156,10 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
EXPECT_TRUE(pmd->session_state().get() != nullptr);
return true;
}));
+
+ ON_CALL(*this, PollFastMemoryTotal(_))
+ .WillByDefault(
+ Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
}
~MockMemoryDumpProvider() override {
if (enable_mock_destructor)
@@ -147,8 +172,7 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
class TestSequencedTaskRunner : public SequencedTaskRunner {
public:
TestSequencedTaskRunner()
- : worker_pool_(
- new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+ : worker_pool_(2 /* max_threads */, "Test Task Runner"),
enabled_(true),
num_of_post_tasks_(0) {}
@@ -166,19 +190,21 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
const Closure& task,
TimeDelta delay) override {
num_of_post_tasks_++;
- if (enabled_)
- return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+ if (enabled_) {
+ return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
+ task);
+ }
return false;
}
bool RunsTasksOnCurrentThread() const override {
- return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+ return worker_pool_.pool()->RunsTasksOnCurrentThread();
}
private:
~TestSequencedTaskRunner() override {}
- scoped_refptr<SequencedWorkerPool> worker_pool_;
+ SequencedWorkerPoolOwner worker_pool_;
const SequencedWorkerPool::SequenceToken token_;
bool enabled_;
unsigned num_of_post_tasks_;
@@ -215,6 +241,10 @@ class MemoryDumpManagerTest : public testing::Test {
task_runner->PostTask(FROM_HERE, closure);
}
+ void PollFastMemoryTotal(uint64_t* memory_total) {
+ mdm_->PollFastMemoryTotal(memory_total);
+ }
+
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
@@ -244,7 +274,7 @@ class MemoryDumpManagerTest : public testing::Test {
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
bool IsPeriodicDumpingEnabled() const {
- return mdm_->periodic_dump_timer_.IsRunning();
+ return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
}
int GetMaxConsecutiveFailuresCount() const {
@@ -268,7 +298,7 @@ class MemoryDumpManagerTest : public testing::Test {
TEST_F(MemoryDumpManagerTest, SingleDumper) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
// Check that the dumper is not called if the memory category is not enabled.
EnableTracingWithLegacyCategories("foobar-but-not-memory");
@@ -309,7 +339,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
@@ -320,7 +350,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
// Check that requesting dumps with low level of detail actually propagates to
// OnMemoryDump() call on dump providers.
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
@@ -335,8 +365,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const MemoryDumpSessionState* session_state =
@@ -372,7 +402,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
MockMemoryDumpProvider mdp2;
// Enable only mdp1.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -383,7 +413,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Invert: enable mdp1 and disable mdp2.
mdm_->UnregisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -393,7 +423,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
DisableTracing();
// Enable both mdp1 and mdp2.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -409,7 +439,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -431,7 +461,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
{
@@ -443,9 +473,9 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -567,8 +597,8 @@ TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
@@ -601,7 +631,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
@@ -611,7 +641,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
.WillOnce(Return(true))
.WillOnce(
Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
return true;
}))
.WillRepeatedly(Return(true));
@@ -687,13 +717,16 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
// unregister the other one.
for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
int other_idx = (mdps.front() == mdp);
- TestIOThread* other_thread = threads[other_idx].get();
+ // TestIOThread's task runner must be obtained from the main thread but can
+ // then be used from other threads.
+ scoped_refptr<SingleThreadTaskRunner> other_runner =
+ threads[other_idx]->task_runner();
MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
- auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
+ auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
- other_thread->PostTaskAndWait(
- FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
- base::Unretained(&*mdm_), other_mdp));
+ PostTaskAndWait(FROM_HERE, other_runner.get(),
+ base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
on_memory_dump_call_count++;
return true;
};
@@ -716,6 +749,75 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
+ mdp1->enable_mock_destructor = true;
+ mdp2->enable_mock_destructor = true;
+
+ EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp1, Destructor());
+ EXPECT_CALL(*mdp2, Destructor());
+
+ MemoryDumpProvider::Options options;
+ options.is_fast_polling_supported = true;
+ RegisterDumpProvider(mdp1.get(), nullptr, options);
+
+ RunLoop run_loop;
+ scoped_refptr<SingleThreadTaskRunner> test_task_runner =
+ ThreadTaskRunnerHandle::Get();
+ auto quit_closure = run_loop.QuitClosure();
+
+ const int kPollsToQuit = 10;
+ int call_count = 0;
+ MemoryDumpManager* mdm = mdm_.get();
+ const auto poll_function1 = [&call_count, &test_task_runner, quit_closure,
+ &mdp2, mdm, &options, kPollsToQuit,
+ this](uint64_t* total) -> void {
+ ++call_count;
+ if (call_count == 1)
+ RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName);
+ else if (call_count == 4)
+ mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
+ else if (call_count == kPollsToQuit)
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
+
+ // Record an increase of 1 GiB of memory at each call.
+ *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024;
+ };
+ EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
+ .Times(testing::AtLeast(kPollsToQuit))
+ .WillRepeatedly(Invoke(poll_function1));
+
+ // Depending on the order of PostTask calls, mdp2 might be registered after
+ // all polls or in between polls.
+ EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
+ .Times(Between(0, kPollsToQuit - 1))
+ .WillRepeatedly(Return());
+
+ MemoryDumpScheduler::SetPollingIntervalForTesting(1);
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3));
+
+ int last_poll_to_request_dump = -2;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
+ .Times(testing::AtLeast(2))
+ .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count](
+ const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) -> void {
+ // Minimum number of polls between dumps must be 3 (polling interval is
+ // 1ms).
+ EXPECT_GE(call_count - last_poll_to_request_dump, 3);
+ last_poll_to_request_dump = call_count;
+ }));
+
+ run_loop.Run();
+ DisableTracing();
+ mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
+}
+
// If a thread (with a dump provider living on it) is torn down during a dump
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
@@ -738,9 +840,14 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
int other_idx = (mdps.front() == mdp);
TestIOThread* other_thread = threads[other_idx].get();
- auto on_dump = [other_thread, &on_memory_dump_call_count](
+ // TestIOThread isn't thread-safe and must be stopped on the |main_runner|.
+ scoped_refptr<SequencedTaskRunner> main_runner =
+ SequencedTaskRunnerHandle::Get();
+ auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
- other_thread->Stop();
+ PostTaskAndWait(
+ FROM_HERE, main_runner.get(),
+ base::Bind(&TestIOThread::Stop, base::Unretained(other_thread)));
on_memory_dump_call_count++;
return true;
};
@@ -768,7 +875,7 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -783,7 +890,7 @@ TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
// began, it will still late-join the party (real use case: startup tracing).
TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
// First check that a RequestGlobalDump() issued before the MemoryDumpManager
@@ -966,7 +1073,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
// Create both same-thread MDP and another MDP with dedicated thread
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
MockMemoryDumpProvider mdp2;
RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
@@ -1085,8 +1192,8 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
thread_ref = PlatformThread::CurrentRef();
TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
- thread_for_unregistration.PostTaskAndWait(
- FROM_HERE,
+ PostTaskAndWait(
+ FROM_HERE, thread_for_unregistration.task_runner().get(),
base::Bind(
&MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
base::Unretained(MemoryDumpManager::GetInstance()),
@@ -1116,7 +1223,7 @@ TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
InitializeMemoryDumpManager(false /* is_coordinator */);
SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
- RegisterDumpProvider(mdp1.get());
+ RegisterDumpProvider(mdp1.get(), nullptr);
std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
kWhitelistedMDPName);
@@ -1167,5 +1274,22 @@ TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestBlacklistedUnsafeUnregistration) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ RegisterDumpProvider(&mdp1, nullptr, kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Not calling UnregisterAndDeleteDumpProviderSoon() should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+
+ Thread thread("test thread");
+ thread.Start();
+ RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Unregistering on wrong thread should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+ thread.Stop();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index 2c502861d8..76c2969e96 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -22,7 +22,8 @@ class BASE_EXPORT MemoryDumpProvider {
struct Options {
Options()
: target_pid(kNullProcessId),
- dumps_on_single_thread_task_runner(false) {}
+ dumps_on_single_thread_task_runner(false),
+ is_fast_polling_supported(false) {}
// If the dump provider generates dumps on behalf of another process,
// |target_pid| contains the pid of that process.
@@ -34,6 +35,11 @@ class BASE_EXPORT MemoryDumpProvider {
// a SingleThreadTaskRunner, which is usually the case. It is faster to run
// all providers that run on the same thread together without thread hops.
bool dumps_on_single_thread_task_runner;
+
+ // Set to true if the dump provider implementation supports high frequency
+ // polling. Only providers running without task runner affinity are
+ // supported.
+ bool is_fast_polling_supported;
};
virtual ~MemoryDumpProvider() {}
@@ -52,6 +58,18 @@ class BASE_EXPORT MemoryDumpProvider {
// collecting extensive allocation data, if supported.
virtual void OnHeapProfilingEnabled(bool) {}
+ // Quickly records the total memory usage in |memory_total|. This method will
+ // be called only when the dump provider registration has
+ // |is_fast_polling_supported| set to true. It is used for polling at high
+ // frequency to detect peaks. See the comment on the
+ // |is_fast_polling_supported| option if you need to override this method.
+ virtual void PollFastMemoryTotal(uint64_t* /* memory_total */) {}
+
+ // Indicates that fast memory polling is not going to be used in the near
+ // future and that the MDP can tear down any resources kept around for fast
+ // memory polling.
+ virtual void SuspendFastMemoryPolling() {}
+
protected:
MemoryDumpProvider() {}
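To make the new polling contract concrete, here is a hypothetical provider wired for fast polling, sketched against the interface above. The class name and the atomic counter are invented for illustration; a real provider would additionally be registered with Options::is_fast_polling_supported set to true and no task runner affinity.

    #include <atomic>
    #include <cstdint>

    #include "base/trace_event/memory_allocator_dump.h"
    #include "base/trace_event/memory_dump_provider.h"
    #include "base/trace_event/process_memory_dump.h"

    class PollableCounterDumpProvider
        : public base::trace_event::MemoryDumpProvider {
     public:
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& /*args*/,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        auto* dump = pmd->CreateAllocatorDump("pollable_counter");
        dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                        base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                        total_bytes_.load(std::memory_order_relaxed));
        return true;
      }

      // Called at high frequency on the dump thread; must not take locks.
      void PollFastMemoryTotal(uint64_t* memory_total) override {
        *memory_total = total_bytes_.load(std::memory_order_relaxed);
      }

      void SuspendFastMemoryPolling() override {
        // Nothing is cached for polling in this sketch, so nothing to tear down.
      }

     private:
      std::atomic<uint64_t> total_bytes_{0};
    };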
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index e6c5b87b22..bf72bef5e4 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -12,19 +12,28 @@ namespace trace_event {
// static
const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
- case MemoryDumpType::TASK_BEGIN:
- return "task_begin";
- case MemoryDumpType::TASK_END:
- return "task_end";
case MemoryDumpType::PERIODIC_INTERVAL:
return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
return "explicitly_triggered";
+ case MemoryDumpType::PEAK_MEMORY_USAGE:
+ return "peak_memory_usage";
}
NOTREACHED();
return "unknown";
}
+MemoryDumpType StringToMemoryDumpType(const std::string& str) {
+ if (str == "periodic_interval")
+ return MemoryDumpType::PERIODIC_INTERVAL;
+ if (str == "explicitly_triggered")
+ return MemoryDumpType::EXPLICITLY_TRIGGERED;
+ if (str == "peak_memory_usage")
+ return MemoryDumpType::PEAK_MEMORY_USAGE;
+ NOTREACHED();
+ return MemoryDumpType::LAST;
+}
+
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index f3ff9d8e3b..90a866fa7a 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -18,16 +18,19 @@ namespace base {
namespace trace_event {
// Captures the reason why a memory dump is being requested. This is to allow
-// selective enabling of dumps, filtering and post-processing.
+// selective enabling of dumps, filtering and post-processing. Important: this
+// must be kept consistent with
+// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
enum class MemoryDumpType {
- TASK_BEGIN, // Dumping memory at the beginning of a message-loop task.
- TASK_END, // Dumping memory at the ending of a message-loop task.
- PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
+ PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
- LAST = EXPLICITLY_TRIGGERED // For IPC macros.
+ PEAK_MEMORY_USAGE, // Dumping memory at detected peak total memory usage.
+ LAST = PEAK_MEMORY_USAGE // For IPC macros.
};
// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
+// Important: this must be kept consistent with
+// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
enum class MemoryDumpLevelOfDetail : uint32_t {
FIRST,
@@ -50,7 +53,8 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
};
// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()).
+// MemoryDumpManager::RequestGlobalMemoryDump()). Important: this must be kept
+// consistent with services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -72,6 +76,8 @@ using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
+
BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail);
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
new file mode 100644
index 0000000000..eaa8d63661
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -0,0 +1,304 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include "base/process/process_metrics.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+// Threshold on the increase in memory since the last dump beyond which a new
+// dump must be triggered.
+int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024; // 50MiB
+const uint32_t kMemoryTotalsPollingInterval = 25;
+uint32_t g_polling_interval_ms_for_testing = 0;
+} // namespace
+
+MemoryDumpScheduler::MemoryDumpScheduler(
+ MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+ : mdm_(mdm), polling_state_(polling_task_runner) {}
+
+MemoryDumpScheduler::~MemoryDumpScheduler() {}
+
+void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ uint32_t min_time_between_dumps_ms) {
+ if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
+ DCHECK(!periodic_state_.is_configured);
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ DCHECK_NE(0u, min_time_between_dumps_ms);
+
+ polling_state_.level_of_detail = level_of_detail;
+ polling_state_.min_polls_between_dumps =
+ (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
+ polling_state_.polling_interval_ms;
+ polling_state_.current_state = PollingTriggerState::CONFIGURED;
+ } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ periodic_state_.is_configured = true;
+ DCHECK_NE(0u, min_time_between_dumps_ms);
+ switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
+ periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
+ periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
+ break;
+ }
+
+ periodic_state_.min_timer_period_ms = std::min(
+ periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
+ DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
+ periodic_state_.min_timer_period_ms);
+ DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
+ periodic_state_.min_timer_period_ms);
+ }
+}
+
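A worked example of the ceiling division above, assuming the default 25 ms polling interval (kMemoryTotalsPollingInterval) and min_time_between_dumps_ms = 60: min_polls_between_dumps = (60 + 25 - 1) / 25 = 3, so at least three polls (75 ms) must elapse between consecutive peak-triggered dumps; the trigger can never fire more often than requested.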
+void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
+ if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
+ return;
+ periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
+ periodic_state_.min_timer_period_ms;
+ periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
+ periodic_state_.min_timer_period_ms;
+
+ periodic_state_.dump_count = 0;
+ periodic_state_.timer.Start(
+ FROM_HERE,
+ TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
+ Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
+}
+
+void MemoryDumpScheduler::NotifyPollingSupported() {
+ if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
+ return;
+
+ polling_state_.current_state = PollingTriggerState::ENABLED;
+ polling_state_.ResetTotals();
+
+ polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
+}
+
+void MemoryDumpScheduler::NotifyDumpTriggered() {
+ if (polling_state_.polling_task_runner &&
+ !polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+ polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
+ return;
+ }
+ if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ return;
+
+ polling_state_.ResetTotals();
+}
+
+void MemoryDumpScheduler::DisableAllTriggers() {
+ if (periodic_state_.timer.IsRunning())
+ periodic_state_.timer.Stop();
+ DisablePolling();
+}
+
+void MemoryDumpScheduler::DisablePolling() {
+ if (!polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+ if (polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
+ return;
+ }
+ polling_state_.current_state = PollingTriggerState::DISABLED;
+ polling_state_.polling_task_runner = nullptr;
+}
+
+// static
+void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
+ g_polling_interval_ms_for_testing = interval;
+}
+
+bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
+ return periodic_state_.timer.IsRunning();
+}
+
+void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (periodic_state_.light_dumps_rate > 0 &&
+ periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (periodic_state_.heavy_dumps_rate > 0 &&
+ periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_state_.dump_count;
+
+ mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
+void MemoryDumpScheduler::PollMemoryOnPollingThread() {
+ if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ return;
+
+ uint64_t polled_memory = 0;
+ bool res = mdm_->PollFastMemoryTotal(&polled_memory);
+ DCHECK(res);
+ if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+ TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
+ polled_memory / 1024 / 1024);
+ }
+
+ if (ShouldTriggerDump(polled_memory)) {
+ TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
+ "Peak memory dump Triggered",
+ TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
+ polled_memory / 1024 / 1024);
+
+ mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
+ polling_state_.level_of_detail);
+ }
+
+ // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
+ TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
+}
+
+bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
+ // This function tries to detect peak memory usage as discussed in
+ // https://goo.gl/0kOU4A.
+
+ if (current_memory_total == 0)
+ return false;
+
+ bool should_dump = false;
+ ++polling_state_.num_polls_from_last_dump;
+ if (polling_state_.last_dump_memory_total == 0) {
+ // If this is the first sample, trigger a memory dump.
+ should_dump = true;
+ } else if (polling_state_.min_polls_between_dumps >
+ polling_state_.num_polls_from_last_dump) {
+ return false;
+ }
+
+ int64_t increase_from_last_dump =
+ current_memory_total - polling_state_.last_dump_memory_total;
+ should_dump |=
+ increase_from_last_dump > polling_state_.memory_increase_threshold;
+ should_dump |= IsCurrentSamplePeak(current_memory_total);
+ if (should_dump)
+ polling_state_.ResetTotals();
+ return should_dump;
+}
+
+bool MemoryDumpScheduler::IsCurrentSamplePeak(
+ uint64_t current_memory_total_bytes) {
+ uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
+ polling_state_.last_memory_totals_kb_index =
+ (polling_state_.last_memory_totals_kb_index + 1) %
+ PollingTriggerState::kMaxNumMemorySamples;
+ uint64_t mean = 0;
+ for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+ if (polling_state_.last_memory_totals_kb[i] == 0) {
+ // Not enough samples to detect peaks.
+ polling_state_
+ .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ current_memory_total_kb;
+ return false;
+ }
+ mean += polling_state_.last_memory_totals_kb[i];
+ }
+ mean = mean / PollingTriggerState::kMaxNumMemorySamples;
+ uint64_t variance = 0;
+ for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+ variance += (polling_state_.last_memory_totals_kb[i] - mean) *
+ (polling_state_.last_memory_totals_kb[i] - mean);
+ }
+ variance = variance / PollingTriggerState::kMaxNumMemorySamples;
+
+ polling_state_
+ .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ current_memory_total_kb;
+
+ // If stddev is below 0.2% of the mean, consider the process inactive.
+ bool is_stddev_low = variance < mean / 500 * mean / 500;
+ if (is_stddev_low)
+ return false;
+
+ // Assuming a roughly normal distribution, a sample exceeds
+ // (mean + 3.69 * stddev) with only 0.01% probability, so such a sample is
+ // treated as a peak.
+ return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
+ (3.69 * 3.69 * variance);
+}
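+// Numeric illustration with hypothetical values: for mean = 100000 KB and
+// variance = 1000000 KB^2 (stddev = 1000 KB), only samples above
+// 100000 + 3.69 * 1000 = 103690 KB are flagged as peaks.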
+
+MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
+ : is_configured(false),
+ dump_count(0),
+ min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
+ light_dumps_rate(0),
+ heavy_dumps_rate(0),
+ light_dump_period_ms(0),
+ heavy_dump_period_ms(0) {}
+
+MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
+ DCHECK(!timer.IsRunning());
+}
+
+MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+ : current_state(DISABLED),
+ level_of_detail(MemoryDumpLevelOfDetail::FIRST),
+ polling_task_runner(polling_task_runner),
+ polling_interval_ms(g_polling_interval_ms_for_testing
+ ? g_polling_interval_ms_for_testing
+ : kMemoryTotalsPollingInterval),
+ min_polls_between_dumps(0),
+ num_polls_from_last_dump(-1),
+ last_dump_memory_total(0),
+ memory_increase_threshold(0),
+ last_memory_totals_kb_index(0) {}
+
+MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
+ DCHECK(!polling_task_runner);
+}
+
+void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
+ if (!memory_increase_threshold) {
+ memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+ // Set threshold to 1% of total system memory.
+ SystemMemoryInfoKB meminfo;
+ bool res = GetSystemMemoryInfo(&meminfo);
+ if (res)
+ memory_increase_threshold = (meminfo.total / 100) * 1024;
+#endif
+ }
+
+ // Update |last_dump_memory_total| from the tracked totals if this is not
+ // the first poll.
+ if (num_polls_from_last_dump >= 0 &&
+ last_memory_totals_kb[last_memory_totals_kb_index]) {
+ last_dump_memory_total =
+ last_memory_totals_kb[last_memory_totals_kb_index] * 1024;
+ }
+ num_polls_from_last_dump = 0;
+ for (uint32_t i = 0; i < kMaxNumMemorySamples; ++i)
+ last_memory_totals_kb[i] = 0;
+ last_memory_totals_kb_index = 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
new file mode 100644
index 0000000000..fd21fce834
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -0,0 +1,141 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+
+namespace trace_event {
+
+class MemoryDumpManager;
+
+// Schedules global dump requests based on the triggers added.
+class BASE_EXPORT MemoryDumpScheduler {
+ public:
+ MemoryDumpScheduler(
+ MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ ~MemoryDumpScheduler();
+
+ // Adds triggers for scheduling global dumps. Periodic and peak triggers
+ // cannot both be added. At the moment, periodic support is limited to at
+ // most one trigger per dump mode, and peak triggers are limited to at most
+ // one. All intervals should be an integral multiple of the smallest
+ // interval specified.
+ void AddTrigger(MemoryDumpType trigger_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ uint32_t min_time_between_dumps_ms);
+
+ // Starts periodic dumps.
+ void NotifyPeriodicTriggerSupported();
+
+ // Starts polling memory total.
+ void NotifyPollingSupported();
+
+ // Resets the trigger timing so that the minimum time between dumps is
+ // respected.
+ void NotifyDumpTriggered();
+
+ // Disables all triggers.
+ void DisableAllTriggers();
+
+ private:
+ friend class MemoryDumpManagerTest;
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
+
+ // Helper struct to schedule periodic memory dumps.
+ struct PeriodicTriggerState {
+ PeriodicTriggerState();
+ ~PeriodicTriggerState();
+
+ bool is_configured;
+
+ RepeatingTimer timer;
+ uint32_t dump_count;
+ uint32_t min_timer_period_ms;
+ uint32_t light_dumps_rate;
+ uint32_t heavy_dumps_rate;
+
+ uint32_t light_dump_period_ms;
+ uint32_t heavy_dump_period_ms;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
+ };
+
+ struct PollingTriggerState {
+ enum State {
+ CONFIGURED, // Polling trigger was added.
+ ENABLED, // Polling is running.
+ DISABLED // Polling is disabled.
+ };
+
+ static const uint32_t kMaxNumMemorySamples = 50;
+
+ explicit PollingTriggerState(
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ ~PollingTriggerState();
+
+ // Helper to clear the tracked memory totals and the poll count since the
+ // last dump.
+ void ResetTotals();
+
+ State current_state;
+ MemoryDumpLevelOfDetail level_of_detail;
+
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
+ uint32_t polling_interval_ms;
+
+ // Minimum number of polls after the last dump before the next dump can be
+ // triggered.
+ int min_polls_between_dumps;
+ int num_polls_from_last_dump;
+
+ uint64_t last_dump_memory_total;
+ int64_t memory_increase_threshold;
+ uint64_t last_memory_totals_kb[kMaxNumMemorySamples];
+ uint32_t last_memory_totals_kb_index;
+
+ DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
+ };
+
+ // Helper to disable polling on the polling thread.
+ void DisablePolling();
+
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ // Polls memory usage and triggers a dump if a peak is detected.
+ void PollMemoryOnPollingThread();
+
+ // Returns true if a dump should be triggered for the current sample.
+ bool ShouldTriggerDump(uint64_t current_memory_total);
+
+ // Helper to detect peaks in memory usage.
+ bool IsCurrentSamplePeak(uint64_t current_memory_total);
+
+ // Must be set before enabling tracing.
+ static void SetPollingIntervalForTesting(uint32_t interval);
+
+ // True if periodic dumping is enabled.
+ bool IsPeriodicTimerRunningForTesting();
+
+ MemoryDumpManager* mdm_;
+
+ PeriodicTriggerState periodic_state_;
+ PollingTriggerState polling_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
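
A minimal usage sketch of the scheduler (illustrative only; assumes an
initialized MemoryDumpManager, a dedicated base::Thread for polling, and a
MessageLoop on the calling thread for the periodic timer):

  MemoryDumpScheduler scheduler(MemoryDumpManager::GetInstance(),
                                polling_thread.task_runner());
  // One light dump every 250 ms, upgraded to a detailed dump every 2000 ms
  // (2000 is an integral multiple of 250, as AddTrigger requires).
  scheduler.AddTrigger(MemoryDumpType::PERIODIC_INTERVAL,
                       MemoryDumpLevelOfDetail::LIGHT, 250);
  scheduler.AddTrigger(MemoryDumpType::PERIODIC_INTERVAL,
                       MemoryDumpLevelOfDetail::DETAILED, 2000);
  scheduler.NotifyPeriodicTriggerSupported();  // Starts the repeating timer.
  // ... later, e.g. when tracing stops:
  scheduler.DisableAllTriggers();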
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index b3d9a8ccfc..d26b82a5b7 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -7,8 +7,8 @@
namespace base {
namespace trace_event {
-MemoryDumpSessionState::MemoryDumpSessionState() {}
-
+MemoryDumpSessionState::MemoryDumpSessionState()
+ : heap_profiler_breakdown_threshold_bytes_(0) {}
MemoryDumpSessionState::~MemoryDumpSessionState() {}
void MemoryDumpSessionState::SetStackFrameDeduplicator(
@@ -23,9 +23,14 @@ void MemoryDumpSessionState::SetTypeNameDeduplicator(
type_name_deduplicator_ = std::move(type_name_deduplicator);
}
-void MemoryDumpSessionState::SetMemoryDumpConfig(
- const TraceConfig::MemoryDumpConfig& config) {
- memory_dump_config_ = config;
+void MemoryDumpSessionState::SetAllowedDumpModes(
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes) {
+ allowed_dump_modes_ = allowed_dump_modes;
+}
+
+bool MemoryDumpSessionState::IsDumpModeAllowed(
+ MemoryDumpLevelOfDetail dump_mode) const {
+ return allowed_dump_modes_.count(dump_mode) != 0;
}
} // namespace trace_event
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index f199ec1a2f..46092cb483 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -6,11 +6,12 @@
#define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
#include <memory>
+#include <set>
#include "base/base_export.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/trace_config.h"
+#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
@@ -40,11 +41,18 @@ class BASE_EXPORT MemoryDumpSessionState
void SetTypeNameDeduplicator(
std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
- const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
- return memory_dump_config_;
+ void SetAllowedDumpModes(
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes);
+
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) const;
+
+ void set_heap_profiler_breakdown_threshold_bytes(uint32_t value) {
+ heap_profiler_breakdown_threshold_bytes_ = value;
}
- void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
+ uint32_t heap_profiler_breakdown_threshold_bytes() const {
+ return heap_profiler_breakdown_threshold_bytes_;
+ }
private:
friend class RefCountedThreadSafe<MemoryDumpSessionState>;
@@ -58,9 +66,9 @@ class BASE_EXPORT MemoryDumpSessionState
// trace is finalized.
std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
- // The memory dump config, copied at the time when the tracing session was
- // started.
- TraceConfig::MemoryDumpConfig memory_dump_config_;
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes_;
+
+ uint32_t heap_profiler_breakdown_threshold_bytes_;
};
} // namespace trace_event
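
For example, a background tracing session could be restricted to background
dumps only (sketch; |session_state| is an assumed
scoped_refptr<MemoryDumpSessionState>):

  session_state->SetAllowedDumpModes({MemoryDumpLevelOfDetail::BACKGROUND});
  session_state->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND);  // true
  session_state->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED);    // false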
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index aed187fa1d..ae74322040 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -17,20 +17,26 @@ namespace {
// providers can be added here only if the background mode dump has very
// less performance and memory overhead.
const char* const kDumpProviderWhitelist[] = {
+ "android::ResourceManagerImpl",
"BlinkGC",
- "ChildDiscardableSharedMemoryManager",
+ "ClientDiscardableSharedMemoryManager",
"DOMStorage",
- "HostDiscardableSharedMemoryManager",
+ "DiscardableSharedMemoryManager",
"IndexedDBBackingStore",
"JavaHeap",
+ "LevelDB",
"LeveldbValueStore",
"Malloc",
+ "MemoryCache",
"PartitionAlloc",
"ProcessMemoryMetrics",
"Skia",
"Sql",
+ "URLRequestContext",
"V8Isolate",
"WinHeap",
+ "SyncDirectory",
+ "TabRestoreServiceHelper",
nullptr // End of list marker.
};
@@ -46,6 +52,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"java_heap",
"java_heap/allocated_objects",
"leveldb/index_db/0x?",
+ "leveldb/leveldb_proto/0x?",
"leveldb/value_store/Extensions.Database.Open.Settings/0x?",
"leveldb/value_store/Extensions.Database.Open.Rules/0x?",
"leveldb/value_store/Extensions.Database.Open.State/0x?",
@@ -55,14 +62,33 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"malloc",
"malloc/allocated_objects",
"malloc/metadata_fragmentation_caches",
+ "net/http_network_session_0x?",
+ "net/http_network_session_0x?/quic_stream_factory",
+ "net/http_network_session_0x?/socket_pool",
+ "net/http_network_session_0x?/spdy_session_pool",
+ "net/http_network_session_0x?/stream_factory",
+ "net/sdch_manager_0x?",
+ "net/ssl_session_cache",
+ "net/url_request_context_0x?",
+ "net/url_request_context_0x?/http_cache",
+ "net/url_request_context_0x?/http_network_session",
+ "net/url_request_context_0x?/sdch_manager",
+ "web_cache/Image_resources",
+ "web_cache/CSS stylesheet_resources",
+ "web_cache/Script_resources",
+ "web_cache/XSL stylesheet_resources",
+ "web_cache/Font_resources",
+ "web_cache/Other_resources",
"partition_alloc/allocated_objects",
"partition_alloc/partitions",
+ "partition_alloc/partitions/array_buffer",
"partition_alloc/partitions/buffer",
"partition_alloc/partitions/fast_malloc",
"partition_alloc/partitions/layout",
"skia/sk_glyph_cache",
"skia/sk_resource_cache",
"sqlite",
+ "ui/resource_manager_0x?",
"v8/isolate_0x?/heap_spaces",
"v8/isolate_0x?/heap_spaces/code_space",
"v8/isolate_0x?/heap_spaces/large_object_space",
@@ -74,6 +100,47 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"v8/isolate_0x?/zapped_for_debug",
"winheap",
"winheap/allocated_objects",
+ "sync/0x?/kernel",
+ "sync/0x?/store",
+ "sync/0x?/model_type/APP",
+ "sync/0x?/model_type/APP_LIST",
+ "sync/0x?/model_type/APP_NOTIFICATION",
+ "sync/0x?/model_type/APP_SETTING",
+ "sync/0x?/model_type/ARC_PACKAGE",
+ "sync/0x?/model_type/ARTICLE",
+ "sync/0x?/model_type/AUTOFILL",
+ "sync/0x?/model_type/AUTOFILL_PROFILE",
+ "sync/0x?/model_type/AUTOFILL_WALLET",
+ "sync/0x?/model_type/BOOKMARK",
+ "sync/0x?/model_type/DEVICE_INFO",
+ "sync/0x?/model_type/DICTIONARY",
+ "sync/0x?/model_type/EXPERIMENTS",
+ "sync/0x?/model_type/EXTENSION",
+ "sync/0x?/model_type/EXTENSION_SETTING",
+ "sync/0x?/model_type/FAVICON_IMAGE",
+ "sync/0x?/model_type/FAVICON_TRACKING",
+ "sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
+ "sync/0x?/model_type/MANAGED_USER",
+ "sync/0x?/model_type/MANAGED_USER_SETTING",
+ "sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
+ "sync/0x?/model_type/MANAGED_USER_WHITELIST",
+ "sync/0x?/model_type/NIGORI",
+ "sync/0x?/model_type/PASSWORD",
+ "sync/0x?/model_type/PREFERENCE",
+ "sync/0x?/model_type/PRINTER",
+ "sync/0x?/model_type/PRIORITY_PREFERENCE",
+ "sync/0x?/model_type/READING_LIST",
+ "sync/0x?/model_type/SEARCH_ENGINE",
+ "sync/0x?/model_type/SESSION",
+ "sync/0x?/model_type/SYNCED_NOTIFICATION",
+ "sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
+ "sync/0x?/model_type/THEME",
+ "sync/0x?/model_type/TYPED_URL",
+ "sync/0x?/model_type/WALLET_METADATA",
+ "sync/0x?/model_type/WIFI_CREDENTIAL",
+ "tab_restore/service_helper_0x?/entries",
+ "tab_restore/service_helper_0x?/entries/tab_0x?",
+ "tab_restore/service_helper_0x?/entries/window_0x?",
nullptr // End of list marker.
};
diff --git a/base/trace_event/memory_usage_estimator.cc b/base/trace_event/memory_usage_estimator.cc
new file mode 100644
index 0000000000..c769d5b6f1
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.cc
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+namespace base {
+namespace trace_event {
+
+template size_t EstimateMemoryUsage(const std::string&);
+template size_t EstimateMemoryUsage(const string16&);
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_usage_estimator.h b/base/trace_event/memory_usage_estimator.h
new file mode 100644
index 0000000000..db4ea6956c
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.h
@@ -0,0 +1,549 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <deque>
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stack>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/linked_list.h"
+#include "base/strings/string16.h"
+#include "base/template_util.h"
+
+// Composable memory usage estimators.
+//
+// This file defines a set of EstimateMemoryUsage(object) functions that
+// return the approximate memory usage of their argument.
+//
+// The ultimate goal is to make memory usage estimation for a class simply a
+// matter of aggregating EstimateMemoryUsage() results over all fields.
+//
+// That is achieved via composability: if EstimateMemoryUsage() is defined
+// for T then EstimateMemoryUsage() is also defined for any combination of
+// containers holding T (e.g. std::map<int, std::vector<T>>).
+//
+// There are two ways of defining EstimateMemoryUsage() for a type:
+//
+// 1. As a global function 'size_t EstimateMemoryUsage(T)' in the
+// base::trace_event namespace.
+//
+// 2. As a 'size_t T::EstimateMemoryUsage() const' method. In this case the
+// EstimateMemoryUsage(T) function in the base::trace_event namespace is
+// provided automatically.
+//
+// Here is an example implementation:
+//
+// size_t foo::bar::MyClass::EstimateMemoryUsage() const {
+// return base::trace_event::EstimateMemoryUsage(name_) +
+// base::trace_event::EstimateMemoryUsage(id_) +
+// base::trace_event::EstimateMemoryUsage(items_);
+// }
+//
+// The approach is simple: first call EstimateMemoryUsage() on all members,
+// then recursively fix compilation errors that are caused by types not
+// implementing EstimateMemoryUsage().
+
+namespace base {
+namespace trace_event {
+
+// Declarations
+
+// If T declares an 'EstimateMemoryUsage() const' member function, then a
+// global function EstimateMemoryUsage(T) is available, and it just calls
+// the member function.
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage());
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array);
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]);
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length);
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length);
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
+
+// Containers
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list);
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack);
+
+// TODO(dskiba):
+// std::forward_list
+
+// Definitions
+
+namespace internal {
+
+// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
+// (This is the default version, which is false.)
+template <class T, class X = void>
+struct HasEMU : std::false_type {};
+
+// This HasEMU specialization is only picked up if there exists function
+// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
+// achieve this don't work on MSVC.
+template <class T>
+struct HasEMU<
+ T,
+ typename std::enable_if<std::is_same<
+ size_t,
+ decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
+ : std::true_type {};
+
+// EMUCaller<T> does three things:
+// 1. Defines Call() method that calls EstimateMemoryUsage(T) if it's
+// available.
+// 2. If EstimateMemoryUsage(T) is not available, but T has trivial dtor
+// (i.e. it's POD, integer, pointer, enum, etc.) then it defines Call()
+// method that returns 0. This is useful for containers, which allocate
+// memory regardless of T (also for cases like std::map<int, MyClass>).
+// 3. Finally, if EstimateMemoryUsage(T) is not available, then it triggers
+// a static_assert with a helpful message. That cuts the number of errors
+// considerably - if you just call EstimateMemoryUsage(T) but it's not
+// available for T, then the compiler will helpfully list *all* possible
+// variants of it, with an explanation for each.
+template <class T, class X = void>
+struct EMUCaller {
+ // std::is_same<> below makes the static_assert depend on T, in order to
+ // prevent it from firing regardless of instantiation.
+ static_assert(std::is_same<T, std::false_type>::value,
+ "Neither global function 'size_t EstimateMemoryUsage(T)' "
+ "nor member function 'size_t T::EstimateMemoryUsage() const' "
+ "is defined for the type.");
+
+ static size_t Call(const T&) { return 0; }
+};
+
+template <class T>
+struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
+ static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
+};
+
+template <class T>
+struct EMUCaller<
+ T,
+ typename std::enable_if<!HasEMU<T>::value &&
+ is_trivially_destructible<T>::value>::type> {
+ static size_t Call(const T&) { return 0; }
+};
+
+// Returns reference to the underlying container of a container adapter.
+// Works for std::stack, std::queue and std::priority_queue.
+template <class A>
+const typename A::container_type& GetUnderlyingContainer(const A& adapter) {
+ struct ExposedAdapter : A {
+ using A::c;
+ };
+ return adapter.*&ExposedAdapter::c;
+}
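+// The |adapter.*&ExposedAdapter::c| expression above forms a pointer to the
+// protected member |c| through a derived class, which lets us read the
+// underlying container without requiring friendship from the adapter.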
+
+} // namespace internal
+
+// Proxy that deduces T and calls EMUCaller<T>.
+// To be used by EstimateMemoryUsage() implementations for containers.
+template <class T>
+size_t EstimateItemMemoryUsage(const T& value) {
+ return internal::EMUCaller<T>::Call(value);
+}
+
+template <class I>
+size_t EstimateIterableMemoryUsage(const I& iterable) {
+ size_t memory_usage = 0;
+ for (const auto& item : iterable) {
+ memory_usage += EstimateItemMemoryUsage(item);
+ }
+ return memory_usage;
+}
+
+// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage()) {
+ static_assert(
+ std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
+ "'T::EstimateMemoryUsage() const' must return size_t.");
+ return object.EstimateMemoryUsage();
+}
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
+ using string_type = std::basic_string<C, T, A>;
+ using value_type = typename string_type::value_type;
+ // C++11 doesn't leave much room for implementors - std::string can
+ // use short string optimization, but that's about it. We detect SSO
+ // by checking that c_str() points inside |string|.
+ const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
+ const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
+ if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
+ // SSO string
+ return 0;
+ }
+ return (string.capacity() + 1) * sizeof(value_type);
+}
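+// E.g. on a 64-bit libc++ build a 5-char std::string fits the inline SSO
+// buffer and reports 0, while a string with capacity() == 100 reports
+// (100 + 1) * sizeof(char) = 101 bytes.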
+
+// Use explicit instantiations from the .cc file (reduces bloat).
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length) {
+ size_t memory_usage = sizeof(T) * array_length;
+ for (size_t i = 0; i != array_length; ++i) {
+ memory_usage += EstimateItemMemoryUsage(array[i]);
+ }
+ return memory_usage;
+}
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
+ return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
+}
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length) {
+ return EstimateMemoryUsage(array.get(), array_length);
+}
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
+ auto use_count = ptr.use_count();
+ if (use_count == 0) {
+ return 0;
+ }
+ // Model shared_ptr after libc++,
+ // see __shared_ptr_pointer from include/memory
+ struct SharedPointer {
+ void* vtbl;
+ long shared_owners;
+ long shared_weak_owners;
+ T* value;
+ };
+ // If an object of size S is shared N > S times, we prefer to (potentially)
+ // overestimate rather than return 0.
+ return sizeof(SharedPointer) +
+ (EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
+}
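+// E.g. an object estimated at 100 bytes and shared 3 times is attributed
+// (100 + 2) / 3 = 34 bytes per shared_ptr, plus the control block size.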
+
+// std::pair
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
+ return EstimateItemMemoryUsage(pair.first) +
+ EstimateItemMemoryUsage(pair.second);
+}
+
+// std::vector
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
+ return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
+}
+
+// std::list
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list) {
+ using value_type = typename std::list<T, A>::value_type;
+ struct Node {
+ Node* prev;
+ Node* next;
+ value_type value;
+ };
+ return sizeof(Node) * list.size() +
+ EstimateIterableMemoryUsage(list);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list) {
+ size_t memory_usage = 0u;
+ for (base::LinkNode<T>* node = list.head(); node != list.end();
+ node = node->next()) {
+ // Since we increment by calling node = node->next() we know that node
+ // isn't nullptr.
+ memory_usage += EstimateMemoryUsage(*node->value()) + sizeof(T);
+ }
+ return memory_usage;
+}
+
+// Tree containers
+
+template <class V>
+size_t EstimateTreeMemoryUsage(size_t size) {
+ // Tree containers are modeled after libc++
+ // (__tree_node from include/__tree)
+ struct Node {
+ Node* left;
+ Node* right;
+ Node* parent;
+ bool is_black;
+ V value;
+ };
+ return sizeof(Node) * size;
+}
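+// E.g. on a 64-bit build a std::set<int> node is modeled as three pointers,
+// a bool and an int, so sizeof(Node) == 32 bytes per element after padding.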
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
+ using value_type = typename std::set<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
+ using value_type = typename std::multiset<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
+ using value_type = typename std::map<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
+ using value_type = typename std::multimap<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+// HashMap containers
+
+namespace internal {
+
+// While the hashtable container model doesn't depend on the STL
+// implementation, one detail still crept in: bucket_count. It's used in size
+// estimation, but its value after inserting N items is not predictable.
+// This function is specialized by unittests to return a constant value, thus
+// excluding bucket_count from testing.
+template <class V>
+size_t HashMapBucketCountForTesting(size_t bucket_count) {
+ return bucket_count;
+}
+
+} // namespace internal
+
+template <class V>
+size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
+ // Hashtable containers are modeled after libc++
+ // (__hash_node from include/__hash_table)
+ struct Node {
+ void* next;
+ size_t hash;
+ V value;
+ };
+ using Bucket = void*;
+ bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
+ return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
+}
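+// E.g. on a 64-bit build, 1000 int keys in 1031 buckets are modeled as
+// 8 * 1031 + 24 * 1000 = 32248 bytes (each node: pointer + hash + padded int).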
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
+ using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
+ using value_type =
+ typename std::unordered_multimap<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+// std::deque
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque) {
+// Since std::deque implementations are wildly different
+// (see crbug.com/674287), we can't have one "good enough"
+// way to estimate.
+
+// kBlockSize - minimum size of a block, in bytes
+// kMinBlockLength - number of elements in a block
+// if sizeof(T) > kBlockSize
+#if defined(_LIBCPP_VERSION)
+ size_t kBlockSize = 4096;
+ size_t kMinBlockLength = 16;
+#elif defined(__GLIBCXX__)
+ size_t kBlockSize = 512;
+ size_t kMinBlockLength = 1;
+#elif defined(_MSC_VER)
+ size_t kBlockSize = 16;
+ size_t kMinBlockLength = 1;
+#else
+ size_t kBlockSize = 0;
+ size_t kMinBlockLength = 1;
+#endif
+
+ size_t block_length =
+ (sizeof(T) > kBlockSize) ? kMinBlockLength : kBlockSize / sizeof(T);
+
+ size_t blocks = (deque.size() + block_length - 1) / block_length;
+
+#if defined(__GLIBCXX__)
+ // libstdc++: deque always has at least one block
+ if (!blocks)
+ blocks = 1;
+#endif
+
+#if defined(_LIBCPP_VERSION)
+ // libc++: deque keeps at most two blocks when it shrinks,
+ // so even if the size is zero, deque might be holding up
+ // to 4096 * 2 bytes. One way to know whether deque has
+ // ever allocated (and hence has 1 or 2 blocks) is to check
+ // iterator's pointer. Non-zero value means that deque has
+ // at least one block.
+ if (!blocks && deque.begin().operator->())
+ blocks = 1;
+#endif
+
+ return (blocks * block_length * sizeof(T)) +
+ EstimateIterableMemoryUsage(deque);
+}
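+// E.g. under libc++ (kBlockSize = 4096) a deque of 1000 16-byte elements maps
+// to ceil(1000 / 256) = 4 blocks, i.e. 4 * 256 * 16 = 16384 bytes of storage.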
+
+// Container adapters
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(stack));
+}
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
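
As a concrete illustration of the composability described in the header
comment (hypothetical type; only the aggregation pattern is prescribed by the
header):

  struct Entry {
    std::string name;
    std::vector<int> values;

    size_t EstimateMemoryUsage() const {
      return base::trace_event::EstimateMemoryUsage(name) +
             base::trace_event::EstimateMemoryUsage(values);
    }
  };

  // std::map<int, Entry>, std::vector<Entry>, etc. now work with
  // EstimateMemoryUsage() automatically.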
diff --git a/base/trace_event/memory_usage_estimator_unittest.cc b/base/trace_event/memory_usage_estimator_unittest.cc
new file mode 100644
index 0000000000..80237c0192
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+#include <stdlib.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(ARCH_CPU_64_BITS)
+#define EXPECT_EQ_32_64(_, e, a) EXPECT_EQ(e, a)
+#else
+#define EXPECT_EQ_32_64(e, _, a) EXPECT_EQ(e, a)
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Test class with predictable memory usage.
+class Data {
+ public:
+ explicit Data(size_t size = 17): size_(size) {
+ }
+
+ size_t size() const { return size_; }
+
+ size_t EstimateMemoryUsage() const {
+ return size_;
+ }
+
+ bool operator < (const Data& other) const {
+ return size_ < other.size_;
+ }
+ bool operator == (const Data& other) const {
+ return size_ == other.size_;
+ }
+
+ struct Hasher {
+ size_t operator () (const Data& data) const {
+ return data.size();
+ }
+ };
+
+ private:
+ size_t size_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This kills the variance of bucket_count across STL implementations.
+template <>
+size_t HashMapBucketCountForTesting<Data>(size_t) {
+ return 10;
+}
+template <>
+size_t HashMapBucketCountForTesting<std::pair<const Data, short>>(size_t) {
+ return 10;
+}
+
+} // namespace internal
+
+TEST(EstimateMemoryUsageTest, String) {
+ std::string string(777, 'a');
+ EXPECT_EQ(string.capacity() + 1, EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, String16) {
+ string16 string(777, 'a');
+ EXPECT_EQ(sizeof(char16) * (string.capacity() + 1),
+ EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, Arrays) {
+ // std::array
+ {
+ std::array<Data, 10> array;
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // T[N]
+ {
+ Data array[10];
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // C array
+ {
+ struct Item {
+ char payload[10];
+ };
+ Item* array = new Item[7];
+ EXPECT_EQ(70u, EstimateMemoryUsage(array, 7));
+ delete[] array;
+ }
+}
+
+TEST(EstimateMemoryUsageTest, UniquePtr) {
+ // Empty
+ {
+ std::unique_ptr<Data> ptr;
+ EXPECT_EQ(0u, EstimateMemoryUsage(ptr));
+ }
+
+ // Not empty
+ {
+ std::unique_ptr<Data> ptr(new Data());
+ EXPECT_EQ_32_64(21u, 25u, EstimateMemoryUsage(ptr));
+ }
+
+ // With a pointer
+ {
+ std::unique_ptr<Data*> ptr(new Data*());
+ EXPECT_EQ(sizeof(void*), EstimateMemoryUsage(ptr));
+ }
+
+ // With an array
+ {
+ struct Item {
+ uint32_t payload[10];
+ };
+ std::unique_ptr<Item[]> ptr(new Item[7]);
+ EXPECT_EQ(280u, EstimateMemoryUsage(ptr, 7));
+ }
+}
+
+TEST(EstimateMemoryUsageTest, Vector) {
+ std::vector<Data> vector;
+ vector.reserve(1000);
+
+ // For an empty vector we should return the memory usage of its buffer.
+ size_t capacity = vector.capacity();
+ size_t expected_size = capacity * sizeof(Data);
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+
+ // If the vector is not empty, the estimate should also include the memory
+ // usage of all elements.
+ for (size_t i = 0; i != capacity / 2; ++i) {
+ vector.push_back(Data(i));
+ expected_size += EstimateMemoryUsage(vector.back());
+ }
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+}
+
+TEST(EstimateMemoryUsageTest, List) {
+ struct POD {
+ short data;
+ };
+ std::list<POD> list;
+ for (int i = 0; i != 1000; ++i) {
+ list.push_back(POD());
+ }
+ EXPECT_EQ_32_64(12000u, 24000u, EstimateMemoryUsage(list));
+}
+
+TEST(EstimateMemoryUsageTest, Set) {
+ std::set<std::pair<int, Data>> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert({i, Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, MultiSet) {
+ std::multiset<bool> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert((i & 1) != 0);
+ }
+ EXPECT_EQ_32_64(16000u, 32000u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, Map) {
+ std::map<Data, int> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), i});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, MultiMap) {
+ std::multimap<char, Data> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({static_cast<char>(i), Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedSet) {
+ std::unordered_set<Data, Data::Hasher> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(511540u, 523580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiSet) {
+ std::unordered_multiset<Data, Data::Hasher> set;
+ for (int i = 0; i != 500; ++i) {
+ set.insert(Data(i));
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(261540u, 273580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMap) {
+ std::unordered_map<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiMap) {
+ std::unordered_multimap<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, Deque) {
+ std::deque<Data> deque;
+
+ // Pick a large value so that platform-specific accounting
+ // for deque's blocks is small compared to usage of all items.
+ constexpr size_t kDataSize = 100000;
+ for (int i = 0; i != 1500; ++i) {
+ deque.push_back(Data(kDataSize));
+ }
+
+ // Compare against a reasonable minimum (i.e. no overhead).
+ size_t min_expected_usage = deque.size() * (sizeof(Data) + kDataSize);
+ EXPECT_LE(min_expected_usage, EstimateMemoryUsage(deque));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 826989237b..63d1340e42 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -18,7 +18,7 @@
#include "build/build_config.h"
#if defined(OS_IOS)
-#include <sys/sysctl.h>
+#include <mach/vm_page_size.h>
#endif
#if defined(OS_POSIX)
@@ -57,19 +57,13 @@ bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
// On iOS, getpagesize() returns the user page sizes, but for allocating
- // arrays for mincore(), kernel page sizes is needed. sysctlbyname() should
- // be used for this. Refer to crbug.com/542671 and Apple rdar://23651782
- int pagesize;
- size_t pagesize_len;
- int status = sysctlbyname("vm.pagesize", NULL, &pagesize_len, nullptr, 0);
- if (!status && pagesize_len == sizeof(pagesize)) {
- if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
- return pagesize;
- }
- LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
- // Falls back to getpagesize() although it may be wrong in certain cases.
-#endif // defined(OS_IOS)
+ // arrays for mincore(), the kernel page size is needed. Use vm_kernel_page_size
+ // as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
+ // Refer to http://crbug.com/542671 and Apple rdar://23651782
+ return vm_kernel_page_size;
+#else
return base::GetPageSize();
+#endif // defined(OS_IOS)
}
// static
@@ -164,14 +158,14 @@ ProcessMemoryDump::~ProcessMemoryDump() {}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name) {
return AddAllocatorDumpInternal(
- WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
+ MakeUnique<MemoryAllocatorDump>(absolute_name, this));
}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name,
const MemoryAllocatorDumpGuid& guid) {
return AddAllocatorDumpInternal(
- WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
+ MakeUnique<MemoryAllocatorDump>(absolute_name, this, guid));
}
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index d020c7d652..6f8d167273 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -31,7 +31,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class MemoryDumpSessionState;
class TracedValue;
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index d40f4302fe..e26e9fd28f 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -168,7 +168,8 @@ class TraceBufferVector : public TraceBuffer {
// have to add the metadata events and flush thread-local buffers even if
// the buffer is full.
*index = chunks_.size();
- chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
+ // Put nullptr in the slot of an in-flight chunk.
+ chunks_.push_back(nullptr);
++in_flight_chunk_count_;
// + 1 because zero chunk_seq is not allowed.
return std::unique_ptr<TraceBufferChunk>(
@@ -181,7 +182,7 @@ class TraceBufferVector : public TraceBuffer {
DCHECK_LT(index, chunks_.size());
DCHECK(!chunks_[index]);
--in_flight_chunk_count_;
- chunks_[index] = chunk.release();
+ chunks_[index] = std::move(chunk);
}
bool IsFull() const override { return chunks_.size() >= max_chunks_; }
@@ -198,7 +199,7 @@ class TraceBufferVector : public TraceBuffer {
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
return NULL;
return chunk->GetEventAt(handle.event_index);
@@ -207,7 +208,7 @@ class TraceBufferVector : public TraceBuffer {
const TraceBufferChunk* NextChunk() override {
while (current_iteration_index_ < chunks_.size()) {
// Skip in-flight chunks.
- const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+ const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
if (chunk)
return chunk;
}
@@ -223,7 +224,7 @@ class TraceBufferVector : public TraceBuffer {
overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
chunks_ptr_vector_resident_size);
for (size_t i = 0; i < chunks_.size(); ++i) {
- TraceBufferChunk* chunk = chunks_[i];
+ TraceBufferChunk* chunk = chunks_[i].get();
// Skip the in-flight (nullptr) chunks. They will be accounted by the
// per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
if (chunk)
@@ -235,7 +236,7 @@ class TraceBufferVector : public TraceBuffer {
size_t in_flight_chunk_count_;
size_t current_iteration_index_;
size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
+ std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
diff --git a/base/trace_event/trace_category.h b/base/trace_event/trace_category.h
new file mode 100644
index 0000000000..5a7915ac03
--- /dev/null
+++ b/base/trace_event/trace_category.h
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace trace_event {
+
+// Captures the state of an invidivual trace category. Nothing except tracing
+// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
+struct TraceCategory {
+ // The TRACE_EVENT macros should only use this value as a bool.
+ // These enum values are effectively a public API and third_party projects
+ // depend on their value. Hence, never remove or recycle existing bits, unless
+ // you are sure that all the third-party projects that depend on this have
+ // been updated.
+ enum StateFlags : uint8_t {
+ ENABLED_FOR_RECORDING = 1 << 0,
+
+ // Not used anymore.
+ DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
+ DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+
+ ENABLED_FOR_ETW_EXPORT = 1 << 3,
+ ENABLED_FOR_FILTERING = 1 << 4
+ };
+
+ static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
+ static_assert(
+ offsetof(TraceCategory, state_) == 0,
+ "|state_| must be the first field of the TraceCategory class.");
+ return reinterpret_cast<const TraceCategory*>(state_ptr);
+ }
+
+ bool is_valid() const { return name_ != nullptr; }
+ void set_name(const char* name) { name_ = name; }
+ const char* name() const {
+ DCHECK(is_valid());
+ return name_;
+ }
+
+ // TODO(primiano): This is an intermediate solution to deal with the fact that
+ // today TRACE_EVENT* macros cache the state ptr. They should just cache the
+ // full TraceCategory ptr, which is immutable, and use these helper functions
+ // here. That will get rid of the need for this awkward ptr getter completely.
+ const uint8_t* state_ptr() const {
+ return const_cast<const uint8_t*>(&state_);
+ }
+
+ uint8_t state() const {
+ return *const_cast<volatile const uint8_t*>(&state_);
+ }
+
+ bool is_enabled() const { return state() != 0; }
+
+ void set_state(uint8_t state) {
+ *const_cast<volatile uint8_t*>(&state_) = state;
+ }
+
+ void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
+ void set_state_flag(StateFlags flag) { set_state(state() | flag); }
+
+ uint32_t enabled_filters() const {
+ return *const_cast<volatile const uint32_t*>(&enabled_filters_);
+ }
+
+ bool is_filter_enabled(size_t index) const {
+ DCHECK(index < sizeof(enabled_filters_) * 8);
+ return (enabled_filters() & (1 << index)) != 0;
+ }
+
+ void set_enabled_filters(uint32_t enabled_filters) {
+ *const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
+ }
+
+ void reset_for_testing() {
+ set_state(0);
+ set_enabled_filters(0);
+ }
+
+ // These fields should not be accessed directly, not even by tracing code.
+ // The only reason why these are not private is because it makes it impossible
+ // to have a global array of TraceCategory in category_registry.cc without
+ // creating initializers. See discussion on goo.gl/qhZN94 and
+ // crbug.com/{660967,660828}.
+
+ // The enabled state. TRACE_EVENT* macros will capture events if any of the
+ // flags here are set. Since TRACE_EVENTx macros are used in a lot of
+ // fast-paths, accesses to this field are non-barriered and racy by design.
+ // This field is mutated when starting/stopping tracing and we don't care
+ // about missing some events.
+ uint8_t state_;
+
+ // When ENABLED_FOR_FILTERING is set, this contains a bitmap of the
+ // corresponding filters (see event_filters.h).
+ uint32_t enabled_filters_;
+
+ // TraceCategory group names are long-lived static strings.
+ const char* name_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CATEGORY_H_
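
A minimal sketch of how tracing internals flip the state flags (standalone
illustration; real instances live in the category registry):

  base::trace_event::TraceCategory category{};
  category.set_name("toplevel");
  category.set_state_flag(
      base::trace_event::TraceCategory::ENABLED_FOR_RECORDING);
  bool enabled = category.is_enabled();  // true
  category.clear_state_flag(
      base::trace_event::TraceCategory::ENABLED_FOR_RECORDING);
  enabled = category.is_enabled();       // false again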
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index b343ea00bc..36de107bf8 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -30,13 +30,11 @@ const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
-const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";
const char kEnableArgumentFilter[] = "enable-argument-filter";
// String parameters that can be used to parse the trace config string.
const char kRecordModeParam[] = "record_mode";
-const char kEnableSamplingParam[] = "enable_sampling";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
const char kIncludedCategoriesParam[] = "included_categories";
@@ -50,24 +48,32 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
const char kMemoryDumpConfigParam[] = "memory_dump_config";
const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
-const char kPeriodicIntervalParam[] = "periodic_interval_ms";
-const char kModeParam[] = "mode";
+const char kTriggerModeParam[] = "mode";
+const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
+const char kTriggerTypeParam[] = "type";
+const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
const char kHeapProfilerOptions[] = "heap_profiler_options";
const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
+// String parameters used to parse category event filters.
+const char kEventFiltersParam[] = "event_filters";
+const char kFilterPredicateParam[] = "filter_predicate";
+const char kFilterArgsParam[] = "filter_args";
+
// Default configuration of memory dumps.
const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
- 2000, // periodic_interval_ms
- MemoryDumpLevelOfDetail::DETAILED};
+ 2000, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::DETAILED, MemoryDumpType::PERIODIC_INTERVAL};
const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
- 250, // periodic_interval_ms
- MemoryDumpLevelOfDetail::LIGHT};
+ 250, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::LIGHT, MemoryDumpType::PERIODIC_INTERVAL};
class ConvertableTraceConfigToTraceFormat
: public base::trace_event::ConvertableToTraceFormat {
public:
explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
: trace_config_(trace_config) {}
+
~ConvertableTraceConfigToTraceFormat() override {}
void AppendAsTraceFormat(std::string* out) const override {
@@ -115,6 +121,94 @@ void TraceConfig::MemoryDumpConfig::Clear() {
heap_profiler_options.Clear();
}
+void TraceConfig::MemoryDumpConfig::Merge(
+ const TraceConfig::MemoryDumpConfig& config) {
+ triggers.insert(triggers.end(), config.triggers.begin(),
+ config.triggers.end());
+ allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
+ config.allowed_dump_modes.end());
+ heap_profiler_options.breakdown_threshold_bytes =
+ std::min(heap_profiler_options.breakdown_threshold_bytes,
+ config.heap_profiler_options.breakdown_threshold_bytes);
+}
+
+TraceConfig::EventFilterConfig::EventFilterConfig(
+ const std::string& predicate_name)
+ : predicate_name_(predicate_name) {}
+
+TraceConfig::EventFilterConfig::~EventFilterConfig() {}
+
+TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
+ *this = tc;
+}
+
+TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
+ const TraceConfig::EventFilterConfig& rhs) {
+ if (this == &rhs)
+ return *this;
+
+ predicate_name_ = rhs.predicate_name_;
+ included_categories_ = rhs.included_categories_;
+ excluded_categories_ = rhs.excluded_categories_;
+ if (rhs.args_)
+ args_ = rhs.args_->CreateDeepCopy();
+
+ return *this;
+}
+
+void TraceConfig::EventFilterConfig::AddIncludedCategory(
+ const std::string& category) {
+ included_categories_.push_back(category);
+}
+
+void TraceConfig::EventFilterConfig::AddExcludedCategory(
+ const std::string& category) {
+ excluded_categories_.push_back(category);
+}
+
+void TraceConfig::EventFilterConfig::SetArgs(
+ std::unique_ptr<base::DictionaryValue> args) {
+ args_ = std::move(args);
+}
+
+bool TraceConfig::EventFilterConfig::GetArgAsSet(
+ const char* key,
+ std::unordered_set<std::string>* out_set) const {
+ const ListValue* list = nullptr;
+ if (!args_->GetList(key, &list))
+ return false;
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ std::string value;
+ if (list->GetString(i, &value))
+ out_set->insert(value);
+ }
+ return true;
+}
+
+bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
+ const char* category_group_name) const {
+ CStringTokenizer category_group_tokens(
+ category_group_name, category_group_name + strlen(category_group_name),
+ ",");
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+
+ for (const auto& excluded_category : excluded_categories_) {
+ if (base::MatchPattern(category_group_token, excluded_category)) {
+ return false;
+ }
+ }
+
+ for (const auto& included_category : included_categories_) {
+ if (base::MatchPattern(category_group_token, included_category)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
TraceConfig::TraceConfig() {
InitializeDefault();
}
@@ -159,14 +253,14 @@ TraceConfig::TraceConfig(StringPiece config_string) {
TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
- enable_sampling_(tc.enable_sampling_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
memory_dump_config_(tc.memory_dump_config_),
included_categories_(tc.included_categories_),
disabled_categories_(tc.disabled_categories_),
excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_) {}
+ synthetic_delays_(tc.synthetic_delays_),
+ event_filters_(tc.event_filters_) {}
TraceConfig::~TraceConfig() {
}
@@ -176,7 +270,6 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
return *this;
record_mode_ = rhs.record_mode_;
- enable_sampling_ = rhs.enable_sampling_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
@@ -184,6 +277,7 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
disabled_categories_ = rhs.disabled_categories_;
excluded_categories_ = rhs.excluded_categories_;
synthetic_delays_ = rhs.synthetic_delays_;
+ event_filters_ = rhs.event_filters_;
return *this;
}
@@ -200,7 +294,7 @@ std::string TraceConfig::ToString() const {
std::unique_ptr<ConvertableToTraceFormat>
TraceConfig::AsConvertableToTraceFormat() const {
- return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
+ return MakeUnique<ConvertableTraceConfigToTraceFormat>(*this);
}
std::string TraceConfig::ToCategoryFilterString() const {
@@ -271,7 +365,6 @@ bool TraceConfig::IsCategoryGroupEnabled(
void TraceConfig::Merge(const TraceConfig& config) {
if (record_mode_ != config.record_mode_
- || enable_sampling_ != config.enable_sampling_
|| enable_systrace_ != config.enable_systrace_
|| enable_argument_filter_ != config.enable_argument_filter_) {
DLOG(ERROR) << "Attempting to merge trace config with a different "
@@ -289,9 +382,7 @@ void TraceConfig::Merge(const TraceConfig& config) {
included_categories_.clear();
}
- memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
- config.memory_dump_config_.triggers.begin(),
- config.memory_dump_config_.triggers.end());
+ memory_dump_config_.Merge(config.memory_dump_config_);
disabled_categories_.insert(disabled_categories_.end(),
config.disabled_categories_.begin(),
@@ -302,11 +393,12 @@ void TraceConfig::Merge(const TraceConfig& config) {
synthetic_delays_.insert(synthetic_delays_.end(),
config.synthetic_delays_.begin(),
config.synthetic_delays_.end());
+ event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
+ config.event_filters().end());
}
void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
included_categories_.clear();
@@ -314,11 +406,11 @@ void TraceConfig::Clear() {
excluded_categories_.clear();
synthetic_delays_.clear();
memory_dump_config_.Clear();
+ event_filters_.clear();
}
void TraceConfig::InitializeDefault() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
}
@@ -339,7 +431,6 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
}
bool val;
- enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
@@ -352,6 +443,10 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
if (dict.GetList(kSyntheticDelaysParam, &category_list))
SetSyntheticDelaysFromList(*category_list);
+ const base::ListValue* category_event_filters = nullptr;
+ if (dict.GetList(kEventFiltersParam, &category_event_filters))
+ SetEventFiltersFromConfigList(*category_event_filters);
+
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
// If dump triggers are not set, the client is using the legacy format with
// just the category enabled. So, use the default periodic dump config.
@@ -406,7 +501,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
if (!trace_options_string.empty()) {
@@ -421,8 +515,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
record_mode_ = ECHO_TO_CONSOLE;
} else if (token == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
- } else if (token == kEnableSampling) {
- enable_sampling_ = true;
} else if (token == kEnableSystrace) {
enable_systrace_ = true;
} else if (token == kEnableArgumentFilter) {
@@ -516,17 +608,26 @@ void TraceConfig::SetMemoryDumpConfigFromConfigDict(
if (!trigger_list->GetDictionary(i, &trigger))
continue;
+ MemoryDumpConfig::Trigger dump_config;
int interval = 0;
- if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
- continue;
-
+ if (!trigger->GetInteger(kMinTimeBetweenDumps, &interval)) {
+ // If "min_time_between_dumps_ms" param was not given, then the trace
+ // config uses old format where only periodic dumps are supported.
+ trigger->GetInteger(kPeriodicIntervalLegacyParam, &interval);
+ dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
+ } else {
+ std::string trigger_type_str;
+ trigger->GetString(kTriggerTypeParam, &trigger_type_str);
+ dump_config.trigger_type = StringToMemoryDumpType(trigger_type_str);
+ }
DCHECK_GT(interval, 0);
- MemoryDumpConfig::Trigger dump_config;
- dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+ dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(interval);
+
std::string level_of_detail_str;
- trigger->GetString(kModeParam, &level_of_detail_str);
+ trigger->GetString(kTriggerModeParam, &level_of_detail_str);
dump_config.level_of_detail =
StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+
memory_dump_config_.triggers.push_back(dump_config);
}
}
@@ -555,6 +656,50 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
+void TraceConfig::SetEventFiltersFromConfigList(
+ const base::ListValue& category_event_filters) {
+ event_filters_.clear();
+
+ for (size_t event_filter_index = 0;
+ event_filter_index < category_event_filters.GetSize();
+ ++event_filter_index) {
+ const base::DictionaryValue* event_filter = nullptr;
+ if (!category_event_filters.GetDictionary(event_filter_index,
+ &event_filter))
+ continue;
+
+ std::string predicate_name;
+ CHECK(event_filter->GetString(kFilterPredicateParam, &predicate_name))
+ << "Invalid predicate name in category event filter.";
+
+ EventFilterConfig new_config(predicate_name);
+ const base::ListValue* included_list = nullptr;
+ CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
+ << "Missing included_categories in category event filter.";
+
+ for (size_t i = 0; i < included_list->GetSize(); ++i) {
+ std::string category;
+ if (included_list->GetString(i, &category))
+ new_config.AddIncludedCategory(category);
+ }
+
+ const base::ListValue* excluded_list = nullptr;
+ if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
+ for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
+ std::string category;
+ if (excluded_list->GetString(i, &category))
+ new_config.AddExcludedCategory(category);
+ }
+ }
+
+ const base::DictionaryValue* args_dict = nullptr;
+ if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+ new_config.SetArgs(args_dict->CreateDeepCopy());
+
+ event_filters_.push_back(new_config);
+ }
+}
+
std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto dict = MakeUnique<DictionaryValue>();
switch (record_mode_) {
@@ -574,7 +719,6 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
NOTREACHED();
}
- dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
@@ -586,6 +730,41 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+ if (!event_filters_.empty()) {
+ std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
+ for (const EventFilterConfig& filter : event_filters_) {
+ std::unique_ptr<base::DictionaryValue> filter_dict(
+ new base::DictionaryValue());
+ filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
+
+ std::unique_ptr<base::ListValue> included_categories_list(
+ new base::ListValue());
+ for (const std::string& included_category : filter.included_categories())
+ included_categories_list->AppendString(included_category);
+
+ filter_dict->Set(kIncludedCategoriesParam,
+ std::move(included_categories_list));
+
+ if (!filter.excluded_categories().empty()) {
+ std::unique_ptr<base::ListValue> excluded_categories_list(
+ new base::ListValue());
+ for (const std::string& excluded_category :
+ filter.excluded_categories())
+ excluded_categories_list->AppendString(excluded_category);
+
+ filter_dict->Set(kExcludedCategoriesParam,
+ std::move(excluded_categories_list));
+ }
+
+ if (filter.filter_args())
+ filter_dict->Set(kFilterArgsParam,
+ filter.filter_args()->CreateDeepCopy());
+
+ filter_list->Append(std::move(filter_dict));
+ }
+ dict->Set(kEventFiltersParam, std::move(filter_list));
+ }
+
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
auto allowed_modes = MakeUnique<ListValue>();
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
@@ -597,10 +776,14 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto triggers_list = MakeUnique<ListValue>();
for (const auto& config : memory_dump_config_.triggers) {
auto trigger_dict = MakeUnique<DictionaryValue>();
- trigger_dict->SetInteger(kPeriodicIntervalParam,
- static_cast<int>(config.periodic_interval_ms));
+ trigger_dict->SetString(kTriggerTypeParam,
+ MemoryDumpTypeToString(config.trigger_type));
+ trigger_dict->SetInteger(
+ kMinTimeBetweenDumps,
+ static_cast<int>(config.min_time_between_dumps_ms));
trigger_dict->SetString(
- kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
+ kTriggerModeParam,
+ MemoryDumpLevelOfDetailToString(config.level_of_detail));
triggers_list->Append(std::move(trigger_dict));
}
@@ -639,8 +822,6 @@ std::string TraceConfig::ToTraceOptionsString() const {
default:
NOTREACHED();
}
- if (enable_sampling_)
- ret = ret + "," + kEnableSampling;
if (enable_systrace_)
ret = ret + "," + kEnableSystrace;
if (enable_argument_filter_)
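
The new "event_filters" parameter parsed above takes a list of dictionaries,
each naming a filter predicate plus included/excluded category patterns and
optional filter arguments. A minimal sketch of how such a config could be
built and queried (the category and event names are illustrative, not from
this patch; only APIs introduced above are used):

  base::trace_event::TraceConfig config(
      "{\"event_filters\":[{"
      "\"filter_predicate\":\"event_whitelist_predicate\","
      "\"included_categories\":[\"*\"],"
      "\"excluded_categories\":[\"unfiltered_cat\"],"
      "\"filter_args\":{\"event_name_whitelist\":[\"MyEvent\"]}"
      "}]}");
  const auto& filters = config.event_filters();
  // filters[0].predicate_name() == "event_whitelist_predicate".
  // "*" matches and "unfiltered_cat" does not, so this returns true:
  filters[0].IsCategoryGroupEnabled("toplevel");
  std::unordered_set<std::string> names;
  filters[0].GetArgAsSet("event_name_whitelist", &names);  // {"MyEvent"}
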
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 91d6f1f3bd..717c261316 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -7,8 +7,10 @@
#include <stdint.h>
+#include <memory>
#include <set>
#include <string>
+#include <unordered_set>
#include <vector>
#include "base/base_export.h"
@@ -51,8 +53,9 @@ class BASE_EXPORT TraceConfig {
// Specifies the triggers in the memory dump config.
struct Trigger {
- uint32_t periodic_interval_ms;
+ uint32_t min_time_between_dumps_ms;
MemoryDumpLevelOfDetail level_of_detail;
+ MemoryDumpType trigger_type;
};
// Specifies the configuration options for the heap profiler.
@@ -71,6 +74,8 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+ void Merge(const MemoryDumpConfig& config);
+
// Set of memory dump modes allowed for the tracing session. The explicitly
// triggered dumps will be successful only if the dump mode is allowed in
// the config.
@@ -80,6 +85,39 @@ class BASE_EXPORT TraceConfig {
HeapProfiler heap_profiler_options;
};
+ class BASE_EXPORT EventFilterConfig {
+ public:
+ EventFilterConfig(const std::string& predicate_name);
+ EventFilterConfig(const EventFilterConfig& tc);
+
+ ~EventFilterConfig();
+
+ EventFilterConfig& operator=(const EventFilterConfig& rhs);
+
+ void AddIncludedCategory(const std::string& category);
+ void AddExcludedCategory(const std::string& category);
+ void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
+
+ bool IsCategoryGroupEnabled(const char* category_group_name) const;
+
+ const std::string& predicate_name() const { return predicate_name_; }
+ base::DictionaryValue* filter_args() const { return args_.get(); }
+ const StringList& included_categories() const {
+ return included_categories_;
+ }
+ const StringList& excluded_categories() const {
+ return excluded_categories_;
+ }
+
+ private:
+ std::string predicate_name_;
+ StringList included_categories_;
+ StringList excluded_categories_;
+ std::unique_ptr<base::DictionaryValue> args_;
+ };
+ typedef std::vector<EventFilterConfig> EventFilters;
+
TraceConfig();
// Create TraceConfig object from category filter and trace options strings.
@@ -93,22 +131,22 @@ class BASE_EXPORT TraceConfig {
//
// |trace_options_string| is a comma-delimited list of trace options.
// Possible options are: "record-until-full", "record-continuously",
- // "record-as-much-as-possible", "trace-to-console", "enable-sampling",
- // "enable-systrace" and "enable-argument-filter".
+ // "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
+ // "enable-argument-filter".
// The first 4 options are trace recording modes and hence
// mutually exclusive. If more than one trace recording mode appears in the
// options_string, the last one takes precedence. If no trace
// recording mode is specified, the recording mode is RECORD_UNTIL_FULL.
//
// The trace option will first be reset to the default option
- // (record_mode set to RECORD_UNTIL_FULL, enable_sampling, enable_systrace,
- // and enable_argument_filter set to false) before options parsed from
+ // (record_mode set to RECORD_UNTIL_FULL, enable_systrace and
+ // enable_argument_filter set to false) before options parsed from
// |trace_options_string| are applied to it. If |trace_options_string| is
// invalid, the final state of trace options is undefined.
//
// Example: TraceConfig("test_MyTest*", "record-until-full");
// Example: TraceConfig("test_MyTest*,test_OtherStuff",
- // "record-continuously, enable-sampling");
+ // "record-continuously");
// Example: TraceConfig("-excluded_category1,-excluded_category2",
// "record-until-full, trace-to-console");
// would set ECHO_TO_CONSOLE as the recording mode.
@@ -138,7 +176,6 @@ class BASE_EXPORT TraceConfig {
// Example:
// {
// "record_mode": "record-continuously",
- // "enable_sampling": true,
// "enable_systrace": true,
// "enable_argument_filter": true,
// "included_categories": ["included",
@@ -174,12 +211,10 @@ class BASE_EXPORT TraceConfig {
const StringList& GetSyntheticDelayValues() const;
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
- bool IsSamplingEnabled() const { return enable_sampling_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
- void EnableSampling() { enable_sampling_ = true; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableArgumentFilter() { enable_argument_filter_ = true; }
@@ -196,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group) const;
+ bool IsCategoryGroupEnabled(const char* category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
@@ -210,6 +245,11 @@ class BASE_EXPORT TraceConfig {
return memory_dump_config_;
}
+ const EventFilters& event_filters() const { return event_filters_; }
+ void SetEventFilters(const EventFilters& filter_configs) {
+ event_filters_ = filter_configs;
+ }
+
private:
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -250,6 +290,7 @@ class BASE_EXPORT TraceConfig {
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
+ void SetEventFiltersFromConfigList(const base::ListValue& event_filters);
std::unique_ptr<DictionaryValue> ToDict() const;
std::string ToTraceOptionsString() const;
@@ -271,7 +312,6 @@ class BASE_EXPORT TraceConfig {
bool HasIncludedPatterns() const;
TraceRecordMode record_mode_;
- bool enable_sampling_ : 1;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
@@ -281,6 +321,7 @@ class BASE_EXPORT TraceConfig {
StringList disabled_categories_;
StringList excluded_categories_;
StringList synthetic_delays_;
+ EventFilters event_filters_;
};
} // namespace trace_event
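
MemoryDumpConfig::Merge(), declared above and defined in trace_config.cc,
combines two configs: triggers are concatenated, allowed dump modes are
unioned, and the heap profiler keeps the smaller breakdown threshold. A
minimal sketch of the observable behavior (assuming allowed_dump_modes is a
set of MemoryDumpLevelOfDetail, as the insert calls in the .cc suggest):

  TraceConfig::MemoryDumpConfig a;
  TraceConfig::MemoryDumpConfig b;
  a.heap_profiler_options.breakdown_threshold_bytes = 2048;
  b.heap_profiler_options.breakdown_threshold_bytes = 1024;
  b.allowed_dump_modes.insert(MemoryDumpLevelOfDetail::BACKGROUND);
  a.Merge(b);
  // a.heap_profiler_options.breakdown_threshold_bytes == 1024 (the min),
  // a.allowed_dump_modes now contains BACKGROUND, and a.triggers is the
  // concatenation of both trigger lists.
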
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 6b47f8dc55..744e8a8acc 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -13,87 +13,144 @@ namespace trace_event {
class TraceConfigMemoryTestUtil {
public:
+ static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
+ int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"light\","
+ "\"periodic_interval_ms\":%d"
+ "},"
+ "{"
+ "\"mode\":\"detailed\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ }
+
static std::string GetTraceConfig_PeriodicTriggers(int light_period,
int heavy_period) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"heap_profiler_options\":{"
- "\"breakdown_threshold_bytes\":2048"
- "},"
- "\"triggers\":["
- "{"
- "\"mode\":\"light\","
- "\"periodic_interval_ms\":%d"
- "},"
- "{"
- "\"mode\":\"detailed\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
}
static std::string GetTraceConfig_EmptyTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"triggers\":["
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_NoTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\"],"
- "\"triggers\":["
- "{"
- "\"mode\":\"background\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, period_ms);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"background\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, period_ms);
+ }
+
+ static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, heavy_period);
}
};
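
GetTraceConfig_LegacyPeriodicTriggers() above exercises the
backward-compatibility path in SetMemoryDumpConfigFromConfigDict(): a trigger
that carries only "periodic_interval_ms" is still accepted and treated as a
periodic_interval trigger, while the new format names the trigger type
explicitly and renames the interval key. As a sketch, the two accepted shapes
for one light trigger (the 250 ms value is illustrative):

  // Legacy shape: implicitly of type periodic_interval.
  const char kLegacyTrigger[] =
      "{\"mode\":\"light\",\"periodic_interval_ms\":250}";
  // New shape: explicit "type" plus the renamed interval key.
  const char kNewTrigger[] =
      "{\"min_time_between_dumps_ms\":250,"
      "\"mode\":\"light\","
      "\"type\":\"periodic_interval\"}";
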
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 4b46b2fefd..74aa7bdc63 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
#include "base/macros.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_config.h"
@@ -19,38 +20,52 @@ namespace {
const char kDefaultTraceConfigString[] =
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}";
const char kCustomTraceConfigString[] =
- "{"
+ "{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
+ "\"event_filters\":["
+ "{"
+ "\"excluded_categories\":[\"unfiltered_cat\"],"
+ "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+ "\"filter_predicate\":\"event_whitelist_predicate\","
+ "\"included_categories\":[\"*\"]"
+ "}"
+ "],"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
- "\"included_categories\":[\"included\","
- "\"inc_pattern*\","
- "\"disabled-by-default-cc\","
- "\"disabled-by-default-memory-infra\"],"
+ "\"included_categories\":["
+ "\"included\","
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\","
+ "\"disabled-by-default-memory-infra\"],"
"\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"heap_profiler_options\":{"
- "\"breakdown_threshold_bytes\":10240"
- "},"
- "\"triggers\":["
- "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
- "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
- "]"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":10240"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":50,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":1000,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
+ "]"
"},"
"\"record_mode\":\"record-continuously\","
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
- "}";
+ "}";
void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
@@ -72,44 +87,31 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From trace options strings
TraceConfig config("", "record-until-full");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
config.ToTraceOptionsString().c_str());
- config = TraceConfig("", "record-until-full, enable-sampling");
- EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
- EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsArgumentFilterEnabled());
- EXPECT_STREQ("record-until-full,enable-sampling",
- config.ToTraceOptionsString().c_str());
-
config = TraceConfig("", "enable-systrace, record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously,enable-systrace",
@@ -117,7 +119,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
@@ -125,19 +126,17 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig(
"",
- "enable-systrace,trace-to-console,enable-sampling,enable-argument-filter");
+ "enable-systrace,trace-to-console,enable-argument-filter");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ(
- "trace-to-console,enable-sampling,enable-systrace,enable-argument-filter",
+ "trace-to-console,enable-systrace,enable-argument-filter",
config.ToTraceOptionsString().c_str());
config = TraceConfig(
"", "record-continuously, record-until-full, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
@@ -145,28 +144,24 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From TraceRecordMode
config = TraceConfig("", RECORD_UNTIL_FULL);
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", ECHO_TO_CONSOLE);
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
@@ -198,33 +193,30 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From both trace options and category filter strings
config = TraceConfig("", "");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
- "enable-systrace, trace-to-console, enable-sampling");
+ "enable-systrace, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From both trace options and category filter strings with spaces.
config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern* ",
- "enable-systrace, ,trace-to-console, enable-sampling ");
+ "enable-systrace, ,trace-to-console ");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From category filter string and TraceRecordMode
@@ -232,7 +224,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
@@ -242,7 +233,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
TraceConfig config("", "foo-bar-baz");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
@@ -250,7 +240,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
@@ -330,6 +319,7 @@ TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+ EXPECT_TRUE(tc.event_filters().empty());
// Enabling only the disabled-by-default-* category means the default ones
// are also enabled.
tc = TraceConfig("disabled-by-default-foo", "");
@@ -346,7 +336,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig tc(dict);
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -360,7 +349,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig default_tc(*default_dict);
EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
- EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
@@ -374,7 +362,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig custom_tc(*custom_dict);
EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
- EXPECT_TRUE(custom_tc.IsSamplingEnabled());
EXPECT_TRUE(custom_tc.IsSystraceEnabled());
EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,"
@@ -387,22 +374,28 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TEST(TraceConfigTest, TraceConfigFromValidString) {
// Using some non-empty config string.
const char config_string[] =
- "{"
+ "{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
+ "\"event_filters\":["
+ "{"
+ "\"excluded_categories\":[\"unfiltered_cat\"],"
+ "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+ "\"filter_predicate\":\"event_whitelist_predicate\","
+ "\"included_categories\":[\"*\"]"
+ "}"
+ "],"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"included_categories\":[\"included\","
- "\"inc_pattern*\","
- "\"disabled-by-default-cc\"],"
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\"],"
"\"record_mode\":\"record-continuously\","
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
- "}";
+ "}";
TraceConfig tc(config_string);
EXPECT_STREQ(config_string, tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
- EXPECT_TRUE(tc.IsSamplingEnabled());
EXPECT_TRUE(tc.IsSystraceEnabled());
EXPECT_TRUE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,disabled-by-default-cc,-excluded,"
@@ -434,6 +427,26 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ("test.Delay1;16", tc.GetSyntheticDelayValues()[0].c_str());
EXPECT_STREQ("test.Delay2;32", tc.GetSyntheticDelayValues()[1].c_str());
+ EXPECT_EQ(tc.event_filters().size(), 1u);
+ const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
+ EXPECT_STREQ("event_whitelist_predicate",
+ event_filter.predicate_name().c_str());
+ EXPECT_EQ(1u, event_filter.included_categories().size());
+ EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.excluded_categories().size());
+ EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
+ EXPECT_TRUE(event_filter.filter_args());
+
+ std::string json_out;
+ base::JSONWriter::Write(*event_filter.filter_args(), &json_out);
+ EXPECT_STREQ(json_out.c_str(),
+ "{\"event_name_whitelist\":[\"a snake\",\"a dog\"]}");
+ std::unordered_set<std::string> filter_values;
+ EXPECT_TRUE(event_filter.GetArgAsSet("event_name_whitelist", &filter_values));
+ EXPECT_EQ(2u, filter_values.size());
+ EXPECT_EQ(1u, filter_values.count("a snake"));
+ EXPECT_EQ(1u, filter_values.count("a dog"));
+
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
@@ -446,7 +459,6 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ(tc.ToString().c_str(),
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}");
@@ -458,7 +470,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
TraceConfig tc("");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -467,7 +478,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -476,7 +486,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -485,7 +494,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -495,7 +503,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
// initialize TraceConfig with best effort.
tc = TraceConfig("{}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -503,7 +510,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -511,7 +517,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
const char invalid_config_string[] =
"{"
- "\"enable_sampling\":\"true\","
"\"enable_systrace\":1,"
"\"excluded_categories\":[\"excluded\"],"
"\"included_categories\":\"not a list\","
@@ -522,7 +527,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"}";
tc = TraceConfig(invalid_config_string);
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("-excluded,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
@@ -547,7 +551,6 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
tc.Merge(tc2);
EXPECT_STREQ("{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
@@ -614,15 +617,11 @@ TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
TEST(TraceConfigTest, SetTraceOptionValues) {
TraceConfig tc;
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
- tc.EnableSampling();
- EXPECT_TRUE(tc.IsSamplingEnabled());
-
tc.EnableSystrace();
EXPECT_TRUE(tc.IsSystraceEnabled());
}
@@ -632,30 +631,47 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
TraceConfig tc1(tc_str1);
EXPECT_EQ(tc_str1, tc1.ToString());
+ TraceConfig tc2(
+ TraceConfigMemoryTestUtil::GetTraceConfig_LegacyPeriodicTriggers(200,
+ 2000));
+ EXPECT_EQ(tc_str1, tc2.ToString());
+
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u,
+ tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u,
+ tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
tc1.memory_dump_config_.triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
- std::string tc_str2 =
+ std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
1 /* period_ms */);
- TraceConfig tc2(tc_str2);
- EXPECT_EQ(tc_str2, tc2.ToString());
- EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ TraceConfig tc3(tc_str3);
+ EXPECT_EQ(tc_str3, tc3.ToString());
+ EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc2.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config_.triggers[0].level_of_detail);
+
+ std::string tc_str4 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
+ 1 /*heavy_period */);
+ TraceConfig tc4(tc_str4);
+ EXPECT_EQ(tc_str4, tc4.ToString());
+ ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+ tc4.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
deleted file mode 100644
index f915780de5..0000000000
--- a/base/trace_event/trace_event.gypi
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'trace_event_sources' : [
- 'trace_event/blame_context.cc',
- 'trace_event/blame_context.h',
- 'trace_event/common/trace_event_common.h',
- 'trace_event/heap_profiler.h',
- 'trace_event/heap_profiler_allocation_context.cc',
- 'trace_event/heap_profiler_allocation_context.h',
- 'trace_event/heap_profiler_allocation_context_tracker.cc',
- 'trace_event/heap_profiler_allocation_context_tracker.h',
- 'trace_event/heap_profiler_allocation_register.cc',
- 'trace_event/heap_profiler_allocation_register_posix.cc',
- 'trace_event/heap_profiler_allocation_register_win.cc',
- 'trace_event/heap_profiler_allocation_register.h',
- 'trace_event/heap_profiler_heap_dump_writer.cc',
- 'trace_event/heap_profiler_heap_dump_writer.h',
- 'trace_event/heap_profiler_stack_frame_deduplicator.cc',
- 'trace_event/heap_profiler_stack_frame_deduplicator.h',
- 'trace_event/heap_profiler_type_name_deduplicator.cc',
- 'trace_event/heap_profiler_type_name_deduplicator.h',
- 'trace_event/java_heap_dump_provider_android.cc',
- 'trace_event/java_heap_dump_provider_android.h',
- 'trace_event/memory_allocator_dump.cc',
- 'trace_event/memory_allocator_dump.h',
- 'trace_event/memory_allocator_dump_guid.cc',
- 'trace_event/memory_allocator_dump_guid.h',
- 'trace_event/memory_dump_manager.cc',
- 'trace_event/memory_dump_manager.h',
- 'trace_event/memory_dump_provider.h',
- 'trace_event/memory_dump_request_args.cc',
- 'trace_event/memory_dump_request_args.h',
- 'trace_event/memory_dump_session_state.cc',
- 'trace_event/memory_dump_session_state.h',
- 'trace_event/memory_infra_background_whitelist.cc',
- 'trace_event/memory_infra_background_whitelist.h',
- 'trace_event/process_memory_dump.cc',
- 'trace_event/process_memory_dump.h',
- 'trace_event/process_memory_maps.cc',
- 'trace_event/process_memory_maps.h',
- 'trace_event/process_memory_totals.cc',
- 'trace_event/process_memory_totals.h',
- 'trace_event/trace_buffer.cc',
- 'trace_event/trace_buffer.h',
- 'trace_event/trace_config.cc',
- 'trace_event/trace_config.h',
- 'trace_event/trace_event.h',
- 'trace_event/trace_event_android.cc',
- 'trace_event/trace_event_argument.cc',
- 'trace_event/trace_event_argument.h',
- 'trace_event/trace_event_etw_export_win.cc',
- 'trace_event/trace_event_etw_export_win.h',
- 'trace_event/trace_event_impl.cc',
- 'trace_event/trace_event_impl.h',
- 'trace_event/trace_event_memory_overhead.cc',
- 'trace_event/trace_event_memory_overhead.h',
- 'trace_event/trace_event_synthetic_delay.cc',
- 'trace_event/trace_event_synthetic_delay.h',
- 'trace_event/trace_event_system_stats_monitor.cc',
- 'trace_event/trace_event_system_stats_monitor.h',
- 'trace_event/trace_log.cc',
- 'trace_event/trace_log.h',
- 'trace_event/trace_log_constants.cc',
- 'trace_event/trace_sampling_thread.cc',
- 'trace_event/trace_sampling_thread.h',
- 'trace_event/tracing_agent.cc',
- 'trace_event/tracing_agent.h',
- 'trace_event/winheap_dump_provider_win.cc',
- 'trace_event/winheap_dump_provider_win.h',
- ],
- 'trace_event_test_sources' : [
- 'trace_event/blame_context_unittest.cc',
- 'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
- 'trace_event/heap_profiler_allocation_register_unittest.cc',
- 'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
- 'trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc',
- 'trace_event/heap_profiler_type_name_deduplicator_unittest.cc',
- 'trace_event/java_heap_dump_provider_android_unittest.cc',
- 'trace_event/memory_allocator_dump_unittest.cc',
- 'trace_event/memory_dump_manager_unittest.cc',
- 'trace_event/process_memory_dump_unittest.cc',
- 'trace_event/trace_config_memory_test_util.h',
- 'trace_event/trace_config_unittest.cc',
- 'trace_event/trace_event_argument_unittest.cc',
- 'trace_event/trace_event_synthetic_delay_unittest.cc',
- 'trace_event/trace_event_system_stats_monitor_unittest.cc',
- 'trace_event/trace_event_unittest.cc',
- 'trace_event/winheap_dump_provider_win_unittest.cc',
- ],
- 'conditions': [
- ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
- 'trace_event_sources': [
- 'trace_event/malloc_dump_provider.cc',
- 'trace_event/malloc_dump_provider.h',
- ],
- }],
- ['OS == "android"', {
- 'trace_event_test_sources' : [
- 'trace_event/trace_event_android_unittest.cc',
- ],
- }],
- ],
- },
-}
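
The trace_event.h changes below delete the sampling-state macros and rework
trace IDs around explicit scopes. As a hedged sketch of the TRACE_ID_LOCAL
usage the updated comments describe (the category, event name, and
request_id variable are illustrative, not taken from this patch):

  // Begin/end an async event whose id is only unique within this process.
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("network", "ResourceLoad",
                                    TRACE_ID_LOCAL(request_id));
  // ... later, matched by the same category, name, and id:
  TRACE_EVENT_NESTABLE_ASYNC_END0("network", "ResourceLoad",
                                  TRACE_ID_LOCAL(request_id));
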
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index a075898269..51e6927cbd 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -19,6 +19,7 @@
#include "base/time/time.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_event_system_stats_monitor.h"
#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
@@ -28,55 +29,52 @@
#define TRACE_STR_COPY(str) \
trace_event_internal::TraceStringWithCopy(str)
-// By default, uint64_t ID argument values are not mangled with the Process ID
-// in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, uint64_t ID argument values are not mangled with the
+// Process ID in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID
+// mangling.
#define TRACE_ID_MANGLE(id) \
trace_event_internal::TraceID::ForceMangle(id)
-// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
-// macros. Use this macro to prevent Process ID mangling.
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, pointers are mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to prevent Process ID mangling.
#define TRACE_ID_DONT_MANGLE(id) \
trace_event_internal::TraceID::DontMangle(id)
// By default, trace IDs are eventually converted to a single 64-bit number. Use
-// this macro to add a scope string.
-#define TRACE_ID_WITH_SCOPE(scope, id) \
- trace_event_internal::TraceID::WithScope(scope, id)
-
-// Sets the current sample state to the given category and name (both must be
-// constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal:: \
- TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
-
-// Returns a current sampling state of the given bucket.
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
-
-// Creates a scope of a sampling state of the given bucket.
+// this macro to add a scope string. For example,
//
-// { // The sampling state is set within this scope.
-// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-// ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
- traceEventSamplingScope(category "\0" name);
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
+//
+// Also, it is possible to prepend the ID with another number, like the process
+// ID. This is useful in creating IDs that are unique among all processes. To do
+// that, pass two numbers after the scope string instead of one. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+ trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
+
+#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
+#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
#define TRACE_EVENT_API_CURRENT_THREAD_ID \
static_cast<int>(base::PlatformThread::CurrentId())
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
- (base::trace_event::TraceLog::ENABLED_FOR_RECORDING | \
- base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK | \
- base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT))
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED() \
+ UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
+ base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -204,13 +202,6 @@
// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
-// The thread buckets for the sampling profiler.
-TRACE_EVENT_API_CLASS_EXPORT extern \
- TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
-#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
- g_trace_state[thread_bucket]
-
////////////////////////////////////////////////////////////////////////////////
// Implementation detail: trace event macros create temporary variables
@@ -249,69 +240,69 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- base::trace_event::TraceEventHandle h = \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
- ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
- }
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+ ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
- category_group, name, bind_id, flow_flags, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flow_flags; \
- trace_event_internal::TraceID trace_event_bind_id(bind_id, \
- &trace_event_flags); \
- base::trace_event::TraceEventHandle h = \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_bind_id((bind_id)); \
+ unsigned int trace_event_flags = \
+ flow_flags | trace_event_bind_id.id_flags(); \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
- flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
- trace_event_internal::TraceID trace_event_trace_id( \
- id, &trace_event_flags); \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
@@ -319,12 +310,11 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, \
- base::TimeTicks::FromInternalValue(timestamp), \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
@@ -332,33 +322,50 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- phase, category_group, name, id, thread_id, timestamp, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
- trace_event_internal::TraceID trace_event_trace_id(id, \
- &trace_event_flags); \
- trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- thread_id, base::TimeTicks::FromInternalValue(timestamp), \
- trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
- trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ thread_id, timestamp, \
+ trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// The linked ID will not be mangled.
+#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID source_id((id1)); \
+ unsigned int source_flags = source_id.id_flags(); \
+ trace_event_internal::TraceID target_id((id2)); \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_LINK_IDS, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ source_id.scope(), source_id.raw_id(), source_flags, \
+ trace_event_internal::kNoId, "linked_id", \
+ target_id.AsConvertableToTraceFormat()); \
+ } \
} while (0)
// Implementation detail: internal macro to create static category and add
// metadata event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- TRACE_EVENT_API_ADD_METADATA_EVENT( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ TRACE_EVENT_API_ADD_METADATA_EVENT( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ ##__VA_ARGS__); \
+ } \
} while (0)
// Implementation detail: internal macro to enter and leave a
@@ -381,7 +388,7 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}; \
}; \
INTERNAL_TRACE_EVENT_UID(ScopedContext) \
- INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
// Implementation detail: internal macro to trace a task execution with the
// location where it was posted from.
@@ -403,19 +410,64 @@ const unsigned long long kNoId = 0;
// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
// are by default mangled with the Process ID so that they are unlikely to
// collide when the same pointer is used on different processes.
-class TraceID {
+class BASE_EXPORT TraceID {
public:
+ // Can be combined with WithScope.
+ class LocalId {
+ public:
+ explicit LocalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+ unsigned long long raw_id() const { return raw_id_; }
+ private:
+ unsigned long long raw_id_;
+ };
+
+ // Can be combined with WithScope.
+ class GlobalId {
+ public:
+ explicit GlobalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+ unsigned long long raw_id() const { return raw_id_; }
+ private:
+ unsigned long long raw_id_;
+ };
+
class WithScope {
public:
WithScope(const char* scope, unsigned long long raw_id)
: scope_(scope), raw_id_(raw_id) {}
+ WithScope(const char* scope, LocalId local_id)
+ : scope_(scope), raw_id_(local_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+ }
+ WithScope(const char* scope, GlobalId global_id)
+ : scope_(scope), raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
+ WithScope(const char* scope,
+ unsigned long long prefix,
+ unsigned long long raw_id)
+ : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+ WithScope(const char* scope, unsigned long long prefix, GlobalId global_id)
+ : scope_(scope),
+ has_prefix_(true),
+ prefix_(prefix),
+ raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
+ unsigned int id_flags() const { return id_flags_; }
+
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
+ unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
+ // DEPRECATED: consider using LocalId or GlobalId instead.
class DontMangle {
public:
explicit DontMangle(const void* raw_id)
@@ -436,15 +488,12 @@ class TraceID {
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
explicit DontMangle(signed char raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
- explicit DontMangle(WithScope scoped_id)
- : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
- const char* scope() const { return scope_; }
unsigned long long raw_id() const { return raw_id_; }
private:
- const char* scope_ = nullptr;
unsigned long long raw_id_;
};
+ // DEPRECATED: consider using LocalId or GlobalId instead.
class ForceMangle {
public:
explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
@@ -466,50 +515,58 @@ class TraceID {
private:
unsigned long long raw_id_;
};
- TraceID(const void* raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(
- reinterpret_cast<uintptr_t>(raw_id))) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
- : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
- TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
- }
- TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+
+ TraceID(const void* raw_id) : raw_id_(static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(raw_id))) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(ForceMangle raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(DontMangle raw_id) : raw_id_(raw_id.raw_id()) {}
+ TraceID(unsigned long long raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned long raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned int raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned short raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned char raw_id) : raw_id_(raw_id) {}
+ TraceID(long long raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(long raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(int raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(short raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(signed char raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
}
- TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
- TraceID(long long raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(long raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(int raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(short raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(signed char raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(WithScope scoped_id, unsigned int* /*flags*/)
- : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+ TraceID(WithScope scoped_id)
+ : scope_(scoped_id.scope()),
+ has_prefix_(scoped_id.has_prefix()),
+ prefix_(scoped_id.prefix()),
+ raw_id_(scoped_id.raw_id()),
+ id_flags_(scoped_id.id_flags()) {}
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
+ unsigned int id_flags() const { return id_flags_; }
+
+ std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+ AsConvertableToTraceFormat() const;
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
+ unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
// Simple union to store various types as unsigned long long.
@@ -973,9 +1030,10 @@ class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
ScopedTracer() : p_data_(NULL) {}
~ScopedTracer() {
- if (p_data_ && *data_.category_group_enabled)
+ if (p_data_ && *data_.category_group_enabled) {
TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
data_.category_group_enabled, data_.name, data_.event_handle);
+ }
}
void Initialize(const unsigned char* category_group_enabled,
@@ -1023,37 +1081,6 @@ class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
trace_event_internal::ScopedTraceBinaryEfficient \
INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
-// TraceEventSamplingStateScope records the current sampling state
-// and sets a new sampling state. When the scope exists, it restores
-// the sampling state having recorded.
-template<size_t BucketNumber>
-class TraceEventSamplingStateScope {
- public:
- TraceEventSamplingStateScope(const char* category_and_name) {
- previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
- TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
- }
-
- ~TraceEventSamplingStateScope() {
- TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
- }
-
- static inline const char* Current() {
- return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
- g_trace_state[BucketNumber]));
- }
-
- static inline void Set(const char* category_and_name) {
- TRACE_EVENT_API_ATOMIC_STORE(
- g_trace_state[BucketNumber],
- reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
- const_cast<char*>(category_and_name)));
- }
-
- private:
- const char* previous_state_;
-};
-
} // namespace trace_event_internal
namespace base {
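For reference, the reworked TraceID constructors above are what back the TRACE_ID_LOCAL / TRACE_ID_GLOBAL / TRACE_ID_WITH_SCOPE helpers exercised by the unit tests later in this patch. A minimal usage sketch, assuming the trace_event.h macros are in scope; the category name and ID values are illustrative only:

    // Plain integer: process-scoped ID (TRACE_EVENT_FLAG_HAS_ID).
    TRACE_EVENT_ASYNC_BEGIN0("cat", "plain id", 0x1000);
    // Local ID: meaningful only within this process; serialized as id2.local.
    TRACE_EVENT_ASYNC_BEGIN0("cat", "local id", TRACE_ID_LOCAL(0x2000));
    // Global ID: valid across processes; serialized as id2.global.
    TRACE_EVENT_ASYNC_BEGIN0("cat", "global id", TRACE_ID_GLOBAL(0x3000));
    // Scope string combined with a global ID, as in the new tests below.
    TRACE_EVENT_ASYNC_BEGIN0("cat", "scoped global id",
                             TRACE_ID_WITH_SCOPE("scope",
                                                 TRACE_ID_GLOBAL(0x4000)));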
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index 336d964bff..db702b6231 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -244,36 +244,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
SetBooleanWithCopiedName(name, bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
SetIntegerWithCopiedName(name, int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
SetDoubleWithCopiedName(name, double_value);
} break;
- case base::Value::TYPE_STRING: {
- const StringValue* string_value;
+ case base::Value::Type::STRING: {
+ const Value* string_value;
value.GetAsString(&string_value);
SetStringWithCopiedName(name, string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionaryWithCopiedName(name);
@@ -284,7 +284,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
@@ -298,36 +298,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
void TracedValue::AppendBaseValue(const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
AppendBoolean(bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
AppendInteger(int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
AppendDouble(double_value);
} break;
- case base::Value::TYPE_STRING: {
- const StringValue* string_value;
+ case base::Value::Type::STRING: {
+ const Value* string_value;
value.GetAsString(&string_value);
AppendString(string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionary();
@@ -338,7 +338,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index 61395f4d55..aef8441c8e 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -97,9 +97,9 @@ TEST(TraceEventArgumentTest, LongStrings) {
}
TEST(TraceEventArgumentTest, PassBaseValue) {
- FundamentalValue int_value(42);
- FundamentalValue bool_value(true);
- FundamentalValue double_value(42.0f);
+ Value int_value(42);
+ Value bool_value(true);
+ Value double_value(42.0f);
auto dict_value = WrapUnique(new DictionaryValue);
dict_value->SetBoolean("bool", true);
@@ -131,10 +131,10 @@ TEST(TraceEventArgumentTest, PassBaseValue) {
}
TEST(TraceEventArgumentTest, PassTracedValue) {
- auto dict_value = WrapUnique(new TracedValue());
+ auto dict_value = MakeUnique<TracedValue>();
dict_value->SetInteger("a", 1);
- auto nested_dict_value = WrapUnique(new TracedValue());
+ auto nested_dict_value = MakeUnique<TracedValue>();
nested_dict_value->SetInteger("b", 2);
nested_dict_value->BeginArray("c");
nested_dict_value->AppendString("foo");
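The MakeUnique change above leaves the TracedValue build pattern itself untouched; a minimal sketch of that pattern, assuming base/trace_event/trace_event_argument.h and base/memory/ptr_util.h are included:

    auto value = base::MakeUnique<base::trace_event::TracedValue>();
    value->SetInteger("a", 1);
    value->BeginArray("c");  // Every Begin*() needs a matching End*().
    value->AppendString("foo");
    value->EndArray();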
diff --git a/base/trace_event/trace_event_filter.cc b/base/trace_event/trace_event_filter.cc
new file mode 100644
index 0000000000..d50c5fe251
--- /dev/null
+++ b/base/trace_event/trace_event_filter.cc
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+TraceEventFilter::TraceEventFilter() {}
+TraceEventFilter::~TraceEventFilter() {}
+
+void TraceEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {
+ ALLOW_UNUSED_PARAM(category_name);
+ ALLOW_UNUSED_PARAM(event_name);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_event_filter.h b/base/trace_event/trace_event_filter.h
new file mode 100644
index 0000000000..48c6711432
--- /dev/null
+++ b/base/trace_event/trace_event_filter.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
+// enabled on a per-category basis, hence a single filter instance can serve
+// more than one TraceCategory. There are two use cases for filters:
+// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
+// possible by setting the ENABLED_FOR_FILTERING flag on a category w/o
+// ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
+// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
+// requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
+// on the category.
+// More importantly, filters must be thread-safe. The FilterTraceEvent and
+// EndEvent methods can be called concurrently as trace macros are hit on
+// different threads.
+class BASE_EXPORT TraceEventFilter {
+ public:
+ TraceEventFilter();
+ virtual ~TraceEventFilter();
+
+ // If the category is ENABLED_FOR_RECORDING, the event is added iff all the
+ // filters enabled for the category return true. Returning false causes the
+ // event to be discarded.
+ virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
+
+ // Notifies the end of a duration event when the RAII macro goes out of scope.
+ virtual void EndEvent(const char* category_name,
+ const char* event_name) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
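A minimal sketch of a concrete filter against the interface above; the class name and behavior are illustrative only (TestEventFilter below is the real in-tree example):

    class DropEverythingFilter : public base::trace_event::TraceEventFilter {
     public:
      DropEverythingFilter() {}
      bool FilterTraceEvent(
          const base::trace_event::TraceEvent& trace_event) const override {
        // Discard every event in the categories this filter serves.
        return false;
      }
      // EndEvent() is optional to override: the base-class default is a no-op.
    };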
diff --git a/base/trace_event/trace_event_filter_test_utils.cc b/base/trace_event/trace_event_filter_test_utils.cc
new file mode 100644
index 0000000000..06548b049a
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter_test_utils.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+TestEventFilter::HitsCounter* g_hits_counter;
+} // namespace
+
+// static
+const char TestEventFilter::kName[] = "testing_predicate";
+bool TestEventFilter::filter_return_value_;
+
+// static
+std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
+ const std::string& predicate_name) {
+ std::unique_ptr<TraceEventFilter> res;
+ if (predicate_name == kName)
+ res.reset(new TestEventFilter());
+ return res;
+}
+
+TestEventFilter::TestEventFilter() {}
+TestEventFilter::~TestEventFilter() {}
+
+bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ if (g_hits_counter)
+ g_hits_counter->filter_trace_event_hit_count++;
+ return filter_return_value_;
+}
+
+void TestEventFilter::EndEvent(const char* category_name,
+ const char* name) const {
+ if (g_hits_counter)
+ g_hits_counter->end_event_hit_count++;
+}
+
+TestEventFilter::HitsCounter::HitsCounter() {
+ Reset();
+ DCHECK(!g_hits_counter);
+ g_hits_counter = this;
+}
+
+TestEventFilter::HitsCounter::~HitsCounter() {
+ DCHECK(g_hits_counter);
+ g_hits_counter = nullptr;
+}
+
+void TestEventFilter::HitsCounter::Reset() {
+ filter_trace_event_hit_count = 0;
+ end_event_hit_count = 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_event_filter_test_utils.h b/base/trace_event/trace_event_filter_test_utils.h
new file mode 100644
index 0000000000..419068b221
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TestEventFilter : public TraceEventFilter {
+ public:
+ struct HitsCounter {
+ HitsCounter();
+ ~HitsCounter();
+ void Reset();
+ size_t filter_trace_event_hit_count;
+ size_t end_event_hit_count;
+ };
+
+ static const char kName[];
+
+ // Factory method for TraceLog::SetFilterFactoryForTesting().
+ static std::unique_ptr<TraceEventFilter> Factory(
+ const std::string& predicate_name);
+
+ TestEventFilter();
+ ~TestEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name, const char* name) const override;
+
+ static void set_filter_return_value(bool value) {
+ filter_return_value_ = value;
+ }
+
+ private:
+ static bool filter_return_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
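A hedged sketch of how a test would drive this helper, assuming the filter has been installed through the Factory hook named above and that the traced category is enabled for filtering:

    base::trace_event::TestEventFilter::HitsCounter hits;  // Registers itself.
    base::trace_event::TestEventFilter::set_filter_return_value(true);
    TRACE_EVENT_INSTANT0("filtered_cat", "event", TRACE_EVENT_SCOPE_THREAD);
    // One macro hit in a filtered category should bump the counter once.
    EXPECT_EQ(1u, hits.filter_trace_event_hit_count);
    hits.Reset();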
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index f469f2f6bc..cb23eb474c 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -8,6 +8,7 @@
#include "base/format_macros.h"
#include "base/json/string_escape.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
@@ -15,6 +16,7 @@
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_log.h"
namespace base {
@@ -358,10 +360,33 @@ void TraceEvent::AppendAsJSON(
// If id_ is set, print it out as a hex string so we don't lose any
// bits (it might be a 64-bit pointer).
- if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
+ unsigned int id_flags_ = flags_ & (TRACE_EVENT_FLAG_HAS_ID |
+ TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+ TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
+ if (id_flags_) {
if (scope_ != trace_event_internal::kGlobalScope)
StringAppendF(out, ",\"scope\":\"%s\"", scope_);
- StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
+
+ switch (id_flags_) {
+ case TRACE_EVENT_FLAG_HAS_ID:
+ StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"",
+ static_cast<uint64_t>(id_));
+ break;
+
+ case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
+ StringAppendF(out, ",\"id2\":{\"local\":\"0x%" PRIx64 "\"}",
+ static_cast<uint64_t>(id_));
+ break;
+
+ case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
+ StringAppendF(out, ",\"id2\":{\"global\":\"0x%" PRIx64 "\"}",
+ static_cast<uint64_t>(id_));
+ break;
+
+ default:
+ NOTREACHED() << "More than one of the ID flags are set";
+ break;
+ }
}
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
@@ -424,3 +449,42 @@ void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
} // namespace trace_event
} // namespace base
+
+namespace trace_event_internal {
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TraceID::AsConvertableToTraceFormat() const {
+ auto value = base::MakeUnique<base::trace_event::TracedValue>();
+
+ if (scope_ != kGlobalScope)
+ value->SetString("scope", scope_);
+
+ const char* id_field_name = "id";
+ if (id_flags_ == TRACE_EVENT_FLAG_HAS_GLOBAL_ID) {
+ id_field_name = "global";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ == TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
+ id_field_name = "local";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID) {
+ NOTREACHED() << "Unrecognized ID flag";
+ }
+
+ if (has_prefix_) {
+ value->SetString(id_field_name,
+ base::StringPrintf("0x%" PRIx64 "/0x%" PRIx64,
+ static_cast<uint64_t>(prefix_),
+ static_cast<uint64_t>(raw_id_)));
+ } else {
+ value->SetString(
+ id_field_name,
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ }
+
+ if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID)
+ value->EndDictionary();
+
+ return std::move(value);
+}
+
+} // namespace trace_event_internal
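The format strings above yield three distinct JSON shapes, summarized here for clarity (values illustrative, matching the expectations added to trace_event_unittest.cc in this patch):

    // TRACE_EVENT_FLAG_HAS_ID:        "id":  "0x1000"
    // TRACE_EVENT_FLAG_HAS_LOCAL_ID:  "id2": {"local": "0x2000"}
    // TRACE_EVENT_FLAG_HAS_GLOBAL_ID: "id2": {"global": "0x3000"}
    // With a WithScope prefix, the two halves are joined as
    // "0x<prefix>/0x<raw_id>" (the composite-ID test expects "0x2000/0x3000").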
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 4382217881..5eef702fb9 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -23,16 +23,11 @@
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "build/build_config.h"
namespace base {
-
-class WaitableEvent;
-class MessageLoop;
-
namespace trace_event {
typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
index 23579cbb22..8d56e1d80e 100644
--- a/base/trace_event/trace_event_memory_overhead.cc
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -69,27 +69,27 @@ void TraceEventMemoryOverhead::AddRefCountedString(
void TraceEventMemoryOverhead::AddValue(const Value& value) {
switch (value.GetType()) {
- case Value::TYPE_NULL:
- case Value::TYPE_BOOLEAN:
- case Value::TYPE_INTEGER:
- case Value::TYPE_DOUBLE:
+ case Value::Type::NONE:
+ case Value::Type::BOOLEAN:
+ case Value::Type::INTEGER:
+ case Value::Type::DOUBLE:
Add("FundamentalValue", sizeof(Value));
break;
- case Value::TYPE_STRING: {
- const StringValue* string_value = nullptr;
+ case Value::Type::STRING: {
+ const Value* string_value = nullptr;
value.GetAsString(&string_value);
- Add("StringValue", sizeof(StringValue));
+ Add("StringValue", sizeof(Value));
AddString(string_value->GetString());
} break;
- case Value::TYPE_BINARY: {
+ case Value::Type::BINARY: {
const BinaryValue* binary_value = nullptr;
value.GetAsBinary(&binary_value);
Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
} break;
- case Value::TYPE_DICTIONARY: {
+ case Value::Type::DICTIONARY: {
const DictionaryValue* dictionary_value = nullptr;
value.GetAsDictionary(&dictionary_value);
Add("DictionaryValue", sizeof(DictionaryValue));
@@ -100,7 +100,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
}
} break;
- case Value::TYPE_LIST: {
+ case Value::Type::LIST: {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
diff --git a/base/trace_event/trace_event_synthetic_delay.h b/base/trace_event/trace_event_synthetic_delay.h
index 59e2842f71..e86f9eee2c 100644
--- a/base/trace_event/trace_event_synthetic_delay.h
+++ b/base/trace_event/trace_event_synthetic_delay.h
@@ -62,9 +62,6 @@
trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End(); \
} while (false)
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
namespace trace_event {
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index ff8ec2de78..82a552aa4e 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -18,6 +18,7 @@
#include "base/json/json_writer.h"
#include "base/location.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/process/process_handle.h"
@@ -29,7 +30,12 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "base/trace_event/event_name_filter.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_filter.h"
+#include "base/trace_event/trace_event_filter_test_utils.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -67,9 +73,6 @@ class TraceEventTestFixture : public testing::Test {
WaitableEvent* flush_complete_event,
const scoped_refptr<base::RefCountedString>& events_str,
bool has_more_events);
- void OnWatchEventMatched() {
- ++event_watch_notification_;
- }
DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
DictionaryValue* FindNamePhase(const char* name, const char* phase);
DictionaryValue* FindNamePhaseKeyValue(const char* name,
@@ -91,7 +94,6 @@ class TraceEventTestFixture : public testing::Test {
}
void BeginSpecificTrace(const std::string& filter) {
- event_watch_notification_ = 0;
TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
TraceLog::RECORDING_MODE);
}
@@ -135,7 +137,8 @@ class TraceEventTestFixture : public testing::Test {
}
void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE |
+ TraceLog::FILTERING_MODE);
TraceLog::GetInstance()->Flush(
base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
base::Unretained(static_cast<TraceEventTestFixture*>(this)),
@@ -151,7 +154,6 @@ class TraceEventTestFixture : public testing::Test {
ASSERT_TRUE(tracelog);
ASSERT_FALSE(tracelog->IsEnabled());
trace_buffer_.SetOutputCallback(json_output_.GetCallback());
- event_watch_notification_ = 0;
num_flush_callbacks_ = 0;
}
void TearDown() override {
@@ -168,7 +170,6 @@ class TraceEventTestFixture : public testing::Test {
ListValue trace_parsed_;
TraceResultBuffer trace_buffer_;
TraceResultBuffer::SimpleOutput json_output_;
- int event_watch_notification_;
size_t num_flush_callbacks_;
private:
@@ -263,7 +264,7 @@ DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
for (size_t i = 0; i < trace_parsed_count; i++) {
Value* value = NULL;
trace_parsed_.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
DictionaryValue* dict = static_cast<DictionaryValue*>(value);
@@ -281,7 +282,7 @@ void TraceEventTestFixture::DropTracedMetadataRecords() {
for (size_t i = 0; i < old_trace_parsed_size; i++) {
Value* value = nullptr;
old_trace_parsed->Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
+ if (!value || value->GetType() != Value::Type::DICTIONARY) {
trace_parsed_.Append(value->CreateDeepCopy());
continue;
}
@@ -370,7 +371,7 @@ const DictionaryValue* FindTraceEntry(
match_after_this_item = NULL;
continue;
}
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -388,7 +389,7 @@ std::vector<const DictionaryValue*> FindTraceEntries(
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -460,9 +461,10 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
"b", 1415);
TRACE_COUNTER_WITH_TIMESTAMP1("all", "TRACE_COUNTER_WITH_TIMESTAMP1 call",
- 42, 31415);
+ TimeTicks::FromInternalValue(42), 31415);
TRACE_COUNTER_WITH_TIMESTAMP2("all", "TRACE_COUNTER_WITH_TIMESTAMP2 call",
- 42, "a", 30000, "b", 1415);
+ TimeTicks::FromInternalValue(42),
+ "a", 30000, "b", 1415);
TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
@@ -470,14 +472,14 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId, kThreadId, 12345);
+ kAsyncId, kThreadId, TimeTicks::FromInternalValue(12345));
TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId, kThreadId, 23456);
+ kAsyncId, kThreadId, TimeTicks::FromInternalValue(23456));
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId2, kThreadId, 34567);
+ kAsyncId2, kThreadId, TimeTicks::FromInternalValue(34567));
TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
kAsyncId2, "step_end1");
TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
@@ -485,7 +487,7 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId2, kThreadId, 45678);
+ kAsyncId2, kThreadId, TimeTicks::FromInternalValue(45678));
TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
@@ -517,6 +519,24 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
context_id);
TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
context_id);
+
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS scoped call",
+ TRACE_ID_WITH_SCOPE("scope 1", 0x1000),
+ TRACE_ID_WITH_SCOPE("scope 2", 0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
+ TRACE_ID_LOCAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
+ TRACE_ID_GLOBAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a composite ID", 0x1000,
+ TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000));
+
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async global id", TRACE_ID_GLOBAL(0x3000));
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async global id with scope string",
+ TRACE_ID_WITH_SCOPE("scope string",
+ TRACE_ID_GLOBAL(0x4000)));
} // Scope close causes TRACE_EVENT0 etc to send their END events.
if (task_complete_event)
@@ -957,6 +977,144 @@ void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ("0x20151021", id);
}
+
+ EXPECT_FIND_("TRACE_LINK_IDS simple call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS scoped call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ std::string scope1;
+ EXPECT_TRUE((item && item->GetString("scope", &scope1)));
+ EXPECT_EQ("scope 1", scope1);
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ std::string scope2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.scope", &scope2)));
+ EXPECT_EQ("scope 2", scope2);
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a local ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.local", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a global ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.global", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a composite ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE(item->HasKey("scope"));
+ std::string id1;
+ EXPECT_TRUE(item->GetString("id", &id1));
+ EXPECT_EQ("0x1000", id1);
+
+ std::string scope;
+ EXPECT_TRUE(item->GetString("args.linked_id.scope", &scope));
+ EXPECT_EQ("scope 1", scope);
+ std::string id2;
+ EXPECT_TRUE(item->GetString("args.linked_id.id", &id2));
+ EXPECT_EQ(id2, "0x2000/0x3000");
+ }
+
+ EXPECT_FIND_("async default process scope");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ("0x1000", id);
+ }
+
+ EXPECT_FIND_("async local id");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.local", &id)));
+ EXPECT_EQ("0x2000", id);
+ }
+
+ EXPECT_FIND_("async global id");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+ EXPECT_EQ("0x3000", id);
+ }
+
+ EXPECT_FIND_("async global id with scope string");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+ EXPECT_EQ("0x4000", id);
+ std::string scope;
+ EXPECT_TRUE((item && item->GetString("scope", &scope)));
+ EXPECT_EQ("scope string", scope);
+ }
}
void TraceManyInstantEvents(int thread_id, int num_events,
@@ -981,7 +1139,7 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
std::string name;
@@ -1430,59 +1588,6 @@ TEST_F(TraceEventTestFixture, Categories) {
}
-// Test EVENT_WATCH_NOTIFICATION
-TEST_F(TraceEventTestFixture, EventWatchNotification) {
- // Basic one occurrence.
- BeginTrace();
- TraceLog::WatchEventCallback callback =
- base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
- base::Unretained(this));
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 1);
-
- // Auto-reset after end trace.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- EndTraceAndFlush();
- BeginTrace();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Multiple occurrence.
- BeginTrace();
- int num_occurrences = 5;
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- for (int i = 0; i < num_occurrences; ++i)
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, num_occurrences);
-
- // Wrong category.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Wrong name.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Canceled.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TraceLog::GetInstance()->CancelWatchEvent();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-}
-
// Test ASYNC_BEGIN/END events
TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
BeginTrace();
@@ -2053,55 +2158,6 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
trace_log->SetDisabled();
}
-TEST_F(TraceEventTestFixture, TraceSampling) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
- EndTraceAndFlush();
-
- // Make sure we hit at least once.
- EXPECT_TRUE(FindNamePhase("Stuff", "P"));
- EXPECT_TRUE(FindNamePhase("Things", "P"));
-}
-
-TEST_F(TraceEventTestFixture, TraceSamplingScope) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
-
- EndTraceAndFlush();
-}
class MyData : public ConvertableToTraceFormat {
public:
@@ -2290,7 +2346,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_one", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(1, double_value);
@@ -2300,7 +2356,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_half", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(0.5, double_value);
@@ -2310,7 +2366,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(-0.5, double_value);
@@ -2480,233 +2536,6 @@ TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
EXPECT_EQ(args_string, "__stripped__");
}
-class TraceEventCallbackTest : public TraceEventTestFixture {
- public:
- void SetUp() override {
- TraceEventTestFixture::SetUp();
- ASSERT_EQ(NULL, s_instance);
- s_instance = this;
- }
- void TearDown() override {
- TraceLog::GetInstance()->SetDisabled();
- ASSERT_TRUE(s_instance);
- s_instance = NULL;
- TraceEventTestFixture::TearDown();
- }
-
- protected:
- // For TraceEventCallbackAndRecordingX tests.
- void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
- size_t expected_recorded_count) {
- // Callback events.
- EXPECT_EQ(expected_callback_count, collected_events_names_.size());
- for (size_t i = 0; i < collected_events_names_.size(); ++i) {
- EXPECT_EQ("callback", collected_events_categories_[i]);
- EXPECT_EQ("yes", collected_events_names_[i]);
- }
-
- // Recorded events.
- EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
- }
-
- void VerifyCollectedEvent(size_t i,
- unsigned phase,
- const std::string& category,
- const std::string& name) {
- EXPECT_EQ(phase, collected_events_phases_[i]);
- EXPECT_EQ(category, collected_events_categories_[i]);
- EXPECT_EQ(name, collected_events_names_[i]);
- }
-
- std::vector<std::string> collected_events_categories_;
- std::vector<std::string> collected_events_names_;
- std::vector<unsigned char> collected_events_phases_;
- std::vector<TimeTicks> collected_events_timestamps_;
-
- static TraceEventCallbackTest* s_instance;
- static void Callback(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags) {
- s_instance->collected_events_phases_.push_back(phase);
- s_instance->collected_events_categories_.push_back(
- TraceLog::GetCategoryGroupName(category_group_enabled));
- s_instance->collected_events_names_.push_back(name);
- s_instance->collected_events_timestamps_.push_back(timestamp);
- }
-};
-
-TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
-
-TEST_F(TraceEventCallbackTest, TraceEventCallback) {
- TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
- {
- TRACE_EVENT0("all", "duration");
- TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("all", "after callback removed",
- TRACE_EVENT_SCOPE_GLOBAL);
- ASSERT_EQ(5u, collected_events_names_.size());
- EXPECT_EQ("event1", collected_events_names_[0]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
- EXPECT_EQ("event2", collected_events_names_[1]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
- EXPECT_EQ("duration", collected_events_names_[2]);
- EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
- EXPECT_EQ("event3", collected_events_names_[3]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
- EXPECT_EQ("duration", collected_events_names_[4]);
- EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
- for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
- EXPECT_LE(collected_events_timestamps_[i - 1],
- collected_events_timestamps_[i]);
- }
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
- TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
- TraceLog::RECORDING_MODE);
- do {
- TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
- } while (!TraceLog::GetInstance()->BufferIsFull());
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- ASSERT_EQ(1u, collected_events_names_.size());
- EXPECT_EQ("a snake", collected_events_names_[0]);
-}
-
-// 1: Enable callback, enable recording, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-// 2: Enable callback, enable recording, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(3, 1);
-}
-
-// 3: Enable recording, enable callback, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(1, 3);
-}
-
-// 4: Enable recording, enable callback, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- {
- TRACE_EVENT0("callback", "duration1");
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
- TRACE_EVENT0("callback", "duration2");
- EndTraceAndFlush();
- TRACE_EVENT0("callback", "duration3");
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
-
- ASSERT_EQ(6u, collected_events_names_.size());
- VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
- VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
- VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
- VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
- VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
- VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
-}
-
TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
TraceLog* trace_log = TraceLog::GetInstance();
trace_log->SetEnabled(
@@ -2715,9 +2544,9 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
TraceBuffer::CreateTraceBufferVectorOfSize(100));
do {
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
} while (!trace_log->BufferIsFull());
EndTraceAndFlush();
@@ -2928,29 +2757,9 @@ TEST_F(TraceEventTestFixture, ConvertTraceConfigToInternalOptions) {
trace_log->GetInternalOptionsFromTraceConfig(
TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
- EXPECT_EQ(
- TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-until-full,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-continuously,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "trace-to-console,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig("*",
- "trace-to-console,enable-sampling,enable-systrace")));
+ EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+ trace_log->GetInternalOptionsFromTraceConfig(
+ TraceConfig("*", "trace-to-console,enable-systrace")));
}
void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
@@ -3109,9 +2918,9 @@ TEST_F(TraceEventTestFixture, TimeOffset) {
TRACE_EVENT0("all", "duration2");
}
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
EndTraceAndFlush();
DropTracedMetadataRecords();
@@ -3173,6 +2982,213 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
+TEST_F(TraceEventTestFixture, TraceFilteringMode) {
+ const char config_json[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"testing_predicate\", "
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
+ // Run RECORDING_MODE within FILTERING_MODE:
+ TestEventFilter::HitsCounter filter_hits_counter;
+ TestEventFilter::set_filter_return_value(true);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+ // Only filtering mode is enabled with test filters.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ void* ptr = this;
+ TRACE_EVENT0("c0", "name0");
+ TRACE_EVENT_ASYNC_BEGIN0("c1", "name1", ptr);
+ TRACE_EVENT_INSTANT0("c0", "name0", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_ASYNC_END0("c1", "name1", ptr);
+ }
+
+  // Recording mode is enabled while filtering mode is still on.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+  // Only recording mode is disabled; filtering mode continues to run.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+ // Filtering mode is disabled and no tracing mode should be enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_FALSE(FindMatchingValue("cat", "c0"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name0"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_EQ(6u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(3u, filter_hits_counter.end_event_hit_count);
+ Clear();
+ filter_hits_counter.Reset();
+
+ // Run FILTERING_MODE within RECORDING_MODE:
+ // Only recording mode is enabled and all events must be recorded.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+
+  // Filtering mode is also enabled, and all events must be filtered out.
+ TestEventFilter::set_filter_return_value(false);
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c1", "name1");
+ }
+  // Only filtering mode is disabled; recording mode continues to run with
+  // all events being recorded.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+ // Recording mode is disabled and no tracing mode should be enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "c0"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name0"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_EQ(1u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+ Clear();
+}
+
+TEST_F(TraceEventTestFixture, EventFiltering) {
+ const char config_json[] =
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"testing_predicate\", "
+ " \"included_categories\": [\"filtered_cat\"]"
+ " }"
+ " "
+ " ]"
+ "}";
+
+ TestEventFilter::HitsCounter filter_hits_counter;
+ TestEventFilter::set_filter_return_value(true);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ ASSERT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("filtered_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a horse");
+
+ // This is scoped so we can test the end event being filtered.
+ { TRACE_EVENT0("filtered_cat", "another cat whoa"); }
+
+ EndTraceAndFlush();
+
+ EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+}
+
+TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
+ std::string config_json = StringPrintf(
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"%s\", "
+ " \"included_categories\": [\"*\"], "
+ " \"excluded_categories\": [\"unfiltered_cat\"], "
+ " \"filter_args\": {"
+ " \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
+ " }"
+ " }"
+ " "
+ " ]"
+ "}",
+ EventNameFilter::kName);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("filtered_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a cat");
+
+ EndTraceAndFlush();
+
+ EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+ EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+ EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+}
+
+TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
+ std::string config_json = StringPrintf(
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"excluded_categories\": [\"excluded_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"%s\", "
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}",
+ HeapProfilerEventFilter::kName);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("excluded_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a cat");
+
+ EndTraceAndFlush();
+
+  // The predicate should not change the behavior of the trace events.
+ EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+ EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+ EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+}
+
TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
BeginSpecificTrace("-*");
TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
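
The tests above exercise the new mode-split API end to end. As a client-side sketch, assuming EventNameFilter::kName is "event_whitelist_predicate" (the tests splice the constant in via StringPrintf, so the literal below is an assumption):

#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

using base::trace_event::TraceConfig;
using base::trace_event::TraceLog;

void EnableWhitelistedTracing() {
  // event_filters config mirroring the EventWhitelistFiltering test;
  // "event_whitelist_predicate" is assumed to equal EventNameFilter::kName.
  const char kConfig[] =
      "{"
      "  \"event_filters\": [{"
      "    \"filter_predicate\": \"event_whitelist_predicate\","
      "    \"included_categories\": [\"*\"],"
      "    \"filter_args\": {\"event_name_whitelist\": [\"a snake\"]}"
      "  }]"
      "}";
  // FILTERING_MODE requires a non-empty event_filters list; both modes can
  // be enabled in one call, but must later be disabled per mode.
  TraceLog::GetInstance()->SetEnabled(
      TraceConfig(kConfig),
      TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
}
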
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 12cebc6f65..10b090ae57 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -13,41 +13,43 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
+#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+// post_task.h pulls in a lot of code not needed on Arc++.
+#if 0
+#include "base/task_scheduler/post_task.h"
+#endif
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "base/threading/worker_pool.h"
#include "base/time/time.h"
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
-#include "base/trace_event/trace_sampling_thread.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif
-// The thread buckets for the sampling profiler.
-BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
namespace base {
namespace internal {
@@ -86,35 +88,13 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 200
-
-// Parallel arrays g_category_groups and g_category_group_enabled are separate
-// so that a pointer to a member of g_category_group_enabled can be easily
-// converted to an index into g_category_groups. This allows macros to deal
-// only with char enabled pointers from g_category_group_enabled, and we can
-// convert internally to determine the category name from the char enabled
-// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
- "tracing already shutdown",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
- "__metadata"};
-
-// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
-// Indexes here have to match the g_category_groups array indexes above.
-const int g_category_already_shutdown = 1;
-const int g_category_categories_exhausted = 2;
-const int g_category_metadata = 3;
-const int g_num_builtin_categories = 4;
-// Skip default categories.
-base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
-
-// The name of the current thread. This is used to decide if the current
-// thread name has changed. We combine all the seen thread names into the
-// output name for the thread.
-LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
- LAZY_INSTANCE_INITIALIZER;
+#define MAX_TRACE_EVENT_FILTERS 32
+
+// List of TraceEventFilter objects from the most recent tracing session.
+std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
+ static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
+ return *filters;
+}
ThreadTicks ThreadNow() {
return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
@@ -138,7 +118,7 @@ void InitializeMetadataEvent(TraceEvent* trace_event,
TimeTicks(),
ThreadTicks(),
TRACE_EVENT_PHASE_METADATA,
- &g_category_group_enabled[g_category_metadata],
+ CategoryRegistry::kCategoryMetadata->state_ptr(),
metadata_name,
trace_event_internal::kGlobalScope, // scope
trace_event_internal::kNoId, // id
@@ -174,11 +154,24 @@ void MakeHandle(uint32_t chunk_seq,
DCHECK(chunk_seq);
DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
+ DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
handle->chunk_seq = chunk_seq;
handle->chunk_index = static_cast<uint16_t>(chunk_index);
handle->event_index = static_cast<uint16_t>(event_index);
}
+template <typename Function>
+void ForEachCategoryFilter(const unsigned char* category_group_enabled,
+ Function filter_fn) {
+ const TraceCategory* category =
+ CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
+ uint32_t filter_bitmap = category->enabled_filters();
+ for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
+ if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
+ filter_fn(GetCategoryGroupFilters()[index].get());
+ }
+}
+
} // namespace
// A helper class that allows the lock to be acquired in the middle of the scope
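
ForEachCategoryFilter() above is the core dispatch: it scans the category's enabled-filter bitmap and invokes the filter stored at each set bit. The same bit-scan idiom in isolation (hypothetical helper, kept here only to make the loop's termination and indexing explicit):

#include <cstdint>
#include <functional>

// Visits the index of every set bit in |bitmap|, lowest bit first; the loop
// ends as soon as no higher bits remain, as in ForEachCategoryFilter.
void ForEachSetBit(uint32_t bitmap, const std::function<void(int)>& visit) {
  for (int index = 0; bitmap != 0; bitmap >>= 1, index++) {
    if (bitmap & 1)
      visit(index);
  }
}

// ForEachSetBit(0x9u, ...) visits indices 0 and 3 (bits 0 and 3 set).
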
@@ -352,33 +345,20 @@ TraceLog* TraceLog::GetInstance() {
}
TraceLog::TraceLog()
- : mode_(DISABLED),
+ : enabled_modes_(0),
num_traces_recorded_(0),
- event_callback_(0),
dispatching_to_observer_list_(false),
process_sort_index_(0),
process_id_hash_(0),
process_id_(0),
- watch_category_(0),
trace_options_(kInternalRecordUntilFull),
- sampling_thread_handle_(0),
trace_config_(TraceConfig()),
- event_callback_trace_config_(TraceConfig()),
thread_shared_chunk_index_(0),
generation_(0),
- use_worker_thread_(false) {
- // Trace is enabled or disabled on one thread while other threads are
- // accessing the enabled flag. We don't care whether edge-case events are
- // traced or not, so we allow races on the enabled flag to keep the trace
- // macros fast.
- // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
- // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
- // sizeof(g_category_group_enabled),
- // "trace_event category enabled");
- for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
- ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
- "trace_event category enabled");
- }
+ use_worker_thread_(false),
+ filter_factory_for_testing_(nullptr) {
+ CategoryRegistry::Initialize();
+
#if defined(OS_NACL) // NaCl shouldn't expose the process id.
SetProcessID(0);
#else
@@ -414,7 +394,9 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
}
}
-bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ ALLOW_UNUSED_PARAM(args);
// TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
// (crbug.com/499731).
TraceEventMemoryOverhead overhead;
@@ -436,61 +418,111 @@ const unsigned char* TraceLog::GetCategoryGroupEnabled(
const char* category_group) {
TraceLog* tracelog = GetInstance();
if (!tracelog) {
- DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
- return &g_category_group_enabled[g_category_already_shutdown];
+ DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
+ return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
+ }
+ TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
+ if (!category) {
+    // Slow path: in the case of a new category we have to repeat the check
+    // while holding the lock, as multiple threads might have reached this
+    // point at the same time.
+ auto category_initializer = [](TraceCategory* category) {
+ TraceLog::GetInstance()->UpdateCategoryState(category);
+ };
+ AutoLock lock(tracelog->lock_);
+ CategoryRegistry::GetOrCreateCategoryLocked(
+ category_group, category_initializer, &category);
}
- return tracelog->GetCategoryGroupEnabledInternal(category_group);
+ DCHECK(category->state_ptr());
+ return category->state_ptr();
}
const char* TraceLog::GetCategoryGroupName(
const unsigned char* category_group_enabled) {
- // Calculate the index of the category group by finding
- // category_group_enabled in g_category_group_enabled array.
- uintptr_t category_begin =
- reinterpret_cast<uintptr_t>(g_category_group_enabled);
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
- DCHECK(category_ptr >= category_begin &&
- category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
- MAX_CATEGORY_GROUPS))
- << "out of bounds category pointer";
- uintptr_t category_index =
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
- return g_category_groups[category_index];
+ return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
+ ->name();
}
-void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
- unsigned char enabled_flag = 0;
- const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_RECORDING;
+void TraceLog::UpdateCategoryState(TraceCategory* category) {
+ lock_.AssertAcquired();
+ DCHECK(category->is_valid());
+ unsigned char state_flags = 0;
+ if (enabled_modes_ & RECORDING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
- if (event_callback_ &&
- event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (enabled_modes_ & RECORDING_MODE &&
+ category == CategoryRegistry::kCategoryMetadata) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
#if defined(OS_WIN)
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+ category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
}
#endif
- // TODO(primiano): this is a temporary workaround for catapult:#2341,
- // to guarantee that metadata events are always added even if the category
- // filter is "-*". See crbug.com/618054 for more details and long-term fix.
- if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
- enabled_flag |= ENABLED_FOR_RECORDING;
+ uint32_t enabled_filters_bitmap = 0;
+ int index = 0;
+ for (const auto& event_filter : enabled_event_filters_) {
+ if (event_filter.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
+ DCHECK(GetCategoryGroupFilters()[index]);
+ enabled_filters_bitmap |= 1 << index;
+ }
+ if (index++ >= MAX_TRACE_EVENT_FILTERS) {
+ NOTREACHED();
+ break;
+ }
+ }
+ category->set_enabled_filters(enabled_filters_bitmap);
+ category->set_state(state_flags);
+}
- g_category_group_enabled[category_index] = enabled_flag;
+void TraceLog::UpdateCategoryRegistry() {
+ lock_.AssertAcquired();
+ CreateFiltersForTraceConfig();
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+ UpdateCategoryState(&category);
+ }
}
-void TraceLog::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = 0; i < category_index; i++)
- UpdateCategoryGroupEnabledFlag(i);
+void TraceLog::CreateFiltersForTraceConfig() {
+ if (!(enabled_modes_ & FILTERING_MODE))
+ return;
+
+  // Filters were already added, and tracing could currently be enabled. The
+  // filter list cannot be changed while trace events are using it.
+ if (GetCategoryGroupFilters().size())
+ return;
+
+ for (auto& filter_config : enabled_event_filters_) {
+ if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
+ NOTREACHED()
+ << "Too many trace event filters installed in the current session";
+ break;
+ }
+
+ std::unique_ptr<TraceEventFilter> new_filter;
+ const std::string& predicate_name = filter_config.predicate_name();
+ if (predicate_name == EventNameFilter::kName) {
+ auto whitelist = MakeUnique<std::unordered_set<std::string>>();
+ CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
+ new_filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ } else if (predicate_name == HeapProfilerEventFilter::kName) {
+ new_filter = MakeUnique<HeapProfilerEventFilter>();
+ } else {
+ if (filter_factory_for_testing_)
+ new_filter = filter_factory_for_testing_(predicate_name);
+ CHECK(new_filter) << "Unknown trace filter " << predicate_name;
+ }
+ GetCategoryGroupFilters().push_back(std::move(new_filter));
+ }
}
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
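
CreateFiltersForTraceConfig() dispatches on the predicate name and CHECKs on anything it does not recognize, unless the test-only factory supplies a filter. A sketch of plugging in a custom predicate through that hook; MyFilter, MyFilterFactory, and "my_predicate" are hypothetical, while the TraceEventFilter interface and SetFilterFactoryForTesting() come from this patch:

#include <memory>
#include <string>

#include "base/memory/ptr_util.h"
#include "base/trace_event/trace_event_filter.h"
#include "base/trace_event/trace_log.h"

namespace {

// Hypothetical filter that keeps every event (FilterTraceEvent returning
// true means "not filtered out").
class MyFilter : public base::trace_event::TraceEventFilter {
 public:
  bool FilterTraceEvent(
      const base::trace_event::TraceEvent& event) const override {
    return true;
  }
};

std::unique_ptr<base::trace_event::TraceEventFilter> MyFilterFactory(
    const std::string& predicate_name) {
  if (predicate_name == "my_predicate")
    return base::MakeUnique<MyFilter>();
  return nullptr;  // Unknown predicate: CreateFiltersForTraceConfig CHECKs.
}

}  // namespace

// Install before SetEnabled(..., FILTERING_MODE):
//   base::trace_event::TraceLog::GetInstance()->SetFilterFactoryForTesting(
//       &MyFilterFactory);
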
@@ -522,67 +554,16 @@ void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
}
}
-const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
- const char* category_group) {
- DCHECK(!strchr(category_group, '"'))
- << "Category groups may not contain double quote";
- // The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
-
- // Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- unsigned char* category_group_enabled = NULL;
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold to lock when actually appending a new category, and
- // check the categories groups again.
- AutoLock lock(lock_);
- size_t category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // Create a new category group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS)
- << "must increase MAX_CATEGORY_GROUPS";
- if (category_index < MAX_CATEGORY_GROUPS) {
- // Don't hold on to the category_group pointer, so that we can create
- // category groups with strings not known at compile time (this is
- // required by SetWatchEvent).
- const char* new_group = strdup(category_group);
- ANNOTATE_LEAKING_OBJECT_PTR(new_group);
- g_category_groups[category_index] = new_group;
- DCHECK(!g_category_group_enabled[category_index]);
- // Note that if both included and excluded patterns in the
- // TraceConfig are empty, we exclude nothing,
- // thereby enabling this category group.
- UpdateCategoryGroupEnabledFlag(category_index);
- category_group_enabled = &g_category_group_enabled[category_index];
- // Update the max index now.
- base::subtle::Release_Store(&g_category_index, category_index + 1);
- } else {
- category_group_enabled =
- &g_category_group_enabled[g_category_categories_exhausted];
- }
- return category_group_enabled;
-}
-
void TraceLog::GetKnownCategoryGroups(
std::vector<std::string>* category_groups) {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = g_num_builtin_categories; i < category_index; i++)
- category_groups->push_back(g_category_groups[i]);
+ for (const auto& category : CategoryRegistry::GetAllCategories()) {
+ if (!CategoryRegistry::IsBuiltinCategory(&category))
+ category_groups->push_back(category.name());
+ }
}
-void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+void TraceLog::SetEnabled(const TraceConfig& trace_config,
+ uint8_t modes_to_enable) {
std::vector<EnabledStateObserver*> observer_list;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
{
@@ -596,28 +577,58 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
InternalTraceOptions old_options = trace_options();
- if (IsEnabled()) {
- if (new_options != old_options) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different "
- << "set of options.";
- }
-
- if (mode != mode_) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
- }
-
- trace_config_.Merge(trace_config);
- UpdateCategoryGroupEnabledFlags();
- return;
- }
-
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = mode;
+  // Clear all filters from the previous tracing session. These filters are
+  // not cleared at the end of tracing because threads that hit a trace event
+  // while tracing is being disabled could still be using them.
+ if (!enabled_modes_)
+ GetCategoryGroupFilters().clear();
+
+ // Update trace config for recording.
+ const bool already_recording = enabled_modes_ & RECORDING_MODE;
+ if (modes_to_enable & RECORDING_MODE) {
+ if (already_recording) {
+      // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
+      // enabled; crbug.com/625170.
+ DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
+ "tracing with a different set "
+ "of options.";
+ trace_config_.Merge(trace_config);
+ } else {
+ trace_config_ = trace_config;
+ }
+ }
+
+ // Update event filters.
+ if (modes_to_enable & FILTERING_MODE) {
+ DCHECK(!trace_config.event_filters().empty())
+ << "Attempting to enable filtering without any filters";
+ DCHECK(enabled_event_filters_.empty()) << "Attempting to re-enable "
+ "filtering when filters are "
+ "already enabled.";
+
+ // Use the given event filters only if filtering was not enabled.
+ if (enabled_event_filters_.empty())
+ enabled_event_filters_ = trace_config.event_filters();
+ }
+ // Keep the |trace_config_| updated with only enabled filters in case anyone
+ // tries to read it using |GetCurrentTraceConfig| (even if filters are
+ // empty).
+ trace_config_.SetEventFilters(enabled_event_filters_);
+
+ enabled_modes_ |= modes_to_enable;
+ UpdateCategoryRegistry();
+
+ // Do not notify observers or create trace buffer if only enabled for
+ // filtering or if recording was already enabled.
+ if (!(modes_to_enable & RECORDING_MODE) || already_recording)
+ return;
if (new_options != old_options) {
subtle::NoBarrier_Store(&trace_options_, new_options);
@@ -626,34 +637,16 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
num_traces_recorded_++;
- trace_config_ = TraceConfig(trace_config);
- UpdateCategoryGroupEnabledFlags();
+ UpdateCategoryRegistry();
UpdateSyntheticDelaysFromTraceConfig();
- if (new_options & kInternalEnableSampling) {
- sampling_thread_.reset(new TraceSamplingThread);
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[0], "bucket0",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[1], "bucket1",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[2], "bucket2",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- if (!PlatformThread::Create(0, sampling_thread_.get(),
- &sampling_thread_handle_)) {
- DCHECK(false) << "failed to create thread";
- }
- }
-
dispatching_to_observer_list_ = true;
observer_list = enabled_state_observer_list_;
observer_map = async_observers_;
}
// Notify observers outside the lock in case they trigger trace events.
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogEnabled();
+ for (EnabledStateObserver* observer : observer_list)
+ observer->OnTraceLogEnabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
@@ -676,10 +669,9 @@ void TraceLog::SetArgumentFilterPredicate(
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
const TraceConfig& config) {
- InternalTraceOptions ret =
- config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
- if (config.IsArgumentFilterEnabled())
- ret |= kInternalEnableArgumentFilter;
+ InternalTraceOptions ret = config.IsArgumentFilterEnabled()
+ ? kInternalEnableArgumentFilter
+ : kInternalNone;
switch (config.GetTraceRecordMode()) {
case RECORD_UNTIL_FULL:
return ret | kInternalRecordUntilFull;
@@ -701,37 +693,44 @@ TraceConfig TraceLog::GetCurrentTraceConfig() const {
void TraceLog::SetDisabled() {
AutoLock lock(lock_);
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
+}
+
+void TraceLog::SetDisabled(uint8_t modes_to_disable) {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked(modes_to_disable);
}
-void TraceLog::SetDisabledWhileLocked() {
+void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
lock_.AssertAcquired();
- if (!IsEnabled())
+ if (!(enabled_modes_ & modes_to_disable))
return;
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = DISABLED;
+ bool is_recording_mode_disabled =
+ (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
+ enabled_modes_ &= ~modes_to_disable;
- if (sampling_thread_.get()) {
- // Stop the sampling thread.
- sampling_thread_->Stop();
- lock_.Release();
- PlatformThread::Join(sampling_thread_handle_);
- lock_.Acquire();
- sampling_thread_handle_ = PlatformThreadHandle();
- sampling_thread_.reset();
- }
+ if (modes_to_disable & FILTERING_MODE)
+ enabled_event_filters_.clear();
+
+ if (modes_to_disable & RECORDING_MODE)
+ trace_config_.Clear();
+
+ UpdateCategoryRegistry();
+
+  // Add metadata events and notify observers only if recording mode was just
+  // disabled.
+ if (!is_recording_mode_disabled)
+ return;
- trace_config_.Clear();
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- UpdateCategoryGroupEnabledFlags();
AddMetadataEventsWhileLocked();
// Remove metadata events so they will not get added to a subsequent trace.
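
Since modes now come and go independently, each one must be disabled explicitly, and the metadata/observer work above runs only when RECORDING_MODE is the mode going down. A lifecycle sketch (function name and config parameter are ours; the sequence mirrors the TraceFilteringMode test earlier in this diff):

#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

using base::trace_event::TraceConfig;
using base::trace_event::TraceLog;

// |filter_config_json| is assumed to carry a valid event_filters section.
void DemoModeLifecycle(const char* filter_config_json) {
  TraceLog* log = TraceLog::GetInstance();
  log->SetEnabled(TraceConfig(filter_config_json), TraceLog::FILTERING_MODE);
  log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
  // enabled_modes() == RECORDING_MODE | FILTERING_MODE at this point;
  // IsEnabled() is true only because RECORDING_MODE is set.
  log->SetDisabled(TraceLog::RECORDING_MODE);  // Filtering keeps running.
  log->SetDisabled(TraceLog::FILTERING_MODE);  // Fully disabled now.
}
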
@@ -747,8 +746,8 @@ void TraceLog::SetDisabledWhileLocked() {
// Dispatch to observers outside the lock in case the observer triggers a
// trace event.
AutoUnlock unlock(lock_);
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogDisabled();
+ for (EnabledStateObserver* observer : observer_list)
+ observer->OnTraceLogDisabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
@@ -831,25 +830,10 @@ void TraceLog::CheckIfBufferIsFullWhileLocked() {
if (buffer_limit_reached_timestamp_.is_null()) {
buffer_limit_reached_timestamp_ = OffsetNow();
}
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
}
}
-void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb) {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_,
- reinterpret_cast<subtle::AtomicWord>(cb));
- event_callback_trace_config_ = trace_config;
- UpdateCategoryGroupEnabledFlags();
-}
-
-void TraceLog::SetEventCallbackDisabled() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_, 0);
- UpdateCategoryGroupEnabledFlags();
-}
-
// Flush() works as the following:
// 1. Flush() is called in thread A whose task runner is saved in
// flush_task_runner_;
@@ -886,7 +870,7 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
return;
}
- int generation = this->generation();
+ int gen = generation();
// Copy of thread_message_loops_ to be used without locking.
std::vector<scoped_refptr<SingleThreadTaskRunner>>
thread_message_loop_task_runners;
@@ -904,29 +888,24 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
std::move(thread_shared_chunk_));
}
- if (thread_message_loops_.size()) {
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- thread_message_loop_task_runners.push_back((*it)->task_runner());
- }
- }
+ for (MessageLoop* loop : thread_message_loops_)
+ thread_message_loop_task_runners.push_back(loop->task_runner());
}
- if (thread_message_loop_task_runners.size()) {
- for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
- thread_message_loop_task_runners[i]->PostTask(
+ if (!thread_message_loop_task_runners.empty()) {
+ for (auto& task_runner : thread_message_loop_task_runners) {
+ task_runner->PostTask(
FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
- generation, discard_events));
+ gen, discard_events));
}
flush_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
+ FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), gen,
discard_events),
TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
return;
}
- FinishFlush(generation, discard_events);
+ FinishFlush(gen, discard_events);
}
// Usually it runs on a different thread.
@@ -990,13 +969,21 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
return;
}
- if (use_worker_thread_ &&
- WorkerPool::PostTask(
- FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
- Passed(&previous_logged_events),
- flush_output_callback, argument_filter_predicate),
- true)) {
+ if (use_worker_thread_) {
+#if 0
+ base::PostTaskWithTraits(
+ FROM_HERE, base::TaskTraits()
+ .MayBlock()
+ .WithPriority(base::TaskPriority::BACKGROUND)
+ .WithShutdownBehavior(
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+ Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+ Passed(&previous_logged_events), flush_output_callback,
+ argument_filter_predicate));
return;
+#else
+ NOTREACHED();
+#endif
}
ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
@@ -1019,7 +1006,7 @@ void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
AutoLock lock(lock_);
if (!CheckGeneration(generation) || !flush_task_runner_ ||
- thread_message_loops_.size())
+ !thread_message_loops_.empty())
return;
flush_task_runner_->PostTask(
@@ -1223,10 +1210,13 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
ThreadTicks thread_now = ThreadNow();
- // |thread_local_event_buffer_| can be null if the current thread doesn't have
- // a message loop or the message loop is blocked.
- InitializeThreadLocalEventBufferIfSupported();
- auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+ ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
+ if (*category_group_enabled & RECORDING_MODE) {
+ // |thread_local_event_buffer_| can be null if the current thread doesn't
+ // have a message loop or the message loop is blocked.
+ InitializeThreadLocalEventBufferIfSupported();
+ thread_local_event_buffer = thread_local_event_buffer_.Get();
+ }
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
@@ -1237,9 +1227,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// call (if any), but don't bother if the new name is empty. Note this will
// not detect a thread name change within the same char* buffer address: we
// favor common case performance over corner case correctness.
- if (new_name != g_current_thread_name.Get().Get() && new_name &&
- *new_name) {
- g_current_thread_name.Get().Set(new_name);
+ static auto* current_thread_name = new ThreadLocalPointer<const char>();
+ if (new_name != current_thread_name->Get() && new_name && *new_name) {
+ current_thread_name->Set(new_name);
AutoLock thread_info_lock(thread_info_lock_);
@@ -1257,7 +1247,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
bool found = std::find(existing_names.begin(), existing_names.end(),
new_name) != existing_names.end();
if (!found) {
- if (existing_names.size())
+ if (!existing_names.empty())
existing_name->second.push_back(',');
existing_name->second.append(new_name);
}
@@ -1268,14 +1258,37 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
#if defined(OS_WIN)
// This is done sooner rather than later, to avoid creating the event and
// acquiring the lock, which is not needed for ETW as it's already threadsafe.
- if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
num_args, arg_names, arg_types, arg_values,
convertable_values);
#endif // OS_WIN
std::string console_message;
- if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+ std::unique_ptr<TraceEvent> filtered_trace_event;
+ bool disabled_by_filters = false;
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
+ std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
+ new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+ phase, category_group_enabled, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, convertable_values, flags);
+
+ disabled_by_filters = true;
+ ForEachCategoryFilter(
+ category_group_enabled, [&new_trace_event, &disabled_by_filters](
+ TraceEventFilter* trace_event_filter) {
+ if (trace_event_filter->FilterTraceEvent(*new_trace_event))
+ disabled_by_filters = false;
+ });
+ if (!disabled_by_filters)
+ filtered_trace_event = std::move(new_trace_event);
+ }
+
+  // If enabled for recording, the event should be added only if one of the
+  // filters kept it, or the category is not enabled for filtering.
+ if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
+ !disabled_by_filters) {
OptionalAutoLock lock(&lock_);
TraceEvent* trace_event = NULL;
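
The recording branch above now composes with filtering: an event lands in the buffer only when its category is enabled for recording and it was not rejected by every applicable filter. The decision, distilled (helper name is ours):

// Distilled from AddTraceEventWithThreadIdAndTimestamp: when filtering is on
// for the category, the event survives only if at least one filter returned
// true from FilterTraceEvent().
bool ShouldRecord(bool enabled_for_recording,
                  bool enabled_for_filtering,
                  bool any_filter_kept_event) {
  const bool disabled_by_filters =
      enabled_for_filtering && !any_filter_kept_event;
  return enabled_for_recording && !disabled_by_filters;
}
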
@@ -1287,21 +1300,14 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
}
if (trace_event) {
- trace_event->Initialize(thread_id,
- offset_event_timestamp,
- thread_now,
- phase,
- category_group_enabled,
- name,
- scope,
- id,
- bind_id,
- num_args,
- arg_names,
- arg_types,
- arg_values,
- convertable_values,
- flags);
+ if (filtered_trace_event) {
+ trace_event->MoveFrom(std::move(filtered_trace_event));
+ } else {
+ trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+ phase, category_group_enabled, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, convertable_values, flags);
+ }
#if defined(OS_ANDROID)
trace_event->SendToATrace();
@@ -1315,53 +1321,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
}
}
- if (console_message.size())
+ if (!console_message.empty())
LOG(ERROR) << console_message;
- if (reinterpret_cast<const unsigned char*>(
- subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
- bool event_name_matches;
- WatchEventCallback watch_event_callback_copy;
- {
- AutoLock lock(lock_);
- event_name_matches = watch_event_name_ == name;
- watch_event_callback_copy = watch_event_callback_;
- }
- if (event_name_matches) {
- if (!watch_event_callback_copy.is_null())
- watch_event_callback_copy.Run();
- }
- }
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- offset_event_timestamp,
- phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
- category_group_enabled, name, scope, id, num_args, arg_names,
- arg_types, arg_values, flags);
- }
- }
-
- // TODO(primiano): Add support for events with copied name crbug.com/581078
- if (!(flags & TRACE_EVENT_FLAG_COPY)) {
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- if (phase == TRACE_EVENT_PHASE_BEGIN ||
- phase == TRACE_EVENT_PHASE_COMPLETE) {
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PushPseudoStackFrame(name);
- } else if (phase == TRACE_EVENT_PHASE_END) {
- // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
- // is in |TraceLog::UpdateTraceEventDuration|.
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame(name);
- }
- }
- }
-
return handle;
}
@@ -1419,9 +1381,9 @@ std::string TraceLog::EventToConsoleMessage(unsigned char phase,
thread_colors_[thread_name]);
size_t depth = 0;
- if (thread_event_start_times_.find(thread_id) !=
- thread_event_start_times_.end())
- depth = thread_event_start_times_[thread_id].size();
+ auto it = thread_event_start_times_.find(thread_id);
+ if (it != thread_event_start_times_.end())
+ depth = it->second.size();
for (size_t i = 0; i < depth; ++i)
log << "| ";
@@ -1439,6 +1401,18 @@ std::string TraceLog::EventToConsoleMessage(unsigned char phase,
return log.str();
}
+void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle) {
+ ALLOW_UNUSED_PARAM(handle);
+ const char* category_name = GetCategoryGroupName(category_group_enabled);
+ ForEachCategoryFilter(
+ category_group_enabled,
+ [name, category_name](TraceEventFilter* trace_event_filter) {
+ trace_event_filter->EndEvent(category_name, name);
+ });
+}
+
void TraceLog::UpdateTraceEventDuration(
const unsigned char* category_group_enabled,
const char* name,
@@ -1460,17 +1434,29 @@ void TraceLog::UpdateTraceEventDuration(
#if defined(OS_WIN)
// Generate an ETW event that marks the end of a complete event.
- if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddCompleteEndEvent(name);
#endif // OS_WIN
std::string console_message;
- if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
OptionalAutoLock lock(&lock_);
TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
if (trace_event) {
DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+ // TEMP(oysteine) to debug crbug.com/638744
+ if (trace_event->duration().ToInternalValue() != -1) {
+ DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
+ << ", chunk_index " << handle.chunk_index << ", event_index "
+ << handle.event_index;
+
+ std::string serialized_event;
+ trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
+ DVLOG(1) << "TraceEvent: " << serialized_event;
+ lock_.AssertAcquired();
+ }
+
trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
trace_event->SendToATrace();
@@ -1481,47 +1467,13 @@ void TraceLog::UpdateTraceEventDuration(
console_message =
EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
}
-
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame(name);
- }
}
- if (console_message.size())
+ if (!console_message.empty())
LOG(ERROR) << console_message;
- if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
- }
- }
-}
-
-void TraceLog::SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback) {
- const unsigned char* category =
- GetCategoryGroupEnabled(category_name.c_str());
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_,
- reinterpret_cast<subtle::AtomicWord>(category));
- watch_event_name_ = event_name;
- watch_event_callback_ = callback;
-}
-
-void TraceLog::CancelWatchEvent() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- watch_event_callback_.Reset();
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
+ EndFilteredEvent(category_group_enabled, name, handle);
}
uint64_t TraceLog::MangleEventId(uint64_t id) {
@@ -1551,42 +1503,37 @@ void TraceLog::AddMetadataEventsWhileLocked() {
"sort_index", process_sort_index_);
}
- if (process_name_.size()) {
+ if (!process_name_.empty()) {
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
current_thread_id, "process_name", "name",
process_name_);
}
- if (process_labels_.size() > 0) {
+ if (!process_labels_.empty()) {
std::vector<std::string> labels;
- for (base::hash_map<int, std::string>::iterator it =
- process_labels_.begin();
- it != process_labels_.end(); it++) {
- labels.push_back(it->second);
- }
+ for (const auto& it : process_labels_)
+ labels.push_back(it.second);
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
current_thread_id, "process_labels", "labels",
base::JoinString(labels, ","));
}
// Thread sort indices.
- for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
- it != thread_sort_indices_.end(); it++) {
- if (it->second == 0)
+ for (const auto& it : thread_sort_indices_) {
+ if (it.second == 0)
continue;
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first, "thread_sort_index", "sort_index",
- it->second);
+ it.first, "thread_sort_index", "sort_index",
+ it.second);
}
// Thread names.
AutoLock thread_info_lock(thread_info_lock_);
- for (hash_map<int, std::string>::iterator it = thread_names_.begin();
- it != thread_names_.end(); it++) {
- if (it->second.empty())
+ for (const auto& it : thread_names_) {
+ if (it.second.empty())
continue;
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first, "thread_name", "name", it->second);
+ it.first, "thread_name", "name", it.second);
}
// If buffer is full, add a metadata record to report this.
@@ -1598,14 +1545,9 @@ void TraceLog::AddMetadataEventsWhileLocked() {
}
}
-void TraceLog::WaitSamplingEventForTesting() {
- if (!sampling_thread_)
- return;
- sampling_thread_->WaitSamplingEventForTesting();
-}
-
void TraceLog::DeleteForTesting() {
internal::DeleteTraceLogForTesting::Delete();
+ CategoryRegistry::ResetForTesting();
}
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
@@ -1617,6 +1559,10 @@ TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
if (!handle.chunk_seq)
return NULL;
+ DCHECK(handle.chunk_seq);
+ DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+ DCHECK(handle.event_index < TraceBufferChunk::kTraceBufferChunkSize);
+
if (thread_local_event_buffer_.Get()) {
TraceEvent* trace_event =
thread_local_event_buffer_.Get()->GetEventByHandle(handle);
@@ -1643,10 +1589,10 @@ void TraceLog::SetProcessID(int process_id) {
process_id_ = process_id;
// Create a FNV hash from the process ID for XORing.
// See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
- unsigned long long offset_basis = 14695981039346656037ull;
- unsigned long long fnv_prime = 1099511628211ull;
- unsigned long long pid = static_cast<unsigned long long>(process_id_);
- process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
+ const unsigned long long kOffsetBasis = 14695981039346656037ull;
+ const unsigned long long kFnvPrime = 1099511628211ull;
+ const unsigned long long pid = static_cast<unsigned long long>(process_id_);
+ process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
}
void TraceLog::SetProcessSortIndex(int sort_index) {
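
SetProcessID() above folds the pid into process_id_hash_ with a single FNV-1-style round; MangleEventId() can then XOR ids with this hash to keep them distinct across processes. The computation on its own (constants copied from the function above; the helper name is ours):

#include <cstdint>

uint64_t HashPid(uint64_t pid) {
  const uint64_t kOffsetBasis = 14695981039346656037ull;  // FNV offset basis.
  const uint64_t kFnvPrime = 1099511628211ull;            // FNV 64-bit prime.
  // One xor-then-multiply round, as in TraceLog::SetProcessID().
  return (kOffsetBasis ^ pid) * kFnvPrime;
}
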
@@ -1654,7 +1600,7 @@ void TraceLog::SetProcessSortIndex(int sort_index) {
process_sort_index_ = sort_index;
}
-void TraceLog::SetProcessName(const std::string& process_name) {
+void TraceLog::SetProcessName(const char* process_name) {
AutoLock lock(lock_);
process_name_ = process_name;
}
@@ -1670,12 +1616,7 @@ void TraceLog::UpdateProcessLabel(int label_id,
void TraceLog::RemoveProcessLabel(int label_id) {
AutoLock lock(lock_);
- base::hash_map<int, std::string>::iterator it =
- process_labels_.find(label_id);
- if (it == process_labels_.end())
- return;
-
- process_labels_.erase(it);
+ process_labels_.erase(label_id);
}
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
@@ -1693,42 +1634,39 @@ size_t TraceLog::GetObserverCountForTest() const {
void TraceLog::SetCurrentThreadBlocksMessageLoop() {
thread_blocks_message_loop_.Set(true);
- if (thread_local_event_buffer_.Get()) {
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
- }
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
}
TraceBuffer* TraceLog::CreateTraceBuffer() {
HEAP_PROFILER_SCOPED_IGNORE;
InternalTraceOptions options = trace_options();
- if (options & kInternalRecordContinuously)
+ if (options & kInternalRecordContinuously) {
return TraceBuffer::CreateTraceBufferRingBuffer(
kTraceEventRingBufferChunks);
- else if (options & kInternalEchoToConsole)
+ }
+ if (options & kInternalEchoToConsole) {
return TraceBuffer::CreateTraceBufferRingBuffer(
kEchoToConsoleTraceEventBufferChunks);
- else if (options & kInternalRecordAsMuchAsPossible)
+ }
+ if (options & kInternalRecordAsMuchAsPossible) {
return TraceBuffer::CreateTraceBufferVectorOfSize(
kTraceEventVectorBigBufferChunks);
+ }
return TraceBuffer::CreateTraceBufferVectorOfSize(
kTraceEventVectorBufferChunks);
}
#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
// Go through each category and set/clear the ETW bit depending on whether the
// category is enabled.
- for (size_t i = 0; i < category_index; i++) {
- const char* category_group = g_category_groups[i];
- DCHECK(category_group);
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
+ category.name())) {
+ category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
} else {
- g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
+ category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
}
}
}
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index e4407e81bd..88b6e588e4 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -26,15 +26,17 @@ namespace base {
template <typename Type>
struct DefaultSingletonTraits;
+class MessageLoop;
class RefCountedString;
namespace trace_event {
+struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
+class TraceEventFilter;
class TraceEventMemoryOverhead;
-class TraceSamplingThread;
struct BASE_EXPORT TraceLogStatus {
TraceLogStatus();
@@ -45,22 +47,14 @@ struct BASE_EXPORT TraceLogStatus {
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
public:
- enum Mode {
- DISABLED = 0,
- RECORDING_MODE
- };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
- enum CategoryGroupEnabledFlags {
- // Category group enabled for the recording mode.
- ENABLED_FOR_RECORDING = 1 << 0,
- // Category group enabled by SetEventCallbackEnabled().
- ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
- // Category group enabled to export events to ETW.
- ENABLED_FOR_ETW_EXPORT = 1 << 3
+ // Argument passed to TraceLog::SetEnabled.
+ enum Mode : uint8_t {
+ // Enables normal tracing (recording trace events in the trace buffer).
+ RECORDING_MODE = 1 << 0,
+
+    // Trace events are enabled just for filtering, not for recording. Only
+    // the event filters config of the |trace_config| argument is used.
+ FILTERING_MODE = 1 << 1
};
static TraceLog* GetInstance();
@@ -76,16 +70,30 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// if the current thread supports that (has a message loop).
void InitializeThreadLocalEventBufferIfSupported();
- // Enables normal tracing (recording trace events in the trace buffer).
- // See TraceConfig comments for details on how to control what categories
- // will be traced. If tracing has already been enabled, |category_filter| will
- // be merged into the current category filter.
- void SetEnabled(const TraceConfig& trace_config, Mode mode);
-
- // Disables normal tracing for all categories.
+ // See TraceConfig comments for details on how to control which categories
+ // will be traced. SetDisabled must be called distinctly for each mode that is
+  // enabled. If tracing has already been enabled for recording, the category
+  // filter (enabled and disabled categories) will be merged into the current
+  // category filter. Enabling RECORDING_MODE does not enable filters; trace
+  // event filters are used only if FILTERING_MODE is set in |modes_to_enable|.
+  // Unlike RECORDING_MODE, FILTERING_MODE doesn't support upgrading, i.e.
+  // filters can only be enabled if not previously enabled.
+ void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
+
+ // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
+ // Mode as argument.
+
+  // Disables tracing for all categories, but only for the specified
+  // |modes_to_disable|. The argument-less overload disables RECORDING_MODE only.
void SetDisabled();
+ void SetDisabled(uint8_t modes_to_disable);
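+
+  // Example (sketch; |config| stands for any TraceConfig): enable both modes
+  // at once, then tear each one down independently:
+  //   TraceLog* log = TraceLog::GetInstance();
+  //   log->SetEnabled(config, RECORDING_MODE | FILTERING_MODE);
+  //   ...
+  //   log->SetDisabled(FILTERING_MODE);  // Recording continues.
+  //   log->SetDisabled(RECORDING_MODE);  // Tracing fully off.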
- bool IsEnabled() { return mode_ != DISABLED; }
+  // Returns true if TraceLog is enabled in recording mode.
+ // Note: Returns false even if FILTERING_MODE is enabled.
+ bool IsEnabled() { return enabled_modes_ & RECORDING_MODE; }
+
+ // Returns a bitmap of enabled modes from TraceLog::Mode.
+ uint8_t enabled_modes() { return enabled_modes_; }
// The number of times we have begun recording traces. If tracing is off,
// returns -1. If tracing is on, then it returns the number of times we have
@@ -148,31 +156,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// objects.
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
- // Not using base::Callback because of its limited by 7 parameters.
- // Also, using primitive type allows directly passing callback from WebCore.
- // WARNING: It is possible for the previously set callback to be called
- // after a call to SetEventCallbackEnabled() that replaces or a call to
- // SetEventCallbackDisabled() that disables the callback.
- // This callback may be invoked on any thread.
- // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
- // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
- // interface simple.
- typedef void (*EventCallback)(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags);
-
- // Enable tracing for EventCallback.
- void SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb);
- void SetEventCallbackDisabled();
void SetArgumentFilterPredicate(
const ArgumentFilterPredicate& argument_filter_predicate);
@@ -286,14 +269,9 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
const char* name,
TraceEventHandle handle);
- // For every matching event, the callback will be called.
- typedef base::Callback<void()> WatchEventCallback;
- void SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback);
- // Cancel the watch event. If tracing is enabled, this may race with the
- // watch event notification firing.
- void CancelWatchEvent();
+ void EndFilteredEvent(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle);
int process_id() const { return process_id_; }
@@ -301,7 +279,12 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// Exposed for unittesting:
- void WaitSamplingEventForTesting();
+ // Testing factory for TraceEventFilter.
+ typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
+ const std::string& /* predicate_name */);
+ void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
+ filter_factory_for_testing_ = factory;
+ }
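+
+  // Example testing factory (sketch; MyTestFilter stands for a hypothetical
+  // TraceEventFilter subclass):
+  //   std::unique_ptr<TraceEventFilter> MakeTestFilter(
+  //       const std::string& predicate_name) {
+  //     if (predicate_name == "test_predicate")
+  //       return std::unique_ptr<TraceEventFilter>(new MyTestFilter());
+  //     return nullptr;
+  //   }
+  //   TraceLog::GetInstance()->SetFilterFactoryForTesting(&MakeTestFilter);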
// Allows deleting our singleton instance.
static void DeleteForTesting();
@@ -316,8 +299,9 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// on their sort index, ascending, then by their name, and then tid.
void SetProcessSortIndex(int sort_index);
- // Sets the name of the process.
- void SetProcessName(const std::string& process_name);
+ // Sets the name of the process. |process_name| should be a string literal
+ // since it is a whitelisted argument for background field trials.
+ void SetProcessName(const char* process_name);
// Processes can have labels in addition to their names. Use labels, for
// instance, to list out the web page titles that a process is handling.
@@ -371,12 +355,14 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
ProcessMemoryDump* pmd) override;
// Enable/disable each category group based on the current mode_,
- // category_filter_, event_callback_ and event_callback_category_filter_.
- // Enable the category group in the enabled mode if category_filter_ matches
- // the category group, or event_callback_ is not null and
- // event_callback_category_filter_ matches the category group.
- void UpdateCategoryGroupEnabledFlags();
- void UpdateCategoryGroupEnabledFlag(size_t category_index);
+ // category_filter_ and event_filters_enabled_.
+  // Enable the category group in recording mode if category_filter_ matches
+  // the category group. Enable the category group for filtering if any
+  // filter in event_filters_enabled_ enables it.
+ void UpdateCategoryRegistry();
+ void UpdateCategoryState(TraceCategory* category);
+
+ void CreateFiltersForTraceConfig();
// Configure synthetic delays based on the values set in the current
// trace config.
@@ -391,7 +377,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceLog();
~TraceLog() override;
- const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
void AddMetadataEventsWhileLocked();
InternalTraceOptions trace_options() const {
@@ -409,7 +394,7 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
bool check_buffer_is_full);
void CheckIfBufferIsFullWhileLocked();
- void SetDisabledWhileLocked();
+ void SetDisabledWhileLocked(uint8_t modes);
TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
OptionalAutoLock* lock);
@@ -448,7 +433,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
static const InternalTraceOptions kInternalRecordUntilFull;
static const InternalTraceOptions kInternalRecordContinuously;
static const InternalTraceOptions kInternalEchoToConsole;
- static const InternalTraceOptions kInternalEnableSampling;
static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
static const InternalTraceOptions kInternalEnableArgumentFilter;
@@ -458,11 +442,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// This lock protects accesses to thread_names_, thread_event_start_times_
// and thread_colors_.
Lock thread_info_lock_;
- Mode mode_;
+ uint8_t enabled_modes_; // See TraceLog::Mode.
int num_traces_recorded_;
std::unique_ptr<TraceBuffer> logged_events_;
std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
- subtle::AtomicWord /* EventCallback */ event_callback_;
bool dispatching_to_observer_list_;
std::vector<EnabledStateObserver*> enabled_state_observer_list_;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
@@ -487,19 +470,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TimeDelta time_offset_;
- // Allow tests to wake up when certain events occur.
- WatchEventCallback watch_event_callback_;
- subtle::AtomicWord /* const unsigned char* */ watch_category_;
- std::string watch_event_name_;
-
subtle::AtomicWord /* Options */ trace_options_;
- // Sampling thread handles.
- std::unique_ptr<TraceSamplingThread> sampling_thread_;
- PlatformThreadHandle sampling_thread_handle_;
-
TraceConfig trace_config_;
- TraceConfig event_callback_trace_config_;
+ TraceConfig::EventFilters enabled_event_filters_;
ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
ThreadLocalBoolean thread_blocks_message_loop_;
@@ -522,6 +496,8 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
subtle::AtomicWord generation_;
bool use_worker_thread_;
+ FilterFactoryForTesting filter_factory_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
diff --git a/base/trace_event/trace_log_constants.cc b/base/trace_event/trace_log_constants.cc
index cd2ff0dad3..65dca2e4d6 100644
--- a/base/trace_event/trace_log_constants.cc
+++ b/base/trace_event/trace_log_constants.cc
@@ -14,8 +14,7 @@ const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordUntilFull = 1 << 0;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordContinuously = 1 << 1;
-const TraceLog::InternalTraceOptions
- TraceLog::kInternalEnableSampling = 1 << 2;
+// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
const TraceLog::InternalTraceOptions
TraceLog::kInternalEchoToConsole = 1 << 3;
const TraceLog::InternalTraceOptions
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
deleted file mode 100644
index 5a0d2f8a02..0000000000
--- a/base/trace_event/trace_sampling_thread.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_impl.h"
-#include "base/trace_event/trace_log.h"
-#include "base/trace_event/trace_sampling_thread.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData {
- public:
- TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback);
- ~TraceBucketData();
-
- TRACE_EVENT_API_ATOMIC_WORD* bucket;
- const char* bucket_name;
- TraceSampleCallback callback;
-};
-
-TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false),
- waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
-
-TraceSamplingThread::~TraceSamplingThread() {}
-
-void TraceSamplingThread::ThreadMain() {
- PlatformThread::SetName("Sampling Thread");
- thread_running_ = true;
- const int kSamplingFrequencyMicroseconds = 1000;
- while (!cancellation_flag_.IsSet()) {
- PlatformThread::Sleep(
- TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
- GetSamples();
- waitable_event_for_testing_.Signal();
- }
-}
-
-// static
-void TraceSamplingThread::DefaultSamplingCallback(
- TraceBucketData* bucket_data) {
- TRACE_EVENT_API_ATOMIC_WORD category_and_name =
- TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
- if (!category_and_name)
- return;
- const char* const combined =
- reinterpret_cast<const char* const>(category_and_name);
- const char* category_group;
- const char* name;
- ExtractCategoryAndName(combined, &category_group, &name);
- TRACE_EVENT_API_ADD_TRACE_EVENT(
- TRACE_EVENT_PHASE_SAMPLE,
- TraceLog::GetCategoryGroupEnabled(category_group), name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- NULL, NULL, NULL, NULL, 0);
-}
-
-void TraceSamplingThread::GetSamples() {
- for (size_t i = 0; i < sample_buckets_.size(); ++i) {
- TraceBucketData* bucket_data = &sample_buckets_[i];
- bucket_data->callback.Run(bucket_data);
- }
-}
-
-void TraceSamplingThread::RegisterSampleBucket(
- TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback) {
- // Access to sample_buckets_ doesn't cause races with the sampling thread
- // that uses the sample_buckets_, because it is guaranteed that
- // RegisterSampleBucket is called before the sampling thread is created.
- DCHECK(!thread_running_);
- sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
-}
-
-// static
-void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name) {
- *category = combined;
- *name = &combined[strlen(combined) + 1];
-}
-
-void TraceSamplingThread::Stop() {
- cancellation_flag_.Set();
-}
-
-void TraceSamplingThread::WaitSamplingEventForTesting() {
- waitable_event_for_testing_.Wait();
-}
-
-TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback)
- : bucket(bucket), bucket_name(name), callback(callback) {}
-
-TraceBucketData::~TraceBucketData() {}
-
-} // namespace trace_event
-} // namespace base
diff --git a/base/trace_event/trace_sampling_thread.h b/base/trace_event/trace_sampling_thread.h
deleted file mode 100644
index f976a80e07..0000000000
--- a/base/trace_event/trace_sampling_thread.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-
-#include "base/synchronization/cancellation_flag.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/trace_event/trace_event.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData;
-typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
-
-// This object must be created on the IO thread.
-class TraceSamplingThread : public PlatformThread::Delegate {
- public:
- TraceSamplingThread();
- ~TraceSamplingThread() override;
-
- // Implementation of PlatformThread::Delegate:
- void ThreadMain() override;
-
- static void DefaultSamplingCallback(TraceBucketData* bucket_data);
-
- void Stop();
- void WaitSamplingEventForTesting();
-
- private:
- friend class TraceLog;
-
- void GetSamples();
- // Not thread-safe. Once the ThreadMain has been called, this can no longer
- // be called.
- void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback);
- // Splits a combined "category\0name" into the two component parts.
- static void ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name);
- std::vector<TraceBucketData> sample_buckets_;
- bool thread_running_;
- CancellationFlag cancellation_flag_;
- WaitableEvent waitable_event_for_testing_;
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 487fd19098..131af14a3a 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -4,6 +4,7 @@
#include "base/tracked_objects.h"
+#include <ctype.h>
#include <limits.h>
#include <stdlib.h>
@@ -13,8 +14,10 @@
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
#include "base/process/process_handle.h"
-#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
@@ -29,6 +32,9 @@ class TimeDelta;
namespace tracked_objects {
namespace {
+
+constexpr char kWorkerThreadSanitizedName[] = "WorkerThread-*";
+
// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
@@ -74,6 +80,22 @@ inline bool IsProfilerTimingEnabled() {
return current_timing_enabled == ENABLED_TIMING;
}
+// Sanitizes a thread name by replacing its trailing sequence of digits with
+// "*".
+// Examples:
+// 1. "BrowserBlockingWorker1/23857" => "BrowserBlockingWorker1/*"
+// 2. "Chrome_IOThread" => "Chrome_IOThread"
+std::string SanitizeThreadName(const std::string& thread_name) {
+ size_t i = thread_name.length();
+
+ while (i > 0 && isdigit(thread_name[i - 1]))
+ --i;
+
+ if (i == thread_name.length())
+ return thread_name;
+
+ return thread_name.substr(0, i) + '*';
+}
+
} // namespace
//------------------------------------------------------------------------------
@@ -86,10 +108,15 @@ DeathData::DeathData()
queue_duration_sum_(0),
run_duration_max_(0),
queue_duration_max_(0),
+ alloc_ops_(0),
+ free_ops_(0),
+ allocated_bytes_(0),
+ freed_bytes_(0),
+ alloc_overhead_bytes_(0),
+ max_allocated_bytes_(0),
run_duration_sample_(0),
queue_duration_sample_(0),
- last_phase_snapshot_(nullptr) {
-}
+ last_phase_snapshot_(nullptr) {}
DeathData::DeathData(const DeathData& other)
: count_(other.count_),
@@ -98,6 +125,12 @@ DeathData::DeathData(const DeathData& other)
queue_duration_sum_(other.queue_duration_sum_),
run_duration_max_(other.run_duration_max_),
queue_duration_max_(other.queue_duration_max_),
+ alloc_ops_(other.alloc_ops_),
+ free_ops_(other.free_ops_),
+ allocated_bytes_(other.allocated_bytes_),
+ freed_bytes_(other.freed_bytes_),
+ alloc_overhead_bytes_(other.alloc_overhead_bytes_),
+ max_allocated_bytes_(other.max_allocated_bytes_),
run_duration_sample_(other.run_duration_sample_),
queue_duration_sample_(other.queue_duration_sample_),
last_phase_snapshot_(nullptr) {
@@ -125,9 +158,9 @@ DeathData::~DeathData() {
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
-void DeathData::RecordDeath(const int32_t queue_duration,
- const int32_t run_duration,
- const uint32_t random_number) {
+void DeathData::RecordDurations(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number) {
// We'll just clamp at INT_MAX, but we should note this in the UI as such.
if (count_ < INT_MAX)
base::subtle::NoBarrier_Store(&count_, count_ + 1);
@@ -164,12 +197,28 @@ void DeathData::RecordDeath(const int32_t queue_duration,
}
}
+void DeathData::RecordAllocations(const uint32_t alloc_ops,
+ const uint32_t free_ops,
+ const uint32_t allocated_bytes,
+ const uint32_t freed_bytes,
+ const uint32_t alloc_overhead_bytes,
+ const uint32_t max_allocated_bytes) {
+ // Use saturating arithmetic.
+ SaturatingMemberAdd(alloc_ops, &alloc_ops_);
+ SaturatingMemberAdd(free_ops, &free_ops_);
+ SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
+ SaturatingMemberAdd(freed_bytes, &freed_bytes_);
+ SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
+
+ int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
+ if (max > max_allocated_bytes_)
+ base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
+}
+
void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
// Snapshotting and storing current state.
- last_phase_snapshot_ = new DeathDataPhaseSnapshot(
- profiling_phase, count(), run_duration_sum(), run_duration_max(),
- run_duration_sample(), queue_duration_sum(), queue_duration_max(),
- queue_duration_sample(), last_phase_snapshot_);
+ last_phase_snapshot_ =
+ new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
// Not touching fields for which a delta can be computed by comparing with a
// snapshot from the previous phase. Resetting other fields. Sample values
@@ -201,6 +250,17 @@ void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
}
+void DeathData::SaturatingMemberAdd(const uint32_t addend,
+ base::subtle::Atomic32* sum) {
+ // Bail quick if no work or already saturated.
+ if (addend == 0U || *sum == INT_MAX)
+ return;
+
+ base::CheckedNumeric<int32_t> new_sum = *sum;
+ new_sum += addend;
+ base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
+}
+
//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
: count(-1),
@@ -209,8 +269,13 @@ DeathDataSnapshot::DeathDataSnapshot()
run_duration_sample(-1),
queue_duration_sum(-1),
queue_duration_max(-1),
- queue_duration_sample(-1) {
-}
+ queue_duration_sample(-1),
+ alloc_ops(-1),
+ free_ops(-1),
+ allocated_bytes(-1),
+ freed_bytes(-1),
+ alloc_overhead_bytes(-1),
+ max_allocated_bytes(-1) {}
DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sum,
@@ -218,25 +283,58 @@ DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sample,
int32_t queue_duration_sum,
int32_t queue_duration_max,
- int32_t queue_duration_sample)
+ int32_t queue_duration_sample,
+ int32_t alloc_ops,
+ int32_t free_ops,
+ int32_t allocated_bytes,
+ int32_t freed_bytes,
+ int32_t alloc_overhead_bytes,
+ int32_t max_allocated_bytes)
: count(count),
run_duration_sum(run_duration_sum),
run_duration_max(run_duration_max),
run_duration_sample(run_duration_sample),
queue_duration_sum(queue_duration_sum),
queue_duration_max(queue_duration_max),
- queue_duration_sample(queue_duration_sample) {}
+ queue_duration_sample(queue_duration_sample),
+ alloc_ops(alloc_ops),
+ free_ops(free_ops),
+ allocated_bytes(allocated_bytes),
+ freed_bytes(freed_bytes),
+ alloc_overhead_bytes(alloc_overhead_bytes),
+ max_allocated_bytes(max_allocated_bytes) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
+ : count(death_data.count()),
+ run_duration_sum(death_data.run_duration_sum()),
+ run_duration_max(death_data.run_duration_max()),
+ run_duration_sample(death_data.run_duration_sample()),
+ queue_duration_sum(death_data.queue_duration_sum()),
+ queue_duration_max(death_data.queue_duration_max()),
+ queue_duration_sample(death_data.queue_duration_sample()),
+ alloc_ops(death_data.alloc_ops()),
+ free_ops(death_data.free_ops()),
+ allocated_bytes(death_data.allocated_bytes()),
+ freed_bytes(death_data.freed_bytes()),
+ alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
+ max_allocated_bytes(death_data.max_allocated_bytes()) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
+ default;
DeathDataSnapshot::~DeathDataSnapshot() {
}
DeathDataSnapshot DeathDataSnapshot::Delta(
const DeathDataSnapshot& older) const {
- return DeathDataSnapshot(count - older.count,
- run_duration_sum - older.run_duration_sum,
- run_duration_max, run_duration_sample,
- queue_duration_sum - older.queue_duration_sum,
- queue_duration_max, queue_duration_sample);
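+  // Cumulative fields (count, duration sums, op/byte totals) are differenced
+  // against |older|; max and sample fields are point values and are carried
+  // over unchanged.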
+ return DeathDataSnapshot(
+ count - older.count, run_duration_sum - older.run_duration_sum,
+ run_duration_max, run_duration_sample,
+ queue_duration_sum - older.queue_duration_sum, queue_duration_max,
+ queue_duration_sample, alloc_ops - older.alloc_ops,
+ free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
+ freed_bytes - older.freed_bytes,
+ alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
}
//------------------------------------------------------------------------------
@@ -252,8 +350,7 @@ BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
: location(birth.location()),
- thread_name(birth.birth_thread()->thread_name()) {
-}
+ sanitized_thread_name(birth.birth_thread()->sanitized_thread_name()) {}
BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}
@@ -285,9 +382,6 @@ ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;
// static
-int ThreadData::worker_thread_data_creation_count_ = 0;
-
-// static
int ThreadData::cleanup_count_ = 0;
// static
@@ -297,7 +391,7 @@ int ThreadData::incarnation_counter_ = 0;
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
// static
-ThreadData* ThreadData::first_retired_worker_ = NULL;
+ThreadData* ThreadData::first_retired_thread_data_ = NULL;
// static
base::LazyInstance<base::Lock>::Leaky
@@ -306,25 +400,14 @@ base::LazyInstance<base::Lock>::Leaky
// static
base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
-ThreadData::ThreadData(const std::string& suggested_name)
+ThreadData::ThreadData(const std::string& sanitized_thread_name)
: next_(NULL),
- next_retired_worker_(NULL),
- worker_thread_number_(0),
+ next_retired_thread_data_(NULL),
+ sanitized_thread_name_(sanitized_thread_name),
incarnation_count_for_pool_(-1),
current_stopwatch_(NULL) {
- DCHECK_GE(suggested_name.size(), 0u);
- thread_name_ = suggested_name;
- PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
-}
-
-ThreadData::ThreadData(int thread_number)
- : next_(NULL),
- next_retired_worker_(NULL),
- worker_thread_number_(thread_number),
- incarnation_count_for_pool_(-1),
- current_stopwatch_(NULL) {
- CHECK_GT(thread_number, 0);
- base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
+ DCHECK(sanitized_thread_name_.empty() ||
+ !isdigit(sanitized_thread_name_.back()));
PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
}
@@ -355,15 +438,17 @@ ThreadData* ThreadData::first() {
ThreadData* ThreadData::next() const { return next_; }
// static
-void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
+void ThreadData::InitializeThreadContext(const std::string& thread_name) {
if (base::WorkerPool::RunsTasksOnCurrentThread())
return;
+ DCHECK_NE(thread_name, kWorkerThreadSanitizedName);
EnsureTlsInitialization();
ThreadData* current_thread_data =
reinterpret_cast<ThreadData*>(tls_index_.Get());
if (current_thread_data)
return; // Browser tests instigate this.
- current_thread_data = new ThreadData(suggested_name);
+ current_thread_data =
+ GetRetiredOrCreateThreadData(SanitizeThreadName(thread_name));
tls_index_.Set(current_thread_data);
}
@@ -376,26 +461,8 @@ ThreadData* ThreadData::Get() {
return registered;
// We must be a worker thread, since we didn't pre-register.
- ThreadData* worker_thread_data = NULL;
- int worker_thread_number = 0;
- {
- base::AutoLock lock(*list_lock_.Pointer());
- if (first_retired_worker_) {
- worker_thread_data = first_retired_worker_;
- first_retired_worker_ = first_retired_worker_->next_retired_worker_;
- worker_thread_data->next_retired_worker_ = NULL;
- } else {
- worker_thread_number = ++worker_thread_data_creation_count_;
- }
- }
-
- // If we can't find a previously used instance, then we have to create one.
- if (!worker_thread_data) {
- DCHECK_GT(worker_thread_number, 0);
- worker_thread_data = new ThreadData(worker_thread_number);
- }
- DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
-
+ ThreadData* worker_thread_data =
+ GetRetiredOrCreateThreadData(kWorkerThreadSanitizedName);
tls_index_.Set(worker_thread_data);
return worker_thread_data;
}
@@ -409,21 +476,23 @@ void ThreadData::OnThreadTermination(void* thread_data) {
}
void ThreadData::OnThreadTerminationCleanup() {
+ // We must NOT do any allocations during this callback. There is a chance that
+ // the allocator is no longer active on this thread.
+
// The list_lock_ was created when we registered the callback, so it won't be
// allocated here despite the lazy reference.
base::AutoLock lock(*list_lock_.Pointer());
if (incarnation_counter_ != incarnation_count_for_pool_)
return; // ThreadData was constructed in an earlier unit test.
++cleanup_count_;
- // Only worker threads need to be retired and reused.
- if (!worker_thread_number_) {
- return;
- }
- // We must NOT do any allocations during this callback.
- // Using the simple linked lists avoids all allocations.
- DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
- this->next_retired_worker_ = first_retired_worker_;
- first_retired_worker_ = this;
+
+ // Add this ThreadData to a retired list so that it can be reused by a thread
+  // with the same sanitized name in the future.
+ // |next_retired_thread_data_| is expected to be nullptr for a ThreadData
+ // associated with an active thread.
+ DCHECK(!next_retired_thread_data_);
+ next_retired_thread_data_ = first_retired_thread_data_;
+ first_retired_thread_data_ = this;
}
// static
@@ -455,7 +524,8 @@ void ThreadData::Snapshot(int current_profiling_phase,
if (birth_count.second > 0) {
current_phase_tasks->push_back(
TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
- DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
+ DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0),
"Still_Alive"));
}
}
@@ -514,7 +584,21 @@ void ThreadData::TallyADeath(const Births& births,
base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
death_data = &death_map_[&births];
} // Release lock ASAP.
- death_data->RecordDeath(queue_duration, run_duration, random_number_);
+ death_data->RecordDurations(queue_duration, run_duration, random_number_);
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (stopwatch.heap_tracking_enabled()) {
+ base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
+ // Saturate the 64 bit counts on conversion to 32 bit storage.
+ death_data->RecordAllocations(
+ base::saturated_cast<int32_t>(heap_usage.alloc_ops),
+ base::saturated_cast<int32_t>(heap_usage.free_ops),
+ base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
+ base::saturated_cast<int32_t>(heap_usage.free_bytes),
+ base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
+ base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
+ }
+#endif
}
// static
@@ -635,7 +719,7 @@ void ThreadData::SnapshotExecutedTasks(
if (death_data.count > 0) {
(*phased_snapshots)[phase->profiling_phase].tasks.push_back(
TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
- thread_name()));
+ sanitized_thread_name()));
}
}
}
@@ -653,13 +737,7 @@ void ThreadData::SnapshotMaps(int profiling_phase,
for (const auto& death : death_map_) {
deaths->push_back(std::make_pair(
death.first,
- DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
- death.second.run_duration_sum(),
- death.second.run_duration_max(),
- death.second.run_duration_sample(),
- death.second.queue_duration_sum(),
- death.second.queue_duration_max(),
- death.second.queue_duration_sample(),
+ DeathDataPhaseSnapshot(profiling_phase, death.second,
death.second.last_phase_snapshot())));
}
}
@@ -716,6 +794,7 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
if (status > DEACTIVATED)
status = PROFILING_ACTIVE;
+
base::subtle::Release_Store(&status_, status);
}
@@ -744,10 +823,9 @@ TrackedTime ThreadData::Now() {
}
// static
-void ThreadData::EnsureCleanupWasCalled(int /*major_threads_shutdown_count*/) {
+void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
+ ALLOW_UNUSED_PARAM(major_threads_shutdown_count);
base::AutoLock lock(*list_lock_.Pointer());
- if (worker_thread_data_creation_count_ == 0)
- return; // We haven't really run much, and couldn't have leaked.
// TODO(jar): until this is working on XP, don't run the real test.
#if 0
@@ -772,16 +850,14 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
all_thread_data_list_head_ = NULL;
++incarnation_counter_;
// To be clean, break apart the retired worker list (though we leak them).
- while (first_retired_worker_) {
- ThreadData* worker = first_retired_worker_;
- CHECK_GT(worker->worker_thread_number_, 0);
- first_retired_worker_ = worker->next_retired_worker_;
- worker->next_retired_worker_ = NULL;
+ while (first_retired_thread_data_) {
+ ThreadData* thread_data = first_retired_thread_data_;
+ first_retired_thread_data_ = thread_data->next_retired_thread_data_;
+ thread_data->next_retired_thread_data_ = nullptr;
}
}
// Put most global static back in pristine shape.
- worker_thread_data_creation_count_ = 0;
cleanup_count_ = 0;
tls_index_.Set(NULL);
// Almost UNINITIALIZED.
@@ -813,6 +889,39 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
}
}
+// static
+ThreadData* ThreadData::GetRetiredOrCreateThreadData(
+ const std::string& sanitized_thread_name) {
+ SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData");
+
+ {
+ base::AutoLock lock(*list_lock_.Pointer());
+ ThreadData** pcursor = &first_retired_thread_data_;
+ ThreadData* cursor = first_retired_thread_data_;
+
+ // Assuming that there aren't more than a few tens of retired ThreadData
+ // instances, this lookup should be quick compared to the thread creation
+ // time. Retired ThreadData instances cannot be stored in a map because
+ // insertions are done from OnThreadTerminationCleanup() where allocations
+ // are not allowed.
+ //
+ // Note: Test processes may have more than a few tens of retired ThreadData
+ // instances.
+ while (cursor) {
+ if (cursor->sanitized_thread_name() == sanitized_thread_name) {
+ DCHECK_EQ(*pcursor, cursor);
+ *pcursor = cursor->next_retired_thread_data_;
+ cursor->next_retired_thread_data_ = nullptr;
+ return cursor;
+ }
+ pcursor = &cursor->next_retired_thread_data_;
+ cursor = cursor->next_retired_thread_data_;
+ }
+ }
+
+ return new ThreadData(sanitized_thread_name);
+}
+
//------------------------------------------------------------------------------
TaskStopwatch::TaskStopwatch()
: wallclock_duration_ms_(0),
@@ -823,6 +932,10 @@ TaskStopwatch::TaskStopwatch()
state_ = CREATED;
child_ = NULL;
#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ heap_tracking_enabled_ =
+ base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
+#endif
}
TaskStopwatch::~TaskStopwatch() {
@@ -839,6 +952,10 @@ void TaskStopwatch::Start() {
#endif
start_time_ = ThreadData::Now();
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (heap_tracking_enabled_)
+ heap_usage_.Start();
+#endif
current_thread_data_ = ThreadData::Get();
if (!current_thread_data_)
@@ -862,6 +979,10 @@ void TaskStopwatch::Stop() {
state_ = STOPPED;
DCHECK(child_ == NULL);
#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (heap_tracking_enabled_)
+ heap_usage_.Stop(true);
+#endif
if (!start_time_.is_null() && !end_time.is_null()) {
wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
@@ -913,23 +1034,9 @@ ThreadData* TaskStopwatch::GetThreadData() const {
DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
int profiling_phase,
- int count,
- int32_t run_duration_sum,
- int32_t run_duration_max,
- int32_t run_duration_sample,
- int32_t queue_duration_sum,
- int32_t queue_duration_max,
- int32_t queue_duration_sample,
+ const DeathData& death,
const DeathDataPhaseSnapshot* prev)
- : profiling_phase(profiling_phase),
- death_data(count,
- run_duration_sum,
- run_duration_max,
- run_duration_sample,
- queue_duration_sum,
- queue_duration_max,
- queue_duration_sample),
- prev(prev) {}
+ : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
//------------------------------------------------------------------------------
// TaskSnapshot
@@ -939,11 +1046,10 @@ TaskSnapshot::TaskSnapshot() {
TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
const DeathDataSnapshot& death_data,
- const std::string& death_thread_name)
+ const std::string& death_sanitized_thread_name)
: birth(birth),
death_data(death_data),
- death_thread_name(death_thread_name) {
-}
+ death_sanitized_thread_name(death_sanitized_thread_name) {}
TaskSnapshot::~TaskSnapshot() {
}
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 7ef0317c39..36caec3c6e 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -14,9 +14,12 @@
#include <utility>
#include <vector>
+#include "base/allocator/features.h"
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/containers/hash_tables.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/location.h"
@@ -59,71 +62,76 @@ struct TrackingInfo;
// with great efficiency (i.e., copying of strings is never needed, and
// comparisons for equality can be based on pointer comparisons).
//
-// Next, a Births instance is created for use ONLY on the thread where this
-// instance was created. That Births instance records (in a base class
-// BirthOnThread) references to the static data provided in a Location instance,
-// as well as a pointer specifying the thread on which the birth takes place.
-// Hence there is at most one Births instance for each Location on each thread.
-// The derived Births class contains slots for recording statistics about all
-// instances born at the same location. Statistics currently include only the
-// count of instances constructed.
+// Next, a Births instance is constructed or found. A Births instance records
+// (in a base class BirthOnThread) references to the static data provided in a
+// Location instance, as well as a pointer to the ThreadData bound to the thread
+// on which the birth takes place (see discussion on ThreadData below). There is
+// at most one Births instance for each Location / ThreadData pair. The derived
+// Births class contains slots for recording statistics about all instances born
+// at the same location. Statistics currently include only the count of
+// instances constructed.
//
// Since the base class BirthOnThread contains only constant data, it can be
-// freely accessed by any thread at any time (i.e., only the statistic needs to
-// be handled carefully, and stats are updated exclusively on the birth thread).
+// freely accessed by any thread at any time. The statistics must be handled
+// more carefully; they are updated exclusively by the single thread to which
+// the ThreadData is bound at a given time.
//
// For Tasks, having now either constructed or found the Births instance
// described above, a pointer to the Births instance is then recorded into the
-// PendingTask structure in MessageLoop. This fact alone is very useful in
-// debugging, when there is a question of where an instance came from. In
-// addition, the birth time is also recorded and used to later evaluate the
-// lifetime duration of the whole Task. As a result of the above embedding, we
-// can find out a Task's location of birth, and thread of birth, without using
-// any locks, as all that data is constant across the life of the process.
+// PendingTask structure. This fact alone is very useful in debugging, when
+// there is a question of where an instance came from. In addition, the birth
+// time is also recorded and used to later evaluate the lifetime duration of the
+// whole Task. As a result of the above embedding, we can find out a Task's
+// location of birth, and name of birth thread, without using any locks, as all
+// that data is constant across the life of the process.
//
// The above work *could* also be done for any other object as well by calling
// TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
//
-// The amount of memory used in the above data structures depends on how many
-// threads there are, and how many Locations of construction there are.
-// Fortunately, we don't use memory that is the product of those two counts, but
-// rather we only need one Births instance for each thread that constructs an
-// instance at a Location. In many cases, instances are only created on one
-// thread, so the memory utilization is actually fairly restrained.
+// The upper bound for the amount of memory used in the above data structures is
+// the product of the number of ThreadData instances and the number of
+// Locations. Fortunately, Locations are often created on a single thread and
+// the memory utilization is actually fairly restrained.
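+// For example, 30 ThreadData instances and 400 tracked Locations bound the
+// tables at 30 * 400 = 12000 Births/DeathData pairs, though far fewer are
+// typically allocated because most Locations are exercised on few threads.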
//
// Lastly, when an instance is deleted, the final tallies of statistics are
// carefully accumulated. That tallying writes into slots (members) in a
-// collection of DeathData instances. For each birth place Location that is
-// destroyed on a thread, there is a DeathData instance to record the additional
-// death count, as well as accumulate the run-time and queue-time durations for
-// the instance as it is destroyed (dies). By maintaining a single place to
-// aggregate this running sum *only* for the given thread, we avoid the need to
-// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
-// instance are exclusively updated by the singular owning thread).
+// collection of DeathData instances. For each Births / death ThreadData pair,
+// there is a DeathData instance to record the additional death count, as well
+// as to accumulate the run-time and queue-time durations for the instance as it
+// is destroyed (dies). Since a ThreadData is bound to at most one thread at a
+// time, there is no need to lock such DeathData instances. (i.e., these
+// accumulated stats in a DeathData instance are exclusively updated by the
+// singular owning thread).
//
-// With the above life cycle description complete, the major remaining detail
-// is explaining how each thread maintains a list of DeathData instances, and
-// of Births instances, and is able to avoid additional (redundant/unnecessary)
-// allocations.
+// With the above life cycle description complete, the major remaining detail is
+// explaining how existing Births and DeathData instances are found to avoid
+// redundant allocations.
//
-// Each thread maintains a list of data items specific to that thread in a
-// ThreadData instance (for that specific thread only). The two critical items
-// are lists of DeathData and Births instances. These lists are maintained in
-// STL maps, which are indexed by Location. As noted earlier, we can compare
-// locations very efficiently as we consider the underlying data (file,
-// function, line) to be atoms, and hence pointer comparison is used rather than
-// (slow) string comparisons.
+// A ThreadData instance maintains maps of Births and DeathData instances. The
+// Births map is indexed by Location and the DeathData map is indexed by
+// Births*. As noted earlier, we can compare Locations very efficiently as we
+// consider the underlying data (file, function, line) to be atoms, and hence
+// pointer comparison is used rather than (slow) string comparisons.
//
-// To provide a mechanism for iterating over all "known threads," which means
-// threads that have recorded a birth or a death, we create a singly linked list
-// of ThreadData instances. Each such instance maintains a pointer to the next
-// one. A static member of ThreadData provides a pointer to the first item on
-// this global list, and access via that all_thread_data_list_head_ item
-// requires the use of the list_lock_.
-// When new ThreadData instances is added to the global list, it is pre-pended,
-// which ensures that any prior acquisition of the list is valid (i.e., the
-// holder can iterate over it without fear of it changing, or the necessity of
-// using an additional lock. Iterations are actually pretty rare (used
+// The first time that a thread calls ThreadData::InitializeThreadContext() or
+// ThreadData::Get(), a ThreadData instance is bound to it and stored in TLS. If
+// a ThreadData bound to a terminated thread with the same sanitized name (i.e.
+// name without trailing digits) as the current thread is available, it is
+// reused. Otherwise, a new ThreadData instance is instantiated. Since a
+// ThreadData is bound to at most one thread at a time, there is no need to
+// acquire a lock to access its maps. Over time, a ThreadData may be bound to
+// different threads that share the same sanitized name.
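+// For example, threads named "PoolThread-7" and "PoolThread-12" both have the
+// sanitized name "PoolThread-*", so a ThreadData retired when one terminates
+// may later be bound to the other.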
+//
+// We maintain a list of all ThreadData instances for the current process. Each
+// ThreadData instance has a pointer to the next one. A static member of
+// ThreadData provides a pointer to the first item on this global list, and
+// access via that all_thread_data_list_head_ item requires the use of the
+// list_lock_.
+//
+// When new ThreadData instances are added to the global list, they are pre-
+// pended, which ensures that any prior acquisition of the list is valid (i.e.,
+// the holder can iterate over it without fear of it changing, or the necessity
+// of using an additional lock). Iterations are actually pretty rare (used
// primarily for cleanup, or snapshotting data for display), so this lock has
// very little global performance impact.
//
@@ -170,12 +178,13 @@ struct TrackingInfo;
// memory reference).
//
// TODO(jar): We can implement a Snapshot system that *tries* to grab the
-// snapshots on the source threads *when* they have MessageLoops available
-// (worker threads don't have message loops generally, and hence gathering from
-// them will continue to be asynchronous). We had an implementation of this in
-// the past, but the difficulty is dealing with message loops being terminated.
-// We can *try* to spam the available threads via some task runner to
-// achieve this feat, and it *might* be valuable when we are collecting data
+// snapshots on the source threads *when* they have SingleThreadTaskRunners
+// available (worker threads don't have SingleThreadTaskRunners, and hence
+// gathering from them will continue to be asynchronous). We had an
+// implementation of this in the past, but the difficulty is dealing with
+// threads being terminated. We can *try* to post a task to threads that have a
+// SingleThreadTaskRunner and check if that succeeds (will fail if the thread
+// has been terminated). This *might* be valuable when we are collecting data
// for upload via UMA (where correctness of data may be more significant than
// for a single screen of about:profiler).
//
@@ -226,7 +235,7 @@ struct BASE_EXPORT BirthOnThreadSnapshot {
~BirthOnThreadSnapshot();
LocationSnapshot location;
- std::string thread_name;
+ std::string sanitized_thread_name;
};
//------------------------------------------------------------------------------
@@ -248,6 +257,8 @@ class BASE_EXPORT Births: public BirthOnThread {
DISALLOW_COPY_AND_ASSIGN(Births);
};
+class DeathData;
+
//------------------------------------------------------------------------------
// A "snapshotted" representation of the DeathData class.
@@ -265,7 +276,15 @@ struct BASE_EXPORT DeathDataSnapshot {
int32_t run_duration_sample,
int32_t queue_duration_sum,
int32_t queue_duration_max,
- int32_t queue_duration_sample);
+ int32_t queue_duration_sample,
+ int32_t alloc_ops,
+ int32_t free_ops,
+ int32_t allocated_bytes,
+ int32_t freed_bytes,
+ int32_t alloc_overhead_bytes,
+ int32_t max_allocated_bytes);
+ DeathDataSnapshot(const DeathData& death_data);
+ DeathDataSnapshot(const DeathDataSnapshot& other);
~DeathDataSnapshot();
// Calculates and returns the delta between this snapshot and an earlier
@@ -279,6 +298,13 @@ struct BASE_EXPORT DeathDataSnapshot {
int32_t queue_duration_sum;
int32_t queue_duration_max;
int32_t queue_duration_sample;
+
+ int32_t alloc_ops;
+ int32_t free_ops;
+ int32_t allocated_bytes;
+ int32_t freed_bytes;
+ int32_t alloc_overhead_bytes;
+ int32_t max_allocated_bytes;
};
//------------------------------------------------------------------------------
@@ -287,13 +313,7 @@ struct BASE_EXPORT DeathDataSnapshot {
struct DeathDataPhaseSnapshot {
DeathDataPhaseSnapshot(int profiling_phase,
- int count,
- int32_t run_duration_sum,
- int32_t run_duration_max,
- int32_t run_duration_sample,
- int32_t queue_duration_sum,
- int32_t queue_duration_max,
- int32_t queue_duration_sample,
+ const DeathData& death_data,
const DeathDataPhaseSnapshot* prev);
// Profiling phase at which completion this snapshot was taken.
@@ -326,9 +346,26 @@ class BASE_EXPORT DeathData {
// Update stats for a task destruction (death) that had a Run() time of
// |duration|, and has had a queueing delay of |queue_duration|.
- void RecordDeath(const int32_t queue_duration,
- const int32_t run_duration,
- const uint32_t random_number);
+ void RecordDurations(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number);
+
+ // Update stats for a task destruction that performed |alloc_ops|
+ // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed
+ // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap
+ // overhead, and where at most |max_allocated_bytes| were outstanding at any
+ // one time.
+ // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated
+ // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops|
+ // yields the average size of allocation.
+ // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory
+ // usage of the task, which can be negative.
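+  // For example, |alloc_ops| = 4, |allocated_bytes| = 1024 and
+  // |alloc_overhead_bytes| = 64 describe an average allocation of 256 bytes
+  // with an estimated 16 bytes of heap overhead each.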
+ void RecordAllocations(const uint32_t alloc_ops,
+ const uint32_t free_ops,
+ const uint32_t allocated_bytes,
+ const uint32_t freed_bytes,
+ const uint32_t alloc_overhead_bytes,
+ const uint32_t max_allocated_bytes);
// Metrics and past snapshots accessors, used only for serialization and in
// tests.
@@ -351,6 +388,22 @@ class BASE_EXPORT DeathData {
int32_t queue_duration_sample() const {
return base::subtle::NoBarrier_Load(&queue_duration_sample_);
}
+ int32_t alloc_ops() const {
+ return base::subtle::NoBarrier_Load(&alloc_ops_);
+ }
+ int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
+ int32_t allocated_bytes() const {
+ return base::subtle::NoBarrier_Load(&allocated_bytes_);
+ }
+ int32_t freed_bytes() const {
+ return base::subtle::NoBarrier_Load(&freed_bytes_);
+ }
+ int32_t alloc_overhead_bytes() const {
+ return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_);
+ }
+ int32_t max_allocated_bytes() const {
+ return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
+ }
const DeathDataPhaseSnapshot* last_phase_snapshot() const {
return last_phase_snapshot_;
}
@@ -361,6 +414,12 @@ class BASE_EXPORT DeathData {
void OnProfilingPhaseCompleted(int profiling_phase);
private:
+ // A saturating addition operation for member variables. This elides the
+ // use of atomic-primitive reads for members that are only written on the
+ // owning thread.
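+  // For example, adding 100 to a sum of INT_MAX - 10 stores INT_MAX rather
+  // than wrapping around.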
+ static void SaturatingMemberAdd(const uint32_t addend,
+ base::subtle::Atomic32* sum);
+
// Members are ordered from most regularly read and updated, to least
// frequently used. This might help a bit with cache lines.
// Number of runs seen (divisor for calculating averages).
@@ -383,6 +442,24 @@ class BASE_EXPORT DeathData {
// snapshot thread.
base::subtle::Atomic32 run_duration_max_;
base::subtle::Atomic32 queue_duration_max_;
+
+ // The cumulative number of allocation and free operations.
+ base::subtle::Atomic32 alloc_ops_;
+ base::subtle::Atomic32 free_ops_;
+
+ // The number of bytes allocated by the task.
+ base::subtle::Atomic32 allocated_bytes_;
+
+ // The number of bytes freed by the task.
+ base::subtle::Atomic32 freed_bytes_;
+
+ // The cumulative number of overhead bytes. Where available this yields an
+ // estimate of the heap overhead for allocations.
+ base::subtle::Atomic32 alloc_overhead_bytes_;
+
+ // The high-watermark for the number of outstanding heap allocated bytes.
+ base::subtle::Atomic32 max_allocated_bytes_;
+
// Samples, used by crowd sourcing gatherers. These are almost never read,
// and rarely updated. They can be modified only on the death thread.
base::subtle::Atomic32 run_duration_sample_;
@@ -407,14 +484,14 @@ struct BASE_EXPORT TaskSnapshot {
TaskSnapshot();
TaskSnapshot(const BirthOnThreadSnapshot& birth,
const DeathDataSnapshot& death_data,
- const std::string& death_thread_name);
+ const std::string& death_sanitized_thread_name);
~TaskSnapshot();
BirthOnThreadSnapshot birth;
// Delta between death data for a thread for a certain profiling phase and the
  // snapshot for the previous phase, if any. Otherwise, just a snapshot.
DeathDataSnapshot death_data;
- std::string death_thread_name;
+ std::string death_sanitized_thread_name;
};
//------------------------------------------------------------------------------
@@ -450,9 +527,8 @@ class BASE_EXPORT ThreadData {
// Initialize the current thread context with a new instance of ThreadData.
// This is used by all threads that have names, and should be explicitly
- // set *before* any births on the threads have taken place. It is generally
- // only used by the message loop, which has a well defined thread name.
- static void InitializeThreadContext(const std::string& suggested_name);
+ // set *before* any births on the threads have taken place.
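+  // For example, a named thread calls
+  //   ThreadData::InitializeThreadContext("Chrome_IOThread");
+  // during startup, before it runs any tracked tasks.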
+ static void InitializeThreadContext(const std::string& thread_name);
// Using Thread Local Store, find the current instance for collecting data.
// If an instance does not exist, construct one (and remember it for use on
@@ -510,7 +586,9 @@ class BASE_EXPORT ThreadData {
static void TallyRunInAScopedRegionIfTracking(const Births* births,
const TaskStopwatch& stopwatch);
- const std::string& thread_name() const { return thread_name_; }
+ const std::string& sanitized_thread_name() const {
+ return sanitized_thread_name_;
+ }
// Initializes all statics if needed (this initialization call should be made
// while we are single threaded).
@@ -559,12 +637,7 @@ class BASE_EXPORT ThreadData {
typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
DeathsSnapshot;
- // Worker thread construction creates a name since there is none.
- explicit ThreadData(int thread_number);
-
- // Message loop based construction should provide a name.
- explicit ThreadData(const std::string& suggested_name);
-
+ explicit ThreadData(const std::string& sanitized_thread_name);
~ThreadData();
// Push this instance to the head of all_thread_data_list_head_, linking it to
@@ -628,6 +701,12 @@ class BASE_EXPORT ThreadData {
// ThreadData instances.
static void ShutdownSingleThreadedCleanup(bool leak);
+ // Returns a ThreadData instance for a thread whose sanitized name is
+ // |sanitized_thread_name|. The returned instance may have been extracted from
+ // the list of retired ThreadData instances or newly allocated.
+ static ThreadData* GetRetiredOrCreateThreadData(
+ const std::string& sanitized_thread_name);
+
// When non-null, this specifies an external function that supplies monotone
  // increasing time function.
static NowFunction* now_function_for_testing_;
@@ -635,22 +714,16 @@ class BASE_EXPORT ThreadData {
// We use thread local store to identify which ThreadData to interact with.
static base::ThreadLocalStorage::StaticSlot tls_index_;
- // List of ThreadData instances for use with worker threads. When a worker
- // thread is done (terminated), we push it onto this list. When a new worker
- // thread is created, we first try to re-use a ThreadData instance from the
- // list, and if none are available, construct a new one.
- // This is only accessed while list_lock_ is held.
- static ThreadData* first_retired_worker_;
+ // Linked list of ThreadData instances that were associated with threads that
+ // have been terminated and that have not been associated with a new thread
+ // since then. This is only accessed while |list_lock_| is held.
+ static ThreadData* first_retired_thread_data_;
// Link to the most recently created instance (starts a null terminated list).
// The list is traversed by about:profiler when it needs to snapshot data.
// This is only accessed while list_lock_ is held.
static ThreadData* all_thread_data_list_head_;
- // The next available worker thread number. This should only be accessed when
- // the list_lock_ is held.
- static int worker_thread_data_creation_count_;
-
// The number of times TLS has called us back to cleanup a ThreadData
// instance. This is only accessed while list_lock_ is held.
static int cleanup_count_;
@@ -671,23 +744,16 @@ class BASE_EXPORT ThreadData {
// Link to next instance (null terminated list). Used to globally track all
// registered instances (corresponds to all registered threads where we keep
- // data).
+ // data). Only modified in the constructor.
ThreadData* next_;
- // Pointer to another ThreadData instance for a Worker-Thread that has been
- // retired (its thread was terminated). This value is non-NULL only for a
- // retired ThreadData associated with a Worker-Thread.
- ThreadData* next_retired_worker_;
-
- // The name of the thread that is being recorded. If this thread has no
- // message_loop, then this is a worker thread, with a sequence number postfix.
- std::string thread_name_;
+ // Pointer to another retired ThreadData instance. This value is nullptr if
+  // this instance is associated with an active thread.
+ ThreadData* next_retired_thread_data_;
- // Indicate if this is a worker thread, and the ThreadData contexts should be
- // stored in the unregistered_thread_data_pool_ when not in use.
- // Value is zero when it is not a worker thread. Value is a positive integer
- // corresponding to the created thread name if it is a worker thread.
- int worker_thread_number_;
+ // The name of the thread that is being recorded, with all trailing digits
+ // replaced with a single "*" character.
+ const std::string sanitized_thread_name_;
// A map used on each thread to keep track of Births on this thread.
// This map should only be accessed on the thread it was constructed on.
@@ -755,6 +821,13 @@ class BASE_EXPORT TaskStopwatch {
// this thread during that period.
int32_t RunDurationMs() const;
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ const base::debug::ThreadHeapUsageTracker& heap_usage() const {
+ return heap_usage_;
+ }
+ bool heap_tracking_enabled() const { return heap_tracking_enabled_; }
+#endif
+
// Returns tracking info for the current thread.
ThreadData* GetThreadData() const;
@@ -762,6 +835,11 @@ class BASE_EXPORT TaskStopwatch {
// Time when the stopwatch was started.
TrackedTime start_time_;
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ base::debug::ThreadHeapUsageTracker heap_usage_;
+ bool heap_tracking_enabled_;
+#endif
+
// Wallclock duration of the task.
int32_t wallclock_duration_ms_;
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index 70d9601cd0..f208e3c981 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -11,17 +11,27 @@
#include <memory>
+#include "base/macros.h"
#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/tracking_info.h"
#include "testing/gtest/include/gtest/gtest.h"
const int kLineNumber = 1776;
const char kFile[] = "FixedUnitTestFileName";
-const char kWorkerThreadName[] = "WorkerThread-1";
+const char kWorkerThreadName[] = "WorkerThread-*";
const char kMainThreadName[] = "SomeMainThreadName";
const char kStillAlive[] = "Still_Alive";
+const int32_t kAllocOps = 23;
+const int32_t kFreeOps = 27;
+const int32_t kAllocatedBytes = 59934;
+const int32_t kFreedBytes = 2 * kAllocatedBytes;
+const int32_t kAllocOverheadBytes = kAllocOps * 8;
+const int32_t kMaxAllocatedBytes = kAllocatedBytes / 2;
+
namespace tracked_objects {
class TrackedObjectsTest : public testing::Test {
@@ -85,7 +95,8 @@ class TrackedObjectsTest : public testing::Test {
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(birth_thread, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(birth_thread,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(count, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(count * run_ms,
@@ -100,7 +111,8 @@ class TrackedObjectsTest : public testing::Test {
EXPECT_EQ(queue_ms,
process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(death_thread, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(death_thread,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -108,6 +120,16 @@ class TrackedObjectsTest : public testing::Test {
// Sets time that will be returned by ThreadData::Now().
static void SetTestTime(unsigned int test_time) { test_time_ = test_time; }
+ int GetNumThreadData() {
+ int num_thread_data = 0;
+ ThreadData* current = ThreadData::first();
+ while (current) {
+ ++num_thread_data;
+ current = current->next();
+ }
+ return num_thread_data;
+ }
+
private:
// Returns test time in milliseconds.
static unsigned int GetTestTime() { return test_time_; }
@@ -223,7 +245,8 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
process_data_phase.tasks[0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(kWorkerThreadName,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(time_elapsed,
process_data_phase.tasks[0].death_data.run_duration_sum);
@@ -234,10 +257,11 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(kWorkerThreadName,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
}
-TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
+TEST_F(TrackedObjectsTest, DeathDataTestRecordDurations) {
ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
std::unique_ptr<DeathData> data(new DeathData());
@@ -255,7 +279,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
int32_t queue_ms = 8;
  const int kUnrandomInt = 0;  // Fake random int that ensures we sample data.
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms);
EXPECT_EQ(data->run_duration_max(), run_ms);
EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -265,7 +289,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
EXPECT_EQ(data->count(), 1);
EXPECT_EQ(nullptr, data->last_phase_snapshot());
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
EXPECT_EQ(data->run_duration_max(), run_ms);
EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -276,18 +300,77 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
EXPECT_EQ(nullptr, data->last_phase_snapshot());
}
+TEST_F(TrackedObjectsTest, DeathDataTestRecordAllocations) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ std::unique_ptr<DeathData> data(new DeathData());
+ ASSERT_NE(data, nullptr);
+
+ EXPECT_EQ(data->alloc_ops(), 0);
+ EXPECT_EQ(data->free_ops(), 0);
+ EXPECT_EQ(data->allocated_bytes(), 0);
+ EXPECT_EQ(data->freed_bytes(), 0);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 0);
+ EXPECT_EQ(data->max_allocated_bytes(), 0);
+
+ EXPECT_EQ(nullptr, data->last_phase_snapshot());
+
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), kAllocOps);
+ EXPECT_EQ(data->free_ops(), kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
+ // Record another batch, with a smaller max.
+ const int32_t kSmallerMaxAllocatedBytes = kMaxAllocatedBytes / 2;
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kSmallerMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
+ // Now with a larger max.
+ const int32_t kLargerMaxAllocatedBytes = kMaxAllocatedBytes * 2;
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kLargerMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), 3 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 3 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 3 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 3 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 3 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kLargerMaxAllocatedBytes);
+
+ // Saturate everything.
+ data->RecordAllocations(INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX);
+ EXPECT_EQ(data->alloc_ops(), INT_MAX);
+ EXPECT_EQ(data->free_ops(), INT_MAX);
+ EXPECT_EQ(data->allocated_bytes(), INT_MAX);
+ EXPECT_EQ(data->freed_bytes(), INT_MAX);
+ EXPECT_EQ(data->alloc_overhead_bytes(), INT_MAX);
+ EXPECT_EQ(data->max_allocated_bytes(), INT_MAX);
+}
+
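The "Saturate everything" expectations above imply that DeathData accumulates
its allocation counters with clamped arithmetic rather than wrapping. A sketch
of that kind of saturating add (the exact accumulator in tracked_objects.cc
may differ in detail); note that max_allocated_bytes is a running max, not a
sum, which is why a smaller sample leaves it unchanged:

    #include <cstdint>
    #include <limits>

    // Clamp the sum of two non-negative counters at INT_MAX.
    inline int32_t SaturatingAdd(int32_t a, int32_t b) {
      const int64_t sum = static_cast<int64_t>(a) + b;
      if (sum > std::numeric_limits<int32_t>::max())
        return std::numeric_limits<int32_t>::max();
      return static_cast<int32_t>(sum);
    }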
TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
std::unique_ptr<DeathData> data(new DeathData());
ASSERT_NE(data, nullptr);
- int32_t run_ms = 42;
- int32_t queue_ms = 8;
+ const int32_t run_ms = 42;
+ const int32_t queue_ms = 8;
  const int kUnrandomInt = 0;  // Fake random int that ensures we sample data.
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
data->OnProfilingPhaseCompleted(123);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
@@ -297,6 +380,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
EXPECT_EQ(data->queue_duration_max(), 0);
EXPECT_EQ(data->queue_duration_sample(), queue_ms);
EXPECT_EQ(data->count(), 2);
+
+ EXPECT_EQ(data->alloc_ops(), kAllocOps);
+ EXPECT_EQ(data->free_ops(), kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
ASSERT_NE(nullptr, data->last_phase_snapshot());
EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -311,12 +402,26 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
data->last_phase_snapshot()->death_data.queue_duration_max);
EXPECT_EQ(queue_ms,
data->last_phase_snapshot()->death_data.queue_duration_sample);
+
+ EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
+ EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
+ EXPECT_EQ(kAllocatedBytes,
+ data->last_phase_snapshot()->death_data.allocated_bytes);
+ EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
+ EXPECT_EQ(kAllocOverheadBytes,
+ data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
+ EXPECT_EQ(kMaxAllocatedBytes,
+ data->last_phase_snapshot()->death_data.max_allocated_bytes);
+
EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
- int32_t run_ms1 = 21;
- int32_t queue_ms1 = 4;
+ const int32_t run_ms1 = 21;
+ const int32_t queue_ms1 = 4;
+
+ data->RecordDurations(queue_ms1, run_ms1, kUnrandomInt);
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
- data->RecordDeath(queue_ms1, run_ms1, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms + run_ms1);
EXPECT_EQ(data->run_duration_max(), run_ms1);
EXPECT_EQ(data->run_duration_sample(), run_ms1);
@@ -324,6 +429,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
EXPECT_EQ(data->queue_duration_max(), queue_ms1);
EXPECT_EQ(data->queue_duration_sample(), queue_ms1);
EXPECT_EQ(data->count(), 3);
+
+ EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
ASSERT_NE(nullptr, data->last_phase_snapshot());
EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -338,6 +451,17 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
data->last_phase_snapshot()->death_data.queue_duration_max);
EXPECT_EQ(queue_ms,
data->last_phase_snapshot()->death_data.queue_duration_sample);
+
+ EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
+ EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
+ EXPECT_EQ(kAllocatedBytes,
+ data->last_phase_snapshot()->death_data.allocated_bytes);
+ EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
+ EXPECT_EQ(kAllocOverheadBytes,
+ data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
+ EXPECT_EQ(kMaxAllocatedBytes,
+ data->last_phase_snapshot()->death_data.max_allocated_bytes);
+
EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
}
@@ -353,6 +477,13 @@ TEST_F(TrackedObjectsTest, Delta) {
snapshot.queue_duration_max = 101;
snapshot.queue_duration_sample = 26;
+ snapshot.alloc_ops = 95;
+ snapshot.free_ops = 90;
+ snapshot.allocated_bytes = 10240;
+ snapshot.freed_bytes = 4096;
+ snapshot.alloc_overhead_bytes = 950;
+ snapshot.max_allocated_bytes = 10240;
+
DeathDataSnapshot older_snapshot;
older_snapshot.count = 2;
older_snapshot.run_duration_sum = 95;
@@ -362,6 +493,13 @@ TEST_F(TrackedObjectsTest, Delta) {
older_snapshot.queue_duration_max = 99;
older_snapshot.queue_duration_sample = 21;
+ older_snapshot.alloc_ops = 45;
+ older_snapshot.free_ops = 40;
+ older_snapshot.allocated_bytes = 4096;
+ older_snapshot.freed_bytes = 2048;
+ older_snapshot.alloc_overhead_bytes = 450;
+ older_snapshot.max_allocated_bytes = 10200;
+
const DeathDataSnapshot& delta = snapshot.Delta(older_snapshot);
EXPECT_EQ(8, delta.count);
EXPECT_EQ(5, delta.run_duration_sum);
@@ -370,6 +508,13 @@ TEST_F(TrackedObjectsTest, Delta) {
EXPECT_EQ(10, delta.queue_duration_sum);
EXPECT_EQ(101, delta.queue_duration_max);
EXPECT_EQ(26, delta.queue_duration_sample);
+
+ EXPECT_EQ(50, delta.alloc_ops);
+ EXPECT_EQ(50, delta.free_ops);
+ EXPECT_EQ(6144, delta.allocated_bytes);
+ EXPECT_EQ(2048, delta.freed_bytes);
+ EXPECT_EQ(500, delta.alloc_overhead_bytes);
+ EXPECT_EQ(10240, delta.max_allocated_bytes);
}
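These expectations pin down the Delta contract: additive fields (count, the
duration sums, the alloc/free totals) subtract the older snapshot, while max
and sample fields simply carry the newer snapshot's values, since a per-window
max cannot be recovered by subtraction. A reduced sketch with an illustrative
struct, not the real DeathDataSnapshot:

    struct MiniSnapshot {
      int count;
      int run_duration_sum;
      int run_duration_max;     // Not meaningfully subtractable.
      int run_duration_sample;  // Newest sampled value wins.
    };

    MiniSnapshot Delta(const MiniSnapshot& newer, const MiniSnapshot& older) {
      return {newer.count - older.count,
              newer.run_duration_sum - older.run_duration_sum,
              newer.run_duration_max, newer.run_duration_sample};
    }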
TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotWorkerThread) {
@@ -531,7 +676,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -541,7 +687,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -555,7 +702,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -565,7 +713,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -648,7 +797,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -658,7 +808,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -672,7 +823,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -682,7 +834,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
auto it2 = process_data.phased_snapshots.find(2);
ASSERT_TRUE(it2 != process_data.phased_snapshots.end());
@@ -696,7 +849,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase2.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase2.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase2.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_sum);
@@ -706,7 +860,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase2.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -753,7 +908,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesSecondEmpty) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -763,7 +919,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesSecondEmpty) {
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -816,7 +973,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesFirstEmpty) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -826,7 +984,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesFirstEmpty) {
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -993,7 +1152,8 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
process_data_phase.tasks[0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_sum);
EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_max);
@@ -1001,13 +1161,15 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sum);
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(kFile, process_data_phase.tasks[1].birth.location.file_name);
EXPECT_EQ(kFunction,
process_data_phase.tasks[1].birth.location.function_name);
EXPECT_EQ(kSecondFakeLineNumber,
process_data_phase.tasks[1].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[1].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[1].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[1].death_data.count);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_max);
@@ -1015,7 +1177,8 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_max);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sample);
- EXPECT_EQ(kStillAlive, process_data_phase.tasks[1].death_thread_name);
+ EXPECT_EQ(kStillAlive,
+ process_data_phase.tasks[1].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -1158,7 +1321,8 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
process_data_phase.tasks[t0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[t0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[t0].death_data.count);
EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_sum);
EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_max);
@@ -1166,13 +1330,15 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sum);
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t0].death_sanitized_thread_name);
EXPECT_EQ(kFile, process_data_phase.tasks[t1].birth.location.file_name);
EXPECT_EQ(kFunction,
process_data_phase.tasks[t1].birth.location.function_name);
EXPECT_EQ(kSecondFakeLineNumber,
process_data_phase.tasks[t1].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t1].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.count);
EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_sum);
EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_max);
@@ -1180,8 +1346,30 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sum);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_max);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t1].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
+// Repeatedly create and stop named threads. Verify that the number of
+// instantiated ThreadData instances is equal to the number of distinct
+// sanitized thread names used in the test (plus one for the main thread).
+TEST_F(TrackedObjectsTest, ReuseRetiredThreadData) {
+ const char* const kThreadNames[] = {"Foo%d", "Bar%d", "123Dummy%d",
+ "456Dummy%d", "%d"};
+ constexpr int kNumIterations = 10;
+ EXPECT_EQ(0, GetNumThreadData());
+
+ for (int i = 0; i < kNumIterations; ++i) {
+ for (const char* thread_name : kThreadNames) {
+ base::Thread thread(base::StringPrintf(thread_name, i));
+ EXPECT_TRUE(thread.Start());
+ }
+ }
+
+ // Expect one ThreadData instance for each element in |kThreadNames| and one
+ // ThreadData instance for the main thread.
+ EXPECT_EQ(static_cast<int>(arraysize(kThreadNames) + 1), GetNumThreadData());
+}
+
} // namespace tracked_objects
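ReuseRetiredThreadData depends on the sanitization rule added to
tracked_objects.h: trailing digits in a thread name collapse to a single '*',
so "Foo0" through "Foo9" all map to "Foo*" and can reuse one retired
ThreadData instance. A standalone sketch of that rule (the helper name is
illustrative; the real logic lives inside ThreadData):

    #include <cctype>
    #include <string>

    std::string SanitizeThreadName(const std::string& name) {
      size_t end = name.size();
      while (end > 0 && std::isdigit(static_cast<unsigned char>(name[end - 1])))
        --end;
      if (end == name.size())
        return name;  // No trailing digits, e.g. "SomeMainThreadName".
      return name.substr(0, end) + "*";  // "WorkerThread-1" -> "WorkerThread-*".
    }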
diff --git a/base/tuple.h b/base/tuple.h
index e82f2e5f06..34fd789976 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -28,7 +28,6 @@
#include <stddef.h>
#include <tuple>
-#include "base/bind_helpers.h"
#include "build/build_config.h"
namespace base {
@@ -43,56 +42,6 @@ struct IndexSequence {};
template <size_t... Ns>
struct MakeIndexSequenceImpl;
-#if defined(_PREFAST_) && defined(OS_WIN)
-
-// Work around VC++ 2013 /analyze internal compiler error:
-// https://connect.microsoft.com/VisualStudio/feedback/details/1053626
-
-template <> struct MakeIndexSequenceImpl<0> {
- using Type = IndexSequence<>;
-};
-template <> struct MakeIndexSequenceImpl<1> {
- using Type = IndexSequence<0>;
-};
-template <> struct MakeIndexSequenceImpl<2> {
- using Type = IndexSequence<0,1>;
-};
-template <> struct MakeIndexSequenceImpl<3> {
- using Type = IndexSequence<0,1,2>;
-};
-template <> struct MakeIndexSequenceImpl<4> {
- using Type = IndexSequence<0,1,2,3>;
-};
-template <> struct MakeIndexSequenceImpl<5> {
- using Type = IndexSequence<0,1,2,3,4>;
-};
-template <> struct MakeIndexSequenceImpl<6> {
- using Type = IndexSequence<0,1,2,3,4,5>;
-};
-template <> struct MakeIndexSequenceImpl<7> {
- using Type = IndexSequence<0,1,2,3,4,5,6>;
-};
-template <> struct MakeIndexSequenceImpl<8> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7>;
-};
-template <> struct MakeIndexSequenceImpl<9> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8>;
-};
-template <> struct MakeIndexSequenceImpl<10> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9>;
-};
-template <> struct MakeIndexSequenceImpl<11> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10>;
-};
-template <> struct MakeIndexSequenceImpl<12> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11>;
-};
-template <> struct MakeIndexSequenceImpl<13> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
-};
-
-#else // defined(OS_WIN) && defined(_PREFAST_)
-
template <size_t... Ns>
struct MakeIndexSequenceImpl<0, Ns...> {
using Type = IndexSequence<Ns...>;
@@ -102,8 +51,6 @@ template <size_t N, size_t... Ns>
struct MakeIndexSequenceImpl<N, Ns...>
: MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
-#endif // defined(OS_WIN) && defined(_PREFAST_)
-
// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
// rvalue-reference of a tuple, where an rvalue-reference is expected.
template <size_t I, typename... Ts>
@@ -121,6 +68,10 @@ auto get(T& t) -> decltype(std::get<I>(t)) {
template <size_t N>
using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
+template <typename T>
+using MakeIndexSequenceForTuple =
+ MakeIndexSequence<std::tuple_size<typename std::decay<T>::type>::value>;
+
// Dispatchers ----------------------------------------------------------------
//
// Helper functions that call the given method on an object, with the unpacked
@@ -132,62 +83,63 @@ using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
// Non-Static Dispatchers with no out params.
-template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
+template <typename ObjT, typename Method, typename Tuple, size_t... Ns>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const std::tuple<Ts...>& arg,
+ Tuple&& args,
IndexSequence<Ns...>) {
- (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
+ (obj->*method)(base::get<Ns>(std::forward<Tuple>(args))...);
}
-template <typename ObjT, typename Method, typename... Ts>
+template <typename ObjT, typename Method, typename Tuple>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const std::tuple<Ts...>& arg) {
- DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
+ Tuple&& args) {
+ DispatchToMethodImpl(obj, method, std::forward<Tuple>(args),
+ MakeIndexSequenceForTuple<Tuple>());
}
// Static Dispatchers with no out params.
-template <typename Function, typename... Ts, size_t... Ns>
+template <typename Function, typename Tuple, size_t... Ns>
inline void DispatchToFunctionImpl(Function function,
- const std::tuple<Ts...>& arg,
+ Tuple&& args,
IndexSequence<Ns...>) {
- (*function)(internal::Unwrap(std::get<Ns>(arg))...);
+ (*function)(base::get<Ns>(std::forward<Tuple>(args))...);
}
-template <typename Function, typename... Ts>
-inline void DispatchToFunction(Function function,
- const std::tuple<Ts...>& arg) {
- DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
+template <typename Function, typename Tuple>
+inline void DispatchToFunction(Function function, Tuple&& args) {
+ DispatchToFunctionImpl(function, std::forward<Tuple>(args),
+ MakeIndexSequenceForTuple<Tuple>());
}
// Dispatchers with out parameters.
template <typename ObjT,
typename Method,
- typename... InTs,
- typename... OutTs,
+ typename InTuple,
+ typename OutTuple,
size_t... InNs,
size_t... OutNs>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const std::tuple<InTs...>& in,
- std::tuple<OutTs...>* out,
+ InTuple&& in,
+ OutTuple* out,
IndexSequence<InNs...>,
IndexSequence<OutNs...>) {
- (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
+ (obj->*method)(base::get<InNs>(std::forward<InTuple>(in))...,
&std::get<OutNs>(*out)...);
}
-template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
+template <typename ObjT, typename Method, typename InTuple, typename OutTuple>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const std::tuple<InTs...>& in,
- std::tuple<OutTs...>* out) {
- DispatchToMethodImpl(obj, method, in, out,
- MakeIndexSequence<sizeof...(InTs)>(),
- MakeIndexSequence<sizeof...(OutTs)>());
+ InTuple&& in,
+ OutTuple* out) {
+ DispatchToMethodImpl(obj, method, std::forward<InTuple>(in), out,
+ MakeIndexSequenceForTuple<InTuple>(),
+ MakeIndexSequenceForTuple<OutTuple>());
}
} // namespace base
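The reworked dispatchers take the tuple by forwarding reference and derive the
index sequence from std::tuple_size, so rvalue tuples can move their elements
into the call instead of copying them. A self-contained equivalent that uses
the C++14 standard helpers in place of base's IndexSequence machinery:

    #include <iostream>
    #include <string>
    #include <tuple>
    #include <utility>

    template <typename Obj, typename Method, typename Tuple, std::size_t... Ns>
    auto DispatchImpl(Obj* obj, Method method, Tuple&& args,
                      std::index_sequence<Ns...>) {
      return (obj->*method)(std::get<Ns>(std::forward<Tuple>(args))...);
    }

    template <typename Obj, typename Method, typename Tuple>
    auto Dispatch(Obj* obj, Method method, Tuple&& args) {
      using Indices = std::make_index_sequence<
          std::tuple_size<typename std::decay<Tuple>::type>::value>;
      return DispatchImpl(obj, method, std::forward<Tuple>(args), Indices());
    }

    struct Printer {
      void Print(const std::string& s, int n) {
        for (int i = 0; i < n; ++i)
          std::cout << s << "\n";
      }
    };

    int main() {
      Printer p;
      // An rvalue tuple: its string is moved into the call, mirroring the new
      // Tuple&& / base::get<Ns> forwarding above.
      Dispatch(&p, &Printer::Print, std::make_tuple(std::string("hello"), 2));
      return 0;
    }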
diff --git a/base/values.cc b/base/values.cc
index d579699079..5cc0d693bd 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -21,6 +21,12 @@ namespace base {
namespace {
+const char* const kTypeNames[] = {"null", "boolean", "integer", "double",
+ "string", "binary", "dictionary", "list"};
+static_assert(arraysize(kTypeNames) ==
+ static_cast<size_t>(Value::Type::LIST) + 1,
+ "kTypeNames Has Wrong Size");
+
std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
// Make a deep copy of |node|, but don't include empty lists or dictionaries
@@ -55,10 +61,10 @@ std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
switch (node.GetType()) {
- case Value::TYPE_LIST:
+ case Value::Type::LIST:
return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
- case Value::TYPE_DICTIONARY:
+ case Value::Type::DICTIONARY:
return CopyDictionaryWithoutEmptyChildren(
static_cast<const DictionaryValue&>(node));
@@ -69,266 +75,541 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
} // namespace
-Value::~Value() {
-}
-
// static
std::unique_ptr<Value> Value::CreateNullValue() {
- return WrapUnique(new Value(TYPE_NULL));
+ return WrapUnique(new Value(Type::NONE));
}
-bool Value::GetAsBinary(const BinaryValue**) const {
- return false;
+// static
+std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
+ const char* buffer,
+ size_t size) {
+ return MakeUnique<BinaryValue>(std::vector<char>(buffer, buffer + size));
+}
+
+Value::Value(const Value& that) {
+ InternalCopyConstructFrom(that);
+}
+
+Value::Value(Value&& that) {
+ InternalMoveConstructFrom(std::move(that));
+}
+
+Value::Value() : type_(Type::NONE) {}
+
+Value::Value(Type type) : type_(type) {
+ // Initialize with the default value.
+ switch (type_) {
+ case Type::NONE:
+ return;
+
+ case Type::BOOLEAN:
+ bool_value_ = false;
+ return;
+ case Type::INTEGER:
+ int_value_ = 0;
+ return;
+ case Type::DOUBLE:
+ double_value_ = 0.0;
+ return;
+ case Type::STRING:
+ string_value_.Init();
+ return;
+ case Type::BINARY:
+ binary_value_.Init();
+ return;
+ case Type::DICTIONARY:
+ dict_ptr_.Init(MakeUnique<DictStorage>());
+ return;
+ case Type::LIST:
+ list_.Init();
+ return;
+ }
}
-bool Value::GetAsBoolean(bool*) const {
- return false;
-}
+Value::Value(bool in_bool) : type_(Type::BOOLEAN), bool_value_(in_bool) {}
-bool Value::GetAsInteger(int*) const {
- return false;
-}
+Value::Value(int in_int) : type_(Type::INTEGER), int_value_(in_int) {}
-bool Value::GetAsDouble(double*) const {
- return false;
+Value::Value(double in_double) : type_(Type::DOUBLE), double_value_(in_double) {
+ if (!std::isfinite(double_value_)) {
+ NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
+ << "values cannot be represented in JSON";
+ double_value_ = 0.0;
+ }
}
-bool Value::GetAsString(std::string*) const {
- return false;
+Value::Value(const char* in_string) : type_(Type::STRING) {
+ string_value_.Init(in_string);
+ DCHECK(IsStringUTF8(*string_value_));
}
-bool Value::GetAsString(string16*) const {
- return false;
+Value::Value(const std::string& in_string) : type_(Type::STRING) {
+ string_value_.Init(in_string);
+ DCHECK(IsStringUTF8(*string_value_));
}
-bool Value::GetAsString(const StringValue**) const {
- return false;
+Value::Value(std::string&& in_string) : type_(Type::STRING) {
+ string_value_.Init(std::move(in_string));
+ DCHECK(IsStringUTF8(*string_value_));
}
-bool Value::GetAsList(ListValue**) {
- return false;
+Value::Value(const char16* in_string) : type_(Type::STRING) {
+ string_value_.Init(UTF16ToUTF8(in_string));
}
-bool Value::GetAsList(const ListValue**) const {
- return false;
+Value::Value(const string16& in_string) : type_(Type::STRING) {
+ string_value_.Init(UTF16ToUTF8(in_string));
}
-bool Value::GetAsDictionary(DictionaryValue**) {
- return false;
+Value::Value(StringPiece in_string) : Value(in_string.as_string()) {}
+
+Value::Value(const std::vector<char>& in_blob) : type_(Type::BINARY) {
+ binary_value_.Init(in_blob);
}
-bool Value::GetAsDictionary(const DictionaryValue**) const {
- return false;
+Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
+ binary_value_.Init(std::move(in_blob));
}
-Value* Value::DeepCopy() const {
- // This method should only be getting called for null Values--all subclasses
- // need to provide their own implementation;.
- DCHECK(IsType(TYPE_NULL));
- return CreateNullValue().release();
+Value& Value::operator=(const Value& that) {
+ if (this != &that) {
+ if (type_ == that.type_) {
+ InternalCopyAssignFromSameType(that);
+ } else {
+ InternalCleanup();
+ InternalCopyConstructFrom(that);
+ }
+ }
+
+ return *this;
}
-std::unique_ptr<Value> Value::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+Value& Value::operator=(Value&& that) {
+ if (this != &that) {
+ if (type_ == that.type_) {
+ InternalMoveAssignFromSameType(std::move(that));
+ } else {
+ InternalCleanup();
+ InternalMoveConstructFrom(std::move(that));
+ }
+ }
+
+ return *this;
}
-bool Value::Equals(const Value* other) const {
- // This method should only be getting called for null Values--all subclasses
- // need to provide their own implementation;.
- DCHECK(IsType(TYPE_NULL));
- return other->IsType(TYPE_NULL);
+Value::~Value() {
+ InternalCleanup();
}
// static
-bool Value::Equals(const Value* a, const Value* b) {
- if ((a == NULL) && (b == NULL)) return true;
- if ((a == NULL) ^ (b == NULL)) return false;
- return a->Equals(b);
+const char* Value::GetTypeName(Value::Type type) {
+ DCHECK_GE(static_cast<int>(type), 0);
+ DCHECK_LT(static_cast<size_t>(type), arraysize(kTypeNames));
+ return kTypeNames[static_cast<size_t>(type)];
}
-Value::Value(Type type) : type_(type) {}
-
-Value::Value(const Value& that) : type_(that.type_) {}
+bool Value::GetBool() const {
+ CHECK(is_bool());
+ return bool_value_;
+}
-Value& Value::operator=(const Value& that) {
- type_ = that.type_;
- return *this;
+int Value::GetInt() const {
+ CHECK(is_int());
+ return int_value_;
}
-///////////////////// FundamentalValue ////////////////////
+double Value::GetDouble() const {
+ if (is_double())
+ return double_value_;
+ if (is_int())
+ return int_value_;
+ CHECK(false);
+ return 0.0;
+}
-FundamentalValue::FundamentalValue(bool in_value)
- : Value(TYPE_BOOLEAN), boolean_value_(in_value) {
+const std::string& Value::GetString() const {
+ CHECK(is_string());
+ return *string_value_;
}
-FundamentalValue::FundamentalValue(int in_value)
- : Value(TYPE_INTEGER), integer_value_(in_value) {
+const std::vector<char>& Value::GetBlob() const {
+ CHECK(is_blob());
+ return *binary_value_;
}
-FundamentalValue::FundamentalValue(double in_value)
- : Value(TYPE_DOUBLE), double_value_(in_value) {
- if (!std::isfinite(double_value_)) {
- NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
- << "values cannot be represented in JSON";
- double_value_ = 0.0;
- }
+size_t Value::GetSize() const {
+ return GetBlob().size();
}
-FundamentalValue::~FundamentalValue() {
+const char* Value::GetBuffer() const {
+ return GetBlob().data();
}
-bool FundamentalValue::GetAsBoolean(bool* out_value) const {
- if (out_value && IsType(TYPE_BOOLEAN))
- *out_value = boolean_value_;
- return (IsType(TYPE_BOOLEAN));
+bool Value::GetAsBoolean(bool* out_value) const {
+ if (out_value && is_bool()) {
+ *out_value = bool_value_;
+ return true;
+ }
+ return is_bool();
}
-bool FundamentalValue::GetAsInteger(int* out_value) const {
- if (out_value && IsType(TYPE_INTEGER))
- *out_value = integer_value_;
- return (IsType(TYPE_INTEGER));
+bool Value::GetAsInteger(int* out_value) const {
+ if (out_value && is_int()) {
+ *out_value = int_value_;
+ return true;
+ }
+ return is_int();
}
-bool FundamentalValue::GetAsDouble(double* out_value) const {
- if (out_value && IsType(TYPE_DOUBLE))
+bool Value::GetAsDouble(double* out_value) const {
+ if (out_value && is_double()) {
*out_value = double_value_;
- else if (out_value && IsType(TYPE_INTEGER))
- *out_value = integer_value_;
- return (IsType(TYPE_DOUBLE) || IsType(TYPE_INTEGER));
+ return true;
+ } else if (out_value && is_int()) {
+ // Allow promotion from int to double.
+ *out_value = int_value_;
+ return true;
+ }
+ return is_double() || is_int();
}
-FundamentalValue* FundamentalValue::DeepCopy() const {
- switch (GetType()) {
- case TYPE_BOOLEAN:
- return new FundamentalValue(boolean_value_);
-
- case TYPE_INTEGER:
- return new FundamentalValue(integer_value_);
-
- case TYPE_DOUBLE:
- return new FundamentalValue(double_value_);
-
- default:
- NOTREACHED();
- return NULL;
+bool Value::GetAsString(std::string* out_value) const {
+ if (out_value && is_string()) {
+ *out_value = *string_value_;
+ return true;
}
+ return is_string();
}
-bool FundamentalValue::Equals(const Value* other) const {
- if (other->GetType() != GetType())
- return false;
-
- switch (GetType()) {
- case TYPE_BOOLEAN: {
- bool lhs, rhs;
- return GetAsBoolean(&lhs) && other->GetAsBoolean(&rhs) && lhs == rhs;
- }
- case TYPE_INTEGER: {
- int lhs, rhs;
- return GetAsInteger(&lhs) && other->GetAsInteger(&rhs) && lhs == rhs;
- }
- case TYPE_DOUBLE: {
- double lhs, rhs;
- return GetAsDouble(&lhs) && other->GetAsDouble(&rhs) && lhs == rhs;
- }
- default:
- NOTREACHED();
- return false;
+bool Value::GetAsString(string16* out_value) const {
+ if (out_value && is_string()) {
+ *out_value = UTF8ToUTF16(*string_value_);
+ return true;
}
+ return is_string();
}
-///////////////////// StringValue ////////////////////
-
-StringValue::StringValue(const std::string& in_value)
- : Value(TYPE_STRING),
- value_(in_value) {
- DCHECK(IsStringUTF8(in_value));
+bool Value::GetAsString(const Value** out_value) const {
+ if (out_value && is_string()) {
+ *out_value = static_cast<const Value*>(this);
+ return true;
+ }
+ return is_string();
}
-StringValue::StringValue(const string16& in_value)
- : Value(TYPE_STRING),
- value_(UTF16ToUTF8(in_value)) {
+bool Value::GetAsString(StringPiece* out_value) const {
+ if (out_value && is_string()) {
+ *out_value = *string_value_;
+ return true;
+ }
+ return is_string();
}
-StringValue::~StringValue() {
+bool Value::GetAsBinary(const BinaryValue** out_value) const {
+ if (out_value && is_blob()) {
+ *out_value = this;
+ return true;
+ }
+ return is_blob();
}
-std::string* StringValue::GetString() {
- return &value_;
+bool Value::GetAsList(ListValue** out_value) {
+ if (out_value && is_list()) {
+ *out_value = static_cast<ListValue*>(this);
+ return true;
+ }
+ return is_list();
}
-const std::string& StringValue::GetString() const {
- return value_;
+bool Value::GetAsList(const ListValue** out_value) const {
+ if (out_value && is_list()) {
+ *out_value = static_cast<const ListValue*>(this);
+ return true;
+ }
+ return is_list();
}
-bool StringValue::GetAsString(std::string* out_value) const {
- if (out_value)
- *out_value = value_;
- return true;
+bool Value::GetAsDictionary(DictionaryValue** out_value) {
+ if (out_value && is_dict()) {
+ *out_value = static_cast<DictionaryValue*>(this);
+ return true;
+ }
+ return is_dict();
}
-bool StringValue::GetAsString(string16* out_value) const {
- if (out_value)
- *out_value = UTF8ToUTF16(value_);
- return true;
+bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
+ if (out_value && is_dict()) {
+ *out_value = static_cast<const DictionaryValue*>(this);
+ return true;
+ }
+ return is_dict();
}
-bool StringValue::GetAsString(const StringValue** out_value) const {
- if (out_value)
- *out_value = this;
- return true;
+Value* Value::DeepCopy() const {
+  // Every Value type is handled explicitly here; subclasses no longer
+  // provide their own overrides.
+ switch (type()) {
+ case Type::NONE:
+ return CreateNullValue().release();
+
+ case Type::BOOLEAN:
+ return new Value(bool_value_);
+ case Type::INTEGER:
+ return new Value(int_value_);
+ case Type::DOUBLE:
+ return new Value(double_value_);
+ case Type::STRING:
+ return new Value(*string_value_);
+ // For now, make BinaryValues for backward-compatibility. Convert to
+ // Value when that code is deleted.
+ case Type::BINARY:
+ return new Value(*binary_value_);
+
+ // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+ // are completely inlined.
+ case Type::DICTIONARY: {
+ DictionaryValue* result = new DictionaryValue;
+
+ for (const auto& current_entry : **dict_ptr_) {
+ result->SetWithoutPathExpansion(current_entry.first,
+ current_entry.second->CreateDeepCopy());
+ }
+
+ return result;
+ }
+
+ case Type::LIST: {
+ ListValue* result = new ListValue;
+
+ for (const auto& entry : *list_)
+ result->Append(entry->CreateDeepCopy());
+
+ return result;
+ }
+
+ default:
+ NOTREACHED();
+ return nullptr;
+ }
}
-StringValue* StringValue::DeepCopy() const {
- return new StringValue(value_);
+std::unique_ptr<Value> Value::CreateDeepCopy() const {
+ return WrapUnique(DeepCopy());
}
-bool StringValue::Equals(const Value* other) const {
- if (other->GetType() != GetType())
+bool Value::Equals(const Value* other) const {
+ if (other->type() != type())
return false;
- std::string lhs, rhs;
- return GetAsString(&lhs) && other->GetAsString(&rhs) && lhs == rhs;
+
+ switch (type()) {
+ case Type::NONE:
+ return true;
+ case Type::BOOLEAN:
+ return bool_value_ == other->bool_value_;
+ case Type::INTEGER:
+ return int_value_ == other->int_value_;
+ case Type::DOUBLE:
+ return double_value_ == other->double_value_;
+ case Type::STRING:
+ return *string_value_ == *(other->string_value_);
+ case Type::BINARY:
+ return *binary_value_ == *(other->binary_value_);
+ // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+ // are completely inlined.
+ case Type::DICTIONARY: {
+ if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
+ return false;
+
+ return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
+ std::begin(**(other->dict_ptr_)),
+ [](const DictStorage::value_type& lhs,
+ const DictStorage::value_type& rhs) {
+ if (lhs.first != rhs.first)
+ return false;
+
+ return lhs.second->Equals(rhs.second.get());
+ });
+ }
+ case Type::LIST: {
+ if (list_->size() != other->list_->size())
+ return false;
+
+ return std::equal(std::begin(*list_), std::end(*list_),
+ std::begin(*(other->list_)),
+ [](const ListStorage::value_type& lhs,
+ const ListStorage::value_type& rhs) {
+ return lhs->Equals(rhs.get());
+ });
+ }
+ }
+
+ NOTREACHED();
+ return false;
}
-///////////////////// BinaryValue ////////////////////
+// static
+bool Value::Equals(const Value* a, const Value* b) {
+ if ((a == NULL) && (b == NULL)) return true;
+ if ((a == NULL) ^ (b == NULL)) return false;
+ return a->Equals(b);
+}
+
+void Value::InternalCopyFundamentalValue(const Value& that) {
+ switch (type_) {
+ case Type::NONE:
+ // Nothing to do.
+ return;
+
+ case Type::BOOLEAN:
+ bool_value_ = that.bool_value_;
+ return;
+ case Type::INTEGER:
+ int_value_ = that.int_value_;
+ return;
+ case Type::DOUBLE:
+ double_value_ = that.double_value_;
+ return;
-BinaryValue::BinaryValue()
- : Value(TYPE_BINARY),
- size_(0) {
+ default:
+ NOTREACHED();
+ }
}
-BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
- : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}
+void Value::InternalCopyConstructFrom(const Value& that) {
+ type_ = that.type_;
-BinaryValue::~BinaryValue() {
+ switch (type_) {
+ case Type::NONE:
+ case Type::BOOLEAN:
+ case Type::INTEGER:
+ case Type::DOUBLE:
+ InternalCopyFundamentalValue(that);
+ return;
+
+ case Type::STRING:
+ string_value_.Init(*that.string_value_);
+ return;
+ case Type::BINARY:
+ binary_value_.Init(*that.binary_value_);
+ return;
+ // DictStorage and ListStorage are move-only types due to the presence of
+ // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
+ // can be copied directly.
+ case Type::DICTIONARY:
+ dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
+ return;
+ case Type::LIST:
+ list_.Init(std::move(*that.CreateDeepCopy()->list_));
+ return;
+ }
}
-// static
-std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
- const char* buffer,
- size_t size) {
- std::unique_ptr<char[]> buffer_copy(new char[size]);
- memcpy(buffer_copy.get(), buffer, size);
- return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
+void Value::InternalMoveConstructFrom(Value&& that) {
+ type_ = that.type_;
+
+ switch (type_) {
+ case Type::NONE:
+ case Type::BOOLEAN:
+ case Type::INTEGER:
+ case Type::DOUBLE:
+ InternalCopyFundamentalValue(that);
+ return;
+
+ case Type::STRING:
+ string_value_.InitFromMove(std::move(that.string_value_));
+ return;
+ case Type::BINARY:
+ binary_value_.InitFromMove(std::move(that.binary_value_));
+ return;
+ case Type::DICTIONARY:
+ dict_ptr_.InitFromMove(std::move(that.dict_ptr_));
+ return;
+ case Type::LIST:
+ list_.InitFromMove(std::move(that.list_));
+ return;
+ }
}
-bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
- if (out_value)
- *out_value = this;
- return true;
+void Value::InternalCopyAssignFromSameType(const Value& that) {
+ CHECK_EQ(type_, that.type_);
+
+ switch (type_) {
+ case Type::NONE:
+ case Type::BOOLEAN:
+ case Type::INTEGER:
+ case Type::DOUBLE:
+ InternalCopyFundamentalValue(that);
+ return;
+
+ case Type::STRING:
+ *string_value_ = *that.string_value_;
+ return;
+ case Type::BINARY:
+ *binary_value_ = *that.binary_value_;
+ return;
+ // DictStorage and ListStorage are move-only types due to the presence of
+ // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
+ // can be copied directly.
+ case Type::DICTIONARY:
+ *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
+ return;
+ case Type::LIST:
+ *list_ = std::move(*that.CreateDeepCopy()->list_);
+ return;
+ }
}
-BinaryValue* BinaryValue::DeepCopy() const {
- return CreateWithCopiedBuffer(buffer_.get(), size_).release();
+void Value::InternalMoveAssignFromSameType(Value&& that) {
+ CHECK_EQ(type_, that.type_);
+
+ switch (type_) {
+ case Type::NONE:
+ case Type::BOOLEAN:
+ case Type::INTEGER:
+ case Type::DOUBLE:
+ InternalCopyFundamentalValue(that);
+ return;
+
+ case Type::STRING:
+ *string_value_ = std::move(*that.string_value_);
+ return;
+ case Type::BINARY:
+ *binary_value_ = std::move(*that.binary_value_);
+ return;
+ case Type::DICTIONARY:
+ *dict_ptr_ = std::move(*that.dict_ptr_);
+ return;
+ case Type::LIST:
+ *list_ = std::move(*that.list_);
+ return;
+ }
}
-bool BinaryValue::Equals(const Value* other) const {
- if (other->GetType() != GetType())
- return false;
- const BinaryValue* other_binary = static_cast<const BinaryValue*>(other);
- if (other_binary->size_ != size_)
- return false;
- return !memcmp(GetBuffer(), other_binary->GetBuffer(), size_);
+void Value::InternalCleanup() {
+ switch (type_) {
+ case Type::NONE:
+ case Type::BOOLEAN:
+ case Type::INTEGER:
+ case Type::DOUBLE:
+ // Nothing to do
+ return;
+
+ case Type::STRING:
+ string_value_.Destroy();
+ return;
+ case Type::BINARY:
+ binary_value_.Destroy();
+ return;
+ case Type::DICTIONARY:
+ dict_ptr_.Destroy();
+ return;
+ case Type::LIST:
+ list_.Destroy();
+ return;
+ }
}
///////////////////// DictionaryValue ////////////////////
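The rewrite above turns base::Value from a class hierarchy into a single
tagged union: a Type field selects which manually constructed union member is
live, and InternalCleanup() destroys it before the storage is reused. A
minimal sketch of that pattern, with placement new standing in for base's
ManualConstructor-style wrappers:

    #include <new>
    #include <string>
    #include <utility>

    class TaggedValue {
     public:
      TaggedValue() : type_(Type::NONE) {}
      explicit TaggedValue(bool b) : type_(Type::BOOLEAN), bool_value_(b) {}
      explicit TaggedValue(std::string s) : type_(Type::STRING) {
        new (&string_value_) std::string(std::move(s));
      }
      ~TaggedValue() { Cleanup(); }

      // Copy/move omitted for brevity; the real Value implements them with
      // the Internal{Copy,Move}{Construct,Assign} helpers shown above.
      TaggedValue(const TaggedValue&) = delete;
      TaggedValue& operator=(const TaggedValue&) = delete;

     private:
      enum class Type { NONE, BOOLEAN, STRING };

      // Non-trivial union members need explicit destruction -- the job
      // Value::InternalCleanup() does for its string/binary/dict/list slots.
      void Cleanup() {
        if (type_ == Type::STRING)
          string_value_.~basic_string();
      }

      Type type_;
      union {
        bool bool_value_;
        std::string string_value_;
      };
    };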
@@ -344,122 +625,102 @@ std::unique_ptr<DictionaryValue> DictionaryValue::From(
return nullptr;
}
-DictionaryValue::DictionaryValue()
- : Value(TYPE_DICTIONARY) {
-}
-
-DictionaryValue::~DictionaryValue() {
- Clear();
-}
-
-bool DictionaryValue::GetAsDictionary(DictionaryValue** out_value) {
- if (out_value)
- *out_value = this;
- return true;
-}
-
-bool DictionaryValue::GetAsDictionary(const DictionaryValue** out_value) const {
- if (out_value)
- *out_value = this;
- return true;
-}
+DictionaryValue::DictionaryValue() : Value(Type::DICTIONARY) {}
-bool DictionaryValue::HasKey(const std::string& key) const {
+bool DictionaryValue::HasKey(StringPiece key) const {
DCHECK(IsStringUTF8(key));
- auto current_entry = dictionary_.find(key);
- DCHECK((current_entry == dictionary_.end()) || current_entry->second);
- return current_entry != dictionary_.end();
+ auto current_entry = (*dict_ptr_)->find(key.as_string());
+ DCHECK((current_entry == (*dict_ptr_)->end()) || current_entry->second);
+ return current_entry != (*dict_ptr_)->end();
}
void DictionaryValue::Clear() {
- dictionary_.clear();
+ (*dict_ptr_)->clear();
}
-void DictionaryValue::Set(const std::string& path,
- std::unique_ptr<Value> in_value) {
+void DictionaryValue::Set(StringPiece path, std::unique_ptr<Value> in_value) {
DCHECK(IsStringUTF8(path));
DCHECK(in_value);
- std::string current_path(path);
+ StringPiece current_path(path);
DictionaryValue* current_dictionary = this;
for (size_t delimiter_position = current_path.find('.');
- delimiter_position != std::string::npos;
+ delimiter_position != StringPiece::npos;
delimiter_position = current_path.find('.')) {
// Assume that we're indexing into a dictionary.
- std::string key(current_path, 0, delimiter_position);
- DictionaryValue* child_dictionary = NULL;
+ StringPiece key = current_path.substr(0, delimiter_position);
+ DictionaryValue* child_dictionary = nullptr;
if (!current_dictionary->GetDictionary(key, &child_dictionary)) {
child_dictionary = new DictionaryValue;
- current_dictionary->SetWithoutPathExpansion(key, child_dictionary);
+ current_dictionary->SetWithoutPathExpansion(
+ key, base::WrapUnique(child_dictionary));
}
current_dictionary = child_dictionary;
- current_path.erase(0, delimiter_position + 1);
+ current_path = current_path.substr(delimiter_position + 1);
}
current_dictionary->SetWithoutPathExpansion(current_path,
std::move(in_value));
}
-void DictionaryValue::Set(const std::string& path, Value* in_value) {
+void DictionaryValue::Set(StringPiece path, Value* in_value) {
Set(path, WrapUnique(in_value));
}
-void DictionaryValue::SetBoolean(const std::string& path, bool in_value) {
- Set(path, new FundamentalValue(in_value));
+void DictionaryValue::SetBoolean(StringPiece path, bool in_value) {
+ Set(path, new Value(in_value));
}
-void DictionaryValue::SetInteger(const std::string& path, int in_value) {
- Set(path, new FundamentalValue(in_value));
+void DictionaryValue::SetInteger(StringPiece path, int in_value) {
+ Set(path, new Value(in_value));
}
-void DictionaryValue::SetDouble(const std::string& path, double in_value) {
- Set(path, new FundamentalValue(in_value));
+void DictionaryValue::SetDouble(StringPiece path, double in_value) {
+ Set(path, new Value(in_value));
}
-void DictionaryValue::SetString(const std::string& path,
- const std::string& in_value) {
- Set(path, new StringValue(in_value));
+void DictionaryValue::SetString(StringPiece path, StringPiece in_value) {
+ Set(path, new Value(in_value));
}
-void DictionaryValue::SetString(const std::string& path,
- const string16& in_value) {
- Set(path, new StringValue(in_value));
+void DictionaryValue::SetString(StringPiece path, const string16& in_value) {
+ Set(path, new Value(in_value));
}
-void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
+void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
std::unique_ptr<Value> in_value) {
- dictionary_[key] = std::move(in_value);
+ (**dict_ptr_)[key.as_string()] = std::move(in_value);
}
-void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
+void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
Value* in_value) {
SetWithoutPathExpansion(key, WrapUnique(in_value));
}
-void DictionaryValue::SetBooleanWithoutPathExpansion(
- const std::string& path, bool in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+void DictionaryValue::SetBooleanWithoutPathExpansion(StringPiece path,
+ bool in_value) {
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
-void DictionaryValue::SetIntegerWithoutPathExpansion(
- const std::string& path, int in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+void DictionaryValue::SetIntegerWithoutPathExpansion(StringPiece path,
+ int in_value) {
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
-void DictionaryValue::SetDoubleWithoutPathExpansion(
- const std::string& path, double in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+void DictionaryValue::SetDoubleWithoutPathExpansion(StringPiece path,
+ double in_value) {
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
-void DictionaryValue::SetStringWithoutPathExpansion(
- const std::string& path, const std::string& in_value) {
- SetWithoutPathExpansion(path, new StringValue(in_value));
+void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
+ StringPiece in_value) {
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
-void DictionaryValue::SetStringWithoutPathExpansion(
- const std::string& path, const string16& in_value) {
- SetWithoutPathExpansion(path, new StringValue(in_value));
+void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
+ const string16& in_value) {
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
bool DictionaryValue::Get(StringPiece path,
@@ -472,8 +733,7 @@ bool DictionaryValue::Get(StringPiece path,
delimiter_position = current_path.find('.')) {
const DictionaryValue* child_dictionary = NULL;
if (!current_dictionary->GetDictionaryWithoutPathExpansion(
- current_path.substr(0, delimiter_position).as_string(),
- &child_dictionary)) {
+ current_path.substr(0, delimiter_position), &child_dictionary)) {
return false;
}
@@ -481,8 +741,7 @@ bool DictionaryValue::Get(StringPiece path,
current_path = current_path.substr(delimiter_position + 1);
}
- return current_dictionary->GetWithoutPathExpansion(current_path.as_string(),
- out_value);
+ return current_dictionary->GetWithoutPathExpansion(current_path, out_value);
}
bool DictionaryValue::Get(StringPiece path, Value** out_value) {
@@ -491,8 +750,7 @@ bool DictionaryValue::Get(StringPiece path, Value** out_value) {
const_cast<const Value**>(out_value));
}
-bool DictionaryValue::GetBoolean(const std::string& path,
- bool* bool_value) const {
+bool DictionaryValue::GetBoolean(StringPiece path, bool* bool_value) const {
const Value* value;
if (!Get(path, &value))
return false;
@@ -500,8 +758,7 @@ bool DictionaryValue::GetBoolean(const std::string& path,
return value->GetAsBoolean(bool_value);
}
-bool DictionaryValue::GetInteger(const std::string& path,
- int* out_value) const {
+bool DictionaryValue::GetInteger(StringPiece path, int* out_value) const {
const Value* value;
if (!Get(path, &value))
return false;
@@ -509,8 +766,7 @@ bool DictionaryValue::GetInteger(const std::string& path,
return value->GetAsInteger(out_value);
}
-bool DictionaryValue::GetDouble(const std::string& path,
- double* out_value) const {
+bool DictionaryValue::GetDouble(StringPiece path, double* out_value) const {
const Value* value;
if (!Get(path, &value))
return false;
@@ -518,7 +774,7 @@ bool DictionaryValue::GetDouble(const std::string& path,
return value->GetAsDouble(out_value);
}
-bool DictionaryValue::GetString(const std::string& path,
+bool DictionaryValue::GetString(StringPiece path,
std::string* out_value) const {
const Value* value;
if (!Get(path, &value))
@@ -527,8 +783,7 @@ bool DictionaryValue::GetString(const std::string& path,
return value->GetAsString(out_value);
}
-bool DictionaryValue::GetString(const std::string& path,
- string16* out_value) const {
+bool DictionaryValue::GetString(StringPiece path, string16* out_value) const {
const Value* value;
if (!Get(path, &value))
return false;
@@ -536,7 +791,7 @@ bool DictionaryValue::GetString(const std::string& path,
return value->GetAsString(out_value);
}
-bool DictionaryValue::GetStringASCII(const std::string& path,
+bool DictionaryValue::GetStringASCII(StringPiece path,
std::string* out_value) const {
std::string out;
if (!GetString(path, &out))
@@ -551,21 +806,20 @@ bool DictionaryValue::GetStringASCII(const std::string& path,
return true;
}
-bool DictionaryValue::GetBinary(const std::string& path,
+bool DictionaryValue::GetBinary(StringPiece path,
const BinaryValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_BINARY))
+ if (!result || !value->IsType(Type::BINARY))
return false;
if (out_value)
- *out_value = static_cast<const BinaryValue*>(value);
+ *out_value = value;
return true;
}
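
Since this patch aliases BinaryValue to Value, GetBinary() can hand back the Value itself without a downcast. A hedged usage sketch (the key name is hypothetical):

  base::DictionaryValue dict;               // assume populated elsewhere
  const base::BinaryValue* blob = nullptr;  // BinaryValue is now base::Value
  if (dict.GetBinary("payload", &blob)) {   // "payload" is an illustrative key
    size_t size = blob->GetBlob().size();   // raw bytes via the new accessor
  }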
-bool DictionaryValue::GetBinary(const std::string& path,
- BinaryValue** out_value) {
+bool DictionaryValue::GetBinary(StringPiece path, BinaryValue** out_value) {
return static_cast<const DictionaryValue&>(*this).GetBinary(
path,
const_cast<const BinaryValue**>(out_value));
@@ -575,7 +829,7 @@ bool DictionaryValue::GetDictionary(StringPiece path,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -591,11 +845,11 @@ bool DictionaryValue::GetDictionary(StringPiece path,
const_cast<const DictionaryValue**>(out_value));
}
-bool DictionaryValue::GetList(const std::string& path,
+bool DictionaryValue::GetList(StringPiece path,
const ListValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -604,17 +858,17 @@ bool DictionaryValue::GetList(const std::string& path,
return true;
}
-bool DictionaryValue::GetList(const std::string& path, ListValue** out_value) {
+bool DictionaryValue::GetList(StringPiece path, ListValue** out_value) {
return static_cast<const DictionaryValue&>(*this).GetList(
path,
const_cast<const ListValue**>(out_value));
}
-bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
const Value** out_value) const {
DCHECK(IsStringUTF8(key));
- auto entry_iterator = dictionary_.find(key);
- if (entry_iterator == dictionary_.end())
+ auto entry_iterator = (*dict_ptr_)->find(key.as_string());
+ if (entry_iterator == (*dict_ptr_)->end())
return false;
if (out_value)
@@ -622,14 +876,14 @@ bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
return true;
}
-bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
Value** out_value) {
return static_cast<const DictionaryValue&>(*this).GetWithoutPathExpansion(
key,
const_cast<const Value**>(out_value));
}
-bool DictionaryValue::GetBooleanWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetBooleanWithoutPathExpansion(StringPiece key,
bool* out_value) const {
const Value* value;
if (!GetWithoutPathExpansion(key, &value))
@@ -638,7 +892,7 @@ bool DictionaryValue::GetBooleanWithoutPathExpansion(const std::string& key,
return value->GetAsBoolean(out_value);
}
-bool DictionaryValue::GetIntegerWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetIntegerWithoutPathExpansion(StringPiece key,
int* out_value) const {
const Value* value;
if (!GetWithoutPathExpansion(key, &value))
@@ -647,7 +901,7 @@ bool DictionaryValue::GetIntegerWithoutPathExpansion(const std::string& key,
return value->GetAsInteger(out_value);
}
-bool DictionaryValue::GetDoubleWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetDoubleWithoutPathExpansion(StringPiece key,
double* out_value) const {
const Value* value;
if (!GetWithoutPathExpansion(key, &value))
@@ -657,7 +911,7 @@ bool DictionaryValue::GetDoubleWithoutPathExpansion(const std::string& key,
}
bool DictionaryValue::GetStringWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
std::string* out_value) const {
const Value* value;
if (!GetWithoutPathExpansion(key, &value))
@@ -666,7 +920,7 @@ bool DictionaryValue::GetStringWithoutPathExpansion(
return value->GetAsString(out_value);
}
-bool DictionaryValue::GetStringWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetStringWithoutPathExpansion(StringPiece key,
string16* out_value) const {
const Value* value;
if (!GetWithoutPathExpansion(key, &value))
@@ -676,11 +930,11 @@ bool DictionaryValue::GetStringWithoutPathExpansion(const std::string& key,
}
bool DictionaryValue::GetDictionaryWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
const DictionaryValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -690,7 +944,7 @@ bool DictionaryValue::GetDictionaryWithoutPathExpansion(
}
bool DictionaryValue::GetDictionaryWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
DictionaryValue** out_value) {
const DictionaryValue& const_this =
static_cast<const DictionaryValue&>(*this);
@@ -700,11 +954,11 @@ bool DictionaryValue::GetDictionaryWithoutPathExpansion(
}
bool DictionaryValue::GetListWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
const ListValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -713,7 +967,7 @@ bool DictionaryValue::GetListWithoutPathExpansion(
return true;
}
-bool DictionaryValue::GetListWithoutPathExpansion(const std::string& key,
+bool DictionaryValue::GetListWithoutPathExpansion(StringPiece key,
ListValue** out_value) {
return
static_cast<const DictionaryValue&>(*this).GetListWithoutPathExpansion(
@@ -721,17 +975,17 @@ bool DictionaryValue::GetListWithoutPathExpansion(const std::string& key,
const_cast<const ListValue**>(out_value));
}
-bool DictionaryValue::Remove(const std::string& path,
+bool DictionaryValue::Remove(StringPiece path,
std::unique_ptr<Value>* out_value) {
DCHECK(IsStringUTF8(path));
- std::string current_path(path);
+ StringPiece current_path(path);
DictionaryValue* current_dictionary = this;
size_t delimiter_position = current_path.rfind('.');
- if (delimiter_position != std::string::npos) {
+ if (delimiter_position != StringPiece::npos) {
if (!GetDictionary(current_path.substr(0, delimiter_position),
&current_dictionary))
return false;
- current_path.erase(0, delimiter_position + 1);
+ current_path = current_path.substr(delimiter_position + 1);
}
return current_dictionary->RemoveWithoutPathExpansion(current_path,
@@ -739,20 +993,20 @@ bool DictionaryValue::Remove(const std::string& path,
}
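
A short sketch of the path-expanding Remove(), which now walks StringPiece substrings instead of mutating a std::string copy (key names are illustrative):

  base::DictionaryValue dict;
  dict.SetInteger("a.b", 1);             // Set() expands '.' -> {"a": {"b": 1}}
  std::unique_ptr<base::Value> removed;
  dict.Remove("a.b", &removed);          // |removed| now owns the integer value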
bool DictionaryValue::RemoveWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
std::unique_ptr<Value>* out_value) {
DCHECK(IsStringUTF8(key));
- auto entry_iterator = dictionary_.find(key);
- if (entry_iterator == dictionary_.end())
+ auto entry_iterator = (*dict_ptr_)->find(key.as_string());
+ if (entry_iterator == (*dict_ptr_)->end())
return false;
if (out_value)
*out_value = std::move(entry_iterator->second);
- dictionary_.erase(entry_iterator);
+ (*dict_ptr_)->erase(entry_iterator);
return true;
}
-bool DictionaryValue::RemovePath(const std::string& path,
+bool DictionaryValue::RemovePath(StringPiece path,
std::unique_ptr<Value>* out_value) {
bool result = false;
size_t delimiter_position = path.find('.');
@@ -760,7 +1014,7 @@ bool DictionaryValue::RemovePath(const std::string& path,
if (delimiter_position == std::string::npos)
return RemoveWithoutPathExpansion(path, out_value);
- const std::string subdict_path = path.substr(0, delimiter_position);
+ StringPiece subdict_path = path.substr(0, delimiter_position);
DictionaryValue* subdict = NULL;
if (!GetDictionary(subdict_path, &subdict))
return false;
@@ -782,10 +1036,11 @@ std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
}
void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
+ CHECK(dictionary->is_dict());
for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
const Value* merge_value = &it.value();
// Check whether we have to merge dictionaries.
- if (merge_value->IsType(Value::TYPE_DICTIONARY)) {
+ if (merge_value->IsType(Value::Type::DICTIONARY)) {
DictionaryValue* sub_dict;
if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
sub_dict->MergeDictionary(
@@ -794,59 +1049,31 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
}
}
// All other cases: Make a copy and hook it up.
- SetWithoutPathExpansion(it.key(), merge_value->DeepCopy());
+ SetWithoutPathExpansion(it.key(),
+ base::WrapUnique(merge_value->DeepCopy()));
}
}
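
The loop above merges nested dictionaries recursively and deep-copies everything else; a minimal sketch of the resulting semantics (all keys hypothetical):

  base::DictionaryValue dst, src;
  dst.SetInteger("a.x", 1);
  src.SetInteger("a.y", 2);
  src.SetInteger("b", 3);
  dst.MergeDictionary(&src);
  // dst is now {"a": {"x": 1, "y": 2}, "b": 3}: the sub-dictionary "a" was
  // merged key-by-key, while "b" was deep-copied in.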
void DictionaryValue::Swap(DictionaryValue* other) {
- dictionary_.swap(other->dictionary_);
+ CHECK(other->is_dict());
+ dict_ptr_->swap(*(other->dict_ptr_));
}
DictionaryValue::Iterator::Iterator(const DictionaryValue& target)
- : target_(target),
- it_(target.dictionary_.begin()) {}
+ : target_(target), it_((*target.dict_ptr_)->begin()) {}
DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
- DictionaryValue* result = new DictionaryValue;
-
- for (const auto& current_entry : dictionary_) {
- result->SetWithoutPathExpansion(current_entry.first,
- current_entry.second->CreateDeepCopy());
- }
-
- return result;
+ return static_cast<DictionaryValue*>(Value::DeepCopy());
}
std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
return WrapUnique(DeepCopy());
}
-bool DictionaryValue::Equals(const Value* other) const {
- if (other->GetType() != GetType())
- return false;
-
- const DictionaryValue* other_dict =
- static_cast<const DictionaryValue*>(other);
- Iterator lhs_it(*this);
- Iterator rhs_it(*other_dict);
- while (!lhs_it.IsAtEnd() && !rhs_it.IsAtEnd()) {
- if (lhs_it.key() != rhs_it.key() ||
- !lhs_it.value().Equals(&rhs_it.value())) {
- return false;
- }
- lhs_it.Advance();
- rhs_it.Advance();
- }
- if (!lhs_it.IsAtEnd() || !rhs_it.IsAtEnd())
- return false;
-
- return true;
-}
-
///////////////////// ListValue ////////////////////
// static
@@ -859,15 +1086,10 @@ std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
return nullptr;
}
-ListValue::ListValue() : Value(TYPE_LIST) {
-}
-
-ListValue::~ListValue() {
- Clear();
-}
+ListValue::ListValue() : Value(Type::LIST) {}
void ListValue::Clear() {
- list_.clear();
+ list_->clear();
}
bool ListValue::Set(size_t index, Value* in_value) {
@@ -878,25 +1100,25 @@ bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
if (!in_value)
return false;
- if (index >= list_.size()) {
+ if (index >= list_->size()) {
// Pad out any intermediate indexes with null settings
- while (index > list_.size())
+ while (index > list_->size())
Append(CreateNullValue());
Append(std::move(in_value));
} else {
// TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
- DCHECK(list_[index] != in_value);
- list_[index] = std::move(in_value);
+ DCHECK((*list_)[index] != in_value);
+ (*list_)[index] = std::move(in_value);
}
return true;
}
bool ListValue::Get(size_t index, const Value** out_value) const {
- if (index >= list_.size())
+ if (index >= list_->size())
return false;
if (out_value)
- *out_value = list_[index].get();
+ *out_value = (*list_)[index].get();
return true;
}
@@ -950,11 +1172,11 @@ bool ListValue::GetString(size_t index, string16* out_value) const {
bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_BINARY))
+ if (!result || !value->IsType(Type::BINARY))
return false;
if (out_value)
- *out_value = static_cast<const BinaryValue*>(value);
+ *out_value = value;
return true;
}
@@ -969,7 +1191,7 @@ bool ListValue::GetDictionary(size_t index,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -987,7 +1209,7 @@ bool ListValue::GetDictionary(size_t index, DictionaryValue** out_value) {
bool ListValue::GetList(size_t index, const ListValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -1003,21 +1225,21 @@ bool ListValue::GetList(size_t index, ListValue** out_value) {
}
bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
- if (index >= list_.size())
+ if (index >= list_->size())
return false;
if (out_value)
- *out_value = std::move(list_[index]);
+ *out_value = std::move((*list_)[index]);
- list_.erase(list_.begin() + index);
+ list_->erase(list_->begin() + index);
return true;
}
bool ListValue::Remove(const Value& value, size_t* index) {
- for (auto it = list_.begin(); it != list_.end(); ++it) {
+ for (auto it = list_->begin(); it != list_->end(); ++it) {
if ((*it)->Equals(&value)) {
- size_t previous_index = it - list_.begin();
- list_.erase(it);
+ size_t previous_index = it - list_->begin();
+ list_->erase(it);
if (index)
*index = previous_index;
@@ -1030,38 +1252,40 @@ bool ListValue::Remove(const Value& value, size_t* index) {
ListValue::iterator ListValue::Erase(iterator iter,
std::unique_ptr<Value>* out_value) {
if (out_value)
- *out_value = std::move(*Storage::iterator(iter));
+ *out_value = std::move(*ListStorage::iterator(iter));
- return list_.erase(iter);
+ return list_->erase(iter);
}
void ListValue::Append(std::unique_ptr<Value> in_value) {
- list_.push_back(std::move(in_value));
+ list_->push_back(std::move(in_value));
}
+#if !defined(OS_LINUX)
void ListValue::Append(Value* in_value) {
DCHECK(in_value);
Append(WrapUnique(in_value));
}
+#endif
void ListValue::AppendBoolean(bool in_value) {
- Append(new FundamentalValue(in_value));
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendInteger(int in_value) {
- Append(new FundamentalValue(in_value));
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendDouble(double in_value) {
- Append(new FundamentalValue(in_value));
+ Append(MakeUnique<Value>(in_value));
}
-void ListValue::AppendString(const std::string& in_value) {
- Append(new StringValue(in_value));
+void ListValue::AppendString(StringPiece in_value) {
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendString(const string16& in_value) {
- Append(new StringValue(in_value));
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendStrings(const std::vector<std::string>& in_values) {
@@ -1078,82 +1302,46 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
}
}
-bool ListValue::AppendIfNotPresent(Value* in_value) {
+bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
DCHECK(in_value);
- for (const auto& entry : list_) {
- if (entry->Equals(in_value)) {
- delete in_value;
+ for (const auto& entry : *list_) {
+ if (entry->Equals(in_value.get())) {
return false;
}
}
- list_.emplace_back(in_value);
+ list_->push_back(std::move(in_value));
return true;
}
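
With the unique_ptr signature above, a rejected duplicate is freed automatically rather than through the manual delete the old raw-pointer version needed. Sketch:

  base::ListValue list;
  list.AppendIfNotPresent(base::MakeUnique<base::Value>(1));  // true: appended
  // Returns false; the duplicate is destroyed when |in_value| goes out of
  // scope inside AppendIfNotPresent().
  list.AppendIfNotPresent(base::MakeUnique<base::Value>(1));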
-bool ListValue::Insert(size_t index, Value* in_value) {
+bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
DCHECK(in_value);
- if (index > list_.size())
+ if (index > list_->size())
return false;
- list_.insert(list_.begin() + index, WrapUnique(in_value));
+ list_->insert(list_->begin() + index, std::move(in_value));
return true;
}
ListValue::const_iterator ListValue::Find(const Value& value) const {
- return std::find_if(list_.begin(), list_.end(),
+ return std::find_if(list_->begin(), list_->end(),
[&value](const std::unique_ptr<Value>& entry) {
return entry->Equals(&value);
});
}
void ListValue::Swap(ListValue* other) {
- list_.swap(other->list_);
-}
-
-bool ListValue::GetAsList(ListValue** out_value) {
- if (out_value)
- *out_value = this;
- return true;
-}
-
-bool ListValue::GetAsList(const ListValue** out_value) const {
- if (out_value)
- *out_value = this;
- return true;
+ CHECK(other->is_list());
+ list_->swap(*(other->list_));
}
ListValue* ListValue::DeepCopy() const {
- ListValue* result = new ListValue;
-
- for (const auto& entry : list_)
- result->Append(entry->CreateDeepCopy());
-
- return result;
+ return static_cast<ListValue*>(Value::DeepCopy());
}
std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
return WrapUnique(DeepCopy());
}
-bool ListValue::Equals(const Value* other) const {
- if (other->GetType() != GetType())
- return false;
-
- const ListValue* other_list =
- static_cast<const ListValue*>(other);
- Storage::const_iterator lhs_it, rhs_it;
- for (lhs_it = begin(), rhs_it = other_list->begin();
- lhs_it != end() && rhs_it != other_list->end();
- ++lhs_it, ++rhs_it) {
- if (!(*lhs_it)->Equals(rhs_it->get()))
- return false;
- }
- if (lhs_it != end() || rhs_it != other_list->end())
- return false;
-
- return true;
-}
-
ValueSerializer::~ValueSerializer() {
}
@@ -1166,4 +1354,11 @@ std::ostream& operator<<(std::ostream& out, const Value& value) {
return out << json;
}
+std::ostream& operator<<(std::ostream& out, const Value::Type& type) {
+ if (static_cast<int>(type) < 0 ||
+ static_cast<size_t>(type) >= arraysize(kTypeNames))
+ return out << "Invalid Type (index = " << static_cast<int>(type) << ")";
+ return out << Value::GetTypeName(type);
+}
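
A small sketch of what the new stream operator enables, e.g. in gtest failure messages (this assumes the kTypeNames table maps each enumerator to a readable name):

  std::ostringstream os;                // needs <sstream>
  os << base::Value::Type::BOOLEAN;     // writes the readable type name
  // EXPECT_EQ(Value::Type::STRING, value.type()) failures now print names
  // instead of raw enum integers.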
+
} // namespace base
diff --git a/base/values.h b/base/values.h
index e3d60891b3..35f66df904 100644
--- a/base/values.h
+++ b/base/values.h
@@ -30,17 +30,16 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
+#include "base/memory/manual_constructor.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
namespace base {
-class BinaryValue;
class DictionaryValue;
-class FundamentalValue;
class ListValue;
-class StringValue;
class Value;
+using BinaryValue = Value;
// The Value class is the base class for Values. A Value can be instantiated
// via the Create*Value() factory methods, or by directly creating instances of
@@ -49,158 +48,151 @@ class Value;
// See the file-level comment above for more information.
class BASE_EXPORT Value {
public:
- enum Type {
- TYPE_NULL = 0,
- TYPE_BOOLEAN,
- TYPE_INTEGER,
- TYPE_DOUBLE,
- TYPE_STRING,
- TYPE_BINARY,
- TYPE_DICTIONARY,
- TYPE_LIST
+ using DictStorage = std::map<std::string, std::unique_ptr<Value>>;
+ using ListStorage = std::vector<std::unique_ptr<Value>>;
+
+ enum class Type {
+ NONE = 0,
+ BOOLEAN,
+ INTEGER,
+ DOUBLE,
+ STRING,
+ BINARY,
+ DICTIONARY,
+ LIST
// Note: Do not add more types. See the file-level comment above for why.
};
- virtual ~Value();
-
static std::unique_ptr<Value> CreateNullValue();
+ // For situations where you want to keep ownership of your buffer, this
+ // factory method creates a new BinaryValue by copying the contents of the
+ // buffer that's passed in.
+ // DEPRECATED, use MakeUnique<Value>(const std::vector<char>&) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
+ static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+ size_t size);
+
+ Value(const Value& that);
+ Value(Value&& that);
+ Value(); // A null value.
+ explicit Value(Type type);
+ explicit Value(bool in_bool);
+ explicit Value(int in_int);
+ explicit Value(double in_double);
+
+ // Value(const char*) and Value(const char16*) are required despite
+ // Value(const std::string&) and Value(const string16&) because otherwise the
+ // compiler will choose the Value(bool) constructor for these arguments.
+  // Value(std::string&&) allows for efficient move construction.
+ // Value(StringPiece) exists due to many callsites passing StringPieces as
+ // arguments.
+ explicit Value(const char* in_string);
+ explicit Value(const std::string& in_string);
+ explicit Value(std::string&& in_string);
+ explicit Value(const char16* in_string);
+ explicit Value(const string16& in_string);
+ explicit Value(StringPiece in_string);
+
+ explicit Value(const std::vector<char>& in_blob);
+ explicit Value(std::vector<char>&& in_blob);
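
A sketch of the overload hazard the comment above calls out (illustrative, not part of the header):

  base::Value a("text");               // const char* overload -> Type::STRING
  // Without Value(const char*), the string literal would convert to bool and
  // select Value(bool), yielding Type::BOOLEAN instead.
  base::Value b(std::string("text")); // rvalue -> Value(std::string&&) move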
+
+ Value& operator=(const Value& that);
+ Value& operator=(Value&& that);
+
+ ~Value();
+
+ // Returns the name for a given |type|.
+ static const char* GetTypeName(Type type);
+
// Returns the type of the value stored by the current Value object.
// Each type will be implemented by only one subclass of Value, so it's
// safe to use the Type to determine whether you can cast from
// Value* to (Implementing Class)*. Also, a Value object never changes
// its type after construction.
- Type GetType() const { return type_; }
+ Type GetType() const { return type_; } // DEPRECATED, use type().
+ Type type() const { return type_; }
// Returns true if the current object represents a given type.
bool IsType(Type type) const { return type == type_; }
+ bool is_bool() const { return type() == Type::BOOLEAN; }
+ bool is_int() const { return type() == Type::INTEGER; }
+ bool is_double() const { return type() == Type::DOUBLE; }
+ bool is_string() const { return type() == Type::STRING; }
+ bool is_blob() const { return type() == Type::BINARY; }
+ bool is_dict() const { return type() == Type::DICTIONARY; }
+ bool is_list() const { return type() == Type::LIST; }
+
+ // These will all fatally assert if the type doesn't match.
+ bool GetBool() const;
+ int GetInt() const;
+ double GetDouble() const; // Implicitly converts from int if necessary.
+ const std::string& GetString() const;
+ const std::vector<char>& GetBlob() const;
+
+ size_t GetSize() const; // DEPRECATED, use GetBlob().size() instead.
+ const char* GetBuffer() const; // DEPRECATED, use GetBlob().data() instead.
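
A quick sketch contrasting the new asserting accessors with the tolerant GetAs* forms below:

  base::Value v(42);
  int i = v.GetInt();       // OK: matching type
  // v.GetString();         // would fatally assert: type is INTEGER
  bool b;
  if (!v.GetAsBoolean(&b)) {
    // GetAs* reports a mismatch by returning false instead of asserting.
  }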
// These methods allow the convenient retrieval of the contents of the Value.
// If the current object can be converted into the given type, the value is
// returned through the |out_value| parameter and true is returned;
// otherwise, false is returned and |out_value| is unchanged.
- virtual bool GetAsBoolean(bool* out_value) const;
- virtual bool GetAsInteger(int* out_value) const;
- virtual bool GetAsDouble(double* out_value) const;
- virtual bool GetAsString(std::string* out_value) const;
- virtual bool GetAsString(string16* out_value) const;
- virtual bool GetAsString(const StringValue** out_value) const;
- virtual bool GetAsBinary(const BinaryValue** out_value) const;
- virtual bool GetAsList(ListValue** out_value);
- virtual bool GetAsList(const ListValue** out_value) const;
- virtual bool GetAsDictionary(DictionaryValue** out_value);
- virtual bool GetAsDictionary(const DictionaryValue** out_value) const;
+ bool GetAsBoolean(bool* out_value) const;
+ bool GetAsInteger(int* out_value) const;
+ bool GetAsDouble(double* out_value) const;
+ bool GetAsString(std::string* out_value) const;
+ bool GetAsString(string16* out_value) const;
+ bool GetAsString(const Value** out_value) const;
+ bool GetAsString(StringPiece* out_value) const;
+ bool GetAsBinary(const BinaryValue** out_value) const;
+ // ListValue::From is the equivalent for std::unique_ptr conversions.
+ bool GetAsList(ListValue** out_value);
+ bool GetAsList(const ListValue** out_value) const;
+ // DictionaryValue::From is the equivalent for std::unique_ptr conversions.
+ bool GetAsDictionary(DictionaryValue** out_value);
+ bool GetAsDictionary(const DictionaryValue** out_value) const;
// Note: Do not add more types. See the file-level comment above for why.
// This creates a deep copy of the entire Value tree, and returns a pointer
- // to the copy. The caller gets ownership of the copy, of course.
- //
+ // to the copy. The caller gets ownership of the copy, of course.
// Subclasses return their own type directly in their overrides;
// this works because C++ supports covariant return types.
- virtual Value* DeepCopy() const;
+ Value* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<Value> CreateDeepCopy() const;
// Compares if two Value objects have equal contents.
- virtual bool Equals(const Value* other) const;
+ bool Equals(const Value* other) const;
// Compares if two Value objects have equal contents. Can handle NULLs.
// NULLs are considered equal but different from Value::CreateNullValue().
static bool Equals(const Value* a, const Value* b);
protected:
- // These aren't safe for end-users, but they are useful for subclasses.
- explicit Value(Type type);
- Value(const Value& that);
- Value& operator=(const Value& that);
-
- private:
+ // TODO(crbug.com/646113): Make these private once DictionaryValue and
+ // ListValue are properly inlined.
Type type_;
-};
-// FundamentalValue represents the simple fundamental types of values.
-class BASE_EXPORT FundamentalValue : public Value {
- public:
- explicit FundamentalValue(bool in_value);
- explicit FundamentalValue(int in_value);
- explicit FundamentalValue(double in_value);
- ~FundamentalValue() override;
-
- // Overridden from Value:
- bool GetAsBoolean(bool* out_value) const override;
- bool GetAsInteger(int* out_value) const override;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
- // doubles.
- bool GetAsDouble(double* out_value) const override;
- FundamentalValue* DeepCopy() const override;
- bool Equals(const Value* other) const override;
-
- private:
union {
- bool boolean_value_;
- int integer_value_;
+ bool bool_value_;
+ int int_value_;
double double_value_;
+ ManualConstructor<std::string> string_value_;
+ ManualConstructor<std::vector<char>> binary_value_;
+    // For current gcc and clang, sizeof(DictStorage) = 48, which would result
+    // in sizeof(Value) = 56 if DictStorage were stack-allocated. Allocating it
+    // on the heap results in sizeof(Value) = 40 for all of gcc, clang and MSVC.
+ ManualConstructor<std::unique_ptr<DictStorage>> dict_ptr_;
+ ManualConstructor<ListStorage> list_;
};
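
Given the size note above, a guard one could add (a sketch; the 40-byte figure is the comment's claim for the quoted toolchains, not verified here):

  static_assert(sizeof(base::Value) <= 40,
                "Value should stay small; see the DictStorage heap note");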
-};
-
-class BASE_EXPORT StringValue : public Value {
- public:
- // Initializes a StringValue with a UTF-8 narrow character string.
- explicit StringValue(const std::string& in_value);
-
- // Initializes a StringValue with a string16.
- explicit StringValue(const string16& in_value);
-
- ~StringValue() override;
-
- // Returns |value_| as a pointer or reference.
- std::string* GetString();
- const std::string& GetString() const;
-
- // Overridden from Value:
- bool GetAsString(std::string* out_value) const override;
- bool GetAsString(string16* out_value) const override;
- bool GetAsString(const StringValue** out_value) const override;
- StringValue* DeepCopy() const override;
- bool Equals(const Value* other) const override;
private:
- std::string value_;
-};
-
-class BASE_EXPORT BinaryValue: public Value {
- public:
- // Creates a BinaryValue with a null buffer and size of 0.
- BinaryValue();
-
- // Creates a BinaryValue, taking ownership of the bytes pointed to by
- // |buffer|.
- BinaryValue(std::unique_ptr<char[]> buffer, size_t size);
-
- ~BinaryValue() override;
-
- // For situations where you want to keep ownership of your buffer, this
- // factory method creates a new BinaryValue by copying the contents of the
- // buffer that's passed in.
- static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
- size_t size);
-
- size_t GetSize() const { return size_; }
-
- // May return NULL.
- char* GetBuffer() { return buffer_.get(); }
- const char* GetBuffer() const { return buffer_.get(); }
-
- // Overridden from Value:
- bool GetAsBinary(const BinaryValue** out_value) const override;
- BinaryValue* DeepCopy() const override;
- bool Equals(const Value* other) const override;
-
- private:
- std::unique_ptr<char[]> buffer_;
- size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(BinaryValue);
+ void InternalCopyFundamentalValue(const Value& that);
+ void InternalCopyConstructFrom(const Value& that);
+ void InternalMoveConstructFrom(Value&& that);
+ void InternalCopyAssignFromSameType(const Value& that);
+ void InternalMoveAssignFromSameType(Value&& that);
+ void InternalCleanup();
};
// DictionaryValue provides a key-value dictionary with (optional) "path"
@@ -208,25 +200,19 @@ class BASE_EXPORT BinaryValue: public Value {
// are |std::string|s and should be UTF-8 encoded.
class BASE_EXPORT DictionaryValue : public Value {
public:
- using Storage = std::map<std::string, std::unique_ptr<Value>>;
// Returns |value| if it is a dictionary, nullptr otherwise.
static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
DictionaryValue();
- ~DictionaryValue() override;
-
- // Overridden from Value:
- bool GetAsDictionary(DictionaryValue** out_value) override;
- bool GetAsDictionary(const DictionaryValue** out_value) const override;
// Returns true if the current dictionary has a value for the given key.
- bool HasKey(const std::string& key) const;
+ bool HasKey(StringPiece key) const;
// Returns the number of Values in this dictionary.
- size_t size() const { return dictionary_.size(); }
+ size_t size() const { return (*dict_ptr_)->size(); }
// Returns whether the dictionary is empty.
- bool empty() const { return dictionary_.empty(); }
+ bool empty() const { return (*dict_ptr_)->empty(); }
// Clears any current contents of this dictionary.
void Clear();
@@ -238,32 +224,31 @@ class BASE_EXPORT DictionaryValue : public Value {
// If the key at any step of the way doesn't exist, or exists but isn't
// a DictionaryValue, a new DictionaryValue will be created and attached
// to the path in that location. |in_value| must be non-null.
- void Set(const std::string& path, std::unique_ptr<Value> in_value);
+ void Set(StringPiece path, std::unique_ptr<Value> in_value);
// Deprecated version of the above. TODO(estade): remove.
- void Set(const std::string& path, Value* in_value);
+ void Set(StringPiece path, Value* in_value);
// Convenience forms of Set(). These methods will replace any existing
// value at that path, even if it has a different type.
- void SetBoolean(const std::string& path, bool in_value);
- void SetInteger(const std::string& path, int in_value);
- void SetDouble(const std::string& path, double in_value);
- void SetString(const std::string& path, const std::string& in_value);
- void SetString(const std::string& path, const string16& in_value);
+ void SetBoolean(StringPiece path, bool in_value);
+ void SetInteger(StringPiece path, int in_value);
+ void SetDouble(StringPiece path, double in_value);
+ void SetString(StringPiece path, StringPiece in_value);
+ void SetString(StringPiece path, const string16& in_value);
// Like Set(), but without special treatment of '.'. This allows e.g. URLs to
// be used as paths.
- void SetWithoutPathExpansion(const std::string& key,
+ void SetWithoutPathExpansion(StringPiece key,
std::unique_ptr<Value> in_value);
// Deprecated version of the above. TODO(estade): remove.
- void SetWithoutPathExpansion(const std::string& key, Value* in_value);
+ void SetWithoutPathExpansion(StringPiece key, Value* in_value);
// Convenience forms of SetWithoutPathExpansion().
- void SetBooleanWithoutPathExpansion(const std::string& path, bool in_value);
- void SetIntegerWithoutPathExpansion(const std::string& path, int in_value);
- void SetDoubleWithoutPathExpansion(const std::string& path, double in_value);
- void SetStringWithoutPathExpansion(const std::string& path,
- const std::string& in_value);
- void SetStringWithoutPathExpansion(const std::string& path,
+ void SetBooleanWithoutPathExpansion(StringPiece path, bool in_value);
+ void SetIntegerWithoutPathExpansion(StringPiece path, int in_value);
+ void SetDoubleWithoutPathExpansion(StringPiece path, double in_value);
+ void SetStringWithoutPathExpansion(StringPiece path, StringPiece in_value);
+ void SetStringWithoutPathExpansion(StringPiece path,
const string16& in_value);
// Gets the Value associated with the given path starting from this object.
@@ -281,46 +266,41 @@ class BASE_EXPORT DictionaryValue : public Value {
// and the return value will be true if the path is valid and the value at
// the end of the path can be returned in the form specified.
// |out_value| is optional and will only be set if non-NULL.
- bool GetBoolean(const std::string& path, bool* out_value) const;
- bool GetInteger(const std::string& path, int* out_value) const;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ bool GetBoolean(StringPiece path, bool* out_value) const;
+ bool GetInteger(StringPiece path, int* out_value) const;
+ // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
// doubles.
- bool GetDouble(const std::string& path, double* out_value) const;
- bool GetString(const std::string& path, std::string* out_value) const;
- bool GetString(const std::string& path, string16* out_value) const;
- bool GetStringASCII(const std::string& path, std::string* out_value) const;
- bool GetBinary(const std::string& path, const BinaryValue** out_value) const;
- bool GetBinary(const std::string& path, BinaryValue** out_value);
+ bool GetDouble(StringPiece path, double* out_value) const;
+ bool GetString(StringPiece path, std::string* out_value) const;
+ bool GetString(StringPiece path, string16* out_value) const;
+ bool GetStringASCII(StringPiece path, std::string* out_value) const;
+ bool GetBinary(StringPiece path, const BinaryValue** out_value) const;
+ bool GetBinary(StringPiece path, BinaryValue** out_value);
bool GetDictionary(StringPiece path,
const DictionaryValue** out_value) const;
bool GetDictionary(StringPiece path, DictionaryValue** out_value);
- bool GetList(const std::string& path, const ListValue** out_value) const;
- bool GetList(const std::string& path, ListValue** out_value);
+ bool GetList(StringPiece path, const ListValue** out_value) const;
+ bool GetList(StringPiece path, ListValue** out_value);
// Like Get(), but without special treatment of '.'. This allows e.g. URLs to
// be used as paths.
- bool GetWithoutPathExpansion(const std::string& key,
- const Value** out_value) const;
- bool GetWithoutPathExpansion(const std::string& key, Value** out_value);
- bool GetBooleanWithoutPathExpansion(const std::string& key,
- bool* out_value) const;
- bool GetIntegerWithoutPathExpansion(const std::string& key,
- int* out_value) const;
- bool GetDoubleWithoutPathExpansion(const std::string& key,
- double* out_value) const;
- bool GetStringWithoutPathExpansion(const std::string& key,
+ bool GetWithoutPathExpansion(StringPiece key, const Value** out_value) const;
+ bool GetWithoutPathExpansion(StringPiece key, Value** out_value);
+ bool GetBooleanWithoutPathExpansion(StringPiece key, bool* out_value) const;
+ bool GetIntegerWithoutPathExpansion(StringPiece key, int* out_value) const;
+ bool GetDoubleWithoutPathExpansion(StringPiece key, double* out_value) const;
+ bool GetStringWithoutPathExpansion(StringPiece key,
std::string* out_value) const;
- bool GetStringWithoutPathExpansion(const std::string& key,
+ bool GetStringWithoutPathExpansion(StringPiece key,
string16* out_value) const;
bool GetDictionaryWithoutPathExpansion(
- const std::string& key,
+ StringPiece key,
const DictionaryValue** out_value) const;
- bool GetDictionaryWithoutPathExpansion(const std::string& key,
+ bool GetDictionaryWithoutPathExpansion(StringPiece key,
DictionaryValue** out_value);
- bool GetListWithoutPathExpansion(const std::string& key,
+ bool GetListWithoutPathExpansion(StringPiece key,
const ListValue** out_value) const;
- bool GetListWithoutPathExpansion(const std::string& key,
- ListValue** out_value);
+ bool GetListWithoutPathExpansion(StringPiece key, ListValue** out_value);
// Removes the Value with the specified path from this dictionary (or one
// of its child dictionaries, if the path is more than just a local key).
@@ -328,18 +308,16 @@ class BASE_EXPORT DictionaryValue : public Value {
// |out_value|. If |out_value| is NULL, the removed value will be deleted.
// This method returns true if |path| is a valid path; otherwise it will
// return false and the DictionaryValue object will be unchanged.
- virtual bool Remove(const std::string& path,
- std::unique_ptr<Value>* out_value);
+ bool Remove(StringPiece path, std::unique_ptr<Value>* out_value);
// Like Remove(), but without special treatment of '.'. This allows e.g. URLs
// to be used as paths.
- virtual bool RemoveWithoutPathExpansion(const std::string& key,
- std::unique_ptr<Value>* out_value);
+ bool RemoveWithoutPathExpansion(StringPiece key,
+ std::unique_ptr<Value>* out_value);
// Removes a path, clearing out all dictionaries on |path| that remain empty
// after removing the value at |path|.
- virtual bool RemovePath(const std::string& path,
- std::unique_ptr<Value>* out_value);
+ bool RemovePath(StringPiece path, std::unique_ptr<Value>* out_value);
// Makes a copy of |this| but doesn't include empty dictionaries and lists in
// the copy. This never returns NULL, even if |this| itself is empty.
@@ -353,7 +331,7 @@ class BASE_EXPORT DictionaryValue : public Value {
void MergeDictionary(const DictionaryValue* dictionary);
// Swaps contents with the |other| dictionary.
- virtual void Swap(DictionaryValue* other);
+ void Swap(DictionaryValue* other);
// This class provides an iterator over both keys and values in the
// dictionary. It can't be used to modify the dictionary.
@@ -363,7 +341,7 @@ class BASE_EXPORT DictionaryValue : public Value {
Iterator(const Iterator& other);
~Iterator();
- bool IsAtEnd() const { return it_ == target_.dictionary_.end(); }
+ bool IsAtEnd() const { return it_ == (*target_.dict_ptr_)->end(); }
void Advance() { ++it_; }
const std::string& key() const { return it_->first; }
@@ -371,42 +349,33 @@ class BASE_EXPORT DictionaryValue : public Value {
private:
const DictionaryValue& target_;
- Storage::const_iterator it_;
+ DictStorage::const_iterator it_;
};
- // Overridden from Value:
- DictionaryValue* DeepCopy() const override;
+ DictionaryValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
- bool Equals(const Value* other) const override;
-
- private:
- Storage dictionary_;
-
- DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
};
// This type of Value represents a list of other Value values.
class BASE_EXPORT ListValue : public Value {
public:
- using Storage = std::vector<std::unique_ptr<Value>>;
- using const_iterator = Storage::const_iterator;
- using iterator = Storage::iterator;
+ using const_iterator = ListStorage::const_iterator;
+ using iterator = ListStorage::iterator;
// Returns |value| if it is a list, nullptr otherwise.
static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
ListValue();
- ~ListValue() override;
// Clears the contents of this ListValue
void Clear();
// Returns the number of Values in this list.
- size_t GetSize() const { return list_.size(); }
+ size_t GetSize() const { return list_->size(); }
// Returns whether the list is empty.
- bool empty() const { return list_.empty(); }
+ bool empty() const { return list_->empty(); }
// Sets the list item at the given index to be the Value specified by
// the value given. If the index is beyond the current end of the list, null
@@ -430,7 +399,7 @@ class BASE_EXPORT ListValue : public Value {
// |out_value| is optional and will only be set if non-NULL.
bool GetBoolean(size_t index, bool* out_value) const;
bool GetInteger(size_t index, int* out_value) const;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
// doubles.
bool GetDouble(size_t index, double* out_value) const;
bool GetString(size_t index, std::string* out_value) const;
@@ -447,7 +416,7 @@ class BASE_EXPORT ListValue : public Value {
// passed out via |out_value|. If |out_value| is NULL, the removed value will
// be deleted. This method returns true if |index| is valid; otherwise
// it will return false and the ListValue object will be unchanged.
- virtual bool Remove(size_t index, std::unique_ptr<Value>* out_value);
+ bool Remove(size_t index, std::unique_ptr<Value>* out_value);
// Removes the first instance of |value| found in the list, if any, and
// deletes it. |index| is the location where |value| was found. Returns false
@@ -462,26 +431,27 @@ class BASE_EXPORT ListValue : public Value {
// Appends a Value to the end of the list.
void Append(std::unique_ptr<Value> in_value);
+#if !defined(OS_LINUX)
// Deprecated version of the above. TODO(estade): remove.
void Append(Value* in_value);
+#endif
// Convenience forms of Append.
void AppendBoolean(bool in_value);
void AppendInteger(int in_value);
void AppendDouble(double in_value);
- void AppendString(const std::string& in_value);
+ void AppendString(StringPiece in_value);
void AppendString(const string16& in_value);
void AppendStrings(const std::vector<std::string>& in_values);
void AppendStrings(const std::vector<string16>& in_values);
- // Appends a Value if it's not already present. Takes ownership of the
- // |in_value|. Returns true if successful, or false if the value was already
- // present. If the value was already present the |in_value| is deleted.
- bool AppendIfNotPresent(Value* in_value);
+ // Appends a Value if it's not already present. Returns true if successful,
+  // or false if the value was already present.
+ bool AppendIfNotPresent(std::unique_ptr<Value> in_value);
// Insert a Value at index.
// Returns true if successful, or false if the index was out of range.
- bool Insert(size_t index, Value* in_value);
+ bool Insert(size_t index, std::unique_ptr<Value> in_value);
// Searches for the first instance of |value| in the list using the Equals
// method of the Value type.
@@ -489,28 +459,18 @@ class BASE_EXPORT ListValue : public Value {
const_iterator Find(const Value& value) const;
// Swaps contents with the |other| list.
- virtual void Swap(ListValue* other);
+ void Swap(ListValue* other);
// Iteration.
- iterator begin() { return list_.begin(); }
- iterator end() { return list_.end(); }
-
- const_iterator begin() const { return list_.begin(); }
- const_iterator end() const { return list_.end(); }
+ iterator begin() { return list_->begin(); }
+ iterator end() { return list_->end(); }
- // Overridden from Value:
- bool GetAsList(ListValue** out_value) override;
- bool GetAsList(const ListValue** out_value) const override;
- ListValue* DeepCopy() const override;
- bool Equals(const Value* other) const override;
+ const_iterator begin() const { return list_->begin(); }
+ const_iterator end() const { return list_->end(); }
+ ListValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
std::unique_ptr<ListValue> CreateDeepCopy() const;
-
- private:
- Storage list_;
-
- DISALLOW_COPY_AND_ASSIGN(ListValue);
};
// This interface is implemented by classes that know how to serialize
@@ -545,16 +505,6 @@ class BASE_EXPORT ValueDeserializer {
BASE_EXPORT std::ostream& operator<<(std::ostream& out, const Value& value);
BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
- const FundamentalValue& value) {
- return out << static_cast<const Value&>(value);
-}
-
-BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
- const StringValue& value) {
- return out << static_cast<const Value&>(value);
-}
-
-BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
const DictionaryValue& value) {
return out << static_cast<const Value&>(value);
}
@@ -564,6 +514,10 @@ BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
return out << static_cast<const Value&>(value);
}
+// Stream operator so that enum class Types can be used in log statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const Value::Type& type);
+
} // namespace base
#endif // BASE_VALUES_H_
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index d68522234d..3bcdc16e37 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -9,6 +9,7 @@
#include <limits>
#include <memory>
#include <utility>
+#include <vector>
#include "base/memory/ptr_util.h"
#include "base/strings/string16.h"
@@ -17,6 +18,322 @@
namespace base {
+// Group of tests for the value constructors.
+TEST(ValuesTest, ConstructBool) {
+ Value true_value(true);
+ EXPECT_EQ(Value::Type::BOOLEAN, true_value.type());
+ EXPECT_TRUE(true_value.GetBool());
+
+ Value false_value(false);
+ EXPECT_EQ(Value::Type::BOOLEAN, false_value.type());
+ EXPECT_FALSE(false_value.GetBool());
+}
+
+TEST(ValuesTest, ConstructInt) {
+ Value value(-37);
+ EXPECT_EQ(Value::Type::INTEGER, value.type());
+ EXPECT_EQ(-37, value.GetInt());
+}
+
+TEST(ValuesTest, ConstructDouble) {
+ Value value(-4.655);
+ EXPECT_EQ(Value::Type::DOUBLE, value.type());
+ EXPECT_EQ(-4.655, value.GetDouble());
+}
+
+TEST(ValuesTest, ConstructStringFromConstCharPtr) {
+ const char* str = "foobar";
+ Value value(str);
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStdStringConstRef) {
+ std::string str = "foobar";
+ Value value(str);
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStdStringRefRef) {
+ std::string str = "foobar";
+ Value value(std::move(str));
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromConstChar16Ptr) {
+ string16 str = ASCIIToUTF16("foobar");
+ Value value(str.c_str());
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromString16) {
+ string16 str = ASCIIToUTF16("foobar");
+ Value value(str);
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStringPiece) {
+ StringPiece str = "foobar";
+ Value value(str);
+ EXPECT_EQ(Value::Type::STRING, value.type());
+ EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructBinary) {
+ BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+ EXPECT_EQ(Value::Type::BINARY, value.type());
+ EXPECT_EQ(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}), value.GetBlob());
+}
+
+TEST(ValuesTest, ConstructDict) {
+ DictionaryValue value;
+ EXPECT_EQ(Value::Type::DICTIONARY, value.type());
+}
+
+TEST(ValuesTest, ConstructList) {
+ ListValue value;
+ EXPECT_EQ(Value::Type::LIST, value.type());
+}
+
+// Group of tests for the copy constructors and copy-assignment. For equality
+// checks, comparisons of the interesting fields are done instead of relying
+// on Equals being correct.
+TEST(ValuesTest, CopyBool) {
+ Value true_value(true);
+ Value copied_true_value(true_value);
+ EXPECT_EQ(true_value.type(), copied_true_value.type());
+ EXPECT_EQ(true_value.GetBool(), copied_true_value.GetBool());
+
+ Value false_value(false);
+ Value copied_false_value(false_value);
+ EXPECT_EQ(false_value.type(), copied_false_value.type());
+ EXPECT_EQ(false_value.GetBool(), copied_false_value.GetBool());
+
+ Value blank;
+
+ blank = true_value;
+ EXPECT_EQ(true_value.type(), blank.type());
+ EXPECT_EQ(true_value.GetBool(), blank.GetBool());
+
+ blank = false_value;
+ EXPECT_EQ(false_value.type(), blank.type());
+ EXPECT_EQ(false_value.GetBool(), blank.GetBool());
+}
+
+TEST(ValuesTest, CopyInt) {
+ Value value(74);
+ Value copied_value(value);
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(value.GetInt(), copied_value.GetInt());
+
+ Value blank;
+
+ blank = value;
+ EXPECT_EQ(value.type(), blank.type());
+ EXPECT_EQ(value.GetInt(), blank.GetInt());
+}
+
+TEST(ValuesTest, CopyDouble) {
+ Value value(74.896);
+ Value copied_value(value);
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(value.GetDouble(), copied_value.GetDouble());
+
+ Value blank;
+
+ blank = value;
+ EXPECT_EQ(value.type(), blank.type());
+ EXPECT_EQ(value.GetDouble(), blank.GetDouble());
+}
+
+TEST(ValuesTest, CopyString) {
+ Value value("foobar");
+ Value copied_value(value);
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(value.GetString(), copied_value.GetString());
+
+ Value blank;
+
+ blank = value;
+ EXPECT_EQ(value.type(), blank.type());
+ EXPECT_EQ(value.GetString(), blank.GetString());
+}
+
+TEST(ValuesTest, CopyBinary) {
+ BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+ BinaryValue copied_value(value);
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(value.GetBlob(), copied_value.GetBlob());
+
+ Value blank;
+
+ blank = value;
+ EXPECT_EQ(value.type(), blank.type());
+ EXPECT_EQ(value.GetBlob(), blank.GetBlob());
+}
+
+TEST(ValuesTest, CopyDictionary) {
+ // TODO(crbug.com/646113): Clean this up once DictionaryValue switched to
+ // value semantics.
+ int copy;
+ DictionaryValue value;
+ value.SetInteger("Int", 123);
+
+ DictionaryValue copied_value(value);
+ copied_value.GetInteger("Int", &copy);
+
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(123, copy);
+
+ auto blank = MakeUnique<Value>();
+
+ *blank = value;
+ EXPECT_EQ(Value::Type::DICTIONARY, blank->type());
+
+ static_cast<DictionaryValue*>(blank.get())->GetInteger("Int", &copy);
+ EXPECT_EQ(123, copy);
+}
+
+TEST(ValuesTest, CopyList) {
+ // TODO(crbug.com/646113): Clean this up once ListValue switched to
+ // value semantics.
+ int copy;
+ ListValue value;
+ value.AppendInteger(123);
+
+ ListValue copied_value(value);
+ copied_value.GetInteger(0, &copy);
+
+ EXPECT_EQ(value.type(), copied_value.type());
+ EXPECT_EQ(123, copy);
+
+ auto blank = MakeUnique<Value>();
+
+ *blank = value;
+ EXPECT_EQ(Value::Type::LIST, blank->type());
+
+ static_cast<ListValue*>(blank.get())->GetInteger(0, &copy);
+ EXPECT_EQ(123, copy);
+}
+
+// Group of tests for the move constructors and move-assignment.
+TEST(ValuesTest, MoveBool) {
+ Value true_value(true);
+ Value moved_true_value(std::move(true_value));
+ EXPECT_EQ(Value::Type::BOOLEAN, moved_true_value.type());
+ EXPECT_TRUE(moved_true_value.GetBool());
+
+ Value false_value(false);
+ Value moved_false_value(std::move(false_value));
+ EXPECT_EQ(Value::Type::BOOLEAN, moved_false_value.type());
+ EXPECT_FALSE(moved_false_value.GetBool());
+
+ Value blank;
+
+ blank = Value(true);
+ EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
+ EXPECT_TRUE(blank.GetBool());
+
+ blank = Value(false);
+ EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
+ EXPECT_FALSE(blank.GetBool());
+}
+
+TEST(ValuesTest, MoveInt) {
+ Value value(74);
+ Value moved_value(std::move(value));
+ EXPECT_EQ(Value::Type::INTEGER, moved_value.type());
+ EXPECT_EQ(74, moved_value.GetInt());
+
+ Value blank;
+
+ blank = Value(47);
+ EXPECT_EQ(Value::Type::INTEGER, blank.type());
+ EXPECT_EQ(47, blank.GetInt());
+}
+
+TEST(ValuesTest, MoveDouble) {
+ Value value(74.896);
+ Value moved_value(std::move(value));
+ EXPECT_EQ(Value::Type::DOUBLE, moved_value.type());
+ EXPECT_EQ(74.896, moved_value.GetDouble());
+
+ Value blank;
+
+ blank = Value(654.38);
+ EXPECT_EQ(Value::Type::DOUBLE, blank.type());
+ EXPECT_EQ(654.38, blank.GetDouble());
+}
+
+TEST(ValuesTest, MoveString) {
+ Value value("foobar");
+ Value moved_value(std::move(value));
+ EXPECT_EQ(Value::Type::STRING, moved_value.type());
+ EXPECT_EQ("foobar", moved_value.GetString());
+
+ Value blank;
+
+ blank = Value("foobar");
+ EXPECT_EQ(Value::Type::STRING, blank.type());
+ EXPECT_EQ("foobar", blank.GetString());
+}
+
+TEST(ValuesTest, MoveBinary) {
+ const std::vector<char> buffer = {0xF, 0x0, 0x0, 0xB, 0xA, 0x2};
+ BinaryValue value(buffer);
+ BinaryValue moved_value(std::move(value));
+ EXPECT_EQ(Value::Type::BINARY, moved_value.type());
+ EXPECT_EQ(buffer, moved_value.GetBlob());
+
+ Value blank;
+
+ blank = BinaryValue(buffer);
+ EXPECT_EQ(Value::Type::BINARY, blank.type());
+ EXPECT_EQ(buffer, blank.GetBlob());
+}
+
+TEST(ValuesTest, MoveDictionary) {
+ // TODO(crbug.com/646113): Clean this up once DictionaryValue switched to
+ // value semantics.
+ int move;
+ DictionaryValue value;
+ value.SetInteger("Int", 123);
+
+ DictionaryValue moved_value(std::move(value));
+ moved_value.GetInteger("Int", &move);
+
+ EXPECT_EQ(Value::Type::DICTIONARY, moved_value.type());
+ EXPECT_EQ(123, move);
+
+ Value blank;
+
+ blank = DictionaryValue();
+ EXPECT_EQ(Value::Type::DICTIONARY, blank.type());
+}
+
+TEST(ValuesTest, MoveList) {
+ // TODO(crbug.com/646113): Clean this up once ListValue switched to
+ // value semantics.
+ int move;
+ ListValue value;
+ value.AppendInteger(123);
+
+ ListValue moved_value(std::move(value));
+ moved_value.GetInteger(0, &move);
+
+ EXPECT_EQ(Value::Type::LIST, moved_value.type());
+ EXPECT_EQ(123, move);
+
+ Value blank;
+
+ blank = ListValue();
+ EXPECT_EQ(Value::Type::LIST, blank.type());
+}
+
TEST(ValuesTest, Basic) {
// Test basic dictionary getting/setting
DictionaryValue settings;
@@ -62,10 +379,10 @@ TEST(ValuesTest, Basic) {
TEST(ValuesTest, List) {
std::unique_ptr<ListValue> mixed_list(new ListValue());
- mixed_list->Set(0, WrapUnique(new FundamentalValue(true)));
- mixed_list->Set(1, WrapUnique(new FundamentalValue(42)));
- mixed_list->Set(2, WrapUnique(new FundamentalValue(88.8)));
- mixed_list->Set(3, WrapUnique(new StringValue("foo")));
+ mixed_list->Set(0, MakeUnique<Value>(true));
+ mixed_list->Set(1, MakeUnique<Value>(42));
+ mixed_list->Set(2, MakeUnique<Value>(88.8));
+ mixed_list->Set(3, MakeUnique<Value>("foo"));
ASSERT_EQ(4u, mixed_list->GetSize());
Value *value = NULL;
@@ -100,8 +417,8 @@ TEST(ValuesTest, List) {
ASSERT_EQ("foo", string_value);
// Try searching in the mixed list.
- base::FundamentalValue sought_value(42);
- base::FundamentalValue not_found_value(false);
+ base::Value sought_value(42);
+ base::Value not_found_value(false);
ASSERT_NE(mixed_list->end(), mixed_list->Find(sought_value));
ASSERT_TRUE((*mixed_list->Find(sought_value))->GetAsInteger(&int_value));
@@ -110,16 +427,15 @@ TEST(ValuesTest, List) {
}
TEST(ValuesTest, BinaryValue) {
- // Default constructor creates a BinaryValue with a null buffer and size 0.
- std::unique_ptr<BinaryValue> binary(new BinaryValue());
+  // Constructing with Type::BINARY creates a BinaryValue with a buffer of
+  // size 0.
+ auto binary = MakeUnique<Value>(Value::Type::BINARY);
ASSERT_TRUE(binary.get());
- ASSERT_EQ(NULL, binary->GetBuffer());
ASSERT_EQ(0U, binary->GetSize());
// Test the common case of a non-empty buffer
- std::unique_ptr<char[]> buffer(new char[15]);
- char* original_buffer = buffer.get();
- binary.reset(new BinaryValue(std::move(buffer), 15));
+ std::vector<char> buffer(15);
+ char* original_buffer = buffer.data();
+ binary.reset(new BinaryValue(std::move(buffer)));
ASSERT_TRUE(binary.get());
ASSERT_TRUE(binary->GetBuffer());
ASSERT_EQ(original_buffer, binary->GetBuffer());
@@ -143,17 +459,17 @@ TEST(ValuesTest, BinaryValue) {
TEST(ValuesTest, StringValue) {
// Test overloaded StringValue constructor.
- std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
+ std::unique_ptr<Value> narrow_value(new Value("narrow"));
ASSERT_TRUE(narrow_value.get());
- ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
- std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
+ ASSERT_TRUE(narrow_value->IsType(Value::Type::STRING));
+ std::unique_ptr<Value> utf16_value(new Value(ASCIIToUTF16("utf16")));
ASSERT_TRUE(utf16_value.get());
- ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(utf16_value->IsType(Value::Type::STRING));
// Test overloaded GetAsString.
std::string narrow = "http://google.com";
string16 utf16 = ASCIIToUTF16("http://google.com");
- const StringValue* string_value = NULL;
+ const Value* string_value = NULL;
ASSERT_TRUE(narrow_value->GetAsString(&narrow));
ASSERT_TRUE(narrow_value->GetAsString(&utf16));
ASSERT_TRUE(narrow_value->GetAsString(&string_value));
@@ -171,65 +487,23 @@ TEST(ValuesTest, StringValue) {
// Don't choke on NULL values.
ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(NULL)));
ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(NULL)));
- ASSERT_TRUE(narrow_value->GetAsString(
- static_cast<const StringValue**>(NULL)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(NULL)));
}
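
Since StringValue is now just an alias for Value, string payloads are
constructed and inspected through Value directly. A short sketch of the
equivalent post-uprev usage (illustrative, inside a test body):

  base::Value narrow("narrow");  // was: base::StringValue("narrow")
  ASSERT_TRUE(narrow.IsType(base::Value::Type::STRING));
  std::string out;
  ASSERT_TRUE(narrow.GetAsString(&out));  // out == "narrow"
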
-// This is a Value object that allows us to tell if it's been
-// properly deleted by modifying the value of external flag on destruction.
-class DeletionTestValue : public Value {
- public:
- explicit DeletionTestValue(bool* deletion_flag) : Value(TYPE_NULL) {
- Init(deletion_flag); // Separate function so that we can use ASSERT_*
- }
-
- void Init(bool* deletion_flag) {
- ASSERT_TRUE(deletion_flag);
- deletion_flag_ = deletion_flag;
- *deletion_flag_ = false;
- }
-
- ~DeletionTestValue() override { *deletion_flag_ = true; }
-
- private:
- bool* deletion_flag_;
-};
-
TEST(ValuesTest, ListDeletion) {
- bool deletion_flag = true;
-
- {
- ListValue list;
- list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- }
- EXPECT_TRUE(deletion_flag);
-
- {
- ListValue list;
- list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- list.Clear();
- EXPECT_TRUE(deletion_flag);
- }
-
- {
- ListValue list;
- list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- EXPECT_TRUE(list.Set(0, Value::CreateNullValue()));
- EXPECT_TRUE(deletion_flag);
- }
+ ListValue list;
+ list.Append(MakeUnique<Value>());
+ EXPECT_FALSE(list.empty());
+ list.Clear();
+ EXPECT_TRUE(list.empty());
}
TEST(ValuesTest, ListRemoval) {
- bool deletion_flag = true;
std::unique_ptr<Value> removed_item;
{
ListValue list;
- list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
+ list.Append(MakeUnique<Value>());
EXPECT_EQ(1U, list.GetSize());
EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
&removed_item));
@@ -238,88 +512,55 @@ TEST(ValuesTest, ListRemoval) {
ASSERT_TRUE(removed_item);
EXPECT_EQ(0U, list.GetSize());
}
- EXPECT_FALSE(deletion_flag);
removed_item.reset();
- EXPECT_TRUE(deletion_flag);
{
ListValue list;
- list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
+ list.Append(MakeUnique<Value>());
EXPECT_TRUE(list.Remove(0, NULL));
- EXPECT_TRUE(deletion_flag);
EXPECT_EQ(0U, list.GetSize());
}
{
ListValue list;
- std::unique_ptr<DeletionTestValue> value(
- new DeletionTestValue(&deletion_flag));
- DeletionTestValue* original_value = value.get();
+ auto value = MakeUnique<Value>();
+ Value* original_value = value.get();
list.Append(std::move(value));
- EXPECT_FALSE(deletion_flag);
size_t index = 0;
list.Remove(*original_value, &index);
EXPECT_EQ(0U, index);
- EXPECT_TRUE(deletion_flag);
EXPECT_EQ(0U, list.GetSize());
}
}
TEST(ValuesTest, DictionaryDeletion) {
std::string key = "test";
- bool deletion_flag = true;
-
- {
- DictionaryValue dict;
- dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- }
- EXPECT_TRUE(deletion_flag);
-
- {
- DictionaryValue dict;
- dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- dict.Clear();
- EXPECT_TRUE(deletion_flag);
- }
-
- {
- DictionaryValue dict;
- dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
- dict.Set(key, Value::CreateNullValue());
- EXPECT_TRUE(deletion_flag);
- }
+ DictionaryValue dict;
+ dict.Set(key, MakeUnique<Value>());
+ EXPECT_FALSE(dict.empty());
+ dict.Clear();
+ EXPECT_TRUE(dict.empty());
}
TEST(ValuesTest, DictionaryRemoval) {
std::string key = "test";
- bool deletion_flag = true;
std::unique_ptr<Value> removed_item;
{
DictionaryValue dict;
- dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
+ dict.Set(key, MakeUnique<Value>());
EXPECT_TRUE(dict.HasKey(key));
EXPECT_FALSE(dict.Remove("absent key", &removed_item));
EXPECT_TRUE(dict.Remove(key, &removed_item));
EXPECT_FALSE(dict.HasKey(key));
ASSERT_TRUE(removed_item);
}
- EXPECT_FALSE(deletion_flag);
- removed_item.reset();
- EXPECT_TRUE(deletion_flag);
{
DictionaryValue dict;
- dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
- EXPECT_FALSE(deletion_flag);
+ dict.Set(key, MakeUnique<Value>());
EXPECT_TRUE(dict.HasKey(key));
EXPECT_TRUE(dict.Remove(key, NULL));
- EXPECT_TRUE(deletion_flag);
EXPECT_FALSE(dict.HasKey(key));
}
}
@@ -343,7 +584,7 @@ TEST(ValuesTest, DictionaryWithoutPathExpansion) {
EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
Value* value4;
ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
- EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+ EXPECT_EQ(Value::Type::NONE, value4->GetType());
}
// Tests the deprecated version of SetWithoutPathExpansion.
@@ -367,7 +608,7 @@ TEST(ValuesTest, DictionaryWithoutPathExpansionDeprecated) {
EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
Value* value4;
ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
- EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+ EXPECT_EQ(Value::Type::NONE, value4->GetType());
}
TEST(ValuesTest, DictionaryRemovePath) {
@@ -378,7 +619,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
std::unique_ptr<Value> removed_item;
EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
+ EXPECT_TRUE(removed_item->IsType(base::Value::Type::INTEGER));
EXPECT_FALSE(dict.HasKey("a.long.way.down"));
EXPECT_FALSE(dict.HasKey("a.long.way"));
EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
@@ -391,7 +632,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
removed_item.reset();
EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(removed_item->IsType(base::Value::Type::BOOLEAN));
EXPECT_TRUE(dict.empty());
}
@@ -400,38 +641,34 @@ TEST(ValuesTest, DeepCopy) {
std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
Value* original_null = scoped_null.get();
original_dict.Set("null", std::move(scoped_null));
- std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
- FundamentalValue* original_bool = scoped_bool.get();
+ std::unique_ptr<Value> scoped_bool(new Value(true));
+ Value* original_bool = scoped_bool.get();
original_dict.Set("bool", std::move(scoped_bool));
- std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
- FundamentalValue* original_int = scoped_int.get();
+ std::unique_ptr<Value> scoped_int(new Value(42));
+ Value* original_int = scoped_int.get();
original_dict.Set("int", std::move(scoped_int));
- std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
- FundamentalValue* original_double = scoped_double.get();
+ std::unique_ptr<Value> scoped_double(new Value(3.14));
+ Value* original_double = scoped_double.get();
original_dict.Set("double", std::move(scoped_double));
- std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
- StringValue* original_string = scoped_string.get();
+ std::unique_ptr<Value> scoped_string(new Value("hello"));
+ Value* original_string = scoped_string.get();
original_dict.Set("string", std::move(scoped_string));
- std::unique_ptr<StringValue> scoped_string16(
- new StringValue(ASCIIToUTF16("hello16")));
- StringValue* original_string16 = scoped_string16.get();
+ std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
+ Value* original_string16 = scoped_string16.get();
original_dict.Set("string16", std::move(scoped_string16));
- std::unique_ptr<char[]> original_buffer(new char[42]);
- memset(original_buffer.get(), '!', 42);
+ std::vector<char> original_buffer(42, '!');
std::unique_ptr<BinaryValue> scoped_binary(
- new BinaryValue(std::move(original_buffer), 42));
+ new BinaryValue(std::move(original_buffer)));
BinaryValue* original_binary = scoped_binary.get();
original_dict.Set("binary", std::move(scoped_binary));
std::unique_ptr<ListValue> scoped_list(new ListValue());
Value* original_list = scoped_list.get();
- std::unique_ptr<FundamentalValue> scoped_list_element_0(
- new FundamentalValue(0));
+ std::unique_ptr<Value> scoped_list_element_0(new Value(0));
Value* original_list_element_0 = scoped_list_element_0.get();
scoped_list->Append(std::move(scoped_list_element_0));
- std::unique_ptr<FundamentalValue> scoped_list_element_1(
- new FundamentalValue(1));
+ std::unique_ptr<Value> scoped_list_element_1(new Value(1));
Value* original_list_element_1 = scoped_list_element_1.get();
scoped_list->Append(std::move(scoped_list_element_1));
original_dict.Set("list", std::move(scoped_list));
@@ -450,13 +687,13 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("null", &copy_null));
ASSERT_TRUE(copy_null);
ASSERT_NE(copy_null, original_null);
- ASSERT_TRUE(copy_null->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(copy_null->IsType(Value::Type::NONE));
Value* copy_bool = NULL;
ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
ASSERT_TRUE(copy_bool);
ASSERT_NE(copy_bool, original_bool);
- ASSERT_TRUE(copy_bool->IsType(Value::TYPE_BOOLEAN));
+ ASSERT_TRUE(copy_bool->IsType(Value::Type::BOOLEAN));
bool copy_bool_value = false;
ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
ASSERT_TRUE(copy_bool_value);
@@ -465,7 +702,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("int", &copy_int));
ASSERT_TRUE(copy_int);
ASSERT_NE(copy_int, original_int);
- ASSERT_TRUE(copy_int->IsType(Value::TYPE_INTEGER));
+ ASSERT_TRUE(copy_int->IsType(Value::Type::INTEGER));
int copy_int_value = 0;
ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
ASSERT_EQ(42, copy_int_value);
@@ -474,7 +711,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("double", &copy_double));
ASSERT_TRUE(copy_double);
ASSERT_NE(copy_double, original_double);
- ASSERT_TRUE(copy_double->IsType(Value::TYPE_DOUBLE));
+ ASSERT_TRUE(copy_double->IsType(Value::Type::DOUBLE));
double copy_double_value = 0;
ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
ASSERT_EQ(3.14, copy_double_value);
@@ -483,7 +720,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string", &copy_string));
ASSERT_TRUE(copy_string);
ASSERT_NE(copy_string, original_string);
- ASSERT_TRUE(copy_string->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(copy_string->IsType(Value::Type::STRING));
std::string copy_string_value;
string16 copy_string16_value;
ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
@@ -495,7 +732,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
ASSERT_TRUE(copy_string16);
ASSERT_NE(copy_string16, original_string16);
- ASSERT_TRUE(copy_string16->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(copy_string16->IsType(Value::Type::STRING));
ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
ASSERT_EQ(std::string("hello16"), copy_string_value);
@@ -505,20 +742,17 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
ASSERT_TRUE(copy_binary);
ASSERT_NE(copy_binary, original_binary);
- ASSERT_TRUE(copy_binary->IsType(Value::TYPE_BINARY));
- ASSERT_NE(original_binary->GetBuffer(),
- static_cast<BinaryValue*>(copy_binary)->GetBuffer());
- ASSERT_EQ(original_binary->GetSize(),
- static_cast<BinaryValue*>(copy_binary)->GetSize());
- ASSERT_EQ(0, memcmp(original_binary->GetBuffer(),
- static_cast<BinaryValue*>(copy_binary)->GetBuffer(),
- original_binary->GetSize()));
+ ASSERT_TRUE(copy_binary->IsType(Value::Type::BINARY));
+ ASSERT_NE(original_binary->GetBuffer(), copy_binary->GetBuffer());
+ ASSERT_EQ(original_binary->GetSize(), copy_binary->GetSize());
+ ASSERT_EQ(0, memcmp(original_binary->GetBuffer(), copy_binary->GetBuffer(),
+ original_binary->GetSize()));
Value* copy_value = NULL;
ASSERT_TRUE(copy_dict->Get("list", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, original_list);
- ASSERT_TRUE(copy_value->IsType(Value::TYPE_LIST));
+ ASSERT_TRUE(copy_value->IsType(Value::Type::LIST));
ListValue* copy_list = NULL;
ASSERT_TRUE(copy_value->GetAsList(&copy_list));
ASSERT_TRUE(copy_list);
@@ -544,7 +778,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, original_nested_dictionary);
- ASSERT_TRUE(copy_value->IsType(Value::TYPE_DICTIONARY));
+ ASSERT_TRUE(copy_value->IsType(Value::Type::DICTIONARY));
DictionaryValue* copy_nested_dictionary = NULL;
ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
ASSERT_TRUE(copy_nested_dictionary);
@@ -557,7 +791,7 @@ TEST(ValuesTest, Equals) {
EXPECT_NE(null1.get(), null2.get());
EXPECT_TRUE(null1->Equals(null2.get()));
- FundamentalValue boolean(false);
+ Value boolean(false);
EXPECT_FALSE(null1->Equals(&boolean));
DictionaryValue dv;
@@ -582,7 +816,7 @@ TEST(ValuesTest, Equals) {
copy->Set("f", std::move(list_copy));
EXPECT_TRUE(dv.Equals(copy.get()));
- original_list->Append(WrapUnique(new FundamentalValue(true)));
+ original_list->Append(MakeUnique<Value>(true));
EXPECT_FALSE(dv.Equals(copy.get()));
// Check if Equals detects differences in only the keys.
@@ -599,9 +833,9 @@ TEST(ValuesTest, StaticEquals) {
EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
EXPECT_TRUE(Value::Equals(NULL, NULL));
- std::unique_ptr<Value> i42(new FundamentalValue(42));
- std::unique_ptr<Value> j42(new FundamentalValue(42));
- std::unique_ptr<Value> i17(new FundamentalValue(17));
+ std::unique_ptr<Value> i42(new Value(42));
+ std::unique_ptr<Value> j42(new Value(42));
+ std::unique_ptr<Value> i17(new Value(17));
EXPECT_TRUE(Value::Equals(i42.get(), i42.get()));
EXPECT_TRUE(Value::Equals(j42.get(), i42.get()));
EXPECT_TRUE(Value::Equals(i42.get(), j42.get()));
@@ -621,37 +855,33 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
Value* original_null = scoped_null.get();
original_dict.Set("null", std::move(scoped_null));
- std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+ std::unique_ptr<Value> scoped_bool(new Value(true));
Value* original_bool = scoped_bool.get();
original_dict.Set("bool", std::move(scoped_bool));
- std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+ std::unique_ptr<Value> scoped_int(new Value(42));
Value* original_int = scoped_int.get();
original_dict.Set("int", std::move(scoped_int));
- std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+ std::unique_ptr<Value> scoped_double(new Value(3.14));
Value* original_double = scoped_double.get();
original_dict.Set("double", std::move(scoped_double));
- std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
+ std::unique_ptr<Value> scoped_string(new Value("hello"));
Value* original_string = scoped_string.get();
original_dict.Set("string", std::move(scoped_string));
- std::unique_ptr<StringValue> scoped_string16(
- new StringValue(ASCIIToUTF16("hello16")));
+ std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
Value* original_string16 = scoped_string16.get();
original_dict.Set("string16", std::move(scoped_string16));
- std::unique_ptr<char[]> original_buffer(new char[42]);
- memset(original_buffer.get(), '!', 42);
+ std::vector<char> original_buffer(42, '!');
std::unique_ptr<BinaryValue> scoped_binary(
- new BinaryValue(std::move(original_buffer), 42));
+ new BinaryValue(std::move(original_buffer)));
Value* original_binary = scoped_binary.get();
original_dict.Set("binary", std::move(scoped_binary));
std::unique_ptr<ListValue> scoped_list(new ListValue());
Value* original_list = scoped_list.get();
- std::unique_ptr<FundamentalValue> scoped_list_element_0(
- new FundamentalValue(0));
+ std::unique_ptr<Value> scoped_list_element_0(new Value(0));
scoped_list->Append(std::move(scoped_list_element_0));
- std::unique_ptr<FundamentalValue> scoped_list_element_1(
- new FundamentalValue(1));
+ std::unique_ptr<Value> scoped_list_element_1(new Value(1));
scoped_list->Append(std::move(scoped_list_element_1));
original_dict.Set("list", std::move(scoped_list));
@@ -739,7 +969,7 @@ TEST(ValuesTest, RemoveEmptyChildren) {
{
std::unique_ptr<ListValue> inner(new ListValue);
std::unique_ptr<ListValue> inner2(new ListValue);
- inner2->Append(WrapUnique(new StringValue("hello")));
+ inner2->Append(MakeUnique<Value>("hello"));
inner->Append(WrapUnique(new DictionaryValue));
inner->Append(std::move(inner2));
root->Set("list_with_empty_children", std::move(inner));
@@ -837,7 +1067,7 @@ TEST(ValuesTest, DictionaryIterator) {
ADD_FAILURE();
}
- StringValue value1("value1");
+ Value value1("value1");
dict.Set("key1", value1.CreateDeepCopy());
bool seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
@@ -848,7 +1078,7 @@ TEST(ValuesTest, DictionaryIterator) {
}
EXPECT_TRUE(seen1);
- StringValue value2("value2");
+ Value value2("value2");
dict.Set("key2", value2.CreateDeepCopy());
bool seen2 = seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
@@ -874,11 +1104,11 @@ TEST(ValuesTest, GetWithNullOutValue) {
DictionaryValue main_dict;
ListValue main_list;
- FundamentalValue bool_value(false);
- FundamentalValue int_value(1234);
- FundamentalValue double_value(12.34567);
- StringValue string_value("foo");
- BinaryValue binary_value;
+ Value bool_value(false);
+ Value int_value(1234);
+ Value double_value(12.34567);
+ Value string_value("foo");
+ BinaryValue binary_value(Value::Type::BINARY);
DictionaryValue dict_value;
ListValue list_value;
diff --git a/base/version.cc b/base/version.cc
index 02213fbf15..ca97a84222 100644
--- a/base/version.cc
+++ b/base/version.cc
@@ -93,6 +93,8 @@ Version::Version(const std::string& version_str) {
components_.swap(parsed);
}
+Version::Version(std::vector<uint32_t> components)
+    : components_(std::move(components)) {}
+
bool Version::IsValid() const {
return (!components_.empty());
}
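
The new constructor lets callers build a Version from numeric components
without first formatting a string. Hypothetical usage, mirroring the
MoveSemantics test added below:

  base::Version v({1, 2, 3, 4});  // equivalent to base::Version("1.2.3.4")
  DCHECK(v.IsValid());
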
diff --git a/base/version.h b/base/version.h
index 25b570a4e3..b3a0956bbe 100644
--- a/base/version.h
+++ b/base/version.h
@@ -25,13 +25,17 @@ class BASE_EXPORT Version {
Version(const Version& other);
- ~Version();
-
// Initializes from a decimal dotted version number, like "0.1.1".
// Each component is limited to a uint16_t. Call IsValid() to learn
// the outcome.
explicit Version(const std::string& version_str);
+ // Initializes from a vector of components, like {1, 2, 3, 4}. Call IsValid()
+ // to learn the outcome.
+ explicit Version(std::vector<uint32_t> components);
+
+ ~Version();
+
// Returns true if the object contains a valid version number.
bool IsValid() const;
@@ -69,8 +73,4 @@ BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
} // namespace base
-// TODO(xhwang) remove this when all users are updated to explicitly use the
-// namespace
-using base::Version;
-
#endif // BASE_VERSION_H_
diff --git a/base/version_unittest.cc b/base/version_unittest.cc
index 5d9ea9973c..4ca784fc11 100644
--- a/base/version_unittest.cc
+++ b/base/version_unittest.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <utility>
#include "base/macros.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -13,17 +14,17 @@
namespace {
TEST(VersionTest, DefaultConstructor) {
- Version v;
+ base::Version v;
EXPECT_FALSE(v.IsValid());
}
TEST(VersionTest, ValueSemantics) {
- Version v1("1.2.3.4");
+ base::Version v1("1.2.3.4");
EXPECT_TRUE(v1.IsValid());
- Version v3;
+ base::Version v3;
EXPECT_FALSE(v3.IsValid());
{
- Version v2(v1);
+ base::Version v2(v1);
v3 = v2;
EXPECT_TRUE(v2.IsValid());
EXPECT_EQ(v1, v2);
@@ -31,6 +32,14 @@ TEST(VersionTest, ValueSemantics) {
EXPECT_EQ(v3, v1);
}
+TEST(VersionTest, MoveSemantics) {
+ std::vector<uint32_t> components = {1, 2, 3, 4};
+ base::Version v1(std::move(components));
+ EXPECT_TRUE(v1.IsValid());
+ base::Version v2("1.2.3.4");
+ EXPECT_EQ(v1, v2);
+}
+
TEST(VersionTest, GetVersionFromString) {
static const struct version_string {
const char* input;
@@ -67,7 +76,7 @@ TEST(VersionTest, GetVersionFromString) {
};
for (size_t i = 0; i < arraysize(cases); ++i) {
- Version version(cases[i].input);
+ base::Version version(cases[i].input);
EXPECT_EQ(cases[i].success, version.IsValid());
if (cases[i].success) {
EXPECT_EQ(cases[i].parts, version.components().size());
@@ -96,8 +105,8 @@ TEST(VersionTest, Compare) {
{"11.0.10", "15.5.28.130162", -1},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
- Version lhs(cases[i].lhs);
- Version rhs(cases[i].rhs);
+ base::Version lhs(cases[i].lhs);
+ base::Version rhs(cases[i].rhs);
EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
cases[i].lhs << " ? " << cases[i].rhs;
@@ -152,7 +161,7 @@ TEST(VersionTest, CompareToWildcardString) {
{"1.2.0.0.0.0", "1.2.*", 0},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
- const Version version(cases[i].lhs);
+ const base::Version version(cases[i].lhs);
const int result = version.CompareToWildcardString(cases[i].rhs);
EXPECT_EQ(result, cases[i].expected) << cases[i].lhs << "?" << cases[i].rhs;
}
@@ -176,7 +185,7 @@ TEST(VersionTest, IsValidWildcardString) {
{"*.2", false},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
- EXPECT_EQ(Version::IsValidWildcardString(cases[i].version),
+ EXPECT_EQ(base::Version::IsValidWildcardString(cases[i].version),
cases[i].expected) << cases[i].version << "?" << cases[i].expected;
}
}
diff --git a/base/win/scoped_comptr.h b/base/win/scoped_comptr.h
index 5ce60e2b68..9442672054 100644
--- a/base/win/scoped_comptr.h
+++ b/base/win/scoped_comptr.h
@@ -51,7 +51,7 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// Explicit Release() of the held object. Useful for reuse of the
// ScopedComPtr instance.
// Note that this function equates to IUnknown::Release and should not
- // be confused with e.g. scoped_ptr::release().
+ // be confused with e.g. unique_ptr::release().
void Release() {
if (this->ptr_ != NULL) {
this->ptr_->Release();
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
index c72e4592b9..0d70c0b627 100644
--- a/base/win/scoped_handle_test_dll.cc
+++ b/base/win/scoped_handle_test_dll.cc
@@ -66,7 +66,7 @@ bool InternalRunThreadTest() {
::CloseHandle(ready_event);
if (threads_.size() != kNumThreads) {
- for (const auto& thread : threads_)
+ for (auto* thread : threads_)
::CloseHandle(thread);
::CloseHandle(start_event);
return false;
@@ -74,7 +74,7 @@ bool InternalRunThreadTest() {
::SetEvent(start_event);
::CloseHandle(start_event);
- for (const auto& thread : threads_) {
+ for (auto* thread : threads_) {
::WaitForSingleObject(thread, INFINITE);
::CloseHandle(thread);
}
diff --git a/base/win/scoped_hdc.h b/base/win/scoped_hdc.h
index fa686dd050..890e34a82c 100644
--- a/base/win/scoped_hdc.h
+++ b/base/win/scoped_hdc.h
@@ -7,6 +7,7 @@
#include <windows.h>
+#include "base/debug/gdi_debug_util_win.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/win/scoped_handle.h"
@@ -28,7 +29,8 @@ class ScopedGetDC {
// If GetDC(NULL) returns NULL, something really bad has happened, like
// GDI handle exhaustion. In this case Chrome is going to behave badly no
// matter what, so we may as well just force a crash now.
- CHECK(hdc_);
+ if (!hdc_)
+ base::debug::CollectGDIUsageAndDie();
}
}
diff --git a/components/timers/alarm_timer_chromeos.cc b/components/timers/alarm_timer_chromeos.cc
index 3f1abbfbc1..601b411bd7 100644
--- a/components/timers/alarm_timer_chromeos.cc
+++ b/components/timers/alarm_timer_chromeos.cc
@@ -6,450 +6,146 @@
#include <stdint.h>
#include <sys/timerfd.h>
+
+#include <algorithm>
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/debug/task_annotator.h"
#include "base/files/file_util.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
+#include "base/memory/ptr_util.h"
#include "base/pending_task.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
namespace timers {
-namespace {
-// This class represents the IO thread that the AlarmTimer::Delegate may use for
-// watching file descriptors if it gets called from a thread that does not have
-// a MessageLoopForIO. It is a lazy global instance because it may not always
-// be necessary.
-class RtcAlarmIOThread : public base::Thread {
- public:
- RtcAlarmIOThread() : Thread("RTC Alarm IO Thread") {
- CHECK(
- StartWithOptions(base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
- }
- ~RtcAlarmIOThread() override { Stop(); }
-};
-
-base::LazyInstance<RtcAlarmIOThread> g_io_thread = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-// Watches a MessageLoop and runs a callback if that MessageLoop will be
-// destroyed.
-class AlarmTimer::MessageLoopObserver
- : public base::MessageLoop::DestructionObserver {
- public:
- // Constructs a MessageLoopObserver that will observe |message_loop| and will
- // call |on_will_be_destroyed_callback| when |message_loop| is about to be
- // destroyed.
- MessageLoopObserver(base::MessageLoop* message_loop,
- base::Closure on_will_be_destroyed_callback)
- : message_loop_(message_loop),
- on_will_be_destroyed_callback_(on_will_be_destroyed_callback) {
- DCHECK(message_loop_);
- message_loop_->AddDestructionObserver(this);
- }
-
- ~MessageLoopObserver() override {
- // If |message_loop_| was destroyed, then this class will have already
- // unregistered itself. Doing it again will trigger a warning.
- if (message_loop_)
- message_loop_->RemoveDestructionObserver(this);
- }
-
- // base::MessageLoop::DestructionObserver override.
- void WillDestroyCurrentMessageLoop() override {
- message_loop_->RemoveDestructionObserver(this);
- message_loop_ = NULL;
-
- on_will_be_destroyed_callback_.Run();
- }
-
- private:
- // The MessageLoop that this class should watch. Is a weak pointer.
- base::MessageLoop* message_loop_;
-
- // The callback to run when |message_loop_| will be destroyed.
- base::Closure on_will_be_destroyed_callback_;
-
- DISALLOW_COPY_AND_ASSIGN(MessageLoopObserver);
-};
-
-// This class manages a Real Time Clock (RTC) alarm, a feature that is available
-// from linux version 3.11 onwards. It creates a file descriptor for the RTC
-// alarm timer and then watches that file descriptor to see when it can be read
-// without blocking, indicating that the timer has fired.
-//
-// A major problem for this class is that watching file descriptors is only
-// available on a MessageLoopForIO but there is no guarantee the timer is going
-// to be created on one. To get around this, the timer has a dedicated thread
-// with a MessageLoopForIO that posts tasks back to the thread that started the
-// timer.
-class AlarmTimer::Delegate
- : public base::RefCountedThreadSafe<AlarmTimer::Delegate>,
- public base::MessageLoopForIO::Watcher {
- public:
- // Construct a Delegate for the AlarmTimer. It should be safe to call
- // |on_timer_fired_callback| multiple times.
- explicit Delegate(base::Closure on_timer_fired_callback);
-
- // Returns true if the system timer managed by this delegate is capable of
- // waking the system from suspend.
- bool CanWakeFromSuspend();
-
- // Resets the timer to fire after |delay| has passed. Cancels any
- // pre-existing delay.
- void Reset(base::TimeDelta delay);
-
- // Stops the currently running timer. It should be safe to call this even if
- // the timer is not running.
- void Stop();
-
- // Sets a hook that will be called when the timer fires and a task has been
- // queued on |origin_task_runner_|. Used by tests to wait until a task is
- // pending in the MessageLoop.
- void SetTimerFiredCallbackForTest(base::Closure test_callback);
-
- // base::MessageLoopForIO::Watcher overrides.
- void OnFileCanReadWithoutBlocking(int fd) override;
- void OnFileCanWriteWithoutBlocking(int fd) override;
-
- private:
- friend class base::RefCountedThreadSafe<Delegate>;
- ~Delegate() override;
-
- // Actually performs the system calls to set up the timer. This must be
- // called on a MessageLoopForIO.
- void ResetImpl(base::TimeDelta delay, int reset_sequence_number);
-
- // Callback that is run when the timer fires. Must be run on
- // |origin_task_runner_|.
- void OnTimerFired(int reset_sequence_number);
-
- // File descriptor associated with the alarm timer.
- int alarm_fd_;
-
- // Task runner which initially started the timer.
- scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
-
- // Callback that should be run when the timer fires.
- base::Closure on_timer_fired_callback_;
-
- // Hook used by tests to be notified when the timer has fired and a task has
- // been queued in the MessageLoop.
- base::Closure on_timer_fired_callback_for_test_;
-
- // Manages watching file descriptors.
- std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
-
- // The sequence numbers of the last Reset() call handled respectively on
- // |origin_task_runner_| and on the MessageLoopForIO used for watching the
- // timer file descriptor. Note that these can be the same MessageLoop.
- // OnTimerFired() runs |on_timer_fired_callback_| only if the sequence number
- // it receives from the MessageLoopForIO matches
- // |origin_reset_sequence_number_|.
- int origin_reset_sequence_number_;
- int io_reset_sequence_number_;
-
- DISALLOW_COPY_AND_ASSIGN(Delegate);
-};
-
-AlarmTimer::Delegate::Delegate(base::Closure on_timer_fired_callback)
- : alarm_fd_(timerfd_create(CLOCK_REALTIME_ALARM, 0)),
- on_timer_fired_callback_(on_timer_fired_callback),
- origin_reset_sequence_number_(0),
- io_reset_sequence_number_(0) {
- // The call to timerfd_create above may fail. This is the only indication
- // that CLOCK_REALTIME_ALARM is not supported on this system.
- DPLOG_IF(INFO, (alarm_fd_ == -1))
- << "CLOCK_REALTIME_ALARM not supported on this system";
-}
-
-AlarmTimer::Delegate::~Delegate() {
- if (alarm_fd_ != -1)
- close(alarm_fd_);
-}
-
-bool AlarmTimer::Delegate::CanWakeFromSuspend() {
- return alarm_fd_ != -1;
-}
-
-void AlarmTimer::Delegate::Reset(base::TimeDelta delay) {
- // Get a task runner for the current message loop. When the timer fires, we
- // will
- // post tasks to this proxy to let the parent timer know.
- origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
-
- // Increment the sequence number. Used to invalidate any events that have
- // been queued but not yet run since the last time Reset() was called.
- origin_reset_sequence_number_++;
-
- // Calling timerfd_settime with a zero delay actually clears the timer so if
- // the user has requested a zero delay timer, we need to handle it
- // differently. We queue the task here but we still go ahead and call
- // timerfd_settime with the zero delay anyway to cancel any previous delay
- // that might have been programmed.
- if (delay <= base::TimeDelta::FromMicroseconds(0)) {
- // The timerfd_settime documentation is vague on what happens when it is
- // passed a negative delay. We can sidestep the issue by ensuring that
- // the delay is 0.
- delay = base::TimeDelta::FromMicroseconds(0);
- origin_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
- origin_reset_sequence_number_));
- }
-
- // Run ResetImpl() on a MessageLoopForIO.
- if (base::MessageLoopForIO::IsCurrent()) {
- ResetImpl(delay, origin_reset_sequence_number_);
- } else {
- g_io_thread.Pointer()->task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&Delegate::ResetImpl, scoped_refptr<Delegate>(this), delay,
- origin_reset_sequence_number_));
- }
-}
-
-void AlarmTimer::Delegate::Stop() {
- // Stop the RTC from a MessageLoopForIO.
- if (!base::MessageLoopForIO::IsCurrent()) {
- g_io_thread.Pointer()->task_runner()->PostTask(
- FROM_HERE, base::Bind(&Delegate::Stop, scoped_refptr<Delegate>(this)));
- return;
- }
-
- // Stop watching for events.
- fd_watcher_.reset();
-
- // Now clear the timer.
- DCHECK_NE(alarm_fd_, -1);
-#if defined(ANDROID)
- itimerspec blank_time;
- memset(&blank_time, 0, sizeof(blank_time));
-#else
- itimerspec blank_time = {};
-#endif // defined(ANDROID)
- if (timerfd_settime(alarm_fd_, 0, &blank_time, NULL) < 0)
- PLOG(ERROR) << "Unable to clear alarm time. Timer may still fire.";
-}
-
-void AlarmTimer::Delegate::OnFileCanReadWithoutBlocking(int fd) {
- DCHECK_EQ(alarm_fd_, fd);
-
- // Read from the fd to ack the event.
- char val[sizeof(uint64_t)];
- if (!base::ReadFromFD(alarm_fd_, val, sizeof(uint64_t)))
- PLOG(DFATAL) << "Unable to read from timer file descriptor.";
-
- // Make sure that the parent timer is informed on the proper message loop.
- if (origin_task_runner_->RunsTasksOnCurrentThread()) {
- OnTimerFired(io_reset_sequence_number_);
- } else {
- origin_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
- io_reset_sequence_number_));
- }
-}
-
-void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /*fd*/) {
- NOTREACHED();
-}
-
-void AlarmTimer::Delegate::SetTimerFiredCallbackForTest(
- base::Closure test_callback) {
- on_timer_fired_callback_for_test_ = test_callback;
-}
-
-void AlarmTimer::Delegate::ResetImpl(base::TimeDelta delay,
- int reset_sequence_number) {
- DCHECK(base::MessageLoopForIO::IsCurrent());
- DCHECK_NE(alarm_fd_, -1);
-
- // Store the sequence number in the IO thread variable. When the timer
- // fires, we will bind this value to the OnTimerFired callback to ensure
- // that we do the right thing if the timer gets reset.
- io_reset_sequence_number_ = reset_sequence_number;
-
- // If we were already watching the fd, this will stop watching it.
- fd_watcher_.reset(new base::MessageLoopForIO::FileDescriptorWatcher);
-
- // Start watching the fd to see when the timer fires.
- if (!base::MessageLoopForIO::current()->WatchFileDescriptor(
- alarm_fd_, false, base::MessageLoopForIO::WATCH_READ,
- fd_watcher_.get(), this)) {
- LOG(ERROR) << "Error while attempting to watch file descriptor for RTC "
- << "alarm. Timer will not fire.";
- }
-
- // Actually set the timer. This will also clear the pre-existing timer, if
- // any.
-#if defined(ANDROID)
- itimerspec alarm_time;
- memset(&alarm_time, 0, sizeof(alarm_time));
-#else
- itimerspec alarm_time = {};
-#endif // defined(ANDROID)
- alarm_time.it_value.tv_sec = delay.InSeconds();
- alarm_time.it_value.tv_nsec =
- (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
- base::Time::kNanosecondsPerMicrosecond;
- if (timerfd_settime(alarm_fd_, 0, &alarm_time, NULL) < 0)
- PLOG(ERROR) << "Error while setting alarm time. Timer will not fire";
-}
-
-void AlarmTimer::Delegate::OnTimerFired(int reset_sequence_number) {
- DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
-
- // If a test wants to be notified when this function is about to run, then
- // re-queue this task in the MessageLoop and run the test's callback.
- if (!on_timer_fired_callback_for_test_.is_null()) {
- origin_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
- reset_sequence_number));
-
- on_timer_fired_callback_for_test_.Run();
- on_timer_fired_callback_for_test_.Reset();
- return;
- }
-
- // Check to make sure that the timer was not reset in the time between when
- // this task was queued to run and now. If it was reset, then don't do
- // anything.
- if (reset_sequence_number != origin_reset_sequence_number_)
- return;
-
- on_timer_fired_callback_.Run();
-}
AlarmTimer::AlarmTimer(bool retain_user_task, bool is_repeating)
: base::Timer(retain_user_task, is_repeating),
- can_wake_from_suspend_(false),
- origin_message_loop_(NULL),
- weak_factory_(this) {
- Init();
-}
-
-AlarmTimer::AlarmTimer(const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating)
- : base::Timer(posted_from, delay, user_task, is_repeating),
- can_wake_from_suspend_(false),
- origin_message_loop_(NULL),
- weak_factory_(this) {
- Init();
-}
+ alarm_fd_(timerfd_create(CLOCK_REALTIME_ALARM, 0)),
+ weak_factory_(this) {}
AlarmTimer::~AlarmTimer() {
+ DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
Stop();
}
-void AlarmTimer::SetTimerFiredCallbackForTest(base::Closure test_callback) {
- delegate_->SetTimerFiredCallbackForTest(test_callback);
-}
-
-void AlarmTimer::Init() {
- delegate_ = make_scoped_refptr(new AlarmTimer::Delegate(
- base::Bind(&AlarmTimer::OnTimerFired, weak_factory_.GetWeakPtr())));
- can_wake_from_suspend_ = delegate_->CanWakeFromSuspend();
-}
-
void AlarmTimer::Stop() {
+ DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+
if (!base::Timer::is_running())
return;
- if (!can_wake_from_suspend_) {
+ if (!CanWakeFromSuspend()) {
base::Timer::Stop();
return;
}
- // Clear the running flag, stop the delegate, and delete the pending task.
+ // Cancel any previous callbacks.
+ weak_factory_.InvalidateWeakPtrs();
+
base::Timer::set_is_running(false);
- delegate_->Stop();
+ alarm_fd_watcher_.reset();
pending_task_.reset();
- // Stop watching |origin_message_loop_|.
- origin_message_loop_ = NULL;
- message_loop_observer_.reset();
-
if (!base::Timer::retain_user_task())
base::Timer::set_user_task(base::Closure());
}
void AlarmTimer::Reset() {
- if (!can_wake_from_suspend_) {
+ DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(!base::Timer::user_task().is_null());
+
+ if (!CanWakeFromSuspend()) {
base::Timer::Reset();
return;
}
- DCHECK(!base::Timer::user_task().is_null());
- DCHECK(!origin_message_loop_ ||
- origin_message_loop_->task_runner()->RunsTasksOnCurrentThread());
-
- // Make sure that the timer will stop if the underlying message loop is
- // destroyed.
- if (!origin_message_loop_) {
- origin_message_loop_ = base::MessageLoop::current();
- message_loop_observer_.reset(new MessageLoopObserver(
- origin_message_loop_,
- base::Bind(&AlarmTimer::WillDestroyCurrentMessageLoop,
- weak_factory_.GetWeakPtr())));
- }
+ // Cancel any previous callbacks and stop watching |alarm_fd_|.
+ weak_factory_.InvalidateWeakPtrs();
+ alarm_fd_watcher_.reset();
+
+ // Ensure that the delay is not negative.
+ const base::TimeDelta delay =
+ std::max(base::TimeDelta(), base::Timer::GetCurrentDelay());
// Set up the pending task.
- if (base::Timer::GetCurrentDelay() > base::TimeDelta::FromMicroseconds(0)) {
- base::Timer::set_desired_run_time(base::TimeTicks::Now() +
- base::Timer::GetCurrentDelay());
- pending_task_.reset(new base::PendingTask(
- base::Timer::posted_from(), base::Timer::user_task(),
- base::Timer::desired_run_time(), true /* nestable */));
- } else {
- base::Timer::set_desired_run_time(base::TimeTicks());
- pending_task_.reset(new base::PendingTask(base::Timer::posted_from(),
- base::Timer::user_task()));
- }
- base::MessageLoop::current()->task_annotator()->DidQueueTask(
- "AlarmTimer::Reset", *pending_task_);
+ base::Timer::set_desired_run_time(
+ delay.is_zero() ? base::TimeTicks() : base::TimeTicks::Now() + delay);
+ pending_task_ = base::MakeUnique<base::PendingTask>(
+ base::Timer::posted_from(), base::Timer::user_task(),
+ base::Timer::desired_run_time(), true /* nestable */);
+
+ // Set |alarm_fd_| to be signaled when the delay expires. If the delay is
+ // zero, |alarm_fd_| will never be signaled. This overrides the previous
+ // delay, if any.
+ itimerspec alarm_time = {};
+ alarm_time.it_value.tv_sec = delay.InSeconds();
+ alarm_time.it_value.tv_nsec =
+ (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
+ base::Time::kNanosecondsPerMicrosecond;
+ if (timerfd_settime(alarm_fd_, 0, &alarm_time, NULL) < 0)
+ PLOG(ERROR) << "Error while setting alarm time. Timer will not fire.";
- // Now start up the timer.
- delegate_->Reset(base::Timer::GetCurrentDelay());
+ // The timer is running.
base::Timer::set_is_running(true);
+
+ // If the delay is zero, post the task now.
+ if (delay.is_zero()) {
+ origin_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&AlarmTimer::OnTimerFired, weak_factory_.GetWeakPtr()));
+ } else {
+ // Otherwise, generate a tracing event to indicate that the task was
+ // posted, and start watching |alarm_fd_|.
+ base::debug::TaskAnnotator().DidQueueTask("AlarmTimer::Reset",
+ *pending_task_);
+ alarm_fd_watcher_ = base::FileDescriptorWatcher::WatchReadable(
+ alarm_fd_, base::Bind(&AlarmTimer::OnAlarmFdReadableWithoutBlocking,
+ weak_factory_.GetWeakPtr()));
+ }
}
-void AlarmTimer::WillDestroyCurrentMessageLoop() {
- Stop();
+void AlarmTimer::OnAlarmFdReadableWithoutBlocking() {
+ DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(base::Timer::IsRunning());
+
+ // Read from |alarm_fd_| to ack the event.
+ char val[sizeof(uint64_t)];
+ if (!base::ReadFromFD(alarm_fd_, val, sizeof(uint64_t)))
+ PLOG(DFATAL) << "Unable to read from timer file descriptor.";
+
+ OnTimerFired();
}
void AlarmTimer::OnTimerFired() {
- if (!base::Timer::IsRunning())
- return;
-
+ DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(base::Timer::IsRunning());
DCHECK(pending_task_.get());
- // Take ownership of the pending user task, which is going to be cleared by
- // the Stop() or Reset() functions below.
- std::unique_ptr<base::PendingTask> pending_user_task(
- std::move(pending_task_));
+ // Take ownership of the PendingTask to prevent it from being deleted if the
+ // AlarmTimer is deleted.
+ const auto pending_user_task = std::move(pending_task_);
- // Re-schedule or stop the timer as requested.
- if (base::Timer::is_repeating())
- Reset();
- else
- Stop();
+ base::WeakPtr<AlarmTimer> weak_ptr = weak_factory_.GetWeakPtr();
+ // Run the task.
TRACE_TASK_EXECUTION("AlarmTimer::OnTimerFired", *pending_user_task);
+ base::debug::TaskAnnotator().RunTask("AlarmTimer::Reset",
+ pending_user_task.get());
+
+ // If the timer wasn't deleted, stopped or reset by the callback, reset or
+ // stop it.
+ if (weak_ptr.get()) {
+ if (base::Timer::is_repeating())
+ Reset();
+ else
+ Stop();
+ }
+}
- // Now run the user task.
- base::MessageLoop::current()->task_annotator()->RunTask("AlarmTimer::Reset",
- *pending_user_task);
+bool AlarmTimer::CanWakeFromSuspend() const {
+ return alarm_fd_ != -1;
}
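
The rewritten Reset() above arms the wakeup directly through the timerfd
API instead of bouncing through the removed Delegate and its dedicated IO
thread. A standalone sketch of that arming sequence (error handling
elided; CLOCK_REALTIME_ALARM requires Linux 3.11+ and CAP_WAKE_ALARM):

  #include <stdint.h>
  #include <sys/timerfd.h>
  #include <unistd.h>

  int fd = timerfd_create(CLOCK_REALTIME_ALARM, 0);  // -1 if unsupported
  itimerspec spec = {};
  spec.it_value.tv_sec = 5;  // fire once, five seconds from now
  timerfd_settime(fd, 0, &spec, nullptr);
  // The fd becomes readable when the alarm expires; reading a uint64_t
  // acknowledges the event, as OnAlarmFdReadableWithoutBlocking() does.
  uint64_t expirations;
  read(fd, &expirations, sizeof(expirations));
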
OneShotAlarmTimer::OneShotAlarmTimer() : AlarmTimer(false, false) {
@@ -461,25 +157,12 @@ OneShotAlarmTimer::~OneShotAlarmTimer() {
RepeatingAlarmTimer::RepeatingAlarmTimer() : AlarmTimer(true, true) {
}
-RepeatingAlarmTimer::RepeatingAlarmTimer(
- const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task)
- : AlarmTimer(posted_from, delay, user_task, true) {
-}
-
RepeatingAlarmTimer::~RepeatingAlarmTimer() {
}
SimpleAlarmTimer::SimpleAlarmTimer() : AlarmTimer(true, false) {
}
-SimpleAlarmTimer::SimpleAlarmTimer(const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task)
- : AlarmTimer(posted_from, delay, user_task, false) {
-}
-
SimpleAlarmTimer::~SimpleAlarmTimer() {
}
diff --git a/components/timers/alarm_timer_chromeos.h b/components/timers/alarm_timer_chromeos.h
index 313c9f9a93..d861aeeda0 100644
--- a/components/timers/alarm_timer_chromeos.h
+++ b/components/timers/alarm_timer_chromeos.h
@@ -7,85 +7,69 @@
#include <memory>
-#include "base/callback.h"
+#include "base/files/file_descriptor_watcher_posix.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
namespace base {
-class MessageLoop;
struct PendingTask;
}
namespace timers {
// The class implements a timer that is capable of waking the system up from a
-// suspended state. For example, this is useful for running tasks that are
+// suspended state. For example, this is useful for running tasks that are
// needed for maintaining network connectivity, like sending heartbeat messages.
// Currently, this feature is only available on Chrome OS systems running linux
-// version 3.11 or higher. On all other platforms, the AlarmTimer behaves
+// version 3.11 or higher. On all other platforms, the AlarmTimer behaves
// exactly the same way as a regular Timer.
+//
+// An AlarmTimer instance can only be used from the sequence on which it was
+// instantiated. Start() and Stop() must be called from a thread that supports
+// FileDescriptorWatcher.
class AlarmTimer : public base::Timer {
public:
~AlarmTimer() override;
- bool can_wake_from_suspend() const { return can_wake_from_suspend_; }
-
- // Sets a hook that will be called when the timer fires and a task has been
- // queued on |origin_message_loop_|. Used by tests to wait until a task is
- // pending in the MessageLoop.
- void SetTimerFiredCallbackForTest(base::Closure test_callback);
-
// Timer overrides.
void Stop() override;
void Reset() override;
protected:
- // The constructors for this class are protected because consumers should
- // instantiate one of the specialized sub-classes defined below instead.
AlarmTimer(bool retain_user_task, bool is_repeating);
- AlarmTimer(const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task,
- bool is_repeating);
private:
- // Common initialization that must be performed by both constructors. This
- // really should live in a delegated constructor but the way base::Timer's
- // constructors are written makes it really hard to do so.
- void Init();
+ // Called when |alarm_fd_| is readable without blocking. Reads data from
+ // |alarm_fd_| and calls OnTimerFired().
+ void OnAlarmFdReadableWithoutBlocking();
- // Will be called by the delegate to indicate that the timer has fired and
- // that the user task should be run.
+ // Called when the timer fires. Runs the callback.
void OnTimerFired();
- // Called when |origin_message_loop_| will be destroyed.
- void WillDestroyCurrentMessageLoop();
-
- // Delegate that will manage actually setting the timer.
- class Delegate;
- scoped_refptr<Delegate> delegate_;
+ // Tracks whether the timer has the ability to wake the system up from
+ // suspend. This is a runtime check because we won't know if the system
+ // supports being woken up from suspend until the constructor actually tries
+ // to set it up.
+ bool CanWakeFromSuspend() const;
- // Keeps track of the user task we want to run. A new one is constructed
- // every time Reset() is called.
- std::unique_ptr<base::PendingTask> pending_task_;
+ // Timer file descriptor.
+ const int alarm_fd_;
- // Tracks whether the timer has the ability to wake the system up from
- // suspend. This is a runtime check because we won't know if the system
- // supports being woken up from suspend until the delegate actually tries to
- // set it up.
- bool can_wake_from_suspend_;
+ // Watches |alarm_fd_|.
+ std::unique_ptr<base::FileDescriptorWatcher::Controller> alarm_fd_watcher_;
- // Pointer to the message loop that started the timer. Used to track the
- // destruction of that message loop.
- base::MessageLoop* origin_message_loop_;
+ // Posts tasks to the sequence on which this AlarmTimer was instantiated.
+ const scoped_refptr<base::SequencedTaskRunner> origin_task_runner_ =
+ base::SequencedTaskRunnerHandle::Get();
- // Observes |origin_message_loop_| and informs this class if it will be
- // destroyed.
- class MessageLoopObserver;
- std::unique_ptr<MessageLoopObserver> message_loop_observer_;
+ // Keeps track of the user task we want to run. A new one is constructed every
+ // time Reset() is called.
+ std::unique_ptr<base::PendingTask> pending_task_;
+ // Used to invalidate pending callbacks.
base::WeakPtrFactory<AlarmTimer> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AlarmTimer);
@@ -96,8 +80,6 @@ class AlarmTimer : public base::Timer {
// repeat. Useful for fire-and-forget tasks.
class OneShotAlarmTimer : public AlarmTimer {
public:
- // Constructs a basic OneShotAlarmTimer. An AlarmTimer constructed this way
- // requires that Start() is called before Reset() is called.
OneShotAlarmTimer();
~OneShotAlarmTimer() override;
};
@@ -108,18 +90,7 @@ class OneShotAlarmTimer : public AlarmTimer {
// after it fires.
class RepeatingAlarmTimer : public AlarmTimer {
public:
- // Constructs a basic RepeatingAlarmTimer. An AlarmTimer constructed this way
- // requires that Start() is called before Reset() is called.
RepeatingAlarmTimer();
-
- // Constructs a RepeatingAlarmTimer with pre-populated parameters but does not
- // start it. Useful if |user_task| or |delay| are not going to change.
- // Reset() can be called immediately after constructing an AlarmTimer in this
- // way.
- RepeatingAlarmTimer(const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task);
-
~RepeatingAlarmTimer() override;
};
@@ -128,18 +99,7 @@ class RepeatingAlarmTimer : public AlarmTimer {
// times but not at a regular interval.
class SimpleAlarmTimer : public AlarmTimer {
public:
- // Constructs a basic SimpleAlarmTimer. An AlarmTimer constructed this way
- // requires that Start() is called before Reset() is called.
SimpleAlarmTimer();
-
- // Constructs a SimpleAlarmTimer with pre-populated parameters but does not
- // start it. Useful if |user_task| or |delay| are not going to change.
- // Reset() can be called immediately after constructing an AlarmTimer in this
- // way.
- SimpleAlarmTimer(const tracked_objects::Location& posted_from,
- base::TimeDelta delay,
- const base::Closure& user_task);
-
~SimpleAlarmTimer() override;
};
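
Given the sequence-affinity requirements documented above, a hypothetical
caller now looks like this (SendHeartbeat is illustrative; the current
thread must support base::FileDescriptorWatcher):

  timers::SimpleAlarmTimer timer;
  timer.Start(FROM_HERE, base::TimeDelta::FromMinutes(30),
              base::Bind(&SendHeartbeat));
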
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index a912d934c5..6b45c9d633 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -13,14 +13,11 @@ component("crypto") {
"apple_keychain.h",
"apple_keychain_ios.mm",
"apple_keychain_mac.mm",
- "auto_cbb.h",
"capi_util.cc",
"capi_util.h",
"crypto_export.h",
"cssm_init.cc",
"cssm_init.h",
- "curve25519.cc",
- "curve25519.h",
"ec_private_key.cc",
"ec_private_key.h",
"ec_signature_creator.cc",
@@ -47,8 +44,6 @@ component("crypto") {
"nss_util.cc",
"nss_util.h",
"nss_util_internal.h",
- "openssl_bio_string.cc",
- "openssl_bio_string.h",
"openssl_util.cc",
"openssl_util.h",
"p224.cc",
@@ -84,6 +79,10 @@ component("crypto") {
"//base/third_party/dynamic_annotations",
]
+ public_deps = [
+ "//third_party/boringssl",
+ ]
+
if (!is_mac && !is_ios) {
sources -= [
"apple_keychain.h",
@@ -133,7 +132,6 @@ component("crypto") {
test("crypto_unittests") {
sources = [
"aead_unittest.cc",
- "curve25519_unittest.cc",
"ec_private_key_unittest.cc",
"ec_signature_creator_unittest.cc",
"encryptor_unittest.cc",
@@ -141,7 +139,6 @@ test("crypto_unittests") {
"hmac_unittest.cc",
"nss_key_util_unittest.cc",
"nss_util_unittest.cc",
- "openssl_bio_string_unittest.cc",
"p224_spake_unittest.cc",
"p224_unittest.cc",
"random_unittest.cc",
diff --git a/crypto/apple_keychain.h b/crypto/apple_keychain.h
index 1ea2473547..1037b7eccd 100644
--- a/crypto/apple_keychain.h
+++ b/crypto/apple_keychain.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CRYPTO_KEYCHAIN_MAC_H_
-#define CRYPTO_KEYCHAIN_MAC_H_
+#ifndef CRYPTO_APPLE_KEYCHAIN_H_
+#define CRYPTO_APPLE_KEYCHAIN_H_
#include <Security/Security.h>
@@ -106,4 +106,4 @@ class CRYPTO_EXPORT AppleKeychain {
} // namespace crypto
-#endif // CRYPTO_KEYCHAIN_MAC_H_
+#endif // CRYPTO_APPLE_KEYCHAIN_H_
diff --git a/crypto/auto_cbb.h b/crypto/auto_cbb.h
deleted file mode 100644
index 5206a214f9..0000000000
--- a/crypto/auto_cbb.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CRYPTO_AUTO_CBB_H_
-#define CRYPTO_AUTO_CBB_H_
-
-#include <openssl/bytestring.h>
-
-#include "base/macros.h"
-
-namespace crypto {
-
-// AutoCBB is a wrapper over OpenSSL's CBB type that automatically releases
-// resources when going out of scope.
-class AutoCBB {
- public:
- AutoCBB() { CBB_zero(&cbb_); }
- ~AutoCBB() { CBB_cleanup(&cbb_); }
-
- CBB* get() { return &cbb_; }
-
- void Reset() {
- CBB_cleanup(&cbb_);
- CBB_zero(&cbb_);
- }
-
- private:
- CBB cbb_;
- DISALLOW_COPY_AND_ASSIGN(AutoCBB);
-};
-
-} // namespace crypto
-
-#endif // CRYPTO_AUTO_CBB_H_
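
The deleted wrapper has a drop-in upstream replacement; a sketch assuming
BoringSSL's bssl::ScopedCBB from <openssl/bytestring.h>:

  #include <openssl/bytestring.h>

  bssl::ScopedCBB cbb;
  CBB_init(cbb.get(), 64);  // CBB_cleanup runs automatically on scope exit
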
diff --git a/crypto/crypto.gyp b/crypto/crypto.gyp
deleted file mode 100644
index 8ed2ab2ac9..0000000000
--- a/crypto/crypto.gyp
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- 'crypto.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'crypto',
- 'type': '<(component)',
- 'product_name': 'crcrypto', # Avoid colliding with OpenSSL's libcrypto
- 'dependencies': [
- '../base/base.gyp:base',
- '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- '../third_party/boringssl/boringssl.gyp:boringssl',
- ],
- 'defines': [
- 'CRYPTO_IMPLEMENTATION',
- ],
- 'conditions': [
- [ 'os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
- 'dependencies': [
- '../build/linux/system.gyp:nss',
- ],
- 'export_dependent_settings': [
- '../build/linux/system.gyp:nss',
- ],
- 'conditions': [
- [ 'chromeos==1', {
- 'sources/': [ ['include', '_chromeos\\.cc$'] ]
- },
- ],
- ],
- }],
- [ 'OS != "mac" and OS != "ios"', {
- 'sources!': [
- 'apple_keychain.h',
- 'mock_apple_keychain.cc',
- 'mock_apple_keychain.h',
- ],
- }],
- [ 'os_bsd==1', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ],
- },
- },
- ],
- [ 'OS == "mac"', {
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/Security.framework',
- ],
- },
- }, { # OS != "mac"
- 'sources!': [
- 'cssm_init.cc',
- 'cssm_init.h',
- 'mac_security_services_lock.cc',
- 'mac_security_services_lock.h',
- ],
- }],
- [ 'OS != "win"', {
- 'sources!': [
- 'capi_util.h',
- 'capi_util.cc',
- ],
- }],
- [ 'OS == "win"', {
- 'msvs_disabled_warnings': [
- 4267, # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- ],
- }],
- [ 'use_nss_certs==0', {
- # Some files are built when NSS is used for the platform certificate library.
- 'sources!': [
- 'nss_key_util.cc',
- 'nss_key_util.h',
- 'nss_util.cc',
- 'nss_util.h',
- 'nss_util_internal.h',
- ],
- },],
- ],
- 'sources': [
- '<@(crypto_sources)',
- ],
- },
- {
- 'target_name': 'crypto_unittests',
- 'type': 'executable',
- 'sources': [
- 'aead_unittest.cc',
- 'curve25519_unittest.cc',
- 'ec_private_key_unittest.cc',
- 'ec_signature_creator_unittest.cc',
- 'encryptor_unittest.cc',
- 'hkdf_unittest.cc',
- 'hmac_unittest.cc',
- 'nss_key_util_unittest.cc',
- 'nss_util_unittest.cc',
- 'openssl_bio_string_unittest.cc',
- 'p224_unittest.cc',
- 'p224_spake_unittest.cc',
- 'random_unittest.cc',
- 'rsa_private_key_unittest.cc',
- 'secure_hash_unittest.cc',
- 'sha2_unittest.cc',
- 'signature_creator_unittest.cc',
- 'signature_verifier_unittest.cc',
- 'symmetric_key_unittest.cc',
- ],
- 'dependencies': [
- 'crypto',
- 'crypto_test_support',
- '../base/base.gyp:base',
- '../base/base.gyp:run_all_unittests',
- '../base/base.gyp:test_support_base',
- '../testing/gmock.gyp:gmock',
- '../testing/gtest.gyp:gtest',
- '../third_party/boringssl/boringssl.gyp:boringssl',
- ],
- 'conditions': [
- [ 'use_nss_certs == 1', {
- 'dependencies': [
- '../build/linux/system.gyp:nss',
- ],
- }],
- [ 'use_nss_certs == 0', {
- # Some files are built when NSS is used for the platform certificate library.
- 'sources!': [
- 'nss_key_util_unittest.cc',
- 'nss_util_unittest.cc',
- ],
- }],
- [ 'OS == "win"', {
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [4267, ],
- }],
- ],
- },
- ],
- 'conditions': [
- ['OS == "win" and target_arch=="ia32"', {
- 'targets': [
- {
- 'target_name': 'crypto_nacl_win64',
- # We use the native APIs for the helper.
- 'type': '<(component)',
- 'dependencies': [
- '../base/base.gyp:base_win64',
- '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
- '../third_party/boringssl/boringssl.gyp:boringssl_nacl_win64',
- ],
- 'sources': [
- '<@(nacl_win64_sources)',
- ],
- 'defines': [
- 'CRYPTO_IMPLEMENTATION',
- '<@(nacl_win64_defines)',
- ],
- 'configurations': {
- 'Common_Base': {
- 'msvs_target_platform': 'x64',
- },
- },
- },
- ],
- }],
- ['use_nss_certs==1', {
- 'targets': [
- {
- 'target_name': 'crypto_test_support',
- 'type': 'static_library',
- 'dependencies': [
- '../base/base.gyp:base',
- 'crypto',
- ],
- 'sources': [
- 'scoped_test_nss_db.cc',
- 'scoped_test_nss_db.h',
- 'scoped_test_nss_chromeos_user.cc',
- 'scoped_test_nss_chromeos_user.h',
- 'scoped_test_system_nss_key_slot.cc',
- 'scoped_test_system_nss_key_slot.h',
- ],
- 'conditions': [
- ['use_nss_certs==0', {
- 'sources!': [
- 'scoped_test_nss_db.cc',
- 'scoped_test_nss_db.h',
- ],
- }],
- [ 'chromeos==0', {
- 'sources!': [
- 'scoped_test_nss_chromeos_user.cc',
- 'scoped_test_nss_chromeos_user.h',
- 'scoped_test_system_nss_key_slot.cc',
- 'scoped_test_system_nss_key_slot.h',
- ],
- }],
- ],
- }
- ]}, { # use_nss_certs==0
- 'targets': [
- {
- 'target_name': 'crypto_test_support',
- 'type': 'none',
- 'sources': [],
- }
- ]}],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'crypto_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'crypto_unittests',
- ],
- 'includes': [
- '../build/isolate.gypi',
- ],
- 'sources': [
- 'crypto_unittests.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/crypto/crypto.gypi b/crypto/crypto.gypi
deleted file mode 100644
index dadc0ea2fe..0000000000
--- a/crypto/crypto.gypi
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- # Put all transitive dependencies for Windows HMAC here.
- # This is required so that we can build them for nacl win64.
- 'variables': {
- 'hmac_win64_related_sources': [
- 'crypto_export.h',
- 'hmac.cc',
- 'hmac.h',
- 'openssl_util.cc',
- 'openssl_util.h',
- 'secure_util.cc',
- 'secure_util.h',
- 'symmetric_key.cc',
- 'symmetric_key.h',
- ],
- },
- 'crypto_sources': [
-      # NOTE: all transitive dependencies of HMAC on Windows need
- # to be placed in the source list above.
- '<@(hmac_win64_related_sources)',
- 'aead.cc',
- 'aead.h',
- 'apple_keychain.h',
- 'apple_keychain_ios.mm',
- 'apple_keychain_mac.mm',
- 'auto_cbb.h',
- 'capi_util.cc',
- 'capi_util.h',
- 'cssm_init.cc',
- 'cssm_init.h',
- 'curve25519.cc',
- 'curve25519.h',
- 'ec_private_key.cc',
- 'ec_private_key.h',
- 'ec_signature_creator.cc',
- 'ec_signature_creator.h',
- 'ec_signature_creator_impl.cc',
- 'ec_signature_creator_impl.h',
- 'encryptor.cc',
- 'encryptor.h',
- 'hkdf.cc',
- 'hkdf.h',
- 'mac_security_services_lock.cc',
- 'mac_security_services_lock.h',
- 'mock_apple_keychain.cc',
- 'mock_apple_keychain.h',
- 'mock_apple_keychain_ios.cc',
- 'mock_apple_keychain_mac.cc',
- 'p224_spake.cc',
- 'p224_spake.h',
- 'nss_crypto_module_delegate.h',
- 'nss_key_util.cc',
- 'nss_key_util.h',
- 'nss_util.cc',
- 'nss_util.h',
- 'nss_util_internal.h',
- 'openssl_bio_string.cc',
- 'openssl_bio_string.h',
- 'p224.cc',
- 'p224.h',
- 'random.h',
- 'random.cc',
- 'rsa_private_key.cc',
- 'rsa_private_key.h',
- 'scoped_capi_types.h',
- 'scoped_nss_types.h',
- 'secure_hash.cc',
- 'secure_hash.h',
- 'sha2.cc',
- 'sha2.h',
- 'signature_creator.cc',
- 'signature_creator.h',
- 'signature_verifier.cc',
- 'signature_verifier.h',
- 'wincrypt_shim.h',
- ],
- 'nacl_win64_sources': [
- '<@(hmac_win64_related_sources)',
- 'random.cc',
- 'random.h',
- ],
- }
-}
diff --git a/crypto/crypto_nacl.gyp b/crypto/crypto_nacl.gyp
deleted file mode 100644
index c7c01a8f3e..0000000000
--- a/crypto/crypto_nacl.gyp
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- '../native_client/build/untrusted.gypi',
- 'crypto.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'crypto_nacl',
- 'type': 'none',
- 'variables': {
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libcrypto_nacl.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_pnacl_newlib': 1,
- },
- 'dependencies': [
- '../third_party/boringssl/boringssl_nacl.gyp:boringssl_nacl',
- '../native_client_sdk/native_client_sdk_untrusted.gyp:nacl_io_untrusted',
- ],
- 'defines': [
- 'CRYPTO_IMPLEMENTATION',
- ],
- 'sources': [
- '<@(crypto_sources)',
- ],
- 'sources/': [
-      ['exclude', '_nss\\.(cc|h)$'],
- ['exclude', '^(mock_)?apple_'],
- ['exclude', '^capi_'],
- ['exclude', '^cssm_'],
- ['exclude', '^nss_'],
- ['exclude', '^mac_'],
- ],
- },
- ],
-}
diff --git a/crypto/crypto_unittests.isolate b/crypto/crypto_unittests.isolate
deleted file mode 100644
index de13aa23a7..0000000000
--- a/crypto/crypto_unittests.isolate
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- '../testing/test_env.py',
- '<(PRODUCT_DIR)/crypto_unittests<(EXECUTABLE_SUFFIX)',
- '--brave-new-test-launcher',
- '--test-launcher-bot-mode',
- '--asan=<(asan)',
- '--msan=<(msan)',
- '--tsan=<(tsan)',
- ],
- },
- 'conditions': [
- ['OS=="linux" or OS=="mac" or OS=="win"', {
- 'variables': {
- 'files': [
- '../testing/test_env.py',
- ],
- },
- }],
- ['OS=="mac" and asan==1 and fastbuild==0', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/crypto_unittests.dSYM/',
- ],
- },
- }],
- ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/crypto_unittests.exe.pdb',
- ],
- },
- }],
- ],
- 'includes': [
- '../base/base.isolate',
- ],
-}
diff --git a/crypto/ec_private_key.h b/crypto/ec_private_key.h
index a24219bef5..432019be5d 100644
--- a/crypto/ec_private_key.h
+++ b/crypto/ec_private_key.h
@@ -15,17 +15,7 @@
#include "base/macros.h"
#include "build/build_config.h"
#include "crypto/crypto_export.h"
-
-#if defined(USE_OPENSSL)
-// Forward declaration for openssl/*.h
-typedef struct evp_pkey_st EVP_PKEY;
-#else
-// Forward declaration.
-typedef struct CERTSubjectPublicKeyInfoStr CERTSubjectPublicKeyInfo;
-typedef struct PK11SlotInfoStr PK11SlotInfo;
-typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
-typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
-#endif
+#include "third_party/boringssl/src/include/openssl/base.h"
namespace crypto {
@@ -51,57 +41,30 @@ class CRYPTO_EXPORT ECPrivateKey {
// Creates a new instance by importing an existing key pair.
// The key pair is given as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
- // block and an X.509 SubjectPublicKeyInfo block.
+  // block with an empty password and an X.509 SubjectPublicKeyInfo block.
// Returns nullptr if initialization fails.
//
// This function is deprecated. Use CreateFromPrivateKeyInfo for new code.
// See https://crbug.com/603319.
static std::unique_ptr<ECPrivateKey> CreateFromEncryptedPrivateKeyInfo(
- const std::string& password,
const std::vector<uint8_t>& encrypted_private_key_info,
const std::vector<uint8_t>& subject_public_key_info);
-#if !defined(USE_OPENSSL)
- // Imports the key pair into |slot| and returns in |public_key| and |key|.
- // Shortcut for code that needs to keep a reference directly to NSS types
- // without having to create a ECPrivateKey object and make a copy of them.
- // TODO(mattm): move this function to some NSS util file.
- static bool ImportFromEncryptedPrivateKeyInfo(
- PK11SlotInfo* slot,
- const std::string& password,
- const uint8_t* encrypted_private_key_info,
- size_t encrypted_private_key_info_len,
- CERTSubjectPublicKeyInfo* decoded_spki,
- bool permanent,
- bool sensitive,
- SECKEYPrivateKey** key,
- SECKEYPublicKey** public_key);
-#endif
-
// Returns a copy of the object.
std::unique_ptr<ECPrivateKey> Copy() const;
-#if defined(USE_OPENSSL)
- EVP_PKEY* key() { return key_; }
-#else
- SECKEYPrivateKey* key() { return key_; }
- SECKEYPublicKey* public_key() { return public_key_; }
-#endif
+ EVP_PKEY* key() { return key_.get(); }
// Exports the private key to a PKCS #8 PrivateKeyInfo block.
bool ExportPrivateKey(std::vector<uint8_t>* output) const;
// Exports the private key as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
- // block and the public key as an X.509 SubjectPublicKeyInfo block.
- // The |password| and |iterations| are used as inputs to the key derivation
- // function for generating the encryption key. PKCS #5 recommends a minimum
-  // of 1000 iterations; on modern systems a larger value may be preferable.
+  // block with an empty password. This was historically used as a workaround for
+ // NSS API deficiencies and does not provide security.
//
// This function is deprecated. Use ExportPrivateKey for new code. See
// https://crbug.com/603319.
- bool ExportEncryptedPrivateKey(const std::string& password,
- int iterations,
- std::vector<uint8_t>* output) const;
+ bool ExportEncryptedPrivateKey(std::vector<uint8_t>* output) const;
// Exports the public key to an X.509 SubjectPublicKeyInfo block.
bool ExportPublicKey(std::vector<uint8_t>* output) const;
@@ -113,12 +76,7 @@ class CRYPTO_EXPORT ECPrivateKey {
// Constructor is private. Use one of the Create*() methods above instead.
ECPrivateKey();
-#if defined(USE_OPENSSL)
- EVP_PKEY* key_;
-#else
- SECKEYPrivateKey* key_;
- SECKEYPublicKey* public_key_;
-#endif
+ bssl::UniquePtr<EVP_PKEY> key_;
DISALLOW_COPY_AND_ASSIGN(ECPrivateKey);
};
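
The ECPrivateKey change above collapses the USE_OPENSSL split into a single
bssl::UniquePtr<EVP_PKEY> member. A minimal sketch of the resulting ownership
model, using only calls that appear in this diff (EVP_PKEY_new,
EVP_PKEY_up_ref); the function name is illustrative:

    #include "third_party/boringssl/src/include/openssl/evp.h"

    // Sketch: bssl::UniquePtr ties the EVP_PKEY refcount to C++ scope.
    void SketchSharedKeyOwnership() {
      bssl::UniquePtr<EVP_PKEY> owner(EVP_PKEY_new());
      if (!owner)
        return;
      EVP_PKEY_up_ref(owner.get());                  // take a second reference
      bssl::UniquePtr<EVP_PKEY> alias(owner.get());  // adopted by |alias|
      // Each wrapper drops one reference on destruction; the EVP_PKEY is
      // freed with the last reference, which is why key() can return a raw
      // pointer without transferring ownership.
    }
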
diff --git a/crypto/ec_signature_creator_impl.h b/crypto/ec_signature_creator_impl.h
index 21614f8201..bd06e253a4 100644
--- a/crypto/ec_signature_creator_impl.h
+++ b/crypto/ec_signature_creator_impl.h
@@ -7,6 +7,8 @@
#include <stdint.h>
+#include <vector>
+
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "crypto/ec_signature_creator.h"
diff --git a/crypto/hmac.cc b/crypto/hmac.cc
index af5580bb1d..bf89e182d9 100644
--- a/crypto/hmac.cc
+++ b/crypto/hmac.cc
@@ -7,20 +7,26 @@
#include <stddef.h>
#include <algorithm>
+#include <string>
#include "base/logging.h"
+#include "base/stl_util.h"
+#include "crypto/openssl_util.h"
#include "crypto/secure_util.h"
#include "crypto/symmetric_key.h"
+#include "third_party/boringssl/src/include/openssl/hmac.h"
namespace crypto {
-bool HMAC::Init(SymmetricKey* key) {
- std::string raw_key;
- bool result = key->GetRawKey(&raw_key) && Init(raw_key);
- // Zero out key copy. This might get optimized away, but one can hope.
- // Using std::string to store key info at all is a larger problem.
- std::fill(raw_key.begin(), raw_key.end(), 0);
- return result;
+HMAC::HMAC(HashAlgorithm hash_alg) : hash_alg_(hash_alg), initialized_(false) {
+ // Only SHA-1 and SHA-256 hash algorithms are supported now.
+ DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
+}
+
+HMAC::~HMAC() {
+ // Zero out key copy.
+ key_.assign(key_.size(), 0);
+ base::STLClearObject(&key_);
}
size_t HMAC::DigestLength() const {
@@ -35,6 +41,35 @@ size_t HMAC::DigestLength() const {
}
}
+bool HMAC::Init(const unsigned char* key, size_t key_length) {
+ // Init must not be called more than once on the same HMAC object.
+ DCHECK(!initialized_);
+ initialized_ = true;
+ key_.assign(key, key + key_length);
+ return true;
+}
+
+bool HMAC::Init(SymmetricKey* key) {
+ std::string raw_key;
+ bool result = key->GetRawKey(&raw_key) && Init(raw_key);
+ // Zero out key copy. This might get optimized away, but one can hope.
+ // Using std::string to store key info at all is a larger problem.
+ std::fill(raw_key.begin(), raw_key.end(), 0);
+ return result;
+}
+
+bool HMAC::Sign(const base::StringPiece& data,
+ unsigned char* digest,
+ size_t digest_length) const {
+ DCHECK(initialized_);
+
+ ScopedOpenSSLSafeSizeBuffer<EVP_MAX_MD_SIZE> result(digest, digest_length);
+ return !!::HMAC(hash_alg_ == SHA1 ? EVP_sha1() : EVP_sha256(), key_.data(),
+ key_.size(),
+ reinterpret_cast<const unsigned char*>(data.data()),
+ data.size(), result.safe_buffer(), nullptr);
+}
+
bool HMAC::Verify(const base::StringPiece& data,
const base::StringPiece& digest) const {
if (digest.size() != DigestLength())
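
The rewritten hmac.cc drops HMACPlatformData and signs with BoringSSL's
one-shot ::HMAC(). A hedged sketch of that call shape, assuming only the
functions visible in the hunk above (EVP_sha256, ::HMAC, EVP_MAX_MD_SIZE);
the wrapper name is illustrative:

    #include <stdint.h>

    #include "third_party/boringssl/src/include/openssl/hmac.h"

    // One-shot HMAC-SHA256, mirroring what HMAC::Sign() now does internally.
    // ::HMAC() returns nullptr on failure, hence the pointer check.
    bool Sha256HmacSketch(const uint8_t* key, size_t key_len,
                          const uint8_t* data, size_t data_len,
                          uint8_t out[EVP_MAX_MD_SIZE], unsigned* out_len) {
      return ::HMAC(EVP_sha256(), key, key_len, data, data_len, out,
                    out_len) != nullptr;
    }
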
diff --git a/crypto/hmac.h b/crypto/hmac.h
index ec32ed7cd1..24213338cc 100644
--- a/crypto/hmac.h
+++ b/crypto/hmac.h
@@ -11,6 +11,7 @@
#include <stddef.h>
#include <memory>
+#include <vector>
#include "base/compiler_specific.h"
#include "base/macros.h"
@@ -20,7 +21,6 @@
namespace crypto {
// Simplify the interface and reduce includes by abstracting out the internals.
-struct HMACPlatformData;
class SymmetricKey;
class CRYPTO_EXPORT HMAC {
@@ -86,7 +86,8 @@ class CRYPTO_EXPORT HMAC {
private:
HashAlgorithm hash_alg_;
- std::unique_ptr<HMACPlatformData> plat_;
+ bool initialized_;
+ std::vector<unsigned char> key_;
DISALLOW_COPY_AND_ASSIGN(HMAC);
};
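
Callers are unaffected: apart from the removed HMACPlatformData forward
declaration, the public interface in hmac.h is unchanged. A short usage
sketch of that interface, assuming the existing StringPiece overload of
Init(); names are illustrative:

    #include <string>
    #include <vector>

    #include "crypto/hmac.h"

    // Computes a SHA-256 tag over |msg| and checks a caller-supplied tag.
    bool TagMatchesSketch(const std::string& key, const std::string& msg,
                          const std::string& expected_tag) {
      crypto::HMAC hmac(crypto::HMAC::SHA256);
      std::vector<unsigned char> tag(hmac.DigestLength());
      return hmac.Init(key) && hmac.Sign(msg, tag.data(), tag.size()) &&
             hmac.Verify(msg, expected_tag);  // constant-time comparison
    }
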
diff --git a/crypto/hmac_nss.cc b/crypto/hmac_nss.cc
deleted file mode 100644
index 9d759b5d0c..0000000000
--- a/crypto/hmac_nss.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/hmac.h"
-
-#include <nss.h>
-#include <pk11pub.h>
-#include <stddef.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/nss_util.h"
-#include "crypto/scoped_nss_types.h"
-
-namespace crypto {
-
-struct HMACPlatformData {
- CK_MECHANISM_TYPE mechanism_;
- ScopedPK11Slot slot_;
- ScopedPK11SymKey sym_key_;
-};
-
-HMAC::HMAC(HashAlgorithm hash_alg)
- : hash_alg_(hash_alg), plat_(new HMACPlatformData()) {
- // Only SHA-1 and SHA-256 hash algorithms are supported.
- switch (hash_alg_) {
- case SHA1:
- plat_->mechanism_ = CKM_SHA_1_HMAC;
- break;
- case SHA256:
- plat_->mechanism_ = CKM_SHA256_HMAC;
- break;
- default:
- NOTREACHED() << "Unsupported hash algorithm";
- break;
- }
-}
-
-HMAC::~HMAC() {
-}
-
-bool HMAC::Init(const unsigned char *key, size_t key_length) {
- EnsureNSSInit();
-
- if (plat_->slot_.get()) {
-    // Init must not be called more than once on the same HMAC object.
- NOTREACHED();
- return false;
- }
-
- plat_->slot_.reset(PK11_GetInternalSlot());
- if (!plat_->slot_.get()) {
- NOTREACHED();
- return false;
- }
-
- SECItem key_item;
- key_item.type = siBuffer;
- key_item.data = const_cast<unsigned char*>(key); // NSS API isn't const.
- key_item.len = key_length;
-
- plat_->sym_key_.reset(PK11_ImportSymKey(plat_->slot_.get(),
- plat_->mechanism_,
- PK11_OriginUnwrap,
- CKA_SIGN,
- &key_item,
- NULL));
- if (!plat_->sym_key_.get()) {
- NOTREACHED();
- return false;
- }
-
- return true;
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
- unsigned char* digest,
- size_t digest_length) const {
- if (!plat_->sym_key_.get()) {
- // Init has not been called before Sign.
- NOTREACHED();
- return false;
- }
-
- SECItem param = { siBuffer, NULL, 0 };
- ScopedPK11Context context(PK11_CreateContextBySymKey(plat_->mechanism_,
- CKA_SIGN,
- plat_->sym_key_.get(),
- &param));
- if (!context.get()) {
- NOTREACHED();
- return false;
- }
-
- if (PK11_DigestBegin(context.get()) != SECSuccess) {
- NOTREACHED();
- return false;
- }
-
- if (PK11_DigestOp(context.get(),
- reinterpret_cast<const unsigned char*>(data.data()),
- data.length()) != SECSuccess) {
- NOTREACHED();
- return false;
- }
-
- unsigned int len = 0;
- if (PK11_DigestFinal(context.get(),
- digest, &len, digest_length) != SECSuccess) {
- NOTREACHED();
- return false;
- }
-
- return true;
-}
-
-} // namespace crypto
diff --git a/crypto/nss_crypto_module_delegate.h b/crypto/nss_crypto_module_delegate.h
index 6c1da68161..cf08f2859f 100644
--- a/crypto/nss_crypto_module_delegate.h
+++ b/crypto/nss_crypto_module_delegate.h
@@ -35,7 +35,6 @@ class CryptoModuleBlockingPasswordDelegate {
// user entered.
virtual std::string RequestPassword(const std::string& slot_name, bool retry,
bool* cancelled) = 0;
-
};
// Extends CryptoModuleBlockingPasswordDelegate with the ability to return a
diff --git a/crypto/nss_util.cc b/crypto/nss_util.cc
index 96ee060ca7..5ed2fa0674 100644
--- a/crypto/nss_util.cc
+++ b/crypto/nss_util.cc
@@ -15,6 +15,9 @@
#include <memory>
#include <utility>
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "crypto/nss_util_internal.h"
#if defined(OS_OPENBSD)
@@ -29,6 +32,7 @@
#include <map>
#include <vector>
+#include "base/base_paths.h"
#include "base/bind.h"
#include "base/cpu.h"
#include "base/debug/alias.h"
@@ -38,27 +42,16 @@
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/memory/ptr_util.h"
#include "base/native_library.h"
-#include "base/stl_util.h"
+#include "base/path_service.h"
#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/worker_pool.h"
#include "build/build_config.h"
-
-#if !defined(OS_CHROMEOS)
-#include "base/base_paths.h"
-#include "base/path_service.h"
-#endif
-
-// USE_NSS_CERTS means NSS is used for certificates and platform integration.
-// This requires additional support to manage the platform certificate and key
-// stores.
-#if defined(USE_NSS_CERTS)
-#include "base/synchronization/lock.h"
#include "crypto/nss_crypto_module_delegate.h"
-#endif // defined(USE_NSS_CERTS)
namespace crypto {
@@ -88,7 +81,6 @@ std::string GetNSSErrorMessage() {
return result;
}
-#if defined(USE_NSS_CERTS)
#if !defined(OS_CHROMEOS)
base::FilePath GetDefaultConfigDirectory() {
base::FilePath dir;
@@ -134,13 +126,13 @@ char* PKCS11PasswordFunc(PK11SlotInfo* slot, PRBool retry, void* arg) {
retry != PR_FALSE,
&cancelled);
if (cancelled)
- return NULL;
+ return nullptr;
char* result = PORT_Strdup(password.c_str());
password.replace(0, password.size(), password.size(), 0);
return result;
}
- DLOG(ERROR) << "PK11 password requested with NULL arg";
- return NULL;
+ DLOG(ERROR) << "PK11 password requested with nullptr arg";
+ return nullptr;
}
// NSS creates a local cache of the sqlite database if it detects that the
@@ -150,10 +142,6 @@ char* PKCS11PasswordFunc(PK11SlotInfo* slot, PRBool retry, void* arg) {
// the NSS environment variable NSS_SDB_USE_CACHE to "yes" to override NSS's
// detection when database_dir is on NFS. See http://crbug.com/48585.
//
-// TODO(wtc): port this function to other USE_NSS_CERTS platforms. It is
-// defined only for OS_LINUX and OS_OPENBSD simply because the statfs structure
-// is OS-specific.
-//
// Because this function sets an environment variable it must be run before we
// go multi-threaded.
void UseLocalCacheOfNSSDatabaseIfNFS(const base::FilePath& database_dir) {
@@ -178,15 +166,13 @@ void UseLocalCacheOfNSSDatabaseIfNFS(const base::FilePath& database_dir) {
}
}
-#endif // defined(USE_NSS_CERTS)
-
// A singleton to initialize/deinitialize NSPR.
// Separate from the NSS singleton because we initialize NSPR on the UI thread.
// Now that we're leaking the singleton, we could merge back with the NSS
// singleton.
class NSPRInitSingleton {
private:
- friend struct base::DefaultLazyInstanceTraits<NSPRInitSingleton>;
+ friend struct base::LazyInstanceTraitsBase<NSPRInitSingleton>;
NSPRInitSingleton() {
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
@@ -232,8 +218,8 @@ class ChromeOSUserData {
}
ScopedPK11Slot GetPublicSlot() {
- return ScopedPK11Slot(
- public_slot_ ? PK11_ReferenceSlot(public_slot_.get()) : NULL);
+ return ScopedPK11Slot(public_slot_ ? PK11_ReferenceSlot(public_slot_.get())
+ : nullptr);
}
ScopedPK11Slot GetPrivateSlot(
@@ -278,13 +264,13 @@ class ChromeOSUserData {
};
class ScopedChapsLoadFixup {
- public:
- ScopedChapsLoadFixup();
- ~ScopedChapsLoadFixup();
+ public:
+ ScopedChapsLoadFixup();
+ ~ScopedChapsLoadFixup();
- private:
+ private:
#if defined(COMPONENT_BUILD)
- void *chaps_handle_;
+ void* chaps_handle_;
#endif
};
@@ -360,17 +346,17 @@ class NSSInitSingleton {
DCHECK(!initializing_tpm_token_);
// If EnableTPMTokenForNSS hasn't been called, return false.
if (!tpm_token_enabled_for_nss_) {
- base::MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(callback, false));
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(callback, false));
return;
}
// If everything is already initialized, then return true.
// Note that only |tpm_slot_| is checked, since |chaps_module_| could be
- // NULL in tests while |tpm_slot_| has been set to the test DB.
+ // nullptr in tests while |tpm_slot_| has been set to the test DB.
if (tpm_slot_) {
- base::MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(callback, true));
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::Bind(callback, true));
return;
}
@@ -382,18 +368,15 @@ class NSSInitSingleton {
if (base::WorkerPool::PostTaskAndReply(
FROM_HERE,
base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
- system_slot_id,
- tpm_args_ptr),
+ system_slot_id, tpm_args_ptr),
base::Bind(&NSSInitSingleton::OnInitializedTPMTokenAndSystemSlot,
base::Unretained(this), // NSSInitSingleton is leaky
- callback,
- base::Passed(&tpm_args)),
- true /* task_is_slow */
- )) {
+ callback, base::Passed(&tpm_args)),
+ true /* task_is_slow */)) {
initializing_tpm_token_ = true;
} else {
- base::MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(callback, false));
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(callback, false));
}
}
@@ -507,7 +490,7 @@ class NSSInitSingleton {
"%s %s", kUserNSSDatabaseName, username_hash.c_str());
ScopedPK11Slot public_slot(OpenPersistentNSSDBForPath(db_name, path));
chromeos_user_map_[username_hash] =
- new ChromeOSUserData(std::move(public_slot));
+ base::MakeUnique<ChromeOSUserData>(std::move(public_slot));
return true;
}
@@ -544,15 +527,12 @@ class NSSInitSingleton {
TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
base::WorkerPool::PostTaskAndReply(
FROM_HERE,
- base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
- slot_id,
+ base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread, slot_id,
tpm_args_ptr),
base::Bind(&NSSInitSingleton::OnInitializedTPMForChromeOSUser,
base::Unretained(this), // NSSInitSingleton is leaky
- username_hash,
- base::Passed(&tpm_args)),
- true /* task_is_slow */
- );
+ username_hash, base::Passed(&tpm_args)),
+ true /* task_is_slow */);
}
void OnInitializedTPMForChromeOSUser(
@@ -601,7 +581,7 @@ class NSSInitSingleton {
if (username_hash.empty()) {
DVLOG(2) << "empty username_hash";
if (!callback.is_null()) {
- base::MessageLoop::current()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(callback, base::Passed(ScopedPK11Slot())));
}
return ScopedPK11Slot();
@@ -614,15 +594,14 @@ class NSSInitSingleton {
void CloseChromeOSUserForTesting(const std::string& username_hash) {
DCHECK(thread_checker_.CalledOnValidThread());
- ChromeOSUserMap::iterator i = chromeos_user_map_.find(username_hash);
+ auto i = chromeos_user_map_.find(username_hash);
DCHECK(i != chromeos_user_map_.end());
- delete i->second;
chromeos_user_map_.erase(i);
}
void SetSystemKeySlotForTesting(ScopedPK11Slot slot) {
// Ensure that a previous value of test_system_slot_ is not overwritten.
- // Unsetting, i.e. setting a NULL, however is allowed.
+ // Unsetting, i.e. setting a nullptr, however is allowed.
DCHECK(!slot || !test_system_slot_);
test_system_slot_ = std::move(slot);
if (test_system_slot_) {
@@ -658,7 +637,7 @@ class NSSInitSingleton {
// TODO(mattm): chromeos::TPMTokenloader always calls
// InitializeTPMTokenAndSystemSlot with slot 0. If the system slot is
// disabled, tpm_slot_ will be the first user's slot instead. Can that be
- // detected and return NULL instead?
+ // detected and return nullptr instead?
base::Closure wrapped_callback;
if (!callback.is_null()) {
@@ -673,20 +652,18 @@ class NSSInitSingleton {
}
#endif
-#if defined(USE_NSS_CERTS)
base::Lock* write_lock() {
return &write_lock_;
}
-#endif // defined(USE_NSS_CERTS)
private:
- friend struct base::DefaultLazyInstanceTraits<NSSInitSingleton>;
+ friend struct base::LazyInstanceTraitsBase<NSSInitSingleton>;
NSSInitSingleton()
: tpm_token_enabled_for_nss_(false),
initializing_tpm_token_(false),
- chaps_module_(NULL),
- root_(NULL) {
+ chaps_module_(nullptr),
+ root_(nullptr) {
// It's safe to construct on any thread, since LazyInstance will prevent any
// other threads from accessing until the constructor is done.
thread_checker_.DetachFromThread();
@@ -709,73 +686,53 @@ class NSSInitSingleton {
}
SECStatus status = SECFailure;
- bool nodb_init = false;
-
-#if !defined(USE_NSS_CERTS)
- // Use the system certificate store, so initialize NSS without database.
- nodb_init = true;
-#endif
-
- if (nodb_init) {
- status = NSS_NoDB_Init(NULL);
- if (status != SECSuccess) {
- CrashOnNSSInitFailure();
- return;
- }
-#if defined(OS_IOS)
- root_ = InitDefaultRootCerts();
-#endif // defined(OS_IOS)
- } else {
-#if defined(USE_NSS_CERTS)
- base::FilePath database_dir = GetInitialConfigDirectory();
- if (!database_dir.empty()) {
- // This duplicates the work which should have been done in
- // EarlySetupForNSSInit. However, this function is idempotent so
- // there's no harm done.
- UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
-
- // Initialize with a persistent database (likely, ~/.pki/nssdb).
- // Use "sql:" which can be shared by multiple processes safely.
- std::string nss_config_dir =
- base::StringPrintf("sql:%s", database_dir.value().c_str());
+ base::FilePath database_dir = GetInitialConfigDirectory();
+ if (!database_dir.empty()) {
+ // This duplicates the work which should have been done in
+ // EarlySetupForNSSInit. However, this function is idempotent so
+ // there's no harm done.
+ UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
+
+ // Initialize with a persistent database (likely, ~/.pki/nssdb).
+ // Use "sql:" which can be shared by multiple processes safely.
+ std::string nss_config_dir =
+ base::StringPrintf("sql:%s", database_dir.value().c_str());
#if defined(OS_CHROMEOS)
- status = NSS_Init(nss_config_dir.c_str());
+ status = NSS_Init(nss_config_dir.c_str());
#else
- status = NSS_InitReadWrite(nss_config_dir.c_str());
+ status = NSS_InitReadWrite(nss_config_dir.c_str());
#endif
- if (status != SECSuccess) {
- LOG(ERROR) << "Error initializing NSS with a persistent "
- "database (" << nss_config_dir
- << "): " << GetNSSErrorMessage();
- }
- }
if (status != SECSuccess) {
- VLOG(1) << "Initializing NSS without a persistent database.";
- status = NSS_NoDB_Init(NULL);
- if (status != SECSuccess) {
- CrashOnNSSInitFailure();
- return;
- }
+ LOG(ERROR) << "Error initializing NSS with a persistent "
+ "database (" << nss_config_dir
+ << "): " << GetNSSErrorMessage();
}
-
- PK11_SetPasswordFunc(PKCS11PasswordFunc);
-
- // If we haven't initialized the password for the NSS databases,
- // initialize an empty-string password so that we don't need to
- // log in.
- PK11SlotInfo* slot = PK11_GetInternalKeySlot();
- if (slot) {
- // PK11_InitPin may write to the keyDB, but no other thread can use NSS
- // yet, so we don't need to lock.
- if (PK11_NeedUserInit(slot))
- PK11_InitPin(slot, NULL, NULL);
- PK11_FreeSlot(slot);
+ }
+ if (status != SECSuccess) {
+ VLOG(1) << "Initializing NSS without a persistent database.";
+ status = NSS_NoDB_Init(nullptr);
+ if (status != SECSuccess) {
+ CrashOnNSSInitFailure();
+ return;
}
+ }
- root_ = InitDefaultRootCerts();
-#endif // defined(USE_NSS_CERTS)
+ PK11_SetPasswordFunc(PKCS11PasswordFunc);
+
+ // If we haven't initialized the password for the NSS databases,
+ // initialize an empty-string password so that we don't need to
+ // log in.
+ PK11SlotInfo* slot = PK11_GetInternalKeySlot();
+ if (slot) {
+ // PK11_InitPin may write to the keyDB, but no other thread can use NSS
+ // yet, so we don't need to lock.
+ if (PK11_NeedUserInit(slot))
+ PK11_InitPin(slot, nullptr, nullptr);
+ PK11_FreeSlot(slot);
}
+ root_ = InitDefaultRootCerts();
+
// Disable MD5 certificate signatures. (They are disabled by default in
// NSS 3.14.)
NSS_SetAlgorithmPolicy(SEC_OID_MD5, 0, NSS_USE_ALG_IN_CERT_SIGNATURE);
@@ -788,18 +745,18 @@ class NSSInitSingleton {
// down.
~NSSInitSingleton() {
#if defined(OS_CHROMEOS)
- STLDeleteValues(&chromeos_user_map_);
+ chromeos_user_map_.clear();
#endif
tpm_slot_.reset();
if (root_) {
SECMOD_UnloadUserModule(root_);
SECMOD_DestroyModule(root_);
- root_ = NULL;
+ root_ = nullptr;
}
if (chaps_module_) {
SECMOD_UnloadUserModule(chaps_module_);
SECMOD_DestroyModule(chaps_module_);
- chaps_module_ = NULL;
+ chaps_module_ = nullptr;
}
SECStatus status = NSS_Shutdown();
@@ -812,14 +769,14 @@ class NSSInitSingleton {
// Load nss's built-in root certs.
SECMODModule* InitDefaultRootCerts() {
- SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", NULL);
+ SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", nullptr);
if (root)
return root;
// Aw, snap. Can't find/load root cert shared library.
// This will make it hard to talk to anybody via https.
// TODO(mattm): Re-add the NOTREACHED here when crbug.com/310972 is fixed.
- return NULL;
+ return nullptr;
}
// Load the given module for this NSS session.
@@ -835,17 +792,17 @@ class NSSInitSingleton {
// https://bugzilla.mozilla.org/show_bug.cgi?id=642546 was filed
// on NSS codebase to address this.
SECMODModule* module = SECMOD_LoadUserModule(
- const_cast<char*>(modparams.c_str()), NULL, PR_FALSE);
+ const_cast<char*>(modparams.c_str()), nullptr, PR_FALSE);
if (!module) {
LOG(ERROR) << "Error loading " << name << " module into NSS: "
<< GetNSSErrorMessage();
- return NULL;
+ return nullptr;
}
if (!module->loaded) {
LOG(ERROR) << "After loading " << name << ", loaded==false: "
<< GetNSSErrorMessage();
SECMOD_DestroyModule(module);
- return NULL;
+ return nullptr;
}
return module;
}
@@ -858,15 +815,12 @@ class NSSInitSingleton {
crypto::ScopedPK11Slot tpm_slot_;
SECMODModule* root_;
#if defined(OS_CHROMEOS)
- typedef std::map<std::string, ChromeOSUserData*> ChromeOSUserMap;
- ChromeOSUserMap chromeos_user_map_;
+ std::map<std::string, std::unique_ptr<ChromeOSUserData>> chromeos_user_map_;
ScopedPK11Slot test_system_slot_;
#endif
-#if defined(USE_NSS_CERTS)
// TODO(davidben): When https://bugzilla.mozilla.org/show_bug.cgi?id=564011
// is fixed, we will no longer need the lock.
base::Lock write_lock_;
-#endif // defined(USE_NSS_CERTS)
base::ThreadChecker thread_checker_;
};
@@ -875,7 +829,6 @@ base::LazyInstance<NSSInitSingleton>::Leaky
g_nss_singleton = LAZY_INSTANCE_INITIALIZER;
} // namespace
-#if defined(USE_NSS_CERTS)
ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
const std::string& description) {
const std::string modspec =
@@ -885,7 +838,7 @@ ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
PK11SlotInfo* db_slot = SECMOD_OpenUserDB(modspec.c_str());
if (db_slot) {
if (PK11_NeedUserInit(db_slot))
- PK11_InitPin(db_slot, NULL, NULL);
+ PK11_InitPin(db_slot, nullptr, nullptr);
} else {
LOG(ERROR) << "Error opening persistent database (" << modspec
<< "): " << GetNSSErrorMessage();
@@ -898,7 +851,6 @@ void EarlySetupForNSSInit() {
if (!database_dir.empty())
UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
}
-#endif
void EnsureNSPRInit() {
g_nspr_singleton.Get();
@@ -916,13 +868,12 @@ bool CheckNSSVersion(const char* version) {
return !!NSS_VersionCheck(version);
}
-#if defined(USE_NSS_CERTS)
base::Lock* GetNSSWriteLock() {
return g_nss_singleton.Get().write_lock();
}
AutoNSSWriteLock::AutoNSSWriteLock() : lock_(GetNSSWriteLock()) {
- // May be NULL if the lock is not needed in our version of NSS.
+ // May be nullptr if the lock is not needed in our version of NSS.
if (lock_)
lock_->Acquire();
}
@@ -942,7 +893,6 @@ AutoSECMODListReadLock::AutoSECMODListReadLock()
AutoSECMODListReadLock::~AutoSECMODListReadLock() {
SECMOD_ReleaseReadLock(lock_);
}
-#endif // defined(USE_NSS_CERTS)
#if defined(OS_CHROMEOS)
ScopedPK11Slot GetSystemNSSKeySlot(
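
Several hunks above replace base::MessageLoop::current()->PostTask() with
base::ThreadTaskRunnerHandle::Get()->PostTask(), matching the new includes
at the top of the file. A minimal sketch of the replacement pattern, modeled
on the TPM-token callbacks in this file (the function name is illustrative):

    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/location.h"
    #include "base/threading/thread_task_runner_handle.h"

    // Posts |callback| with |ok| to the current thread's task runner.
    // ThreadTaskRunnerHandle works on any thread with a registered task
    // runner, not only threads that run a MessageLoop directly.
    void PostResultSketch(const base::Callback<void(bool)>& callback,
                          bool ok) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                    base::Bind(callback, ok));
    }
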
diff --git a/crypto/nss_util.h b/crypto/nss_util.h
index a8b57ff9f0..5c34fc8f07 100644
--- a/crypto/nss_util.h
+++ b/crypto/nss_util.h
@@ -14,7 +14,6 @@
#include "crypto/crypto_export.h"
namespace base {
-class FilePath;
class Lock;
class Time;
} // namespace base
diff --git a/crypto/nss_util_internal.h b/crypto/nss_util_internal.h
index 697e376e5a..080ac1026d 100644
--- a/crypto/nss_util_internal.h
+++ b/crypto/nss_util_internal.h
@@ -7,6 +7,8 @@
#include <secmodt.h>
+#include <string>
+
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
diff --git a/crypto/openssl_bio_string.cc b/crypto/openssl_bio_string.cc
deleted file mode 100644
index 48805001ef..0000000000
--- a/crypto/openssl_bio_string.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/openssl_bio_string.h"
-
-#include <openssl/bio.h>
-#include <string.h>
-
-namespace crypto {
-
-namespace {
-
-int bio_string_write(BIO* bio, const char* data, int len) {
- reinterpret_cast<std::string*>(bio->ptr)->append(data, len);
- return len;
-}
-
-int bio_string_puts(BIO* bio, const char* data) {
- // Note: unlike puts(), BIO_puts does not add a newline.
- return bio_string_write(bio, data, strlen(data));
-}
-
-long bio_string_ctrl(BIO* bio, int cmd, long num, void* ptr) {
- std::string* str = reinterpret_cast<std::string*>(bio->ptr);
- switch (cmd) {
- case BIO_CTRL_RESET:
- str->clear();
- return 1;
- case BIO_C_FILE_SEEK:
- return -1;
- case BIO_C_FILE_TELL:
- return str->size();
- case BIO_CTRL_FLUSH:
- return 1;
- default:
- return 0;
- }
-}
-
-int bio_string_new(BIO* bio) {
- bio->ptr = NULL;
- bio->init = 0;
- return 1;
-}
-
-int bio_string_free(BIO* bio) {
- // The string is owned by the caller, so there's nothing to do here.
- return bio != NULL;
-}
-
-BIO_METHOD bio_string_methods = {
- // TODO(mattm): Should add some type number too? (bio.h uses 1-24)
- BIO_TYPE_SOURCE_SINK,
- "bio_string",
- bio_string_write,
- NULL, /* read */
- bio_string_puts,
- NULL, /* gets */
- bio_string_ctrl,
- bio_string_new,
- bio_string_free,
- NULL, /* callback_ctrl */
-};
-
-} // namespace
-
-BIO* BIO_new_string(std::string* out) {
- BIO* bio = BIO_new(&bio_string_methods);
- if (!bio)
- return bio;
- bio->ptr = out;
- bio->init = 1;
- return bio;
-}
-
-} // namespace crypto
diff --git a/crypto/openssl_bio_string.h b/crypto/openssl_bio_string.h
deleted file mode 100644
index ca46c12de8..0000000000
--- a/crypto/openssl_bio_string.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CRYPTO_OPENSSL_BIO_STRING_H_
-#define CRYPTO_OPENSSL_BIO_STRING_H_
-
-#include <string>
-
-#include "crypto/crypto_export.h"
-
-// From <openssl/bio.h>
-typedef struct bio_st BIO;
-
-namespace crypto {
-
-// Creates a new BIO that can be used with OpenSSL's various output functions,
-// and which will write all output directly into |out|. This is primarily
-// intended as a utility to reduce the amount of copying and separate
-// allocations when performing extensive string modifications or streaming
-// within OpenSSL.
-//
-// Note: |out| must remain valid for the duration of the BIO.
-CRYPTO_EXPORT BIO* BIO_new_string(std::string* out);
-
-} // namespace crypto
-
-#endif // CRYPTO_OPENSSL_BIO_STRING_H_
-
diff --git a/crypto/openssl_bio_string_unittest.cc b/crypto/openssl_bio_string_unittest.cc
deleted file mode 100644
index 9dfa0e70f7..0000000000
--- a/crypto/openssl_bio_string_unittest.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/openssl_bio_string.h"
-
-#include <openssl/bio.h>
-
-#include "crypto/scoped_openssl_types.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace crypto {
-
-TEST(OpenSSLBIOString, TestWrite) {
- std::string s;
- const std::string expected1("a one\nb 2\n");
- const std::string expected2("c d e f");
- const std::string expected3("g h i");
- {
- ScopedBIO bio(BIO_new_string(&s));
- ASSERT_TRUE(bio.get());
-
- EXPECT_EQ(static_cast<int>(expected1.size()),
- BIO_printf(bio.get(), "a %s\nb %i\n", "one", 2));
- EXPECT_EQ(expected1, s);
-
- EXPECT_EQ(1, BIO_flush(bio.get()));
- EXPECT_EQ(expected1, s);
-
- EXPECT_EQ(static_cast<int>(expected2.size()),
- BIO_write(bio.get(), expected2.data(), expected2.size()));
- EXPECT_EQ(expected1 + expected2, s);
-
- EXPECT_EQ(static_cast<int>(expected3.size()),
- BIO_puts(bio.get(), expected3.c_str()));
- EXPECT_EQ(expected1 + expected2 + expected3, s);
- }
- EXPECT_EQ(expected1 + expected2 + expected3, s);
-}
-
-TEST(OpenSSLBIOString, TestReset) {
- std::string s;
- const std::string expected1("a b c\n");
- const std::string expected2("d e f g\n");
- {
- ScopedBIO bio(BIO_new_string(&s));
- ASSERT_TRUE(bio.get());
-
- EXPECT_EQ(static_cast<int>(expected1.size()),
- BIO_write(bio.get(), expected1.data(), expected1.size()));
- EXPECT_EQ(expected1, s);
-
- EXPECT_EQ(1, BIO_reset(bio.get()));
- EXPECT_EQ(std::string(), s);
-
- EXPECT_EQ(static_cast<int>(expected2.size()),
- BIO_write(bio.get(), expected2.data(), expected2.size()));
- EXPECT_EQ(expected2, s);
- }
- EXPECT_EQ(expected2, s);
-}
-
-} // namespace crypto
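
openssl_bio_string and its test are removed without an in-tree replacement in
this diff. For reference, a hedged sketch of the closest stock facility, a
BoringSSL memory BIO; BIO_s_mem and BIO_mem_contents are assumed to be
available from BoringSSL's bio.h, and the function name is illustrative:

    #include <string>

    #include "third_party/boringssl/src/include/openssl/bio.h"

    // Writes formatted output into an in-memory BIO, then copies it out.
    bool PrintToStringSketch(std::string* out) {
      bssl::UniquePtr<BIO> bio(BIO_new(BIO_s_mem()));
      if (!bio || BIO_printf(bio.get(), "a %s\nb %i\n", "one", 2) <= 0)
        return false;
      const uint8_t* data;
      size_t len;
      if (!BIO_mem_contents(bio.get(), &data, &len))
        return false;
      out->assign(reinterpret_cast<const char*>(data), len);
      return true;
    }
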
diff --git a/crypto/openssl_util.cc b/crypto/openssl_util.cc
index 78c6cbbb65..2349d42db3 100644
--- a/crypto/openssl_util.cc
+++ b/crypto/openssl_util.cc
@@ -14,6 +14,8 @@
#include <stddef.h>
#include <stdint.h>
+#include <string>
+
#include "base/logging.h"
#include "base/strings/string_piece.h"
@@ -47,7 +49,7 @@ void EnsureOpenSSLInit() {
}
void ClearOpenSSLERRStack(const tracked_objects::Location& location) {
- if (logging::DEBUG_MODE && VLOG_IS_ON(1)) {
+ if (DCHECK_IS_ON() && VLOG_IS_ON(1)) {
uint32_t error_num = ERR_peek_error();
if (error_num == 0)
return;
diff --git a/crypto/openssl_util.h b/crypto/openssl_util.h
index d608cdeb4b..54f06d337f 100644
--- a/crypto/openssl_util.h
+++ b/crypto/openssl_util.h
@@ -15,7 +15,7 @@ namespace crypto {
// Provides a buffer of at least MIN_SIZE bytes, for use when calling OpenSSL's
// SHA256, HMAC, etc functions, adapting the buffer sizing rules to meet those
-// of the our base wrapper APIs.
+// of our base wrapper APIs.
// This allows the library to write directly to the caller's buffer if it is of
// sufficient size, but if not it will write to temporary |min_sized_buffer_|
// of required size and then its content is automatically copied out on
diff --git a/crypto/p224_spake.cc b/crypto/p224_spake.cc
index 1574105372..7275a45bc6 100644
--- a/crypto/p224_spake.cc
+++ b/crypto/p224_spake.cc
@@ -5,14 +5,14 @@
// This code implements SPAKE2, a variant of EKE:
// http://www.di.ens.fr/~pointche/pub.php?reference=AbPo04
-#include <crypto/p224_spake.h>
+#include "crypto/p224_spake.h"
#include <algorithm>
-#include <base/logging.h>
-#include <crypto/p224.h>
-#include <crypto/random.h>
-#include <crypto/secure_util.h>
+#include "base/logging.h"
+#include "crypto/p224.h"
+#include "crypto/random.h"
+#include "crypto/secure_util.h"
namespace {
@@ -27,6 +27,9 @@ namespace {
// #include <openssl/obj_mac.h>
// #include <openssl/sha.h>
//
+// // Silence a presubmit.
+// #define PRINTF printf
+//
// static const char kSeed1[] = "P224 point generation seed (M)";
// static const char kSeed2[] = "P224 point generation seed (N)";
//
@@ -52,7 +55,7 @@ namespace {
// EC_POINT_get_affine_coordinates_GFp(p224, p, &x, &y, NULL);
// char* x_str = BN_bn2hex(&x);
// char* y_str = BN_bn2hex(&y);
-// printf("Found after %u iterations:\n%s\n%s\n", i, x_str, y_str);
+// PRINTF("Found after %u iterations:\n%s\n%s\n", i, x_str, y_str);
// OPENSSL_free(x_str);
// OPENSSL_free(y_str);
// BN_free(&x);
diff --git a/crypto/p224_spake.h b/crypto/p224_spake.h
index f9a44e70e1..b5cc70ae9e 100644
--- a/crypto/p224_spake.h
+++ b/crypto/p224_spake.h
@@ -5,12 +5,14 @@
#ifndef CRYPTO_P224_SPAKE_H_
#define CRYPTO_P224_SPAKE_H_
-#include <crypto/p224.h>
-#include <crypto/sha2.h>
#include <stdint.h>
+#include <string>
+
#include "base/gtest_prod_util.h"
#include "base/strings/string_piece.h"
+#include "crypto/p224.h"
+#include "crypto/sha2.h"
namespace crypto {
diff --git a/crypto/p224_spake_unittest.cc b/crypto/p224_spake_unittest.cc
index 3bca430bea..5ecb6fd3b6 100644
--- a/crypto/p224_spake_unittest.cc
+++ b/crypto/p224_spake_unittest.cc
@@ -127,7 +127,7 @@ TEST(MutualAuth, Fuzz) {
// We'll only be testing small values of i, but we don't want that to bias
// the test coverage. So we disperse the value of i by multiplying by the
- // FNV, 32-bit prime, producing a poor-man's PRNG.
+ // FNV, 32-bit prime, producing a simplistic PRNG.
const uint32_t rand = i * 16777619;
for (unsigned round = 0;; round++) {
diff --git a/crypto/p224_unittest.cc b/crypto/p224_unittest.cc
index faa08ebd36..8cfe6e7dc0 100644
--- a/crypto/p224_unittest.cc
+++ b/crypto/p224_unittest.cc
@@ -778,8 +778,8 @@ TEST(P224, ExternalToInternalAndBack) {
const std::string external = point.ToString();
ASSERT_EQ(external.size(), 56u);
- EXPECT_TRUE(memcmp(external.data(), kBasePointExternal,
- sizeof(kBasePointExternal)) == 0);
+ EXPECT_EQ(0, memcmp(external.data(), kBasePointExternal,
+ sizeof(kBasePointExternal)));
}
TEST(P224, ScalarBaseMult) {
@@ -789,8 +789,8 @@ TEST(P224, ScalarBaseMult) {
p224::ScalarBaseMult(kNISTTestVectors[i].scalar, &point);
const std::string external = point.ToString();
ASSERT_EQ(external.size(), 56u);
- EXPECT_TRUE(memcmp(external.data(), kNISTTestVectors[i].affine,
- external.size()) == 0);
+ EXPECT_EQ(0, memcmp(external.data(), kNISTTestVectors[i].affine,
+ external.size()));
}
}
@@ -804,9 +804,9 @@ TEST(P224, Addition) {
p224::Negate(b, &minus_b);
p224::Add(a, b, &sum);
- EXPECT_TRUE(memcmp(&sum, &a, sizeof(sum)) != 0);
+ EXPECT_NE(0, memcmp(&sum, &a, sizeof(sum)));
p224::Add(minus_b, sum, &a_again);
- EXPECT_TRUE(a_again.ToString() == a.ToString());
+ EXPECT_EQ(a_again.ToString(), a.ToString());
}
TEST(P224, Infinity) {
@@ -816,7 +816,7 @@ TEST(P224, Infinity) {
// Test that x^0 = ∞.
Point a;
p224::ScalarBaseMult(reinterpret_cast<const uint8_t*>(zeros), &a);
- EXPECT_TRUE(memcmp(zeros, a.ToString().data(), sizeof(zeros)) == 0);
+ EXPECT_EQ(0, memcmp(zeros, a.ToString().data(), sizeof(zeros)));
// We shouldn't allow ∞ to be imported.
EXPECT_FALSE(a.SetFromString(std::string(zeros, sizeof(zeros))));
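
The EXPECT_TRUE(memcmp(...) == 0) to EXPECT_EQ(0, memcmp(...)) rewrites are
behavior-neutral; the gain is diagnostics, since EXPECT_EQ prints both
operands on failure while EXPECT_TRUE only reports that the expression was
false. A toy illustration (the test name is made up):

    #include <stdint.h>
    #include <string.h>

    #include "testing/gtest/include/gtest/gtest.h"

    TEST(MemcmpStyle, ExpectEqPrintsOperands) {
      const uint8_t a[4] = {1, 2, 3, 4};
      const uint8_t b[4] = {1, 2, 3, 4};
      // On failure this reports the expected value (0) and the actual
      // memcmp() result, not just a bare assertion failure.
      EXPECT_EQ(0, memcmp(a, b, sizeof(a)));
    }
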
diff --git a/crypto/random.h b/crypto/random.h
index 002616bd30..61cde80719 100644
--- a/crypto/random.h
+++ b/crypto/random.h
@@ -18,4 +18,4 @@ CRYPTO_EXPORT void RandBytes(void *bytes, size_t length);
}
-#endif
+#endif // CRYPTO_RANDOM_H_
diff --git a/crypto/random_unittest.cc b/crypto/random_unittest.cc
index caee512068..dfdcfd5077 100644
--- a/crypto/random_unittest.cc
+++ b/crypto/random_unittest.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <string>
+
#include "base/strings/string_util.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/crypto/rsa_private_key.cc b/crypto/rsa_private_key.cc
index c546c91ead..075f5e4041 100644
--- a/crypto/rsa_private_key.cc
+++ b/crypto/rsa_private_key.cc
@@ -4,385 +4,110 @@
#include "crypto/rsa_private_key.h"
-#include <stddef.h>
#include <stdint.h>
-#include <algorithm>
+#include <memory>
+#include <utility>
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_util.h"
-
-// This file manually encodes and decodes RSA private keys using PrivateKeyInfo
-// from PKCS #8 and RSAPrivateKey from PKCS #1. These structures are:
-//
-// PrivateKeyInfo ::= SEQUENCE {
-// version Version,
-// privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
-// privateKey PrivateKey,
-// attributes [0] IMPLICIT Attributes OPTIONAL
-// }
-//
-// RSAPrivateKey ::= SEQUENCE {
-// version Version,
-// modulus INTEGER,
-// publicExponent INTEGER,
-// privateExponent INTEGER,
-// prime1 INTEGER,
-// prime2 INTEGER,
-// exponent1 INTEGER,
-// exponent2 INTEGER,
-// coefficient INTEGER
-// }
-
-namespace {
-// Helper for error handling during key import.
-#define READ_ASSERT(truth) \
- if (!(truth)) { \
- NOTREACHED(); \
- return false; \
- }
-} // namespace
+#include "crypto/openssl_util.h"
+#include "third_party/boringssl/src/include/openssl/bn.h"
+#include "third_party/boringssl/src/include/openssl/bytestring.h"
+#include "third_party/boringssl/src/include/openssl/evp.h"
+#include "third_party/boringssl/src/include/openssl/mem.h"
+#include "third_party/boringssl/src/include/openssl/rsa.h"
namespace crypto {
-const uint8_t PrivateKeyInfoCodec::kRsaAlgorithmIdentifier[] = {
- 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86,
- 0xF7, 0x0D, 0x01, 0x01, 0x01, 0x05, 0x00};
-
-PrivateKeyInfoCodec::PrivateKeyInfoCodec(bool big_endian)
- : big_endian_(big_endian) {}
-
-PrivateKeyInfoCodec::~PrivateKeyInfoCodec() {}
-
-bool PrivateKeyInfoCodec::Export(std::vector<uint8_t>* output) {
- std::list<uint8_t> content;
-
- // Version (always zero)
- uint8_t version = 0;
-
- PrependInteger(coefficient_, &content);
- PrependInteger(exponent2_, &content);
- PrependInteger(exponent1_, &content);
- PrependInteger(prime2_, &content);
- PrependInteger(prime1_, &content);
- PrependInteger(private_exponent_, &content);
- PrependInteger(public_exponent_, &content);
- PrependInteger(modulus_, &content);
- PrependInteger(&version, 1, &content);
- PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
- PrependTypeHeaderAndLength(kOctetStringTag, content.size(), &content);
-
- // RSA algorithm OID
- for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
- content.push_front(kRsaAlgorithmIdentifier[i - 1]);
-
- PrependInteger(&version, 1, &content);
- PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+// static
+std::unique_ptr<RSAPrivateKey> RSAPrivateKey::Create(uint16_t num_bits) {
+ OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  // Copy everything into the output.
- output->reserve(content.size());
- output->assign(content.begin(), content.end());
+ bssl::UniquePtr<RSA> rsa_key(RSA_new());
+ bssl::UniquePtr<BIGNUM> bn(BN_new());
+ if (!rsa_key.get() || !bn.get() || !BN_set_word(bn.get(), 65537L))
+ return nullptr;
- return true;
-}
-
-bool PrivateKeyInfoCodec::ExportPublicKeyInfo(std::vector<uint8_t>* output) {
- // Create a sequence with the modulus (n) and public exponent (e).
- std::vector<uint8_t> bit_string;
- if (!ExportPublicKey(&bit_string))
- return false;
-
- // Add the sequence as the contents of a bit string.
- std::list<uint8_t> content;
- PrependBitString(&bit_string[0], static_cast<int>(bit_string.size()),
- &content);
-
- // Add the RSA algorithm OID.
- for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
- content.push_front(kRsaAlgorithmIdentifier[i - 1]);
-
- // Finally, wrap everything in a sequence.
- PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
-
- // Copy everything into the output.
- output->reserve(content.size());
- output->assign(content.begin(), content.end());
-
- return true;
-}
-
-bool PrivateKeyInfoCodec::ExportPublicKey(std::vector<uint8_t>* output) {
- // Create a sequence with the modulus (n) and public exponent (e).
- std::list<uint8_t> content;
- PrependInteger(&public_exponent_[0],
- static_cast<int>(public_exponent_.size()),
- &content);
- PrependInteger(&modulus_[0], static_cast<int>(modulus_.size()), &content);
- PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
-
- // Copy everything into the output.
- output->reserve(content.size());
- output->assign(content.begin(), content.end());
-
- return true;
-}
-
-bool PrivateKeyInfoCodec::Import(const std::vector<uint8_t>& input) {
- if (input.empty()) {
- return false;
- }
-
- // Parse the private key info up to the public key values, ignoring
- // the subsequent private key values.
- uint8_t* src = const_cast<uint8_t*>(&input.front());
- uint8_t* end = src + input.size();
- if (!ReadSequence(&src, end) ||
- !ReadVersion(&src, end) ||
- !ReadAlgorithmIdentifier(&src, end) ||
- !ReadTypeHeaderAndLength(&src, end, kOctetStringTag, NULL) ||
- !ReadSequence(&src, end) ||
- !ReadVersion(&src, end) ||
- !ReadInteger(&src, end, &modulus_))
- return false;
-
- int mod_size = modulus_.size();
- READ_ASSERT(mod_size % 2 == 0);
- int primes_size = mod_size / 2;
-
- if (!ReadIntegerWithExpectedSize(&src, end, 4, &public_exponent_) ||
- !ReadIntegerWithExpectedSize(&src, end, mod_size, &private_exponent_) ||
- !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime1_) ||
- !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime2_) ||
- !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent1_) ||
- !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent2_) ||
- !ReadIntegerWithExpectedSize(&src, end, primes_size, &coefficient_))
- return false;
-
- READ_ASSERT(src == end);
-
-
- return true;
-}
+ if (!RSA_generate_key_ex(rsa_key.get(), num_bits, bn.get(), nullptr))
+ return nullptr;
-void PrivateKeyInfoCodec::PrependInteger(const std::vector<uint8_t>& in,
- std::list<uint8_t>* out) {
- uint8_t* ptr = const_cast<uint8_t*>(&in.front());
- PrependIntegerImpl(ptr, in.size(), out, big_endian_);
-}
+ std::unique_ptr<RSAPrivateKey> result(new RSAPrivateKey);
+ result->key_.reset(EVP_PKEY_new());
+ if (!result->key_ || !EVP_PKEY_set1_RSA(result->key_.get(), rsa_key.get()))
+ return nullptr;
-// Helper to prepend an ASN.1 integer.
-void PrivateKeyInfoCodec::PrependInteger(uint8_t* val,
- int num_bytes,
- std::list<uint8_t>* data) {
- PrependIntegerImpl(val, num_bytes, data, big_endian_);
+ return result;
}
-void PrivateKeyInfoCodec::PrependIntegerImpl(uint8_t* val,
- int num_bytes,
- std::list<uint8_t>* data,
- bool big_endian) {
- // Reverse input if little-endian.
- std::vector<uint8_t> tmp;
- if (!big_endian) {
- tmp.assign(val, val + num_bytes);
- std::reverse(tmp.begin(), tmp.end());
- val = &tmp.front();
- }
+// static
+std::unique_ptr<RSAPrivateKey> RSAPrivateKey::CreateFromPrivateKeyInfo(
+ const std::vector<uint8_t>& input) {
+ OpenSSLErrStackTracer err_tracer(FROM_HERE);
- // ASN.1 integers are unpadded byte arrays, so skip any null padding bytes
- // from the most-significant end of the integer.
- int start = 0;
- while (start < (num_bytes - 1) && val[start] == 0x00) {
- start++;
- num_bytes--;
- }
- PrependBytes(val, start, num_bytes, data);
+ CBS cbs;
+ CBS_init(&cbs, input.data(), input.size());
+ bssl::UniquePtr<EVP_PKEY> pkey(EVP_parse_private_key(&cbs));
+ if (!pkey || CBS_len(&cbs) != 0 || EVP_PKEY_id(pkey.get()) != EVP_PKEY_RSA)
+ return nullptr;
- // ASN.1 integers are signed. To encode a positive integer whose sign bit
- // (the most significant bit) would otherwise be set and make the number
- // negative, ASN.1 requires a leading null byte to force the integer to be
- // positive.
- uint8_t front = data->front();
- if ((front & 0x80) != 0) {
- data->push_front(0x00);
- num_bytes++;
- }
-
- PrependTypeHeaderAndLength(kIntegerTag, num_bytes, data);
+ std::unique_ptr<RSAPrivateKey> result(new RSAPrivateKey);
+ result->key_ = std::move(pkey);
+ return result;
}
-bool PrivateKeyInfoCodec::ReadInteger(uint8_t** pos,
- uint8_t* end,
- std::vector<uint8_t>* out) {
- return ReadIntegerImpl(pos, end, out, big_endian_);
+// static
+std::unique_ptr<RSAPrivateKey> RSAPrivateKey::CreateFromKey(EVP_PKEY* key) {
+ DCHECK(key);
+ if (EVP_PKEY_type(key->type) != EVP_PKEY_RSA)
+ return nullptr;
+ std::unique_ptr<RSAPrivateKey> copy(new RSAPrivateKey);
+ EVP_PKEY_up_ref(key);
+ copy->key_.reset(key);
+ return copy;
}
-bool PrivateKeyInfoCodec::ReadIntegerWithExpectedSize(
- uint8_t** pos,
- uint8_t* end,
- size_t expected_size,
- std::vector<uint8_t>* out) {
- std::vector<uint8_t> temp;
- if (!ReadIntegerImpl(pos, end, &temp, true)) // Big-Endian
- return false;
-
- int pad = expected_size - temp.size();
- int index = 0;
- if (out->size() == expected_size + 1) {
- READ_ASSERT(out->front() == 0x00);
- pad++;
- index++;
- } else {
- READ_ASSERT(out->size() <= expected_size);
- }
+RSAPrivateKey::RSAPrivateKey() {}
- out->insert(out->end(), pad, 0x00);
- out->insert(out->end(), temp.begin(), temp.end());
+RSAPrivateKey::~RSAPrivateKey() {}
- // Reverse output if little-endian.
- if (!big_endian_)
- std::reverse(out->begin(), out->end());
- return true;
+std::unique_ptr<RSAPrivateKey> RSAPrivateKey::Copy() const {
+ std::unique_ptr<RSAPrivateKey> copy(new RSAPrivateKey);
+ bssl::UniquePtr<RSA> rsa(EVP_PKEY_get1_RSA(key_.get()));
+ if (!rsa)
+ return nullptr;
+ copy->key_.reset(EVP_PKEY_new());
+ if (!EVP_PKEY_set1_RSA(copy->key_.get(), rsa.get()))
+ return nullptr;
+ return copy;
}
-bool PrivateKeyInfoCodec::ReadIntegerImpl(uint8_t** pos,
- uint8_t* end,
- std::vector<uint8_t>* out,
- bool big_endian) {
- uint32_t length = 0;
- if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length) || !length)
+bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
+ OpenSSLErrStackTracer err_tracer(FROM_HERE);
+  uint8_t* der;
+ size_t der_len;
+ bssl::ScopedCBB cbb;
+ if (!CBB_init(cbb.get(), 0) ||
+ !EVP_marshal_private_key(cbb.get(), key_.get()) ||
+ !CBB_finish(cbb.get(), &der, &der_len)) {
return false;
-
- // The first byte can be zero to force positiveness. We can ignore this.
- if (**pos == 0x00) {
- ++(*pos);
- --length;
}
-
- if (length)
- out->insert(out->end(), *pos, (*pos) + length);
-
- (*pos) += length;
-
- // Reverse output if little-endian.
- if (!big_endian)
- std::reverse(out->begin(), out->end());
+ output->assign(der, der + der_len);
+ OPENSSL_free(der);
return true;
}
-void PrivateKeyInfoCodec::PrependBytes(uint8_t* val,
- int start,
- int num_bytes,
- std::list<uint8_t>* data) {
- while (num_bytes > 0) {
- --num_bytes;
- data->push_front(val[start + num_bytes]);
- }
-}
-
-void PrivateKeyInfoCodec::PrependLength(size_t size, std::list<uint8_t>* data) {
- // The high bit is used to indicate whether additional octets are needed to
- // represent the length.
- if (size < 0x80) {
- data->push_front(static_cast<uint8_t>(size));
- } else {
- uint8_t num_bytes = 0;
- while (size > 0) {
- data->push_front(static_cast<uint8_t>(size & 0xFF));
- size >>= 8;
- num_bytes++;
- }
- CHECK_LE(num_bytes, 4);
- data->push_front(0x80 | num_bytes);
- }
-}
-
-void PrivateKeyInfoCodec::PrependTypeHeaderAndLength(
- uint8_t type,
- uint32_t length,
- std::list<uint8_t>* output) {
- PrependLength(length, output);
- output->push_front(type);
-}
-
-void PrivateKeyInfoCodec::PrependBitString(uint8_t* val,
- int num_bytes,
- std::list<uint8_t>* output) {
- // Start with the data.
- PrependBytes(val, 0, num_bytes, output);
- // Zero unused bits.
- output->push_front(0);
- // Add the length.
- PrependLength(num_bytes + 1, output);
- // Finally, add the bit string tag.
- output->push_front((uint8_t)kBitStringTag);
-}
-
-bool PrivateKeyInfoCodec::ReadLength(uint8_t** pos,
- uint8_t* end,
- uint32_t* result) {
- READ_ASSERT(*pos < end);
- int length = 0;
-
- // If the MSB is not set, the length is just the byte itself.
- if (!(**pos & 0x80)) {
- length = **pos;
- (*pos)++;
- } else {
- // Otherwise, the lower 7 indicate the length of the length.
- int length_of_length = **pos & 0x7F;
- READ_ASSERT(length_of_length <= 4);
- (*pos)++;
- READ_ASSERT(*pos + length_of_length < end);
-
- length = 0;
- for (int i = 0; i < length_of_length; ++i) {
- length <<= 8;
- length |= **pos;
- (*pos)++;
- }
- }
-
- READ_ASSERT(*pos + length <= end);
- if (result) *result = length;
- return true;
-}
-
-bool PrivateKeyInfoCodec::ReadTypeHeaderAndLength(uint8_t** pos,
- uint8_t* end,
- uint8_t expected_tag,
- uint32_t* length) {
- READ_ASSERT(*pos < end);
- READ_ASSERT(**pos == expected_tag);
- (*pos)++;
-
- return ReadLength(pos, end, length);
-}
-
-bool PrivateKeyInfoCodec::ReadSequence(uint8_t** pos, uint8_t* end) {
- return ReadTypeHeaderAndLength(pos, end, kSequenceTag, NULL);
-}
-
-bool PrivateKeyInfoCodec::ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end) {
- READ_ASSERT(*pos + sizeof(kRsaAlgorithmIdentifier) < end);
- READ_ASSERT(memcmp(*pos, kRsaAlgorithmIdentifier,
- sizeof(kRsaAlgorithmIdentifier)) == 0);
- (*pos) += sizeof(kRsaAlgorithmIdentifier);
- return true;
-}
-
-bool PrivateKeyInfoCodec::ReadVersion(uint8_t** pos, uint8_t* end) {
- uint32_t length = 0;
- if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length))
+bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
+ OpenSSLErrStackTracer err_tracer(FROM_HERE);
+  uint8_t* der;
+ size_t der_len;
+ bssl::ScopedCBB cbb;
+ if (!CBB_init(cbb.get(), 0) ||
+ !EVP_marshal_public_key(cbb.get(), key_.get()) ||
+ !CBB_finish(cbb.get(), &der, &der_len)) {
return false;
-
- // The version should be zero.
- for (uint32_t i = 0; i < length; ++i) {
- READ_ASSERT(**pos == 0x00);
- (*pos)++;
}
-
+ output->assign(der, der + der_len);
+ OPENSSL_free(der);
return true;
}
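
With the hand-rolled PrivateKeyInfoCodec gone, RSAPrivateKey now round-trips
through BoringSSL's EVP_marshal_private_key()/EVP_parse_private_key(). A
round-trip sketch using only the methods defined above (the function name is
illustrative):

    #include <memory>
    #include <vector>

    #include "crypto/rsa_private_key.h"

    // Generates a 2048-bit key, serializes it to PKCS #8 DER, re-parses it.
    std::unique_ptr<crypto::RSAPrivateKey> RoundTripSketch() {
      std::unique_ptr<crypto::RSAPrivateKey> key =
          crypto::RSAPrivateKey::Create(2048);
      std::vector<uint8_t> der;
      if (!key || !key->ExportPrivateKey(&der))
        return nullptr;
      // CreateFromPrivateKeyInfo rejects trailing data and non-RSA keys.
      return crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(der);
    }
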
diff --git a/crypto/rsa_private_key.h b/crypto/rsa_private_key.h
index d4808f5aae..fc4c80c232 100644
--- a/crypto/rsa_private_key.h
+++ b/crypto/rsa_private_key.h
@@ -7,162 +7,17 @@
#include <stddef.h>
#include <stdint.h>
+#include <openssl/base.h>
-#include <list>
+#include <memory>
#include <vector>
#include "base/macros.h"
#include "build/build_config.h"
#include "crypto/crypto_export.h"
-#if defined(USE_OPENSSL)
-// Forward declaration for openssl/*.h
-typedef struct evp_pkey_st EVP_PKEY;
-#else
-// Forward declaration.
-typedef struct PK11SlotInfoStr PK11SlotInfo;
-typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
-typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
-#endif
-
-
namespace crypto {
-// Used internally by RSAPrivateKey for serializing and deserializing
-// PKCS #8 PrivateKeyInfo and PublicKeyInfo.
-class PrivateKeyInfoCodec {
- public:
- // ASN.1 encoding of the AlgorithmIdentifier from PKCS #8.
- static const uint8_t kRsaAlgorithmIdentifier[];
-
- // ASN.1 tags for some types we use.
- static const uint8_t kBitStringTag = 0x03;
- static const uint8_t kIntegerTag = 0x02;
- static const uint8_t kNullTag = 0x05;
- static const uint8_t kOctetStringTag = 0x04;
- static const uint8_t kSequenceTag = 0x30;
-
- // |big_endian| here specifies the byte-significance of the integer components
- // that will be parsed & serialized (modulus(), etc...) during Import(),
- // Export() and ExportPublicKeyInfo() -- not the ASN.1 DER encoding of the
- // PrivateKeyInfo/PublicKeyInfo (which is always big-endian).
- explicit PrivateKeyInfoCodec(bool big_endian);
-
- ~PrivateKeyInfoCodec();
-
- // Exports the contents of the integer components to the ASN.1 DER encoding
- // of the PrivateKeyInfo structure to |output|.
- bool Export(std::vector<uint8_t>* output);
-
- // Exports the contents of the integer components to the ASN.1 DER encoding
- // of the PublicKeyInfo structure to |output|.
- bool ExportPublicKeyInfo(std::vector<uint8_t>* output);
-
- // Exports the contents of the integer components to the ASN.1 DER encoding
- // of the RSAPublicKey structure to |output|.
- bool ExportPublicKey(std::vector<uint8_t>* output);
-
- // Parses the ASN.1 DER encoding of the PrivateKeyInfo structure in |input|
- // and populates the integer components with |big_endian_| byte-significance.
- // IMPORTANT NOTE: This is currently *not* security-approved for importing
- // keys from untrusted sources.
- bool Import(const std::vector<uint8_t>& input);
-
- // Accessors to the contents of the integer components of the PrivateKeyInfo
- // structure.
- std::vector<uint8_t>* modulus() { return &modulus_; }
- std::vector<uint8_t>* public_exponent() { return &public_exponent_; }
- std::vector<uint8_t>* private_exponent() { return &private_exponent_; }
- std::vector<uint8_t>* prime1() { return &prime1_; }
- std::vector<uint8_t>* prime2() { return &prime2_; }
- std::vector<uint8_t>* exponent1() { return &exponent1_; }
- std::vector<uint8_t>* exponent2() { return &exponent2_; }
- std::vector<uint8_t>* coefficient() { return &coefficient_; }
-
- private:
- // Utility wrappers for PrependIntegerImpl that use the class's |big_endian_|
- // value.
- void PrependInteger(const std::vector<uint8_t>& in, std::list<uint8_t>* out);
- void PrependInteger(uint8_t* val, int num_bytes, std::list<uint8_t>* data);
-
- // Prepends the integer stored in [|val|, |val + num_bytes|) with |big_endian|
- // byte-significance into |data| as an ASN.1 integer.
- void PrependIntegerImpl(uint8_t* val,
- int num_bytes,
- std::list<uint8_t>* data,
- bool big_endian);
-
- // Utility wrappers for ReadIntegerImpl that use the class's |big_endian_|
- // value.
- bool ReadInteger(uint8_t** pos, uint8_t* end, std::vector<uint8_t>* out);
- bool ReadIntegerWithExpectedSize(uint8_t** pos,
- uint8_t* end,
- size_t expected_size,
- std::vector<uint8_t>* out);
-
- // Reads an ASN.1 integer from |pos|, and stores the result into |out| with
- // |big_endian| byte-significance.
- bool ReadIntegerImpl(uint8_t** pos,
- uint8_t* end,
- std::vector<uint8_t>* out,
- bool big_endian);
-
- // Prepends the integer stored in |val|, starting at index |start|, for
- // |num_bytes| bytes onto |data|.
- void PrependBytes(uint8_t* val,
- int start,
- int num_bytes,
- std::list<uint8_t>* data);
-
- // Helper to prepend an ASN.1 length field.
- void PrependLength(size_t size, std::list<uint8_t>* data);
-
- // Helper to prepend an ASN.1 type header.
- void PrependTypeHeaderAndLength(uint8_t type,
- uint32_t length,
- std::list<uint8_t>* output);
-
- // Helper to prepend an ASN.1 bit string.
- void PrependBitString(uint8_t* val,
- int num_bytes,
- std::list<uint8_t>* output);
-
- // Read an ASN.1 length field. This also checks that the length does not
- // extend beyond |end|.
- bool ReadLength(uint8_t** pos, uint8_t* end, uint32_t* result);
-
- // Read an ASN.1 type header and its length.
- bool ReadTypeHeaderAndLength(uint8_t** pos,
- uint8_t* end,
- uint8_t expected_tag,
- uint32_t* length);
-
- // Read an ASN.1 sequence declaration. This consumes the type header and
- // length field, but not the contents of the sequence.
- bool ReadSequence(uint8_t** pos, uint8_t* end);
-
- // Read the RSA AlgorithmIdentifier.
- bool ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end);
-
- // Read one of the two version fields in PrivateKeyInfo.
- bool ReadVersion(uint8_t** pos, uint8_t* end);
-
- // The byte-significance of the stored components (modulus, etc.).
- bool big_endian_;
-
- // Component integers of the PrivateKeyInfo
- std::vector<uint8_t> modulus_;
- std::vector<uint8_t> public_exponent_;
- std::vector<uint8_t> private_exponent_;
- std::vector<uint8_t> prime1_;
- std::vector<uint8_t> prime2_;
- std::vector<uint8_t> exponent1_;
- std::vector<uint8_t> exponent2_;
- std::vector<uint8_t> coefficient_;
-
- DISALLOW_COPY_AND_ASSIGN(PrivateKeyInfoCodec);
-};
-
// Encapsulates an RSA private key. Can be used to generate new keys, export
// keys to other formats, or to extract a public key.
// TODO(hclam): This class should be ref-counted so it can be reused easily.
@@ -171,34 +26,23 @@ class CRYPTO_EXPORT RSAPrivateKey {
~RSAPrivateKey();
// Create a new random instance. Can return NULL if initialization fails.
- static RSAPrivateKey* Create(uint16_t num_bits);
+ static std::unique_ptr<RSAPrivateKey> Create(uint16_t num_bits);
// Create a new instance by importing an existing private key. The format is
// an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return NULL if
// initialization fails.
- static RSAPrivateKey* CreateFromPrivateKeyInfo(
+ static std::unique_ptr<RSAPrivateKey> CreateFromPrivateKeyInfo(
const std::vector<uint8_t>& input);
-#if defined(USE_OPENSSL)
// Create a new instance from an existing EVP_PKEY, taking a
// reference to it. |key| must be an RSA key. Returns NULL on
// failure.
- static RSAPrivateKey* CreateFromKey(EVP_PKEY* key);
-#else
- // Create a new instance by referencing an existing private key
- // structure. Does not import the key.
- static RSAPrivateKey* CreateFromKey(SECKEYPrivateKey* key);
-#endif
-
-#if defined(USE_OPENSSL)
- EVP_PKEY* key() { return key_; }
-#else
- SECKEYPrivateKey* key() { return key_; }
- SECKEYPublicKey* public_key() { return public_key_; }
-#endif
+ static std::unique_ptr<RSAPrivateKey> CreateFromKey(EVP_PKEY* key);
+
+ EVP_PKEY* key() { return key_.get(); }
// Creates a copy of the object.
- RSAPrivateKey* Copy() const;
+ std::unique_ptr<RSAPrivateKey> Copy() const;
// Exports the private key to a PKCS #8 PrivateKeyInfo block.
bool ExportPrivateKey(std::vector<uint8_t>* output) const;
@@ -210,12 +54,7 @@ class CRYPTO_EXPORT RSAPrivateKey {
// Constructor is private. Use one of the Create*() methods above instead.
RSAPrivateKey();
-#if defined(USE_OPENSSL)
- EVP_PKEY* key_;
-#else
- SECKEYPrivateKey* key_;
- SECKEYPublicKey* public_key_;
-#endif
+ bssl::UniquePtr<EVP_PKEY> key_;
DISALLOW_COPY_AND_ASSIGN(RSAPrivateKey);
};
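With the factories now returning std::unique_ptr, a minimal usage sketch of the surviving API (names taken from the declarations above; the 2048-bit size is arbitrary):

    #include <memory>
    #include <vector>
    #include "crypto/rsa_private_key.h"

    void RsaKeyDemo() {
      std::unique_ptr<crypto::RSAPrivateKey> key =
          crypto::RSAPrivateKey::Create(2048);
      if (!key)
        return;  // Creation can fail and return null.
      std::vector<uint8_t> pkcs8, spki;
      key->ExportPrivateKey(&pkcs8);  // PKCS #8 PrivateKeyInfo.
      key->ExportPublicKey(&spki);    // X.509 SubjectPublicKeyInfo.
      std::unique_ptr<crypto::RSAPrivateKey> copy = key->Copy();
    }
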
diff --git a/crypto/rsa_private_key_nss.cc b/crypto/rsa_private_key_nss.cc
deleted file mode 100644
index b1026c1edb..0000000000
--- a/crypto/rsa_private_key_nss.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/rsa_private_key.h"
-
-#include <cryptohi.h>
-#include <keyhi.h>
-#include <pk11pub.h>
-#include <stdint.h>
-
-#include <list>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_util.h"
-#include "crypto/nss_key_util.h"
-#include "crypto/nss_util.h"
-#include "crypto/scoped_nss_types.h"
-
-// TODO(rafaelw): Consider using NSS's ASN.1 encoder.
-namespace {
-
-static bool ReadAttribute(SECKEYPrivateKey* key,
- CK_ATTRIBUTE_TYPE type,
- std::vector<uint8_t>* output) {
- SECItem item;
- SECStatus rv;
- rv = PK11_ReadRawAttribute(PK11_TypePrivKey, key, type, &item);
- if (rv != SECSuccess) {
- NOTREACHED();
- return false;
- }
-
- output->assign(item.data, item.data + item.len);
- SECITEM_FreeItem(&item, PR_FALSE);
- return true;
-}
-
-} // namespace
-
-namespace crypto {
-
-RSAPrivateKey::~RSAPrivateKey() {
- if (key_)
- SECKEY_DestroyPrivateKey(key_);
- if (public_key_)
- SECKEY_DestroyPublicKey(public_key_);
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
- EnsureNSSInit();
-
- ScopedPK11Slot slot(PK11_GetInternalSlot());
- if (!slot) {
- NOTREACHED();
- return nullptr;
- }
-
- ScopedSECKEYPublicKey public_key;
- ScopedSECKEYPrivateKey private_key;
- if (!GenerateRSAKeyPairNSS(slot.get(), num_bits, false /* not permanent */,
- &public_key, &private_key)) {
- return nullptr;
- }
-
- RSAPrivateKey* rsa_key = new RSAPrivateKey;
- rsa_key->public_key_ = public_key.release();
- rsa_key->key_ = private_key.release();
- return rsa_key;
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
- const std::vector<uint8_t>& input) {
- EnsureNSSInit();
-
- ScopedPK11Slot slot(PK11_GetInternalSlot());
- if (!slot) {
- NOTREACHED();
- return nullptr;
- }
- ScopedSECKEYPrivateKey key(ImportNSSKeyFromPrivateKeyInfo(
- slot.get(), input, false /* not permanent */));
- if (!key || SECKEY_GetPrivateKeyType(key.get()) != rsaKey)
- return nullptr;
- return RSAPrivateKey::CreateFromKey(key.get());
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromKey(SECKEYPrivateKey* key) {
- DCHECK(key);
- if (SECKEY_GetPrivateKeyType(key) != rsaKey)
- return NULL;
- RSAPrivateKey* copy = new RSAPrivateKey();
- copy->key_ = SECKEY_CopyPrivateKey(key);
- copy->public_key_ = SECKEY_ConvertToPublicKey(key);
- if (!copy->key_ || !copy->public_key_) {
- NOTREACHED();
- delete copy;
- return NULL;
- }
- return copy;
-}
-
-RSAPrivateKey* RSAPrivateKey::Copy() const {
- RSAPrivateKey* copy = new RSAPrivateKey();
- copy->key_ = SECKEY_CopyPrivateKey(key_);
- copy->public_key_ = SECKEY_CopyPublicKey(public_key_);
- return copy;
-}
-
-bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
- PrivateKeyInfoCodec private_key_info(true);
-
- // Manually read the component attributes of the private key and build up
- // the PrivateKeyInfo.
- if (!ReadAttribute(key_, CKA_MODULUS, private_key_info.modulus()) ||
- !ReadAttribute(key_, CKA_PUBLIC_EXPONENT,
- private_key_info.public_exponent()) ||
- !ReadAttribute(key_, CKA_PRIVATE_EXPONENT,
- private_key_info.private_exponent()) ||
- !ReadAttribute(key_, CKA_PRIME_1, private_key_info.prime1()) ||
- !ReadAttribute(key_, CKA_PRIME_2, private_key_info.prime2()) ||
- !ReadAttribute(key_, CKA_EXPONENT_1, private_key_info.exponent1()) ||
- !ReadAttribute(key_, CKA_EXPONENT_2, private_key_info.exponent2()) ||
- !ReadAttribute(key_, CKA_COEFFICIENT, private_key_info.coefficient())) {
- NOTREACHED();
- return false;
- }
-
- return private_key_info.Export(output);
-}
-
-bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
- ScopedSECItem der_pubkey(SECKEY_EncodeDERSubjectPublicKeyInfo(public_key_));
- if (!der_pubkey.get()) {
- NOTREACHED();
- return false;
- }
-
- output->assign(der_pubkey->data, der_pubkey->data + der_pubkey->len);
- return true;
-}
-
-RSAPrivateKey::RSAPrivateKey() : key_(NULL), public_key_(NULL) {
- EnsureNSSInit();
-}
-
-} // namespace crypto
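The deleted NSS backend generated keys through PK11; its BoringSSL counterpart reduces to RSA_generate_key_ex plus an EVP_PKEY wrapper. A sketch of that primitive (an illustration, not the patched implementation):

    #include <openssl/bn.h>
    #include <openssl/evp.h>
    #include <openssl/rsa.h>

    bssl::UniquePtr<EVP_PKEY> GenerateRsaKey(int num_bits) {
      bssl::UniquePtr<RSA> rsa(RSA_new());
      bssl::UniquePtr<BIGNUM> e(BN_new());
      bssl::UniquePtr<EVP_PKEY> pkey(EVP_PKEY_new());
      if (!rsa || !e || !pkey ||
          !BN_set_word(e.get(), RSA_F4) ||  // Public exponent 65537.
          !RSA_generate_key_ex(rsa.get(), num_bits, e.get(), nullptr) ||
          !EVP_PKEY_set1_RSA(pkey.get(), rsa.get())) {
        return nullptr;
      }
      return pkey;
    }
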
diff --git a/crypto/rsa_private_key_unittest.cc b/crypto/rsa_private_key_unittest.cc
index 393a24c536..f9549f3418 100644
--- a/crypto/rsa_private_key_unittest.cc
+++ b/crypto/rsa_private_key_unittest.cc
@@ -103,10 +103,8 @@ TEST(RSAPrivateKeyUnitTest, InitRandomTest) {
ASSERT_EQ(privkey1.size(), privkey3.size());
ASSERT_EQ(privkey2.size(), privkey4.size());
- ASSERT_TRUE(0 == memcmp(&privkey1.front(), &privkey3.front(),
- privkey1.size()));
- ASSERT_TRUE(0 == memcmp(&privkey2.front(), &privkey4.front(),
- privkey2.size()));
+ ASSERT_EQ(0, memcmp(&privkey1.front(), &privkey3.front(), privkey1.size()));
+ ASSERT_EQ(0, memcmp(&privkey2.front(), &privkey4.front(), privkey2.size()));
}
// Test Copy() method.
@@ -195,8 +193,8 @@ TEST(RSAPrivateKeyUnitTest, PublicKeyTest) {
std::vector<uint8_t> output;
ASSERT_TRUE(key->ExportPublicKey(&output));
- ASSERT_TRUE(
- memcmp(expected_public_key_info, &output.front(), output.size()) == 0);
+ ASSERT_EQ(0,
+ memcmp(expected_public_key_info, &output.front(), output.size()));
}
// These two test keys each contain an integer that has 0x00 for its most
@@ -349,10 +347,8 @@ TEST(RSAPrivateKeyUnitTest, ShortIntegers) {
ASSERT_EQ(input1.size(), output1.size());
ASSERT_EQ(input2.size(), output2.size());
- ASSERT_TRUE(0 == memcmp(&output1.front(), &input1.front(),
- input1.size()));
- ASSERT_TRUE(0 == memcmp(&output2.front(), &input2.front(),
- input2.size()));
+ ASSERT_EQ(0, memcmp(&output1.front(), &input1.front(), input1.size()));
+ ASSERT_EQ(0, memcmp(&output2.front(), &input2.front(), input2.size()));
}
TEST(RSAPrivateKeyUnitTest, CreateFromKeyTest) {
diff --git a/crypto/scoped_openssl_types.h b/crypto/scoped_openssl_types.h
deleted file mode 100644
index 622fed298f..0000000000
--- a/crypto/scoped_openssl_types.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CRYPTO_SCOPED_OPENSSL_TYPES_H_
-#define CRYPTO_SCOPED_OPENSSL_TYPES_H_
-
-#include <openssl/bio.h>
-#include <openssl/bn.h>
-#include <openssl/dsa.h>
-#include <openssl/ec.h>
-#include <openssl/ecdsa.h>
-#include <openssl/evp.h>
-#ifdef OPENSSL_IS_BORINGSSL
-#include <openssl/mem.h>
-#endif
-#include <openssl/rsa.h>
-#include <stdint.h>
-
-#include <memory>
-
-namespace crypto {
-
-// Simplistic helper that wraps a call to a deleter function. In a C++11 world,
-// this would be std::function<>. An alternative would be to re-use
-// base::internal::RunnableAdapter<>, but that's far too heavy weight.
-template <typename Type, void (*Destroyer)(Type*)>
-struct OpenSSLDestroyer {
- void operator()(Type* ptr) const { Destroyer(ptr); }
-};
-
-template <typename PointerType, void (*Destroyer)(PointerType*)>
-using ScopedOpenSSL =
- std::unique_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
-
-struct OpenSSLFree {
- void operator()(uint8_t* ptr) const { OPENSSL_free(ptr); }
-};
-
-// Several typedefs are provided for crypto-specific primitives, for
-// short-hand and prevalence. Note that OpenSSL types related to X.509 are
-// intentionally not included, as crypto/ does not generally deal with
-// certificates or PKI.
-using ScopedBIGNUM = ScopedOpenSSL<BIGNUM, BN_free>;
-using ScopedEC_Key = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
-using ScopedBIO = ScopedOpenSSL<BIO, BIO_free_all>;
-using ScopedDSA = ScopedOpenSSL<DSA, DSA_free>;
-using ScopedECDSA_SIG = ScopedOpenSSL<ECDSA_SIG, ECDSA_SIG_free>;
-using ScopedEC_GROUP = ScopedOpenSSL<EC_GROUP, EC_GROUP_free>;
-using ScopedEC_KEY = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
-using ScopedEC_POINT = ScopedOpenSSL<EC_POINT, EC_POINT_free>;
-using ScopedEVP_MD_CTX = ScopedOpenSSL<EVP_MD_CTX, EVP_MD_CTX_destroy>;
-using ScopedEVP_PKEY = ScopedOpenSSL<EVP_PKEY, EVP_PKEY_free>;
-using ScopedEVP_PKEY_CTX = ScopedOpenSSL<EVP_PKEY_CTX, EVP_PKEY_CTX_free>;
-using ScopedRSA = ScopedOpenSSL<RSA, RSA_free>;
-
-// The bytes must have been allocated with OPENSSL_malloc.
-using ScopedOpenSSLBytes = std::unique_ptr<uint8_t, OpenSSLFree>;
-
-} // namespace crypto
-
-#endif // CRYPTO_SCOPED_OPENSSL_TYPES_H_
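scoped_openssl_types.h can go because BoringSSL now ships equivalent smart pointers: bssl::UniquePtr<T>, declared in <openssl/base.h> with deleters registered by each type's own header, covers the aliases above. For example:

    #include <openssl/evp.h>
    #include <openssl/rsa.h>

    // Before: crypto::ScopedRSA rsa(RSA_new());
    bssl::UniquePtr<RSA> rsa(RSA_new());

    // Before: crypto::ScopedEVP_PKEY pkey(EVP_PKEY_new());
    bssl::UniquePtr<EVP_PKEY> pkey(EVP_PKEY_new());
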
diff --git a/crypto/scoped_test_nss_chromeos_user.cc b/crypto/scoped_test_nss_chromeos_user.cc
index aec25d8dff..49b92d4859 100644
--- a/crypto/scoped_test_nss_chromeos_user.cc
+++ b/crypto/scoped_test_nss_chromeos_user.cc
@@ -18,7 +18,7 @@ ScopedTestNSSChromeOSUser::ScopedTestNSSChromeOSUser(
// This opens a software DB in the given folder. In production code that is in
// the home folder, but for testing the temp folder is used.
constructed_successfully_ =
- InitializeNSSForChromeOSUser(username_hash, temp_dir_.path());
+ InitializeNSSForChromeOSUser(username_hash, temp_dir_.GetPath());
}
ScopedTestNSSChromeOSUser::~ScopedTestNSSChromeOSUser() {
diff --git a/crypto/scoped_test_nss_db.cc b/crypto/scoped_test_nss_db.cc
index dc58031ce5..b334109e03 100644
--- a/crypto/scoped_test_nss_db.cc
+++ b/crypto/scoped_test_nss_db.cc
@@ -24,7 +24,7 @@ ScopedTestNSSDB::ScopedTestNSSDB() {
return;
const char kTestDescription[] = "Test DB";
- slot_ = OpenSoftwareNSSDB(temp_dir_.path(), kTestDescription);
+ slot_ = OpenSoftwareNSSDB(temp_dir_.GetPath(), kTestDescription);
}
ScopedTestNSSDB::~ScopedTestNSSDB() {
diff --git a/crypto/scoped_test_system_nss_key_slot.h b/crypto/scoped_test_system_nss_key_slot.h
index eb8fbc97a8..ae9b2cd8a5 100644
--- a/crypto/scoped_test_system_nss_key_slot.h
+++ b/crypto/scoped_test_system_nss_key_slot.h
@@ -27,7 +27,7 @@ class ScopedTestNSSDB;
// At most one instance of this helper must be used at a time.
class CRYPTO_EXPORT ScopedTestSystemNSSKeySlot {
public:
- explicit ScopedTestSystemNSSKeySlot();
+ ScopedTestSystemNSSKeySlot();
~ScopedTestSystemNSSKeySlot();
bool ConstructedSuccessfully() const;
diff --git a/crypto/secure_hash.cc b/crypto/secure_hash.cc
index 9003b9cb69..d47f783c05 100644
--- a/crypto/secure_hash.cc
+++ b/crypto/secure_hash.cc
@@ -27,7 +27,7 @@ class SecureHashSHA256 : public SecureHash {
SHA256_Init(&ctx_);
}
- SecureHashSHA256(const SecureHashSHA256& other) : SecureHash() {
+ SecureHashSHA256(const SecureHashSHA256& other) {
memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
}
diff --git a/crypto/sha2.cc b/crypto/sha2.cc
index e97b8f4037..1b302b34f6 100644
--- a/crypto/sha2.cc
+++ b/crypto/sha2.cc
@@ -21,7 +21,7 @@ void SHA256HashString(const base::StringPiece& str, void* output, size_t len) {
std::string SHA256HashString(const base::StringPiece& str) {
std::string output(kSHA256Length, 0);
- SHA256HashString(str, string_as_array(&output), output.size());
+ SHA256HashString(str, base::string_as_array(&output), output.size());
return output;
}
diff --git a/crypto/signature_creator.h b/crypto/signature_creator.h
index 1e8e856a02..674bd4cccb 100644
--- a/crypto/signature_creator.h
+++ b/crypto/signature_creator.h
@@ -14,13 +14,8 @@
#include "build/build_config.h"
#include "crypto/crypto_export.h"
-#if defined(USE_OPENSSL)
// Forward declaration for openssl/*.h
typedef struct env_md_ctx_st EVP_MD_CTX;
-#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
-// Forward declaration.
-struct SGNContextStr;
-#endif
namespace crypto {
@@ -62,11 +57,7 @@ class CRYPTO_EXPORT SignatureCreator {
// Private constructor. Use the Create() method instead.
SignatureCreator();
-#if defined(USE_OPENSSL)
EVP_MD_CTX* sign_context_;
-#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
- SGNContextStr* sign_context_;
-#endif
DISALLOW_COPY_AND_ASSIGN(SignatureCreator);
};
diff --git a/crypto/signature_creator_nss.cc b/crypto/signature_creator_nss.cc
deleted file mode 100644
index bf204134b9..0000000000
--- a/crypto/signature_creator_nss.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_creator.h"
-
-#include <cryptohi.h>
-#include <keyhi.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/nss_util.h"
-#include "crypto/rsa_private_key.h"
-
-namespace crypto {
-
-namespace {
-
-SECOidTag ToNSSSigOid(SignatureCreator::HashAlgorithm hash_alg) {
- switch (hash_alg) {
- case SignatureCreator::SHA1:
- return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
- case SignatureCreator::SHA256:
- return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
- }
- return SEC_OID_UNKNOWN;
-}
-
-SECOidTag ToNSSHashOid(SignatureCreator::HashAlgorithm hash_alg) {
- switch (hash_alg) {
- case SignatureCreator::SHA1:
- return SEC_OID_SHA1;
- case SignatureCreator::SHA256:
- return SEC_OID_SHA256;
- }
- return SEC_OID_UNKNOWN;
-}
-
-} // namespace
-
-SignatureCreator::~SignatureCreator() {
- if (sign_context_) {
- SGN_DestroyContext(sign_context_, PR_TRUE);
- sign_context_ = NULL;
- }
-}
-
-// static
-SignatureCreator* SignatureCreator::Create(RSAPrivateKey* key,
- HashAlgorithm hash_alg) {
- scoped_ptr<SignatureCreator> result(new SignatureCreator);
- result->sign_context_ = SGN_NewContext(ToNSSSigOid(hash_alg), key->key());
- if (!result->sign_context_) {
- NOTREACHED();
- return NULL;
- }
-
- SECStatus rv = SGN_Begin(result->sign_context_);
- if (rv != SECSuccess) {
- NOTREACHED();
- return NULL;
- }
-
- return result.release();
-}
-
-// static
-bool SignatureCreator::Sign(RSAPrivateKey* key,
- HashAlgorithm hash_alg,
- const uint8_t* data,
- int data_len,
- std::vector<uint8_t>* signature) {
- SECItem data_item;
- data_item.type = siBuffer;
- data_item.data = const_cast<unsigned char*>(data);
- data_item.len = data_len;
-
- SECItem signature_item;
- SECStatus rv = SGN_Digest(key->key(), ToNSSHashOid(hash_alg), &signature_item,
- &data_item);
- if (rv != SECSuccess) {
- NOTREACHED();
- return false;
- }
- signature->assign(signature_item.data,
- signature_item.data + signature_item.len);
- SECITEM_FreeItem(&signature_item, PR_FALSE);
- return true;
-}
-
-bool SignatureCreator::Update(const uint8_t* data_part, int data_part_len) {
- SECStatus rv = SGN_Update(sign_context_, data_part, data_part_len);
- if (rv != SECSuccess) {
- NOTREACHED();
- return false;
- }
-
- return true;
-}
-
-bool SignatureCreator::Final(std::vector<uint8_t>* signature) {
- SECItem signature_item;
- SECStatus rv = SGN_End(sign_context_, &signature_item);
- if (rv != SECSuccess) {
- return false;
- }
- signature->assign(signature_item.data,
- signature_item.data + signature_item.len);
- SECITEM_FreeItem(&signature_item, PR_FALSE);
- return true;
-}
-
-SignatureCreator::SignatureCreator() : sign_context_(NULL) {
- EnsureNSSInit();
-}
-
-} // namespace crypto
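The SGN_* flow removed here maps onto BoringSSL's EVP digest-sign API, which is what the remaining OpenSSL-backed SignatureCreator builds on. A one-shot sketch (hypothetical helper; RSASSA-PKCS1-v1_5 is the default padding for an RSA EVP_PKEY):

    #include <openssl/digest.h>
    #include <openssl/evp.h>
    #include <vector>

    bool SignSha256(EVP_PKEY* key, const uint8_t* data, size_t len,
                    std::vector<uint8_t>* signature) {
      bssl::ScopedEVP_MD_CTX ctx;
      size_t sig_len = 0;
      if (!EVP_DigestSignInit(ctx.get(), nullptr, EVP_sha256(), nullptr, key) ||
          !EVP_DigestSignUpdate(ctx.get(), data, len) ||
          !EVP_DigestSignFinal(ctx.get(), nullptr, &sig_len))  // Size query.
        return false;
      signature->resize(sig_len);
      if (!EVP_DigestSignFinal(ctx.get(), signature->data(), &sig_len))
        return false;
      signature->resize(sig_len);  // Trim to the actual signature length.
      return true;
    }
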
diff --git a/crypto/signature_creator_unittest.cc b/crypto/signature_creator_unittest.cc
index 819e663dac..2f135cc709 100644
--- a/crypto/signature_creator_unittest.cc
+++ b/crypto/signature_creator_unittest.cc
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <memory>
+#include <string>
#include <vector>
#include "base/sha1.h"
diff --git a/crypto/signature_verifier.h b/crypto/signature_verifier.h
index 5b7369fb51..f1ea58062c 100644
--- a/crypto/signature_verifier.h
+++ b/crypto/signature_verifier.h
@@ -7,19 +7,14 @@
#include <stdint.h>
+#include <memory>
#include <vector>
#include "build/build_config.h"
#include "crypto/crypto_export.h"
-#if defined(USE_OPENSSL)
typedef struct env_md_st EVP_MD;
typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;
-#else
-typedef struct HASHContextStr HASHContext;
-typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
-typedef struct VFYContextStr VFYContext;
-#endif
namespace crypto {
@@ -96,7 +91,6 @@ class CRYPTO_EXPORT SignatureVerifier {
bool VerifyFinal();
private:
-#if defined(USE_OPENSSL)
bool CommonInit(int pkey_type,
const EVP_MD* digest,
const uint8_t* signature,
@@ -104,29 +98,13 @@ class CRYPTO_EXPORT SignatureVerifier {
const uint8_t* public_key_info,
int public_key_info_len,
EVP_PKEY_CTX** pkey_ctx);
-#else
- static SECKEYPublicKey* DecodePublicKeyInfo(const uint8_t* public_key_info,
- int public_key_info_len);
-#endif
void Reset();
std::vector<uint8_t> signature_;
-#if defined(USE_OPENSSL)
struct VerifyContext;
- VerifyContext* verify_context_;
-#else
- // Used for all signature types except RSA-PSS.
- VFYContext* vfy_context_;
-
- // Used for RSA-PSS signatures.
- HashAlgorithm hash_alg_;
- HashAlgorithm mask_hash_alg_;
- unsigned int salt_len_;
- SECKEYPublicKey* public_key_;
- HASHContext* hash_context_;
-#endif
+ std::unique_ptr<VerifyContext> verify_context_;
};
} // namespace crypto
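A usage sketch of the streaming verifier that remains (the VerifyInit parameter list follows the call shape visible in the deleted NSS backend below; the helper is hypothetical):

    #include <vector>
    #include "crypto/signature_verifier.h"

    bool CheckRsaSha256(const std::vector<uint8_t>& signature,
                        const std::vector<uint8_t>& public_key_info,
                        const uint8_t* data, int data_len) {
      crypto::SignatureVerifier verifier;
      if (!verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA256,
                               signature.data(),
                               static_cast<int>(signature.size()),
                               public_key_info.data(),
                               static_cast<int>(public_key_info.size())))
        return false;
      verifier.VerifyUpdate(data, data_len);
      return verifier.VerifyFinal();
    }
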
diff --git a/crypto/signature_verifier_nss.cc b/crypto/signature_verifier_nss.cc
deleted file mode 100644
index edbd3f6a98..0000000000
--- a/crypto/signature_verifier_nss.cc
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_verifier.h"
-
-#include <cryptohi.h>
-#include <keyhi.h>
-#include <pk11pub.h>
-#include <secerr.h>
-#include <sechash.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-#include "base/logging.h"
-#include "crypto/nss_util.h"
-#include "crypto/third_party/nss/chromium-nss.h"
-
-namespace crypto {
-
-namespace {
-
-HASH_HashType ToNSSHashType(SignatureVerifier::HashAlgorithm hash_alg) {
- switch (hash_alg) {
- case SignatureVerifier::SHA1:
- return HASH_AlgSHA1;
- case SignatureVerifier::SHA256:
- return HASH_AlgSHA256;
- }
- return HASH_AlgNULL;
-}
-
-SECOidTag ToNSSSignatureType(SignatureVerifier::SignatureAlgorithm sig_alg) {
- switch (sig_alg) {
- case SignatureVerifier::RSA_PKCS1_SHA1:
- return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
- case SignatureVerifier::RSA_PKCS1_SHA256:
- return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
- case SignatureVerifier::ECDSA_SHA256:
- return SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
- }
- return SEC_OID_UNKNOWN;
-}
-
-SECStatus VerifyRSAPSS_End(SECKEYPublicKey* public_key,
- HASHContext* hash_context,
- HASH_HashType mask_hash_alg,
- unsigned int salt_len,
- const unsigned char* signature,
- unsigned int signature_len) {
- unsigned int hash_len = HASH_ResultLenContext(hash_context);
- std::vector<unsigned char> hash(hash_len);
- HASH_End(hash_context, &hash[0], &hash_len, hash.size());
-
- unsigned int modulus_len = SECKEY_PublicKeyStrength(public_key);
- if (signature_len != modulus_len) {
- PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
- return SECFailure;
- }
- std::vector<unsigned char> enc(signature_len);
- SECStatus rv = PK11_PubEncryptRaw(public_key, &enc[0],
- const_cast<unsigned char*>(signature),
- signature_len, NULL);
- if (rv != SECSuccess) {
- LOG(WARNING) << "PK11_PubEncryptRaw failed";
- return rv;
- }
- return emsa_pss_verify(&hash[0], &enc[0], enc.size(),
- HASH_GetType(hash_context), mask_hash_alg,
- salt_len);
-}
-
-} // namespace
-
-SignatureVerifier::SignatureVerifier()
- : vfy_context_(NULL),
- hash_alg_(SHA1),
- mask_hash_alg_(SHA1),
- salt_len_(0),
- public_key_(NULL),
- hash_context_(NULL) {
- EnsureNSSInit();
-}
-
-SignatureVerifier::~SignatureVerifier() {
- Reset();
-}
-
-bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
- const uint8_t* signature,
- int signature_len,
- const uint8_t* public_key_info,
- int public_key_info_len) {
- if (vfy_context_ || hash_context_)
- return false;
-
- signature_.assign(signature, signature + signature_len);
-
- SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
- public_key_info_len);
- if (!public_key)
- return false;
-
- SECItem sig;
- sig.type = siBuffer;
- sig.data = const_cast<uint8_t*>(signature);
- sig.len = signature_len;
- vfy_context_ = VFY_CreateContext(
- public_key, &sig, ToNSSSignatureType(signature_algorithm), nullptr);
- SECKEY_DestroyPublicKey(public_key); // Done with public_key.
- if (!vfy_context_) {
- // A corrupted RSA signature could be detected without the data, so
- // VFY_CreateContextWithAlgorithmID may fail with SEC_ERROR_BAD_SIGNATURE
- // (-8182).
- return false;
- }
-
- if (VFY_Begin(vfy_context_) != SECSuccess) {
- NOTREACHED();
- return false;
- }
- return true;
-}
-
-bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
- HashAlgorithm mask_hash_alg,
- int salt_len,
- const uint8_t* signature,
- int signature_len,
- const uint8_t* public_key_info,
- int public_key_info_len) {
- if (vfy_context_ || hash_context_)
- return false;
-
- signature_.assign(signature, signature + signature_len);
-
- SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
- public_key_info_len);
- if (!public_key)
- return false;
-
- public_key_ = public_key;
- hash_alg_ = hash_alg;
- mask_hash_alg_ = mask_hash_alg;
- salt_len_ = salt_len;
- hash_context_ = HASH_Create(ToNSSHashType(hash_alg_));
- if (!hash_context_)
- return false;
- HASH_Begin(hash_context_);
- return true;
-}
-
-void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
- int data_part_len) {
- if (vfy_context_) {
- SECStatus rv = VFY_Update(vfy_context_, data_part, data_part_len);
- DCHECK_EQ(SECSuccess, rv);
- } else {
- HASH_Update(hash_context_, data_part, data_part_len);
- }
-}
-
-bool SignatureVerifier::VerifyFinal() {
- SECStatus rv;
- if (vfy_context_) {
- rv = VFY_End(vfy_context_);
- } else {
- rv = VerifyRSAPSS_End(public_key_, hash_context_,
- ToNSSHashType(mask_hash_alg_), salt_len_,
- signature_.data(),
- signature_.size());
- }
- Reset();
-
- // If signature verification fails, the error code is
- // SEC_ERROR_BAD_SIGNATURE (-8182).
- return (rv == SECSuccess);
-}
-
-// static
-SECKEYPublicKey* SignatureVerifier::DecodePublicKeyInfo(
- const uint8_t* public_key_info,
- int public_key_info_len) {
- CERTSubjectPublicKeyInfo* spki = NULL;
- SECItem spki_der;
- spki_der.type = siBuffer;
- spki_der.data = const_cast<uint8_t*>(public_key_info);
- spki_der.len = public_key_info_len;
- spki = SECKEY_DecodeDERSubjectPublicKeyInfo(&spki_der);
- if (!spki)
- return NULL;
- SECKEYPublicKey* public_key = SECKEY_ExtractPublicKey(spki);
- SECKEY_DestroySubjectPublicKeyInfo(spki); // Done with spki.
- return public_key;
-}
-
-void SignatureVerifier::Reset() {
- if (vfy_context_) {
- VFY_DestroyContext(vfy_context_, PR_TRUE);
- vfy_context_ = NULL;
- }
- if (hash_context_) {
- HASH_Destroy(hash_context_);
- hash_context_ = NULL;
- }
- if (public_key_) {
- SECKEY_DestroyPublicKey(public_key_);
- public_key_ = NULL;
- }
- signature_.clear();
-}
-
-} // namespace crypto
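The hand-rolled emsa_pss_verify() path removed above has a direct BoringSSL equivalent in the EVP layer. A sketch of RSA-PSS verification with SHA-256 and a 32-byte salt (parameters chosen for illustration):

    #include <openssl/digest.h>
    #include <openssl/evp.h>
    #include <openssl/rsa.h>

    bool VerifyPssSha256(EVP_PKEY* public_key,
                         const uint8_t* sig, size_t sig_len,
                         const uint8_t* data, size_t data_len) {
      bssl::ScopedEVP_MD_CTX ctx;
      EVP_PKEY_CTX* pctx = nullptr;  // Owned by |ctx|.
      if (!EVP_DigestVerifyInit(ctx.get(), &pctx, EVP_sha256(), nullptr,
                                public_key) ||
          !EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) ||
          !EVP_PKEY_CTX_set_rsa_mgf1_md(pctx, EVP_sha256()) ||
          !EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, 32) ||
          !EVP_DigestVerifyUpdate(ctx.get(), data, data_len))
        return false;
      return EVP_DigestVerifyFinal(ctx.get(), sig, sig_len) == 1;
    }
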
diff --git a/crypto/signature_verifier_unittest.cc b/crypto/signature_verifier_unittest.cc
index d71ea822be..2cda4596a1 100644
--- a/crypto/signature_verifier_unittest.cc
+++ b/crypto/signature_verifier_unittest.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/crypto/symmetric_key.cc b/crypto/symmetric_key.cc
index e3ecf624bc..6a19f84089 100644
--- a/crypto/symmetric_key.cc
+++ b/crypto/symmetric_key.cc
@@ -4,8 +4,6 @@
#include "crypto/symmetric_key.h"
-#include <openssl/evp.h>
-#include <openssl/rand.h>
#include <stddef.h>
#include <stdint.h>
@@ -15,6 +13,8 @@
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "crypto/openssl_util.h"
+#include "third_party/boringssl/src/include/openssl/evp.h"
+#include "third_party/boringssl/src/include/openssl/rand.h"
namespace crypto {
diff --git a/crypto/symmetric_key.h b/crypto/symmetric_key.h
index 88627084c6..7494634b5e 100644
--- a/crypto/symmetric_key.h
+++ b/crypto/symmetric_key.h
@@ -14,15 +14,6 @@
#include "build/build_config.h"
#include "crypto/crypto_export.h"
-#if defined(NACL_WIN64)
-// See comments for crypto_nacl_win64 in crypto.gyp.
-// Must test for NACL_WIN64 before OS_WIN since the former is a subset of the latter.
-#include "crypto/scoped_capi_types.h"
-#elif defined(USE_NSS_CERTS) || \
- (!defined(USE_OPENSSL) && (defined(OS_WIN) || defined(OS_MACOSX)))
-#include "crypto/scoped_nss_types.h"
-#endif
-
namespace crypto {
// Wraps a platform-specific symmetric key and allows it to be held in a
@@ -63,13 +54,8 @@ class CRYPTO_EXPORT SymmetricKey {
// size for use with |algorithm|. The caller owns the returned SymmetricKey.
static std::unique_ptr<SymmetricKey> Import(Algorithm algorithm,
const std::string& raw_key);
-#if defined(NACL_WIN64)
- HCRYPTKEY key() const { return key_.get(); }
-#elif defined(USE_OPENSSL)
+
const std::string& key() { return key_; }
-#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
- PK11SymKey* key() const { return key_.get(); }
-#endif
// Extracts the raw key from the platform specific data.
// Warning: |raw_key| holds the raw key as bytes and thus must be handled
@@ -77,27 +63,9 @@ class CRYPTO_EXPORT SymmetricKey {
bool GetRawKey(std::string* raw_key);
private:
-#if defined(NACL_WIN64)
- SymmetricKey(HCRYPTPROV provider, HCRYPTKEY key,
- const void* key_data, size_t key_size_in_bytes);
-
- ScopedHCRYPTPROV provider_;
- ScopedHCRYPTKEY key_;
-
- // Contains the raw key, if it is known during initialization and when it
- // is likely that the associated |provider_| will be unable to export the
- // |key_|. This is the case of HMAC keys when the key size exceeds 16 bytes
- // when using the default RSA provider.
- // TODO(rsleevi): See if KP_EFFECTIVE_KEYLEN is the reason why CryptExportKey
- // fails with NTE_BAD_KEY/NTE_BAD_LEN
- std::string raw_key_;
-#elif defined(USE_OPENSSL)
- SymmetricKey() {}
+ SymmetricKey();
+
std::string key_;
-#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
- explicit SymmetricKey(PK11SymKey* key);
- ScopedPK11SymKey key_;
-#endif
DISALLOW_COPY_AND_ASSIGN(SymmetricKey);
};
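A usage sketch of the now OpenSSL-only SymmetricKey (assuming GenerateRandomKey follows the same std::unique_ptr convention as Import() above):

    #include <memory>
    #include <string>
    #include "crypto/symmetric_key.h"

    void AesKeyDemo() {
      std::unique_ptr<crypto::SymmetricKey> key =
          crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES,
                                                  256 /* bits */);
      if (!key)
        return;
      std::string raw;
      key->GetRawKey(&raw);  // |raw| holds the 32 key bytes; handle with care.
      std::unique_ptr<crypto::SymmetricKey> imported =
          crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, raw);
    }
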
diff --git a/crypto/symmetric_key_nss.cc b/crypto/symmetric_key_nss.cc
deleted file mode 100644
index e3aacc745b..0000000000
--- a/crypto/symmetric_key_nss.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/symmetric_key.h"
-
-#include <nss.h>
-#include <pk11pub.h>
-#include <stddef.h>
-
-#include "base/logging.h"
-#include "crypto/nss_util.h"
-#include "crypto/scoped_nss_types.h"
-
-namespace crypto {
-
-SymmetricKey::~SymmetricKey() {}
-
-// static
-SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
- size_t key_size_in_bits) {
- DCHECK_EQ(AES, algorithm);
-
- EnsureNSSInit();
-
-  // Whitelist supported key sizes to avoid accidentally relying on
- // algorithms available in NSS but not BoringSSL and vice
- // versa. Note that BoringSSL does not support AES-192.
- if (key_size_in_bits != 128 && key_size_in_bits != 256)
- return NULL;
-
- ScopedPK11Slot slot(PK11_GetInternalSlot());
- if (!slot.get())
- return NULL;
-
- PK11SymKey* sym_key = PK11_KeyGen(slot.get(), CKM_AES_KEY_GEN, NULL,
- key_size_in_bits / 8, NULL);
- if (!sym_key)
- return NULL;
-
- return new SymmetricKey(sym_key);
-}
-
-// static
-SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
- const std::string& password,
- const std::string& salt,
- size_t iterations,
- size_t key_size_in_bits) {
- EnsureNSSInit();
- if (salt.empty() || iterations == 0 || key_size_in_bits == 0)
- return NULL;
-
- if (algorithm == AES) {
-    // Whitelist supported key sizes to avoid accidentally relying on
- // algorithms available in NSS but not BoringSSL and vice
- // versa. Note that BoringSSL does not support AES-192.
- if (key_size_in_bits != 128 && key_size_in_bits != 256)
- return NULL;
- }
-
- SECItem password_item;
- password_item.type = siBuffer;
- password_item.data = reinterpret_cast<unsigned char*>(
- const_cast<char *>(password.data()));
- password_item.len = password.size();
-
- SECItem salt_item;
- salt_item.type = siBuffer;
- salt_item.data = reinterpret_cast<unsigned char*>(
- const_cast<char *>(salt.data()));
- salt_item.len = salt.size();
-
- SECOidTag cipher_algorithm =
- algorithm == AES ? SEC_OID_AES_256_CBC : SEC_OID_HMAC_SHA1;
- ScopedSECAlgorithmID alg_id(PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2,
- cipher_algorithm,
- SEC_OID_HMAC_SHA1,
- key_size_in_bits / 8,
- iterations,
- &salt_item));
- if (!alg_id.get())
- return NULL;
-
- ScopedPK11Slot slot(PK11_GetInternalSlot());
- if (!slot.get())
- return NULL;
-
- PK11SymKey* sym_key = PK11_PBEKeyGen(slot.get(), alg_id.get(), &password_item,
- PR_FALSE, NULL);
- if (!sym_key)
- return NULL;
-
- return new SymmetricKey(sym_key);
-}
-
-// static
-SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
- const std::string& raw_key) {
- EnsureNSSInit();
-
- if (algorithm == AES) {
-    // Whitelist supported key sizes to avoid accidentally relying on
- // algorithms available in NSS but not BoringSSL and vice
- // versa. Note that BoringSSL does not support AES-192.
- if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
- return NULL;
- }
-
- CK_MECHANISM_TYPE cipher =
- algorithm == AES ? CKM_AES_CBC : CKM_SHA_1_HMAC;
-
- SECItem key_item;
- key_item.type = siBuffer;
- key_item.data = reinterpret_cast<unsigned char*>(
- const_cast<char *>(raw_key.data()));
- key_item.len = raw_key.size();
-
- ScopedPK11Slot slot(PK11_GetInternalSlot());
- if (!slot.get())
- return NULL;
-
- // The exact value of the |origin| argument doesn't matter to NSS as long as
- // it's not PK11_OriginFortezzaHack, so we pass PK11_OriginUnwrap as a
- // placeholder.
- PK11SymKey* sym_key = PK11_ImportSymKey(slot.get(), cipher, PK11_OriginUnwrap,
- CKA_ENCRYPT, &key_item, NULL);
- if (!sym_key)
- return NULL;
-
- return new SymmetricKey(sym_key);
-}
-
-bool SymmetricKey::GetRawKey(std::string* raw_key) {
- SECStatus rv = PK11_ExtractKeyValue(key_.get());
- if (SECSuccess != rv)
- return false;
-
- SECItem* key_item = PK11_GetKeyData(key_.get());
- if (!key_item)
- return false;
-
- raw_key->assign(reinterpret_cast<char*>(key_item->data), key_item->len);
- return true;
-}
-
-SymmetricKey::SymmetricKey(PK11SymKey* key) : key_(key) {
- DCHECK(key);
-}
-
-} // namespace crypto
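Password-based derivation survives the NSS removal with the same shape. A sketch, assuming the BoringSSL backend keeps the parameter list used above, modulo the std::unique_ptr return type the header now favors (salt and iteration count are placeholders):

    #include <memory>
    #include <string>
    #include "crypto/symmetric_key.h"

    std::unique_ptr<crypto::SymmetricKey> DeriveDemo(
        const std::string& password, const std::string& salt) {
      return crypto::SymmetricKey::DeriveKeyFromPassword(
          crypto::SymmetricKey::AES, password, salt,
          100000 /* iterations */, 256 /* key size in bits */);
    }
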
diff --git a/crypto/third_party/nss/chromium-blapi.h b/crypto/third_party/nss/chromium-blapi.h
deleted file mode 100644
index 2ca772e4d3..0000000000
--- a/crypto/third_party/nss/chromium-blapi.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * crypto.h - public data structures and prototypes for the crypto library
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- * Dr Vipul Gupta <vipul.gupta@sun.com>, Sun Microsystems Laboratories
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-/* $Id: blapi.h,v 1.27 2007/11/09 18:49:32 wtc%google.com Exp $ */
-
-#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
-#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
-
-#include "crypto/third_party/nss/chromium-blapit.h"
-
-/******************************************/
-
-extern SHA256Context *SHA256_NewContext(void);
-extern void SHA256_DestroyContext(SHA256Context *cx, PRBool freeit);
-extern void SHA256_Begin(SHA256Context *cx);
-extern void SHA256_Update(SHA256Context *cx, const unsigned char *input,
- unsigned int inputLen);
-extern void SHA256_End(SHA256Context *cx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen);
-extern SECStatus SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length);
-extern SECStatus SHA256_Hash(unsigned char *dest, const char *src);
-extern void SHA256_TraceState(SHA256Context *cx);
-extern unsigned int SHA256_FlattenSize(SHA256Context *cx);
-extern SECStatus SHA256_Flatten(SHA256Context *cx,unsigned char *space);
-extern SHA256Context * SHA256_Resurrect(unsigned char *space, void *arg);
-extern void SHA256_Clone(SHA256Context *dest, SHA256Context *src);
-
-/******************************************/
-
-extern SHA512Context *SHA512_NewContext(void);
-extern void SHA512_DestroyContext(SHA512Context *cx, PRBool freeit);
-extern void SHA512_Begin(SHA512Context *cx);
-extern void SHA512_Update(SHA512Context *cx, const unsigned char *input,
- unsigned int inputLen);
-extern void SHA512_End(SHA512Context *cx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen);
-extern SECStatus SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length);
-extern SECStatus SHA512_Hash(unsigned char *dest, const char *src);
-extern void SHA512_TraceState(SHA512Context *cx);
-extern unsigned int SHA512_FlattenSize(SHA512Context *cx);
-extern SECStatus SHA512_Flatten(SHA512Context *cx,unsigned char *space);
-extern SHA512Context * SHA512_Resurrect(unsigned char *space, void *arg);
-extern void SHA512_Clone(SHA512Context *dest, SHA512Context *src);
-
-/******************************************/
-
-extern SHA384Context *SHA384_NewContext(void);
-extern void SHA384_DestroyContext(SHA384Context *cx, PRBool freeit);
-extern void SHA384_Begin(SHA384Context *cx);
-extern void SHA384_Update(SHA384Context *cx, const unsigned char *input,
- unsigned int inputLen);
-extern void SHA384_End(SHA384Context *cx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen);
-extern SECStatus SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length);
-extern SECStatus SHA384_Hash(unsigned char *dest, const char *src);
-extern void SHA384_TraceState(SHA384Context *cx);
-extern unsigned int SHA384_FlattenSize(SHA384Context *cx);
-extern SECStatus SHA384_Flatten(SHA384Context *cx,unsigned char *space);
-extern SHA384Context * SHA384_Resurrect(unsigned char *space, void *arg);
-extern void SHA384_Clone(SHA384Context *dest, SHA384Context *src);
-
-#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_ */
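These borrowed SHA-2 declarations are redundant once BoringSSL is the only backend; <openssl/sha.h> exposes the same primitives. A sketch covering the one-shot and streaming forms:

    #include <openssl/sha.h>
    #include <stddef.h>
    #include <stdint.h>

    void Sha256Demo(const uint8_t* data, size_t len) {
      uint8_t digest[SHA256_DIGEST_LENGTH];
      SHA256(data, len, digest);  // One-shot, like SHA256_HashBuf().

      SHA256_CTX ctx;  // Streaming, like SHA256_Begin/Update/End.
      SHA256_Init(&ctx);
      SHA256_Update(&ctx, data, len);
      SHA256_Final(digest, &ctx);
    }
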
diff --git a/crypto/third_party/nss/chromium-blapit.h b/crypto/third_party/nss/chromium-blapit.h
deleted file mode 100644
index 938547a2c0..0000000000
--- a/crypto/third_party/nss/chromium-blapit.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * blapit.h - public data structures for the crypto library
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- * Dr Vipul Gupta <vipul.gupta@sun.com> and
- * Douglas Stebila <douglas@stebila.ca>, Sun Microsystems Laboratories
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-/* $Id: blapit.h,v 1.20 2007/02/28 19:47:37 rrelyea%redhat.com Exp $ */
-
-#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
-#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
-
-#include "crypto/third_party/nss/chromium-prtypes.h"
-
-/*
-** A status code. Statuses are used by procedures that return status
-** values. Again, the motivation is that a compiler can generate
-** warnings when return values are wrong. Correct testing of status codes:
-**
-** SECStatus rv;
-** rv = some_function (some_argument);
-** if (rv != SECSuccess)
-** do_an_error_thing();
-**
-*/
-typedef enum _SECStatus {
- SECWouldBlock = -2,
- SECFailure = -1,
- SECSuccess = 0
-} SECStatus;
-
-#define SHA256_LENGTH 32 /* bytes */
-#define SHA384_LENGTH 48 /* bytes */
-#define SHA512_LENGTH 64 /* bytes */
-#define HASH_LENGTH_MAX SHA512_LENGTH
-
-/*
- * Input block size for each hash algorithm.
- */
-
-#define SHA256_BLOCK_LENGTH 64 /* bytes */
-#define SHA384_BLOCK_LENGTH 128 /* bytes */
-#define SHA512_BLOCK_LENGTH 128 /* bytes */
-#define HASH_BLOCK_LENGTH_MAX SHA512_BLOCK_LENGTH
-
-/***************************************************************************
-** Opaque objects
-*/
-
-struct SHA256ContextStr ;
-struct SHA512ContextStr ;
-
-typedef struct SHA256ContextStr SHA256Context;
-typedef struct SHA512ContextStr SHA512Context;
-/* SHA384Context is really a SHA512ContextStr. This is not a mistake. */
-typedef struct SHA512ContextStr SHA384Context;
-
-#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_ */
diff --git a/crypto/third_party/nss/chromium-nss.h b/crypto/third_party/nss/chromium-nss.h
deleted file mode 100644
index 437e6bd5cd..0000000000
--- a/crypto/third_party/nss/chromium-nss.h
+++ /dev/null
@@ -1,79 +0,0 @@
- /* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
-#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
-
-// This file contains some functions we borrowed from NSS.
-
-#include <prtypes.h>
-#include <hasht.h>
-#include <keyhi.h>
-#include <secmod.h>
-
-#include "crypto/crypto_export.h"
-
-extern "C" SECStatus emsa_pss_verify(const unsigned char *mHash,
- const unsigned char *em,
- unsigned int emLen,
- HASH_HashType hashAlg,
- HASH_HashType maskHashAlg,
- unsigned int sLen);
-
-// Like PK11_ImportEncryptedPrivateKeyInfo, but hardcoded for EC, and returns
-// the SECKEYPrivateKey.
-// See https://bugzilla.mozilla.org/show_bug.cgi?id=211546
-// When we use NSS 3.13.2 or later,
-// PK11_ImportEncryptedPrivateKeyInfoAndReturnKey can be used instead.
-SECStatus ImportEncryptedECPrivateKeyInfoAndReturnKey(
- PK11SlotInfo* slot,
- SECKEYEncryptedPrivateKeyInfo* epki,
- SECItem* password,
- SECItem* nickname,
- SECItem* public_value,
- PRBool permanent,
- PRBool sensitive,
- SECKEYPrivateKey** private_key,
- void* wincx);
-
-// Like SEC_DerSignData.
-CRYPTO_EXPORT SECStatus DerSignData(PLArenaPool *arena,
- SECItem *result,
- SECItem *input,
- SECKEYPrivateKey *key,
- SECOidTag algo_id);
-
-#endif // CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
diff --git a/crypto/third_party/nss/chromium-prtypes.h b/crypto/third_party/nss/chromium-prtypes.h
deleted file mode 100644
index d5ea8a9d20..0000000000
--- a/crypto/third_party/nss/chromium-prtypes.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 2002
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-/* Emulates the real prtypes.h. Defines the types and macros that sha512.cc
- * needs. */
-
-#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
-#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
-
-#include <limits.h>
-#include <stdint.h>
-
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_LITTLE_ENDIAN)
-#define IS_LITTLE_ENDIAN 1
-#else
-#define IS_BIG_ENDIAN 1
-#endif
-
-/*
- * The C language requires that 'long' be at least 32 bits. 2147483647 is the
- * largest signed 32-bit integer.
- */
-#if LONG_MAX > 2147483647L
-#define PR_BYTES_PER_LONG 8
-#else
-#define PR_BYTES_PER_LONG 4
-#endif
-
-#define HAVE_LONG_LONG
-
-#if defined(__linux__)
-#define LINUX
-#endif
-
-typedef uint8_t PRUint8;
-typedef uint32_t PRUint32;
-
-typedef int PRBool;
-
-#define PR_MIN(x,y) ((x)<(y)?(x):(y))
-
-#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_ */
diff --git a/crypto/third_party/nss/chromium-sha256.h b/crypto/third_party/nss/chromium-sha256.h
deleted file mode 100644
index 52815ca7f1..0000000000
--- a/crypto/third_party/nss/chromium-sha256.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 2002
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
-#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
-
-#include "crypto/third_party/nss/chromium-prtypes.h"
-
-struct SHA256ContextStr {
- union {
- PRUint32 w[64]; /* message schedule, input buffer, plus 48 words */
- PRUint8 b[256];
- } u;
- PRUint32 h[8]; /* 8 state variables */
- PRUint32 sizeHi,sizeLo; /* 64-bit count of hashed bytes. */
-};
-
-#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_ */
diff --git a/crypto/third_party/nss/rsawrapr.c b/crypto/third_party/nss/rsawrapr.c
deleted file mode 100644
index 73e498f937..0000000000
--- a/crypto/third_party/nss/rsawrapr.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * PKCS#1 encoding and decoding functions.
- * This file is believed to contain no code licensed from other parties.
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "seccomon.h"
-#include "secerr.h"
-#include "sechash.h"
-
-/* Needed for RSA-PSS functions */
-static const unsigned char eightZeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-/*
- * Mask generation function MGF1 as defined in PKCS #1 v2.1 / RFC 3447.
- */
-static SECStatus
-MGF1(HASH_HashType hashAlg, unsigned char *mask, unsigned int maskLen,
- const unsigned char *mgfSeed, unsigned int mgfSeedLen)
-{
- unsigned int digestLen;
- PRUint32 counter, rounds;
- unsigned char *tempHash, *temp;
- const SECHashObject *hash;
- void *hashContext;
- unsigned char C[4];
-
- hash = HASH_GetHashObject(hashAlg);
- if (hash == NULL)
- return SECFailure;
-
- hashContext = (*hash->create)();
- rounds = (maskLen + hash->length - 1) / hash->length;
- for (counter = 0; counter < rounds; counter++) {
- C[0] = (unsigned char)((counter >> 24) & 0xff);
- C[1] = (unsigned char)((counter >> 16) & 0xff);
- C[2] = (unsigned char)((counter >> 8) & 0xff);
- C[3] = (unsigned char)(counter & 0xff);
-
- /* This could be optimized when the clone functions in
- * rawhash.c are implemented. */
- (*hash->begin)(hashContext);
- (*hash->update)(hashContext, mgfSeed, mgfSeedLen);
- (*hash->update)(hashContext, C, sizeof C);
-
- tempHash = mask + counter * hash->length;
- if (counter != (rounds-1)) {
- (*hash->end)(hashContext, tempHash, &digestLen, hash->length);
- } else { /* we're in the last round and need to cut the hash */
- temp = (unsigned char *)PORT_Alloc(hash->length);
- (*hash->end)(hashContext, temp, &digestLen, hash->length);
- PORT_Memcpy(tempHash, temp, maskLen - counter * hash->length);
- PORT_Free(temp);
- }
- }
- (*hash->destroy)(hashContext, PR_TRUE);
-
- return SECSuccess;
-}
-
-/*
- * Verify a RSA-PSS signature.
- * Described in RFC 3447, section 9.1.2.
- * We use mHash instead of M as input.
- * emBits from the RFC is just modBits - 1, see section 8.1.2.
- * We only support MGF1 as the MGF.
- *
- * NOTE: this code assumes modBits is a multiple of 8.
- */
-SECStatus
-emsa_pss_verify(const unsigned char *mHash,
- const unsigned char *em, unsigned int emLen,
- HASH_HashType hashAlg, HASH_HashType maskHashAlg,
- unsigned int sLen)
-{
- const SECHashObject *hash;
- void *hash_context;
- unsigned char *db;
- unsigned char *H_; /* H' from the RFC */
- unsigned int i, dbMaskLen;
- SECStatus rv;
-
- hash = HASH_GetHashObject(hashAlg);
- dbMaskLen = emLen - hash->length - 1;
-
- /* Step 3 + 4 + 6 */
- if ((emLen < (hash->length + sLen + 2)) ||
- (em[emLen - 1] != 0xbc) ||
- ((em[0] & 0x80) != 0)) {
- PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
- return SECFailure;
- }
-
- /* Step 7 */
- db = (unsigned char *)PORT_Alloc(dbMaskLen);
- if (db == NULL) {
- PORT_SetError(SEC_ERROR_NO_MEMORY);
- return SECFailure;
- }
- /* &em[dbMaskLen] points to H, used as mgfSeed */
- MGF1(maskHashAlg, db, dbMaskLen, &em[dbMaskLen], hash->length);
-
- /* Step 8 */
- for (i = 0; i < dbMaskLen; i++) {
- db[i] ^= em[i];
- }
-
- /* Step 9 */
- db[0] &= 0x7f;
-
- /* Step 10 */
- for (i = 0; i < (dbMaskLen - sLen - 1); i++) {
- if (db[i] != 0) {
- PORT_Free(db);
- PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
- return SECFailure;
- }
- }
- if (db[dbMaskLen - sLen - 1] != 0x01) {
- PORT_Free(db);
- PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
- return SECFailure;
- }
-
- /* Step 12 + 13 */
- H_ = (unsigned char *)PORT_Alloc(hash->length);
- if (H_ == NULL) {
- PORT_Free(db);
- PORT_SetError(SEC_ERROR_NO_MEMORY);
- return SECFailure;
- }
- hash_context = (*hash->create)();
- if (hash_context == NULL) {
- PORT_Free(db);
- PORT_Free(H_);
- PORT_SetError(SEC_ERROR_NO_MEMORY);
- return SECFailure;
- }
- (*hash->begin)(hash_context);
- (*hash->update)(hash_context, eightZeros, 8);
- (*hash->update)(hash_context, mHash, hash->length);
- (*hash->update)(hash_context, &db[dbMaskLen - sLen], sLen);
- (*hash->end)(hash_context, H_, &i, hash->length);
- (*hash->destroy)(hash_context, PR_TRUE);
-
- PORT_Free(db);
-
- /* Step 14 */
- if (PORT_Memcmp(H_, &em[dbMaskLen], hash->length) != 0) {
- PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
- rv = SECFailure;
- } else {
- rv = SECSuccess;
- }
-
- PORT_Free(H_);
- return rv;
-}
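For context, emsa_pss_verify() above expects the caller to have already hashed the message and applied the RSA public-key operation to the signature. A hedged sketch of a typical call site (VerifyPssSha256 is illustrative, not from this tree; it assumes the NSS HASH_HashBuf() API and a salt length equal to the hash length):

    SECStatus VerifyPssSha256(const unsigned char* msg, unsigned int msgLen,
                              const unsigned char* em, unsigned int emLen) {
      unsigned char mHash[32];  /* SHA-256 digest length */
      /* mHash = Hash(M), per RFC 3447 section 9.1.2. */
      if (HASH_HashBuf(HASH_AlgSHA256, mHash, msg, msgLen) != SECSuccess)
        return SECFailure;
      /* Same hash for the MGF; sLen equal to the hash length is the
         common RSA-PSS choice. */
      return emsa_pss_verify(mHash, em, emLen,
                             HASH_AlgSHA256, HASH_AlgSHA256, /*sLen=*/32);
    }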
diff --git a/crypto/third_party/nss/sha512.cc b/crypto/third_party/nss/sha512.cc
deleted file mode 100644
index 78950cb51f..0000000000
--- a/crypto/third_party/nss/sha512.cc
+++ /dev/null
@@ -1,1390 +0,0 @@
-/*
- * sha512.c - implementation of SHA256, SHA384 and SHA512
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 2002
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-/* $Id: sha512.c,v 1.9 2006/10/13 16:54:04 wtchang%redhat.com Exp $ */
-
-// Prevent manual unrolling in the sha256 code, which reduces the binary code
-// size from ~10k to ~1k. The performance should be reasonable for our use.
-#define NOUNROLL256 1
-
-#include "crypto/third_party/nss/chromium-prtypes.h" /* for PRUintXX */
-#if defined(_X86_) || defined(SHA_NO_LONG_LONG)
-#define NOUNROLL512 1
-#undef HAVE_LONG_LONG
-#endif
-#include "crypto/third_party/nss/chromium-blapi.h"
-#include "crypto/third_party/nss/chromium-sha256.h" /* for struct SHA256ContextStr */
-
-#include <stdlib.h>
-#include <string.h>
-#define PORT_New(type) static_cast<type*>(malloc(sizeof(type)))
-#define PORT_ZFree(ptr, len) do { memset(ptr, 0, len); free(ptr); } while (0)
-#define PORT_Strlen(s) static_cast<unsigned int>(strlen(s))
-#define PORT_Memcpy memcpy
-
-/* ============= Common constants and defines ======================= */
-
-#define W ctx->u.w
-#define B ctx->u.b
-#define H ctx->h
-
-#define SHR(x,n) (x >> n)
-#define SHL(x,n) (x << n)
-#define Ch(x,y,z) ((x & y) ^ (~x & z))
-#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z))
-
-/* Padding used with all flavors of SHA */
-static const PRUint8 pad[240] = {
-0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
- /* compiler will fill the rest in with zeros */
-};
-
-/* ============= SHA256 implementation ================================== */
-
-/* SHA-256 constants, K256. */
-static const PRUint32 K256[64] = {
- 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
- 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
- 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
- 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
- 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
- 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
- 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
- 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
- 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
- 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
- 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
- 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
- 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
- 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
- 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
- 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
-};
-
-/* SHA-256 initial hash values */
-static const PRUint32 H256[8] = {
- 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
- 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
-};
-
-#if defined(_MSC_VER) && defined(_X86_)
-#ifndef FORCEINLINE
-#if (_MSC_VER >= 1200)
-#define FORCEINLINE __forceinline
-#else
-#define FORCEINLINE __inline
-#endif
-#endif
-#define FASTCALL __fastcall
-
-static FORCEINLINE PRUint32 FASTCALL
-swap4b(PRUint32 dwd)
-{
- __asm {
- mov eax,dwd
- bswap eax
- }
-}
-
-#define SHA_HTONL(x) swap4b(x)
-#define BYTESWAP4(x) x = SHA_HTONL(x)
-
-#elif defined(LINUX) && defined(_X86_)
-#undef __OPTIMIZE__
-#define __OPTIMIZE__ 1
-#undef __pentium__
-#define __pentium__ 1
-#include <byteswap.h>
-#define SHA_HTONL(x) bswap_32(x)
-#define BYTESWAP4(x) x = SHA_HTONL(x)
-
-#else /* neither windows nor Linux PC */
-#define SWAP4MASK 0x00FF00FF
-#define SHA_HTONL(x) (t1 = (x), t1 = (t1 << 16) | (t1 >> 16), \
- ((t1 & SWAP4MASK) << 8) | ((t1 >> 8) & SWAP4MASK))
-#define BYTESWAP4(x) x = SHA_HTONL(x)
-#endif
-
-#if defined(_MSC_VER) && defined(_X86_)
-#pragma intrinsic (_lrotr, _lrotl)
-#define ROTR32(x,n) _lrotr(x,n)
-#define ROTL32(x,n) _lrotl(x,n)
-#else
-#define ROTR32(x,n) ((x >> n) | (x << ((8 * sizeof x) - n)))
-#define ROTL32(x,n) ((x << n) | (x >> ((8 * sizeof x) - n)))
-#endif
-
-/* Capital Sigma and lower case sigma functions */
-#define S0(x) (ROTR32(x, 2) ^ ROTR32(x,13) ^ ROTR32(x,22))
-#define S1(x) (ROTR32(x, 6) ^ ROTR32(x,11) ^ ROTR32(x,25))
-#define s0(x) (t1 = x, ROTR32(t1, 7) ^ ROTR32(t1,18) ^ SHR(t1, 3))
-#define s1(x) (t2 = x, ROTR32(t2,17) ^ ROTR32(t2,19) ^ SHR(t2,10))
-
-SHA256Context *
-SHA256_NewContext(void)
-{
- SHA256Context *ctx = PORT_New(SHA256Context);
- return ctx;
-}
-
-void
-SHA256_DestroyContext(SHA256Context *ctx, PRBool freeit)
-{
- if (freeit) {
- PORT_ZFree(ctx, sizeof *ctx);
- }
-}
-
-void
-SHA256_Begin(SHA256Context *ctx)
-{
- memset(ctx, 0, sizeof *ctx);
- memcpy(H, H256, sizeof H256);
-}
-
-static void
-SHA256_Compress(SHA256Context *ctx)
-{
- {
- register PRUint32 t1, t2;
-
-#if defined(IS_LITTLE_ENDIAN)
- BYTESWAP4(W[0]);
- BYTESWAP4(W[1]);
- BYTESWAP4(W[2]);
- BYTESWAP4(W[3]);
- BYTESWAP4(W[4]);
- BYTESWAP4(W[5]);
- BYTESWAP4(W[6]);
- BYTESWAP4(W[7]);
- BYTESWAP4(W[8]);
- BYTESWAP4(W[9]);
- BYTESWAP4(W[10]);
- BYTESWAP4(W[11]);
- BYTESWAP4(W[12]);
- BYTESWAP4(W[13]);
- BYTESWAP4(W[14]);
- BYTESWAP4(W[15]);
-#endif
-
-#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
-
- /* prepare the "message schedule" */
-#ifdef NOUNROLL256
- {
- int t;
- for (t = 16; t < 64; ++t) {
- INITW(t);
- }
- }
-#else
- INITW(16);
- INITW(17);
- INITW(18);
- INITW(19);
-
- INITW(20);
- INITW(21);
- INITW(22);
- INITW(23);
- INITW(24);
- INITW(25);
- INITW(26);
- INITW(27);
- INITW(28);
- INITW(29);
-
- INITW(30);
- INITW(31);
- INITW(32);
- INITW(33);
- INITW(34);
- INITW(35);
- INITW(36);
- INITW(37);
- INITW(38);
- INITW(39);
-
- INITW(40);
- INITW(41);
- INITW(42);
- INITW(43);
- INITW(44);
- INITW(45);
- INITW(46);
- INITW(47);
- INITW(48);
- INITW(49);
-
- INITW(50);
- INITW(51);
- INITW(52);
- INITW(53);
- INITW(54);
- INITW(55);
- INITW(56);
- INITW(57);
- INITW(58);
- INITW(59);
-
- INITW(60);
- INITW(61);
- INITW(62);
- INITW(63);
-
-#endif
-#undef INITW
- }
- {
- PRUint32 a, b, c, d, e, f, g, h;
-
- a = H[0];
- b = H[1];
- c = H[2];
- d = H[3];
- e = H[4];
- f = H[5];
- g = H[6];
- h = H[7];
-
-#define ROUND(n,a,b,c,d,e,f,g,h) \
- h += S1(e) + Ch(e,f,g) + K256[n] + W[n]; \
- d += h; \
- h += S0(a) + Maj(a,b,c);
-
-#ifdef NOUNROLL256
- {
- int t;
- for (t = 0; t < 64; t+= 8) {
- ROUND(t+0,a,b,c,d,e,f,g,h)
- ROUND(t+1,h,a,b,c,d,e,f,g)
- ROUND(t+2,g,h,a,b,c,d,e,f)
- ROUND(t+3,f,g,h,a,b,c,d,e)
- ROUND(t+4,e,f,g,h,a,b,c,d)
- ROUND(t+5,d,e,f,g,h,a,b,c)
- ROUND(t+6,c,d,e,f,g,h,a,b)
- ROUND(t+7,b,c,d,e,f,g,h,a)
- }
- }
-#else
- ROUND( 0,a,b,c,d,e,f,g,h)
- ROUND( 1,h,a,b,c,d,e,f,g)
- ROUND( 2,g,h,a,b,c,d,e,f)
- ROUND( 3,f,g,h,a,b,c,d,e)
- ROUND( 4,e,f,g,h,a,b,c,d)
- ROUND( 5,d,e,f,g,h,a,b,c)
- ROUND( 6,c,d,e,f,g,h,a,b)
- ROUND( 7,b,c,d,e,f,g,h,a)
-
- ROUND( 8,a,b,c,d,e,f,g,h)
- ROUND( 9,h,a,b,c,d,e,f,g)
- ROUND(10,g,h,a,b,c,d,e,f)
- ROUND(11,f,g,h,a,b,c,d,e)
- ROUND(12,e,f,g,h,a,b,c,d)
- ROUND(13,d,e,f,g,h,a,b,c)
- ROUND(14,c,d,e,f,g,h,a,b)
- ROUND(15,b,c,d,e,f,g,h,a)
-
- ROUND(16,a,b,c,d,e,f,g,h)
- ROUND(17,h,a,b,c,d,e,f,g)
- ROUND(18,g,h,a,b,c,d,e,f)
- ROUND(19,f,g,h,a,b,c,d,e)
- ROUND(20,e,f,g,h,a,b,c,d)
- ROUND(21,d,e,f,g,h,a,b,c)
- ROUND(22,c,d,e,f,g,h,a,b)
- ROUND(23,b,c,d,e,f,g,h,a)
-
- ROUND(24,a,b,c,d,e,f,g,h)
- ROUND(25,h,a,b,c,d,e,f,g)
- ROUND(26,g,h,a,b,c,d,e,f)
- ROUND(27,f,g,h,a,b,c,d,e)
- ROUND(28,e,f,g,h,a,b,c,d)
- ROUND(29,d,e,f,g,h,a,b,c)
- ROUND(30,c,d,e,f,g,h,a,b)
- ROUND(31,b,c,d,e,f,g,h,a)
-
- ROUND(32,a,b,c,d,e,f,g,h)
- ROUND(33,h,a,b,c,d,e,f,g)
- ROUND(34,g,h,a,b,c,d,e,f)
- ROUND(35,f,g,h,a,b,c,d,e)
- ROUND(36,e,f,g,h,a,b,c,d)
- ROUND(37,d,e,f,g,h,a,b,c)
- ROUND(38,c,d,e,f,g,h,a,b)
- ROUND(39,b,c,d,e,f,g,h,a)
-
- ROUND(40,a,b,c,d,e,f,g,h)
- ROUND(41,h,a,b,c,d,e,f,g)
- ROUND(42,g,h,a,b,c,d,e,f)
- ROUND(43,f,g,h,a,b,c,d,e)
- ROUND(44,e,f,g,h,a,b,c,d)
- ROUND(45,d,e,f,g,h,a,b,c)
- ROUND(46,c,d,e,f,g,h,a,b)
- ROUND(47,b,c,d,e,f,g,h,a)
-
- ROUND(48,a,b,c,d,e,f,g,h)
- ROUND(49,h,a,b,c,d,e,f,g)
- ROUND(50,g,h,a,b,c,d,e,f)
- ROUND(51,f,g,h,a,b,c,d,e)
- ROUND(52,e,f,g,h,a,b,c,d)
- ROUND(53,d,e,f,g,h,a,b,c)
- ROUND(54,c,d,e,f,g,h,a,b)
- ROUND(55,b,c,d,e,f,g,h,a)
-
- ROUND(56,a,b,c,d,e,f,g,h)
- ROUND(57,h,a,b,c,d,e,f,g)
- ROUND(58,g,h,a,b,c,d,e,f)
- ROUND(59,f,g,h,a,b,c,d,e)
- ROUND(60,e,f,g,h,a,b,c,d)
- ROUND(61,d,e,f,g,h,a,b,c)
- ROUND(62,c,d,e,f,g,h,a,b)
- ROUND(63,b,c,d,e,f,g,h,a)
-#endif
-
- H[0] += a;
- H[1] += b;
- H[2] += c;
- H[3] += d;
- H[4] += e;
- H[5] += f;
- H[6] += g;
- H[7] += h;
- }
-#undef ROUND
-}
-
-#undef s0
-#undef s1
-#undef S0
-#undef S1
-
-void
-SHA256_Update(SHA256Context *ctx, const unsigned char *input,
- unsigned int inputLen)
-{
- unsigned int inBuf = ctx->sizeLo & 0x3f;
- if (!inputLen)
- return;
-
- /* Add inputLen into the count of bytes processed, before processing */
- if ((ctx->sizeLo += inputLen) < inputLen)
- ctx->sizeHi++;
-
-    /* if data already in buffer, attempt to fill rest of buffer */
- if (inBuf) {
- unsigned int todo = SHA256_BLOCK_LENGTH - inBuf;
- if (inputLen < todo)
- todo = inputLen;
- memcpy(B + inBuf, input, todo);
- input += todo;
- inputLen -= todo;
- if (inBuf + todo == SHA256_BLOCK_LENGTH)
- SHA256_Compress(ctx);
- }
-
- /* if enough data to fill one or more whole buffers, process them. */
- while (inputLen >= SHA256_BLOCK_LENGTH) {
- memcpy(B, input, SHA256_BLOCK_LENGTH);
- input += SHA256_BLOCK_LENGTH;
- inputLen -= SHA256_BLOCK_LENGTH;
- SHA256_Compress(ctx);
- }
- /* if data left over, fill it into buffer */
- if (inputLen)
- memcpy(B, input, inputLen);
-}
-
-void
-SHA256_End(SHA256Context *ctx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen)
-{
- unsigned int inBuf = ctx->sizeLo & 0x3f;
- unsigned int padLen = (inBuf < 56) ? (56 - inBuf) : (56 + 64 - inBuf);
- PRUint32 hi, lo;
-#ifdef SWAP4MASK
- PRUint32 t1;
-#endif
-
- hi = (ctx->sizeHi << 3) | (ctx->sizeLo >> 29);
- lo = (ctx->sizeLo << 3);
-
- SHA256_Update(ctx, pad, padLen);
-
-#if defined(IS_LITTLE_ENDIAN)
- W[14] = SHA_HTONL(hi);
- W[15] = SHA_HTONL(lo);
-#else
- W[14] = hi;
- W[15] = lo;
-#endif
- SHA256_Compress(ctx);
-
- /* now output the answer */
-#if defined(IS_LITTLE_ENDIAN)
- BYTESWAP4(H[0]);
- BYTESWAP4(H[1]);
- BYTESWAP4(H[2]);
- BYTESWAP4(H[3]);
- BYTESWAP4(H[4]);
- BYTESWAP4(H[5]);
- BYTESWAP4(H[6]);
- BYTESWAP4(H[7]);
-#endif
- padLen = PR_MIN(SHA256_LENGTH, maxDigestLen);
- memcpy(digest, H, padLen);
- if (digestLen)
- *digestLen = padLen;
-}
-
-void SHA256_Clone(SHA256Context* dest, SHA256Context* src)
-{
- memcpy(dest, src, sizeof *dest);
-}
-
-/* Comment out unused code, mostly the SHA384 and SHA512 implementations. */
-#if 0
-SECStatus
-SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length)
-{
- SHA256Context ctx;
- unsigned int outLen;
-
- SHA256_Begin(&ctx);
- SHA256_Update(&ctx, src, src_length);
- SHA256_End(&ctx, dest, &outLen, SHA256_LENGTH);
-
- return SECSuccess;
-}
-
-
-SECStatus
-SHA256_Hash(unsigned char *dest, const char *src)
-{
- return SHA256_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
-}
-
-
-void SHA256_TraceState(SHA256Context *ctx) { }
-
-unsigned int
-SHA256_FlattenSize(SHA256Context *ctx)
-{
- return sizeof *ctx;
-}
-
-SECStatus
-SHA256_Flatten(SHA256Context *ctx,unsigned char *space)
-{
- PORT_Memcpy(space, ctx, sizeof *ctx);
- return SECSuccess;
-}
-
-SHA256Context *
-SHA256_Resurrect(unsigned char *space, void *arg)
-{
- SHA256Context *ctx = SHA256_NewContext();
- if (ctx)
- PORT_Memcpy(ctx, space, sizeof *ctx);
- return ctx;
-}
-
-/* ======= SHA512 and SHA384 common constants and defines ================= */
-
-/* common #defines for SHA512 and SHA384 */
-#if defined(HAVE_LONG_LONG)
-#define ROTR64(x,n) ((x >> n) | (x << (64 - n)))
-#define ROTL64(x,n) ((x << n) | (x >> (64 - n)))
-
-#define S0(x) (ROTR64(x,28) ^ ROTR64(x,34) ^ ROTR64(x,39))
-#define S1(x) (ROTR64(x,14) ^ ROTR64(x,18) ^ ROTR64(x,41))
-#define s0(x) (t1 = x, ROTR64(t1, 1) ^ ROTR64(t1, 8) ^ SHR(t1,7))
-#define s1(x) (t2 = x, ROTR64(t2,19) ^ ROTR64(t2,61) ^ SHR(t2,6))
-
-#if PR_BYTES_PER_LONG == 8
-#define ULLC(hi,lo) 0x ## hi ## lo ## UL
-#elif defined(_MSC_VER)
-#define ULLC(hi,lo) 0x ## hi ## lo ## ui64
-#else
-#define ULLC(hi,lo) 0x ## hi ## lo ## ULL
-#endif
-
-#define SHA_MASK16 ULLC(0000FFFF,0000FFFF)
-#define SHA_MASK8 ULLC(00FF00FF,00FF00FF)
-#define SHA_HTONLL(x) (t1 = x, \
- t1 = ((t1 & SHA_MASK8 ) << 8) | ((t1 >> 8) & SHA_MASK8 ), \
- t1 = ((t1 & SHA_MASK16) << 16) | ((t1 >> 16) & SHA_MASK16), \
- (t1 >> 32) | (t1 << 32))
-#define BYTESWAP8(x) x = SHA_HTONLL(x)
-
-#else /* no long long */
-
-#if defined(IS_LITTLE_ENDIAN)
-#define ULLC(hi,lo) { 0x ## lo ## U, 0x ## hi ## U }
-#else
-#define ULLC(hi,lo) { 0x ## hi ## U, 0x ## lo ## U }
-#endif
-
-#define SHA_HTONLL(x) ( BYTESWAP4(x.lo), BYTESWAP4(x.hi), \
- x.hi ^= x.lo ^= x.hi ^= x.lo, x)
-#define BYTESWAP8(x) do { PRUint32 tmp; BYTESWAP4(x.lo); BYTESWAP4(x.hi); \
- tmp = x.lo; x.lo = x.hi; x.hi = tmp; } while (0)
-#endif
-
-/* SHA-384 and SHA-512 constants, K512. */
-static const PRUint64 K512[80] = {
-#if PR_BYTES_PER_LONG == 8
- 0x428a2f98d728ae22UL , 0x7137449123ef65cdUL ,
- 0xb5c0fbcfec4d3b2fUL , 0xe9b5dba58189dbbcUL ,
- 0x3956c25bf348b538UL , 0x59f111f1b605d019UL ,
- 0x923f82a4af194f9bUL , 0xab1c5ed5da6d8118UL ,
- 0xd807aa98a3030242UL , 0x12835b0145706fbeUL ,
- 0x243185be4ee4b28cUL , 0x550c7dc3d5ffb4e2UL ,
- 0x72be5d74f27b896fUL , 0x80deb1fe3b1696b1UL ,
- 0x9bdc06a725c71235UL , 0xc19bf174cf692694UL ,
- 0xe49b69c19ef14ad2UL , 0xefbe4786384f25e3UL ,
- 0x0fc19dc68b8cd5b5UL , 0x240ca1cc77ac9c65UL ,
- 0x2de92c6f592b0275UL , 0x4a7484aa6ea6e483UL ,
- 0x5cb0a9dcbd41fbd4UL , 0x76f988da831153b5UL ,
- 0x983e5152ee66dfabUL , 0xa831c66d2db43210UL ,
- 0xb00327c898fb213fUL , 0xbf597fc7beef0ee4UL ,
- 0xc6e00bf33da88fc2UL , 0xd5a79147930aa725UL ,
- 0x06ca6351e003826fUL , 0x142929670a0e6e70UL ,
- 0x27b70a8546d22ffcUL , 0x2e1b21385c26c926UL ,
- 0x4d2c6dfc5ac42aedUL , 0x53380d139d95b3dfUL ,
- 0x650a73548baf63deUL , 0x766a0abb3c77b2a8UL ,
- 0x81c2c92e47edaee6UL , 0x92722c851482353bUL ,
- 0xa2bfe8a14cf10364UL , 0xa81a664bbc423001UL ,
- 0xc24b8b70d0f89791UL , 0xc76c51a30654be30UL ,
- 0xd192e819d6ef5218UL , 0xd69906245565a910UL ,
- 0xf40e35855771202aUL , 0x106aa07032bbd1b8UL ,
- 0x19a4c116b8d2d0c8UL , 0x1e376c085141ab53UL ,
- 0x2748774cdf8eeb99UL , 0x34b0bcb5e19b48a8UL ,
- 0x391c0cb3c5c95a63UL , 0x4ed8aa4ae3418acbUL ,
- 0x5b9cca4f7763e373UL , 0x682e6ff3d6b2b8a3UL ,
- 0x748f82ee5defb2fcUL , 0x78a5636f43172f60UL ,
- 0x84c87814a1f0ab72UL , 0x8cc702081a6439ecUL ,
- 0x90befffa23631e28UL , 0xa4506cebde82bde9UL ,
- 0xbef9a3f7b2c67915UL , 0xc67178f2e372532bUL ,
- 0xca273eceea26619cUL , 0xd186b8c721c0c207UL ,
- 0xeada7dd6cde0eb1eUL , 0xf57d4f7fee6ed178UL ,
- 0x06f067aa72176fbaUL , 0x0a637dc5a2c898a6UL ,
- 0x113f9804bef90daeUL , 0x1b710b35131c471bUL ,
- 0x28db77f523047d84UL , 0x32caab7b40c72493UL ,
- 0x3c9ebe0a15c9bebcUL , 0x431d67c49c100d4cUL ,
- 0x4cc5d4becb3e42b6UL , 0x597f299cfc657e2aUL ,
- 0x5fcb6fab3ad6faecUL , 0x6c44198c4a475817UL
-#else
- ULLC(428a2f98,d728ae22), ULLC(71374491,23ef65cd),
- ULLC(b5c0fbcf,ec4d3b2f), ULLC(e9b5dba5,8189dbbc),
- ULLC(3956c25b,f348b538), ULLC(59f111f1,b605d019),
- ULLC(923f82a4,af194f9b), ULLC(ab1c5ed5,da6d8118),
- ULLC(d807aa98,a3030242), ULLC(12835b01,45706fbe),
- ULLC(243185be,4ee4b28c), ULLC(550c7dc3,d5ffb4e2),
- ULLC(72be5d74,f27b896f), ULLC(80deb1fe,3b1696b1),
- ULLC(9bdc06a7,25c71235), ULLC(c19bf174,cf692694),
- ULLC(e49b69c1,9ef14ad2), ULLC(efbe4786,384f25e3),
- ULLC(0fc19dc6,8b8cd5b5), ULLC(240ca1cc,77ac9c65),
- ULLC(2de92c6f,592b0275), ULLC(4a7484aa,6ea6e483),
- ULLC(5cb0a9dc,bd41fbd4), ULLC(76f988da,831153b5),
- ULLC(983e5152,ee66dfab), ULLC(a831c66d,2db43210),
- ULLC(b00327c8,98fb213f), ULLC(bf597fc7,beef0ee4),
- ULLC(c6e00bf3,3da88fc2), ULLC(d5a79147,930aa725),
- ULLC(06ca6351,e003826f), ULLC(14292967,0a0e6e70),
- ULLC(27b70a85,46d22ffc), ULLC(2e1b2138,5c26c926),
- ULLC(4d2c6dfc,5ac42aed), ULLC(53380d13,9d95b3df),
- ULLC(650a7354,8baf63de), ULLC(766a0abb,3c77b2a8),
- ULLC(81c2c92e,47edaee6), ULLC(92722c85,1482353b),
- ULLC(a2bfe8a1,4cf10364), ULLC(a81a664b,bc423001),
- ULLC(c24b8b70,d0f89791), ULLC(c76c51a3,0654be30),
- ULLC(d192e819,d6ef5218), ULLC(d6990624,5565a910),
- ULLC(f40e3585,5771202a), ULLC(106aa070,32bbd1b8),
- ULLC(19a4c116,b8d2d0c8), ULLC(1e376c08,5141ab53),
- ULLC(2748774c,df8eeb99), ULLC(34b0bcb5,e19b48a8),
- ULLC(391c0cb3,c5c95a63), ULLC(4ed8aa4a,e3418acb),
- ULLC(5b9cca4f,7763e373), ULLC(682e6ff3,d6b2b8a3),
- ULLC(748f82ee,5defb2fc), ULLC(78a5636f,43172f60),
- ULLC(84c87814,a1f0ab72), ULLC(8cc70208,1a6439ec),
- ULLC(90befffa,23631e28), ULLC(a4506ceb,de82bde9),
- ULLC(bef9a3f7,b2c67915), ULLC(c67178f2,e372532b),
- ULLC(ca273ece,ea26619c), ULLC(d186b8c7,21c0c207),
- ULLC(eada7dd6,cde0eb1e), ULLC(f57d4f7f,ee6ed178),
- ULLC(06f067aa,72176fba), ULLC(0a637dc5,a2c898a6),
- ULLC(113f9804,bef90dae), ULLC(1b710b35,131c471b),
- ULLC(28db77f5,23047d84), ULLC(32caab7b,40c72493),
- ULLC(3c9ebe0a,15c9bebc), ULLC(431d67c4,9c100d4c),
- ULLC(4cc5d4be,cb3e42b6), ULLC(597f299c,fc657e2a),
- ULLC(5fcb6fab,3ad6faec), ULLC(6c44198c,4a475817)
-#endif
-};
-
-struct SHA512ContextStr {
- union {
- PRUint64 w[80]; /* message schedule, input buffer, plus 64 words */
- PRUint32 l[160];
- PRUint8 b[640];
- } u;
- PRUint64 h[8]; /* 8 state variables */
-    PRUint64 sizeLo;  /* 64-bit count of hashed bytes. */
-};
-
-/* =========== SHA512 implementation ===================================== */
-
-/* SHA-512 initial hash values */
-static const PRUint64 H512[8] = {
-#if PR_BYTES_PER_LONG == 8
- 0x6a09e667f3bcc908UL , 0xbb67ae8584caa73bUL ,
- 0x3c6ef372fe94f82bUL , 0xa54ff53a5f1d36f1UL ,
- 0x510e527fade682d1UL , 0x9b05688c2b3e6c1fUL ,
- 0x1f83d9abfb41bd6bUL , 0x5be0cd19137e2179UL
-#else
- ULLC(6a09e667,f3bcc908), ULLC(bb67ae85,84caa73b),
- ULLC(3c6ef372,fe94f82b), ULLC(a54ff53a,5f1d36f1),
- ULLC(510e527f,ade682d1), ULLC(9b05688c,2b3e6c1f),
- ULLC(1f83d9ab,fb41bd6b), ULLC(5be0cd19,137e2179)
-#endif
-};
-
-
-SHA512Context *
-SHA512_NewContext(void)
-{
- SHA512Context *ctx = PORT_New(SHA512Context);
- return ctx;
-}
-
-void
-SHA512_DestroyContext(SHA512Context *ctx, PRBool freeit)
-{
- if (freeit) {
- PORT_ZFree(ctx, sizeof *ctx);
- }
-}
-
-void
-SHA512_Begin(SHA512Context *ctx)
-{
- memset(ctx, 0, sizeof *ctx);
- memcpy(H, H512, sizeof H512);
-}
-
-#if defined(SHA512_TRACE)
-#if defined(HAVE_LONG_LONG)
-#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %016lx, %s = %016lx\n", \
- n, #e, d, #a, h);
-#else
-#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %08x%08x, %s = %08x%08x\n", \
- n, #e, d.hi, d.lo, #a, h.hi, h.lo);
-#endif
-#else
-#define DUMP(n,a,d,e,h)
-#endif
-
-#if defined(HAVE_LONG_LONG)
-
-#define ADDTO(x,y) y += x
-
-#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
-
-#define ROUND(n,a,b,c,d,e,f,g,h) \
- h += S1(e) + Ch(e,f,g) + K512[n] + W[n]; \
- d += h; \
- h += S0(a) + Maj(a,b,c); \
- DUMP(n,a,d,e,h)
-
-#else /* use only 32-bit variables, and don't unroll loops */
-
-#undef NOUNROLL512
-#define NOUNROLL512 1
-
-#define ADDTO(x,y) y.lo += x.lo; y.hi += x.hi + (x.lo > y.lo)
-
-#define ROTR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
-#define ROTR64A(x,n,lo,hi) (x.lo << (64-n) | x.hi >> (n-32))
-#define SHR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
-
-/* Capital Sigma and lower case sigma functions */
-#define s0lo(x) (ROTR64a(x,1,lo,hi) ^ ROTR64a(x,8,lo,hi) ^ SHR64a(x,7,lo,hi))
-#define s0hi(x) (ROTR64a(x,1,hi,lo) ^ ROTR64a(x,8,hi,lo) ^ (x.hi >> 7))
-
-#define s1lo(x) (ROTR64a(x,19,lo,hi) ^ ROTR64A(x,61,lo,hi) ^ SHR64a(x,6,lo,hi))
-#define s1hi(x) (ROTR64a(x,19,hi,lo) ^ ROTR64A(x,61,hi,lo) ^ (x.hi >> 6))
-
-#define S0lo(x)(ROTR64a(x,28,lo,hi) ^ ROTR64A(x,34,lo,hi) ^ ROTR64A(x,39,lo,hi))
-#define S0hi(x)(ROTR64a(x,28,hi,lo) ^ ROTR64A(x,34,hi,lo) ^ ROTR64A(x,39,hi,lo))
-
-#define S1lo(x)(ROTR64a(x,14,lo,hi) ^ ROTR64a(x,18,lo,hi) ^ ROTR64A(x,41,lo,hi))
-#define S1hi(x)(ROTR64a(x,14,hi,lo) ^ ROTR64a(x,18,hi,lo) ^ ROTR64A(x,41,hi,lo))
-
-/* 32-bit versions of Ch and Maj */
-#define Chxx(x,y,z,lo) ((x.lo & y.lo) ^ (~x.lo & z.lo))
-#define Majx(x,y,z,lo) ((x.lo & y.lo) ^ (x.lo & z.lo) ^ (y.lo & z.lo))
-
-#define INITW(t) \
- do { \
- PRUint32 lo, tm; \
- PRUint32 cy = 0; \
- lo = s1lo(W[t-2]); \
- lo += (tm = W[t-7].lo); if (lo < tm) cy++; \
- lo += (tm = s0lo(W[t-15])); if (lo < tm) cy++; \
- lo += (tm = W[t-16].lo); if (lo < tm) cy++; \
- W[t].lo = lo; \
- W[t].hi = cy + s1hi(W[t-2]) + W[t-7].hi + s0hi(W[t-15]) + W[t-16].hi; \
- } while (0)
-
-#define ROUND(n,a,b,c,d,e,f,g,h) \
- { \
- PRUint32 lo, tm, cy; \
- lo = S1lo(e); \
- lo += (tm = Chxx(e,f,g,lo)); cy = (lo < tm); \
- lo += (tm = K512[n].lo); if (lo < tm) cy++; \
- lo += (tm = W[n].lo); if (lo < tm) cy++; \
- h.lo += lo; if (h.lo < lo) cy++; \
- h.hi += cy + S1hi(e) + Chxx(e,f,g,hi) + K512[n].hi + W[n].hi; \
- d.lo += h.lo; \
- d.hi += h.hi + (d.lo < h.lo); \
- lo = S0lo(a); \
- lo += (tm = Majx(a,b,c,lo)); cy = (lo < tm); \
- h.lo += lo; if (h.lo < lo) cy++; \
- h.hi += cy + S0hi(a) + Majx(a,b,c,hi); \
- DUMP(n,a,d,e,h) \
- }
-#endif
-
-static void
-SHA512_Compress(SHA512Context *ctx)
-{
-#if defined(IS_LITTLE_ENDIAN)
- {
-#if defined(HAVE_LONG_LONG)
- PRUint64 t1;
-#else
- PRUint32 t1;
-#endif
- BYTESWAP8(W[0]);
- BYTESWAP8(W[1]);
- BYTESWAP8(W[2]);
- BYTESWAP8(W[3]);
- BYTESWAP8(W[4]);
- BYTESWAP8(W[5]);
- BYTESWAP8(W[6]);
- BYTESWAP8(W[7]);
- BYTESWAP8(W[8]);
- BYTESWAP8(W[9]);
- BYTESWAP8(W[10]);
- BYTESWAP8(W[11]);
- BYTESWAP8(W[12]);
- BYTESWAP8(W[13]);
- BYTESWAP8(W[14]);
- BYTESWAP8(W[15]);
- }
-#endif
-
- {
- PRUint64 t1, t2;
-#ifdef NOUNROLL512
- {
- /* prepare the "message schedule" */
- int t;
- for (t = 16; t < 80; ++t) {
- INITW(t);
- }
- }
-#else
- INITW(16);
- INITW(17);
- INITW(18);
- INITW(19);
-
- INITW(20);
- INITW(21);
- INITW(22);
- INITW(23);
- INITW(24);
- INITW(25);
- INITW(26);
- INITW(27);
- INITW(28);
- INITW(29);
-
- INITW(30);
- INITW(31);
- INITW(32);
- INITW(33);
- INITW(34);
- INITW(35);
- INITW(36);
- INITW(37);
- INITW(38);
- INITW(39);
-
- INITW(40);
- INITW(41);
- INITW(42);
- INITW(43);
- INITW(44);
- INITW(45);
- INITW(46);
- INITW(47);
- INITW(48);
- INITW(49);
-
- INITW(50);
- INITW(51);
- INITW(52);
- INITW(53);
- INITW(54);
- INITW(55);
- INITW(56);
- INITW(57);
- INITW(58);
- INITW(59);
-
- INITW(60);
- INITW(61);
- INITW(62);
- INITW(63);
- INITW(64);
- INITW(65);
- INITW(66);
- INITW(67);
- INITW(68);
- INITW(69);
-
- INITW(70);
- INITW(71);
- INITW(72);
- INITW(73);
- INITW(74);
- INITW(75);
- INITW(76);
- INITW(77);
- INITW(78);
- INITW(79);
-#endif
- }
-#ifdef SHA512_TRACE
- {
- int i;
- for (i = 0; i < 80; ++i) {
-#ifdef HAVE_LONG_LONG
- printf("W[%2d] = %016lx\n", i, W[i]);
-#else
- printf("W[%2d] = %08x%08x\n", i, W[i].hi, W[i].lo);
-#endif
- }
- }
-#endif
- {
- PRUint64 a, b, c, d, e, f, g, h;
-
- a = H[0];
- b = H[1];
- c = H[2];
- d = H[3];
- e = H[4];
- f = H[5];
- g = H[6];
- h = H[7];
-
-#ifdef NOUNROLL512
- {
- int t;
- for (t = 0; t < 80; t+= 8) {
- ROUND(t+0,a,b,c,d,e,f,g,h)
- ROUND(t+1,h,a,b,c,d,e,f,g)
- ROUND(t+2,g,h,a,b,c,d,e,f)
- ROUND(t+3,f,g,h,a,b,c,d,e)
- ROUND(t+4,e,f,g,h,a,b,c,d)
- ROUND(t+5,d,e,f,g,h,a,b,c)
- ROUND(t+6,c,d,e,f,g,h,a,b)
- ROUND(t+7,b,c,d,e,f,g,h,a)
- }
- }
-#else
- ROUND( 0,a,b,c,d,e,f,g,h)
- ROUND( 1,h,a,b,c,d,e,f,g)
- ROUND( 2,g,h,a,b,c,d,e,f)
- ROUND( 3,f,g,h,a,b,c,d,e)
- ROUND( 4,e,f,g,h,a,b,c,d)
- ROUND( 5,d,e,f,g,h,a,b,c)
- ROUND( 6,c,d,e,f,g,h,a,b)
- ROUND( 7,b,c,d,e,f,g,h,a)
-
- ROUND( 8,a,b,c,d,e,f,g,h)
- ROUND( 9,h,a,b,c,d,e,f,g)
- ROUND(10,g,h,a,b,c,d,e,f)
- ROUND(11,f,g,h,a,b,c,d,e)
- ROUND(12,e,f,g,h,a,b,c,d)
- ROUND(13,d,e,f,g,h,a,b,c)
- ROUND(14,c,d,e,f,g,h,a,b)
- ROUND(15,b,c,d,e,f,g,h,a)
-
- ROUND(16,a,b,c,d,e,f,g,h)
- ROUND(17,h,a,b,c,d,e,f,g)
- ROUND(18,g,h,a,b,c,d,e,f)
- ROUND(19,f,g,h,a,b,c,d,e)
- ROUND(20,e,f,g,h,a,b,c,d)
- ROUND(21,d,e,f,g,h,a,b,c)
- ROUND(22,c,d,e,f,g,h,a,b)
- ROUND(23,b,c,d,e,f,g,h,a)
-
- ROUND(24,a,b,c,d,e,f,g,h)
- ROUND(25,h,a,b,c,d,e,f,g)
- ROUND(26,g,h,a,b,c,d,e,f)
- ROUND(27,f,g,h,a,b,c,d,e)
- ROUND(28,e,f,g,h,a,b,c,d)
- ROUND(29,d,e,f,g,h,a,b,c)
- ROUND(30,c,d,e,f,g,h,a,b)
- ROUND(31,b,c,d,e,f,g,h,a)
-
- ROUND(32,a,b,c,d,e,f,g,h)
- ROUND(33,h,a,b,c,d,e,f,g)
- ROUND(34,g,h,a,b,c,d,e,f)
- ROUND(35,f,g,h,a,b,c,d,e)
- ROUND(36,e,f,g,h,a,b,c,d)
- ROUND(37,d,e,f,g,h,a,b,c)
- ROUND(38,c,d,e,f,g,h,a,b)
- ROUND(39,b,c,d,e,f,g,h,a)
-
- ROUND(40,a,b,c,d,e,f,g,h)
- ROUND(41,h,a,b,c,d,e,f,g)
- ROUND(42,g,h,a,b,c,d,e,f)
- ROUND(43,f,g,h,a,b,c,d,e)
- ROUND(44,e,f,g,h,a,b,c,d)
- ROUND(45,d,e,f,g,h,a,b,c)
- ROUND(46,c,d,e,f,g,h,a,b)
- ROUND(47,b,c,d,e,f,g,h,a)
-
- ROUND(48,a,b,c,d,e,f,g,h)
- ROUND(49,h,a,b,c,d,e,f,g)
- ROUND(50,g,h,a,b,c,d,e,f)
- ROUND(51,f,g,h,a,b,c,d,e)
- ROUND(52,e,f,g,h,a,b,c,d)
- ROUND(53,d,e,f,g,h,a,b,c)
- ROUND(54,c,d,e,f,g,h,a,b)
- ROUND(55,b,c,d,e,f,g,h,a)
-
- ROUND(56,a,b,c,d,e,f,g,h)
- ROUND(57,h,a,b,c,d,e,f,g)
- ROUND(58,g,h,a,b,c,d,e,f)
- ROUND(59,f,g,h,a,b,c,d,e)
- ROUND(60,e,f,g,h,a,b,c,d)
- ROUND(61,d,e,f,g,h,a,b,c)
- ROUND(62,c,d,e,f,g,h,a,b)
- ROUND(63,b,c,d,e,f,g,h,a)
-
- ROUND(64,a,b,c,d,e,f,g,h)
- ROUND(65,h,a,b,c,d,e,f,g)
- ROUND(66,g,h,a,b,c,d,e,f)
- ROUND(67,f,g,h,a,b,c,d,e)
- ROUND(68,e,f,g,h,a,b,c,d)
- ROUND(69,d,e,f,g,h,a,b,c)
- ROUND(70,c,d,e,f,g,h,a,b)
- ROUND(71,b,c,d,e,f,g,h,a)
-
- ROUND(72,a,b,c,d,e,f,g,h)
- ROUND(73,h,a,b,c,d,e,f,g)
- ROUND(74,g,h,a,b,c,d,e,f)
- ROUND(75,f,g,h,a,b,c,d,e)
- ROUND(76,e,f,g,h,a,b,c,d)
- ROUND(77,d,e,f,g,h,a,b,c)
- ROUND(78,c,d,e,f,g,h,a,b)
- ROUND(79,b,c,d,e,f,g,h,a)
-#endif
-
- ADDTO(a,H[0]);
- ADDTO(b,H[1]);
- ADDTO(c,H[2]);
- ADDTO(d,H[3]);
- ADDTO(e,H[4]);
- ADDTO(f,H[5]);
- ADDTO(g,H[6]);
- ADDTO(h,H[7]);
- }
-}
-
-void
-SHA512_Update(SHA512Context *ctx, const unsigned char *input,
- unsigned int inputLen)
-{
- unsigned int inBuf;
- if (!inputLen)
- return;
-
-#if defined(HAVE_LONG_LONG)
- inBuf = (unsigned int)ctx->sizeLo & 0x7f;
- /* Add inputLen into the count of bytes processed, before processing */
- ctx->sizeLo += inputLen;
-#else
- inBuf = (unsigned int)ctx->sizeLo.lo & 0x7f;
- ctx->sizeLo.lo += inputLen;
- if (ctx->sizeLo.lo < inputLen) ctx->sizeLo.hi++;
-#endif
-
-    /* if data already in buffer, attempt to fill rest of buffer */
- if (inBuf) {
- unsigned int todo = SHA512_BLOCK_LENGTH - inBuf;
- if (inputLen < todo)
- todo = inputLen;
- memcpy(B + inBuf, input, todo);
- input += todo;
- inputLen -= todo;
- if (inBuf + todo == SHA512_BLOCK_LENGTH)
- SHA512_Compress(ctx);
- }
-
- /* if enough data to fill one or more whole buffers, process them. */
- while (inputLen >= SHA512_BLOCK_LENGTH) {
- memcpy(B, input, SHA512_BLOCK_LENGTH);
- input += SHA512_BLOCK_LENGTH;
- inputLen -= SHA512_BLOCK_LENGTH;
- SHA512_Compress(ctx);
- }
- /* if data left over, fill it into buffer */
- if (inputLen)
- memcpy(B, input, inputLen);
-}
-
-void
-SHA512_End(SHA512Context *ctx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen)
-{
-#if defined(HAVE_LONG_LONG)
- unsigned int inBuf = (unsigned int)ctx->sizeLo & 0x7f;
- unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
- PRUint64 lo, t1;
- lo = (ctx->sizeLo << 3);
-#else
- unsigned int inBuf = (unsigned int)ctx->sizeLo.lo & 0x7f;
- unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
- PRUint64 lo = ctx->sizeLo;
- PRUint32 t1;
- lo.lo <<= 3;
-#endif
-
- SHA512_Update(ctx, pad, padLen);
-
-#if defined(HAVE_LONG_LONG)
- W[14] = 0;
-#else
- W[14].lo = 0;
- W[14].hi = 0;
-#endif
-
- W[15] = lo;
-#if defined(IS_LITTLE_ENDIAN)
- BYTESWAP8(W[15]);
-#endif
- SHA512_Compress(ctx);
-
- /* now output the answer */
-#if defined(IS_LITTLE_ENDIAN)
- BYTESWAP8(H[0]);
- BYTESWAP8(H[1]);
- BYTESWAP8(H[2]);
- BYTESWAP8(H[3]);
- BYTESWAP8(H[4]);
- BYTESWAP8(H[5]);
- BYTESWAP8(H[6]);
- BYTESWAP8(H[7]);
-#endif
- padLen = PR_MIN(SHA512_LENGTH, maxDigestLen);
- memcpy(digest, H, padLen);
- if (digestLen)
- *digestLen = padLen;
-}
-
-SECStatus
-SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length)
-{
- SHA512Context ctx;
- unsigned int outLen;
-
- SHA512_Begin(&ctx);
- SHA512_Update(&ctx, src, src_length);
- SHA512_End(&ctx, dest, &outLen, SHA512_LENGTH);
-
- return SECSuccess;
-}
-
-
-SECStatus
-SHA512_Hash(unsigned char *dest, const char *src)
-{
- return SHA512_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
-}
-
-
-void SHA512_TraceState(SHA512Context *ctx) { }
-
-unsigned int
-SHA512_FlattenSize(SHA512Context *ctx)
-{
- return sizeof *ctx;
-}
-
-SECStatus
-SHA512_Flatten(SHA512Context *ctx,unsigned char *space)
-{
- PORT_Memcpy(space, ctx, sizeof *ctx);
- return SECSuccess;
-}
-
-SHA512Context *
-SHA512_Resurrect(unsigned char *space, void *arg)
-{
- SHA512Context *ctx = SHA512_NewContext();
- if (ctx)
- PORT_Memcpy(ctx, space, sizeof *ctx);
- return ctx;
-}
-
-void SHA512_Clone(SHA512Context *dest, SHA512Context *src)
-{
- memcpy(dest, src, sizeof *dest);
-}
-
-/* ======================================================================= */
-/* SHA384 uses a SHA512Context as the real context.
-** The only differences between SHA384 and SHA512 are:
-** a) the initialization values for the context, and
-** b) the number of bytes of data produced as output.
-*/
-
-/* SHA-384 initial hash values */
-static const PRUint64 H384[8] = {
-#if PR_BYTES_PER_LONG == 8
- 0xcbbb9d5dc1059ed8UL , 0x629a292a367cd507UL ,
- 0x9159015a3070dd17UL , 0x152fecd8f70e5939UL ,
- 0x67332667ffc00b31UL , 0x8eb44a8768581511UL ,
- 0xdb0c2e0d64f98fa7UL , 0x47b5481dbefa4fa4UL
-#else
- ULLC(cbbb9d5d,c1059ed8), ULLC(629a292a,367cd507),
- ULLC(9159015a,3070dd17), ULLC(152fecd8,f70e5939),
- ULLC(67332667,ffc00b31), ULLC(8eb44a87,68581511),
- ULLC(db0c2e0d,64f98fa7), ULLC(47b5481d,befa4fa4)
-#endif
-};
-
-SHA384Context *
-SHA384_NewContext(void)
-{
- return SHA512_NewContext();
-}
-
-void
-SHA384_DestroyContext(SHA384Context *ctx, PRBool freeit)
-{
- SHA512_DestroyContext(ctx, freeit);
-}
-
-void
-SHA384_Begin(SHA384Context *ctx)
-{
- memset(ctx, 0, sizeof *ctx);
- memcpy(H, H384, sizeof H384);
-}
-
-void
-SHA384_Update(SHA384Context *ctx, const unsigned char *input,
- unsigned int inputLen)
-{
- SHA512_Update(ctx, input, inputLen);
-}
-
-void
-SHA384_End(SHA384Context *ctx, unsigned char *digest,
- unsigned int *digestLen, unsigned int maxDigestLen)
-{
-#define SHA_MIN(a,b) (a < b ? a : b)
- unsigned int maxLen = SHA_MIN(maxDigestLen, SHA384_LENGTH);
- SHA512_End(ctx, digest, digestLen, maxLen);
-}
-
-SECStatus
-SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
- unsigned int src_length)
-{
- SHA512Context ctx;
- unsigned int outLen;
-
- SHA384_Begin(&ctx);
- SHA512_Update(&ctx, src, src_length);
- SHA512_End(&ctx, dest, &outLen, SHA384_LENGTH);
-
- return SECSuccess;
-}
-
-SECStatus
-SHA384_Hash(unsigned char *dest, const char *src)
-{
- return SHA384_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
-}
-
-void SHA384_TraceState(SHA384Context *ctx) { }
-
-unsigned int
-SHA384_FlattenSize(SHA384Context *ctx)
-{
- return sizeof(SHA384Context);
-}
-
-SECStatus
-SHA384_Flatten(SHA384Context *ctx,unsigned char *space)
-{
- return SHA512_Flatten(ctx, space);
-}
-
-SHA384Context *
-SHA384_Resurrect(unsigned char *space, void *arg)
-{
- return SHA512_Resurrect(space, arg);
-}
-
-void SHA384_Clone(SHA384Context *dest, SHA384Context *src)
-{
- memcpy(dest, src, sizeof *dest);
-}
-#endif /* Comment out unused code. */
-
-/* ======================================================================= */
-#ifdef SELFTEST
-#include <stdio.h>
-
-static const char abc[] = { "abc" };
-static const char abcdbc[] = {
- "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
-};
-static const char abcdef[] = {
- "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
- "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
-};
-
-void
-dumpHash32(const unsigned char *buf, unsigned int bufLen)
-{
- unsigned int i;
- for (i = 0; i < bufLen; i += 4) {
- printf(" %02x%02x%02x%02x", buf[i], buf[i+1], buf[i+2], buf[i+3]);
- }
- printf("\n");
-}
-
-void test256(void)
-{
- unsigned char outBuf[SHA256_LENGTH];
-
- printf("SHA256, input = %s\n", abc);
- SHA256_Hash(outBuf, abc);
- dumpHash32(outBuf, sizeof outBuf);
-
- printf("SHA256, input = %s\n", abcdbc);
- SHA256_Hash(outBuf, abcdbc);
- dumpHash32(outBuf, sizeof outBuf);
-}
-
-void
-dumpHash64(const unsigned char *buf, unsigned int bufLen)
-{
- unsigned int i;
- for (i = 0; i < bufLen; i += 8) {
- if (i % 32 == 0)
- printf("\n");
- printf(" %02x%02x%02x%02x%02x%02x%02x%02x",
- buf[i ], buf[i+1], buf[i+2], buf[i+3],
- buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
- }
- printf("\n");
-}
-
-void test512(void)
-{
- unsigned char outBuf[SHA512_LENGTH];
-
- printf("SHA512, input = %s\n", abc);
- SHA512_Hash(outBuf, abc);
- dumpHash64(outBuf, sizeof outBuf);
-
- printf("SHA512, input = %s\n", abcdef);
- SHA512_Hash(outBuf, abcdef);
- dumpHash64(outBuf, sizeof outBuf);
-}
-
-void time512(void)
-{
- unsigned char outBuf[SHA512_LENGTH];
-
- SHA512_Hash(outBuf, abc);
- SHA512_Hash(outBuf, abcdef);
-}
-
-void test384(void)
-{
- unsigned char outBuf[SHA384_LENGTH];
-
- printf("SHA384, input = %s\n", abc);
- SHA384_Hash(outBuf, abc);
- dumpHash64(outBuf, sizeof outBuf);
-
- printf("SHA384, input = %s\n", abcdef);
- SHA384_Hash(outBuf, abcdef);
- dumpHash64(outBuf, sizeof outBuf);
-}
-
-int main (int argc, char *argv[], char *envp[])
-{
- int i = 1;
- if (argc > 1) {
- i = atoi(argv[1]);
- }
- if (i < 2) {
- test256();
- test512();
- test384();
- } else {
- while (i-- > 0) {
- time512();
- }
- printf("done\n");
- }
- return 0;
-}
-
-#endif
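The buffering logic in SHA256_Update() above accumulates partial 64-byte blocks between calls, so the streaming interface can be fed in arbitrary chunks. A minimal usage sketch of the functions defined in this file (assumes SHA256_LENGTH is 32, per chromium-blapi.h):

    unsigned char digest[32];
    unsigned int digestLen = 0;
    SHA256Context* ctx = SHA256_NewContext();
    if (ctx) {
      SHA256_Begin(ctx);
      /* Update() may be called any number of times with any chunk sizes. */
      SHA256_Update(ctx, reinterpret_cast<const unsigned char*>("ab"), 2);
      SHA256_Update(ctx, reinterpret_cast<const unsigned char*>("c"), 1);
      SHA256_End(ctx, digest, &digestLen, sizeof digest);
      SHA256_DestroyContext(ctx, PR_TRUE);
    }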
diff --git a/crypto/wincrypt_shim.h b/crypto/wincrypt_shim.h
index 48d4b5c5fa..dcfd4adbe5 100644
--- a/crypto/wincrypt_shim.h
+++ b/crypto/wincrypt_shim.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef NET_CRYPTO_WINCRYPT_SHIM_H_
-#define NET_CRYPTO_WINCRYPT_SHIM_H_
+#ifndef CRYPTO_WINCRYPT_SHIM_H_
+#define CRYPTO_WINCRYPT_SHIM_H_
// wincrypt.h defines macros which conflict with OpenSSL's types. This header
// includes wincrypt and undefines the OpenSSL macros which conflict. Any
@@ -22,4 +22,4 @@
#define WINCRYPT_X509_EXTENSIONS ((LPCSTR) 5)
#define WINCRYPT_X509_NAME ((LPCSTR) 7)
-#endif // NET_CRYPTO_WINCRYPT_SHIM_H_
+#endif // CRYPTO_WINCRYPT_SHIM_H_
diff --git a/dbus/BUILD.gn b/dbus/BUILD.gn
index 28efb93fe4..c0bd77d8db 100644
--- a/dbus/BUILD.gn
+++ b/dbus/BUILD.gn
@@ -17,8 +17,6 @@ component("dbus") {
"dbus_statistics.h",
"exported_object.cc",
"exported_object.h",
- "file_descriptor.cc",
- "file_descriptor.h",
"message.cc",
"message.h",
"object_manager.cc",
diff --git a/dbus/bus.cc b/dbus/bus.cc
index 57834d348a..b6a13d6b15 100644
--- a/dbus/bus.cc
+++ b/dbus/bus.cc
@@ -26,6 +26,11 @@ namespace dbus {
namespace {
+const char kDisconnectedSignal[] = "Disconnected";
+const char kDisconnectedMatchRule[] =
+ "type='signal', path='/org/freedesktop/DBus/Local',"
+ "interface='org.freedesktop.DBus.Local', member='Disconnected'";
+
// The NameOwnerChanged member in org.freedesktop.DBus
const char kNameOwnerChangedSignal[] = "NameOwnerChanged";
@@ -40,7 +45,7 @@ const char kServiceNameOwnerChangeMatchRule[] =
class Watch : public base::MessagePumpLibevent::Watcher {
public:
explicit Watch(DBusWatch* watch)
- : raw_watch_(watch) {
+ : raw_watch_(watch), file_descriptor_watcher_(FROM_HERE) {
dbus_watch_set_data(raw_watch_, this, NULL);
}
@@ -395,13 +400,6 @@ void Bus::RemoveObjectManagerInternalHelper(
callback.Run();
}
-void Bus::GetManagedObjects() {
- for (ObjectManagerTable::iterator iter = object_manager_table_.begin();
- iter != object_manager_table_.end(); ++iter) {
- iter->second->GetManagedObjects();
- }
-}
-
bool Bus::Connect() {
// dbus_bus_get_private() and dbus_bus_get() are blocking calls.
AssertOnDBusThread();
@@ -443,6 +441,12 @@ bool Bus::Connect() {
return false;
}
}
+ // We shouldn't exit on the disconnected signal.
+ dbus_connection_set_exit_on_disconnect(connection_, false);
+
+ // Watch Disconnected signal.
+ AddFilterFunction(Bus::OnConnectionDisconnectedFilter, this);
+ AddMatch(kDisconnectedMatchRule, error.get());
return true;
}
@@ -502,6 +506,8 @@ void Bus::ShutdownAndBlock() {
if (connection_) {
// Remove Disconnected watcher.
ScopedDBusError error;
+ RemoveFilterFunction(Bus::OnConnectionDisconnectedFilter, this);
+ RemoveMatch(kDisconnectedMatchRule, error.get());
if (connection_type_ == PRIVATE)
ClosePrivateConnection();
@@ -1180,6 +1186,20 @@ void Bus::OnDispatchStatusChangedThunk(DBusConnection* connection,
}
// static
+DBusHandlerResult Bus::OnConnectionDisconnectedFilter(
+ DBusConnection* /*connection*/,
+ DBusMessage* message,
+ void* /*data*/) {
+ if (dbus_message_is_signal(message,
+ DBUS_INTERFACE_LOCAL,
+ kDisconnectedSignal)) {
+ // Abort when the connection is lost.
+ LOG(FATAL) << "D-Bus connection was disconnected. Aborting.";
+ }
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
+// static
DBusHandlerResult Bus::OnServiceOwnerChangedFilter(
DBusConnection* /*connection*/,
DBusMessage* message,
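The bus.cc hunks above hinge on libdbus's filter mechanism: a filter sees every incoming message before the regular handlers, and the local Disconnected signal arrives on the org.freedesktop.DBus.Local interface. A hedged standalone sketch using only public libdbus calls:

    #include <dbus/dbus.h>

    static DBusHandlerResult OnDisconnected(DBusConnection* /*connection*/,
                                            DBusMessage* message,
                                            void* /*user_data*/) {
      if (dbus_message_is_signal(message, DBUS_INTERFACE_LOCAL,
                                 "Disconnected")) {
        // React to the lost connection here; dbus::Bus aborts via LOG(FATAL).
      }
      // Let other filters and registered handlers see the message too.
      return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
    }

    void WatchDisconnect(DBusConnection* connection) {
      // Keep libdbus from calling _exit() so the filter decides the policy.
      dbus_connection_set_exit_on_disconnect(connection, FALSE);
      dbus_connection_add_filter(connection, &OnDisconnected, nullptr, nullptr);
    }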
diff --git a/dbus/bus.h b/dbus/bus.h
index 7d3915909b..59a19720ec 100644
--- a/dbus/bus.h
+++ b/dbus/bus.h
@@ -28,10 +28,6 @@ class SingleThreadTaskRunner;
class TaskRunner;
}
-namespace tracked_objects {
-class Location;
-}
-
namespace dbus {
class ExportedObject;
@@ -360,12 +356,6 @@ class CHROME_DBUS_EXPORT Bus : public base::RefCountedThreadSafe<Bus> {
const ObjectPath& object_path,
const base::Closure& callback);
- // Instructs all registered object managers to retrieve their set of managed
- // objects from their respective remote objects. There is no need to call this
- // manually, this is called automatically by the D-Bus thread manager once
- // implementation classes are registered.
- virtual void GetManagedObjects();
-
// Shuts down the bus and blocks until it's done. More specifically, this
// function does the following:
//
diff --git a/dbus/dbus.gyp b/dbus/dbus.gyp
deleted file mode 100644
index 264383ee4b..0000000000
--- a/dbus/dbus.gyp
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'targets': [
- {
- 'target_name': 'dbus',
- 'type': '<(component)',
- 'dependencies': [
- '../base/base.gyp:base',
- '../build/linux/system.gyp:dbus',
- '../third_party/protobuf/protobuf.gyp:protobuf_lite',
- ],
- 'export_dependent_settings': [
- '../base/base.gyp:base',
- ],
- 'defines': [
- 'DBUS_IMPLEMENTATION',
- ],
- 'sources': [
- 'bus.cc',
- 'bus.h',
- 'dbus_export.h',
- 'dbus_statistics.cc',
- 'dbus_statistics.h',
- 'exported_object.cc',
- 'exported_object.h',
- 'file_descriptor.cc',
- 'file_descriptor.h',
- 'message.cc',
- 'message.h',
- 'object_manager.cc',
- 'object_manager.h',
- 'object_path.cc',
- 'object_path.h',
- 'object_proxy.cc',
- 'object_proxy.h',
- 'property.cc',
- 'property.h',
- 'scoped_dbus_error.cc',
- 'scoped_dbus_error.h',
- 'string_util.cc',
- 'string_util.h',
- 'util.cc',
- 'util.h',
- 'values_util.cc',
- 'values_util.h',
- ],
- },
- {
- # Protobuf compiler / generator test protocol buffer
- 'target_name': 'dbus_test_proto',
- 'type': 'static_library',
- 'sources': [ 'test_proto.proto' ],
- 'variables': {
- 'proto_out_dir': 'dbus',
- },
- 'includes': [ '../build/protoc.gypi' ],
- },
- {
- # This target contains mocks that can be used to write unit tests
- # without issuing actual D-Bus calls.
- 'target_name': 'dbus_test_support',
- 'type': 'static_library',
- 'dependencies': [
- '../build/linux/system.gyp:dbus',
- '../testing/gmock.gyp:gmock',
- 'dbus',
- ],
- 'sources': [
- 'mock_bus.cc',
- 'mock_bus.h',
- 'mock_exported_object.cc',
- 'mock_exported_object.h',
- 'mock_object_manager.cc',
- 'mock_object_manager.h',
- 'mock_object_proxy.cc',
- 'mock_object_proxy.h',
- ],
- 'include_dirs': [
- '..',
- ],
- },
- {
- 'target_name': 'dbus_unittests',
- 'type': 'executable',
- 'dependencies': [
- '../base/base.gyp:run_all_unittests',
- '../base/base.gyp:test_support_base',
- '../build/linux/system.gyp:dbus',
- '../testing/gmock.gyp:gmock',
- '../testing/gtest.gyp:gtest',
- 'dbus',
- 'dbus_test_proto',
- 'dbus_test_support',
- ],
- 'sources': [
- 'bus_unittest.cc',
- 'dbus_statistics_unittest.cc',
- 'end_to_end_async_unittest.cc',
- 'end_to_end_sync_unittest.cc',
- 'message_unittest.cc',
- 'mock_unittest.cc',
- 'object_manager_unittest.cc',
- 'object_proxy_unittest.cc',
- 'property_unittest.cc',
- 'signal_sender_verification_unittest.cc',
- 'string_util_unittest.cc',
- 'test_service.cc',
- 'test_service.h',
- 'util_unittest.cc',
- 'values_util_unittest.cc',
- ],
- 'include_dirs': [
- '..',
- ],
- },
- {
- 'target_name': 'dbus_test_server',
- 'type': 'executable',
- 'dependencies': [
- '../base/base.gyp:test_support_base',
- '../base/base.gyp:base',
- '../build/linux/system.gyp:dbus',
- 'dbus',
- ],
- 'sources': [
- 'test_server.cc',
- 'test_service.cc',
- 'test_service.h',
- ],
- 'include_dirs': [
- '..',
- ],
- },
- ],
-}
diff --git a/dbus/dbus_statistics.cc b/dbus/dbus_statistics.cc
index e1e0973d5c..2949c5032d 100644
--- a/dbus/dbus_statistics.cc
+++ b/dbus/dbus_statistics.cc
@@ -4,12 +4,11 @@
#include "dbus/dbus_statistics.h"
-#include <memory>
-#include <set>
+#include <map>
+#include <tuple>
#include "base/logging.h"
#include "base/macros.h"
-#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -18,43 +17,24 @@ namespace dbus {
namespace {
-// Used to store dbus statistics sorted alphabetically by service, interface,
-// then method (using std::string <).
-struct Stat {
- Stat(const std::string& service,
- const std::string& interface,
- const std::string& method)
- : service(service),
- interface(interface),
- method(method),
- sent_method_calls(0),
- received_signals(0),
- sent_blocking_method_calls(0) {
- }
+struct StatKey {
std::string service;
std::string interface;
std::string method;
- int sent_method_calls;
- int received_signals;
- int sent_blocking_method_calls;
-
- bool Compare(const Stat& other) const {
- if (service != other.service)
- return service < other.service;
- if (interface != other.interface)
- return interface < other.interface;
- return method < other.method;
- }
+};
- struct PtrCompare {
- bool operator()(Stat* lhs, Stat* rhs) const {
- DCHECK(lhs && rhs);
- return lhs->Compare(*rhs);
- }
- };
+bool operator<(const StatKey& lhs, const StatKey& rhs) {
+ return std::tie(lhs.service, lhs.interface, lhs.method) <
+ std::tie(rhs.service, rhs.interface, rhs.method);
+}
+
+struct StatValue {
+ int sent_method_calls = 0;
+ int received_signals = 0;
+ int sent_blocking_method_calls = 0;
};
-typedef std::set<Stat*, Stat::PtrCompare> StatSet;
+using StatMap = std::map<StatKey, StatValue>;
//------------------------------------------------------------------------------
// DBusStatistics
@@ -69,10 +49,9 @@ class DBusStatistics {
~DBusStatistics() {
DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
- STLDeleteContainerPointers(stats_.begin(), stats_.end());
}
- // Enum to specify which field in Stat to increment in AddStat
+ // Enum to specify which field in Stat to increment in AddStat.
enum StatType {
TYPE_SENT_METHOD_CALLS,
TYPE_RECEIVED_SIGNALS,
@@ -89,7 +68,7 @@ class DBusStatistics {
<< base::PlatformThread::CurrentId();
return;
}
- Stat* stat = GetStat(service, interface, method, true);
+ StatValue* stat = GetStats(service, interface, method, true);
DCHECK(stat);
if (type == TYPE_SENT_METHOD_CALLS)
++stat->sent_method_calls;
@@ -103,33 +82,35 @@ class DBusStatistics {
// Look up the Stat entry in |stats_|. If |add_stat| is true, add a new entry
// if one does not already exist.
- Stat* GetStat(const std::string& service,
- const std::string& interface,
- const std::string& method,
- bool add_stat) {
+ StatValue* GetStats(const std::string& service,
+ const std::string& interface,
+ const std::string& method,
+ bool add_stat) {
DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
- std::unique_ptr<Stat> stat(new Stat(service, interface, method));
- StatSet::iterator found = stats_.find(stat.get());
- if (found != stats_.end())
- return *found;
+
+ StatKey key = {service, interface, method};
+ auto it = stats_.find(key);
+ if (it != stats_.end())
+ return &(it->second);
+
if (!add_stat)
- return NULL;
- found = stats_.insert(stat.release()).first;
- return *found;
+ return nullptr;
+
+ return &(stats_[key]);
}
- StatSet& stats() { return stats_; }
+ StatMap& stats() { return stats_; }
base::Time start_time() { return start_time_; }
private:
- StatSet stats_;
+ StatMap stats_;
base::Time start_time_;
base::PlatformThreadId origin_thread_id_;
DISALLOW_COPY_AND_ASSIGN(DBusStatistics);
};
-DBusStatistics* g_dbus_statistics = NULL;
+DBusStatistics* g_dbus_statistics = nullptr;
} // namespace
@@ -145,7 +126,7 @@ void Initialize() {
void Shutdown() {
delete g_dbus_statistics;
- g_dbus_statistics = NULL;
+ g_dbus_statistics = nullptr;
}
void AddSentMethodCall(const std::string& service,
@@ -182,7 +163,7 @@ std::string GetAsString(ShowInString show, FormatString format) {
if (!g_dbus_statistics)
return "DBusStatistics not initialized.";
- const StatSet& stats = g_dbus_statistics->stats();
+ const StatMap& stats = g_dbus_statistics->stats();
if (stats.empty())
return "No DBus calls.";
@@ -193,19 +174,21 @@ std::string GetAsString(ShowInString show, FormatString format) {
std::string result;
int sent = 0, received = 0, sent_blocking = 0;
// Stats are stored in order by service, then interface, then method.
- for (StatSet::const_iterator iter = stats.begin(); iter != stats.end(); ) {
- StatSet::const_iterator cur_iter = iter;
- StatSet::const_iterator next_iter = ++iter;
- const Stat* stat = *cur_iter;
- sent += stat->sent_method_calls;
- received += stat->received_signals;
- sent_blocking += stat->sent_blocking_method_calls;
+ for (auto iter = stats.begin(); iter != stats.end();) {
+ auto cur_iter = iter;
+ auto next_iter = ++iter;
+ const StatKey& stat_key = cur_iter->first;
+ const StatValue& stat = cur_iter->second;
+ sent += stat.sent_method_calls;
+ received += stat.received_signals;
+ sent_blocking += stat.sent_blocking_method_calls;
// If this is not the last stat, and if the next stat matches the current
// stat, continue.
if (next_iter != stats.end() &&
- (*next_iter)->service == stat->service &&
- (show < SHOW_INTERFACE || (*next_iter)->interface == stat->interface) &&
- (show < SHOW_METHOD || (*next_iter)->method == stat->method))
+ next_iter->first.service == stat_key.service &&
+ (show < SHOW_INTERFACE ||
+ next_iter->first.interface == stat_key.interface) &&
+ (show < SHOW_METHOD || next_iter->first.method == stat_key.method))
continue;
if (!sent && !received && !sent_blocking)
@@ -214,12 +197,12 @@ std::string GetAsString(ShowInString show, FormatString format) {
// Add a line to the result and clear the counts.
std::string line;
if (show == SHOW_SERVICE) {
- line += stat->service;
+ line += stat_key.service;
} else {
// The interface usually includes the service so don't show both.
- line += stat->interface;
+ line += stat_key.interface;
if (show >= SHOW_METHOD)
- line += "." + stat->method;
+ line += "." + stat_key.method;
}
line += base::StringPrintf(":");
if (sent_blocking) {
@@ -269,7 +252,8 @@ bool GetCalls(const std::string& service,
int* blocking) {
if (!g_dbus_statistics)
return false;
- Stat* stat = g_dbus_statistics->GetStat(service, interface, method, false);
+ StatValue* stat =
+ g_dbus_statistics->GetStats(service, interface, method, false);
if (!stat)
return false;
*sent = stat->sent_method_calls;
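The dbus_statistics.cc refactor replaces pointer-keyed set ordering with a value-type map key whose operator< is built from std::tie, which compares the members lexicographically. A self-contained illustration of the pattern (sketch names only):

    #include <map>
    #include <string>
    #include <tuple>

    struct StatKeySketch {
      std::string service, interface, method;
    };

    bool operator<(const StatKeySketch& lhs, const StatKeySketch& rhs) {
      // Lexicographic order: service first, then interface, then method.
      return std::tie(lhs.service, lhs.interface, lhs.method) <
             std::tie(rhs.service, rhs.interface, rhs.method);
    }

    int main() {
      std::map<StatKeySketch, int> counts;
      // operator[] default-constructs the value (0), then we increment it.
      ++counts[{"org.example", "org.example.Iface", "Ping"}];
      return counts.size() == 1 ? 0 : 1;
    }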
diff --git a/dbus/exported_object.cc b/dbus/exported_object.cc
index b156308ace..ffc5eb391d 100644
--- a/dbus/exported_object.cc
+++ b/dbus/exported_object.cc
@@ -11,7 +11,7 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "dbus/bus.h"
@@ -186,8 +186,9 @@ bool ExportedObject::Register() {
return true;
}
-DBusHandlerResult ExportedObject::HandleMessage(DBusConnection*,
- DBusMessage* raw_message) {
+DBusHandlerResult ExportedObject::HandleMessage(
+ DBusConnection* /*connection*/,
+ DBusMessage* raw_message) {
bus_->AssertOnDBusThread();
DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
@@ -300,7 +301,8 @@ void ExportedObject::OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
base::TimeTicks::Now() - start_time);
}
-void ExportedObject::OnUnregistered(DBusConnection*) {}
+void ExportedObject::OnUnregistered(DBusConnection* /*connection*/) {
+}
DBusHandlerResult ExportedObject::HandleMessageThunk(
DBusConnection* connection,
diff --git a/dbus/file_descriptor.cc b/dbus/file_descriptor.cc
deleted file mode 100644
index b690881749..0000000000
--- a/dbus/file_descriptor.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/files/file.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/threading/worker_pool.h"
-#include "dbus/file_descriptor.h"
-
-using std::swap;
-
-namespace dbus {
-
-void CHROME_DBUS_EXPORT FileDescriptor::Deleter::operator()(
- FileDescriptor* fd) {
- base::WorkerPool::PostTask(
- FROM_HERE, base::Bind(&base::DeletePointer<FileDescriptor>, fd), false);
-}
-
-FileDescriptor::FileDescriptor(FileDescriptor&& other) : FileDescriptor() {
- Swap(&other);
-}
-
-FileDescriptor::~FileDescriptor() {
- if (owner_)
- base::File auto_closer(value_);
-}
-
-FileDescriptor& FileDescriptor::operator=(FileDescriptor&& other) {
- Swap(&other);
- return *this;
-}
-
-int FileDescriptor::value() const {
- CHECK(valid_);
- return value_;
-}
-
-int FileDescriptor::TakeValue() {
- CHECK(valid_); // NB: check first so owner_ is unchanged if this triggers
- owner_ = false;
- return value_;
-}
-
-void FileDescriptor::CheckValidity() {
- base::File file(value_);
- if (!file.IsValid()) {
- valid_ = false;
- return;
- }
-
- base::File::Info info;
- bool ok = file.GetInfo(&info);
- file.TakePlatformFile(); // Prevent |value_| from being closed by |file|.
- valid_ = (ok && !info.is_directory);
-}
-
-void FileDescriptor::Swap(FileDescriptor* other) {
- swap(value_, other->value_);
- swap(owner_, other->owner_);
- swap(valid_, other->valid_);
-}
-
-} // namespace dbus
diff --git a/dbus/file_descriptor.h b/dbus/file_descriptor.h
deleted file mode 100644
index f8e86777ea..0000000000
--- a/dbus/file_descriptor.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef DBUS_FILE_DESCRIPTOR_H_
-#define DBUS_FILE_DESCRIPTOR_H_
-
-#include <memory>
-
-#include "base/macros.h"
-#include "dbus/dbus_export.h"
-
-namespace dbus {
-
-// FileDescriptor is a type used to encapsulate D-Bus file descriptors
-// and to follow the RAII idiom appropriate for use with message operations
-// where the descriptor might be easily leaked. To guard against this the
-// descriptor is closed when an instance is destroyed if it is owned.
-// Ownership is asserted only when PutValue is used and TakeValue can be
-// used to take ownership.
-//
-// For example, in the following
-// FileDescriptor fd;
-// if (!reader->PopString(&name) ||
-// !reader->PopFileDescriptor(&fd) ||
-// !reader->PopUint32(&flags)) {
-// the descriptor in fd will be closed if the PopUint32 fails. But
-// writer.AppendFileDescriptor(dbus::FileDescriptor(1));
-// will not automatically close "1" because it is not owned.
-//
-// Descriptors must be validated before marshalling in a D-Bus message
-// or using them after unmarshalling. We disallow descriptors to a
-// directory to reduce the security risks. Splitting out validation
-// also allows the caller to do this work on the File thread to conform
-// with i/o restrictions.
-class CHROME_DBUS_EXPORT FileDescriptor {
- public:
- // This provides a simple way to pass around file descriptors since they must
- // be closed on a thread that is allowed to perform I/O.
- struct Deleter {
- void CHROME_DBUS_EXPORT operator()(FileDescriptor* fd);
- };
-
- // Permits initialization without a value for passing to
- // dbus::MessageReader::PopFileDescriptor to fill in and from int values.
- FileDescriptor() : value_(-1), owner_(false), valid_(false) {}
- explicit FileDescriptor(int value) : value_(value), owner_(false),
- valid_(false) {}
-
- FileDescriptor(FileDescriptor&& other);
-
- virtual ~FileDescriptor();
-
- FileDescriptor& operator=(FileDescriptor&& other);
-
- // Retrieves value as an int without affecting ownership.
- int value() const;
-
- // Retrieves whether or not the descriptor is ok to send/receive.
- int is_valid() const { return valid_; }
-
- // Sets the value and assign ownership.
- void PutValue(int value) {
- value_ = value;
- owner_ = true;
- valid_ = false;
- }
-
- // Takes the value and ownership.
- int TakeValue();
-
- // Checks (and records) validity of the file descriptor.
- // We disallow directories to avoid potential sandbox escapes.
- // Note this call must be made on a thread where file i/o is allowed.
- void CheckValidity();
-
- private:
- void Swap(FileDescriptor* other);
-
- int value_;
- bool owner_;
- bool valid_;
-
- DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
-};
-
-using ScopedFileDescriptor =
- std::unique_ptr<FileDescriptor, FileDescriptor::Deleter>;
-
-} // namespace dbus
-
-#endif // DBUS_FILE_DESCRIPTOR_H_
diff --git a/dbus/message.cc b/dbus/message.cc
index 4a84756c41..c8663f72ad 100644
--- a/dbus/message.cc
+++ b/dbus/message.cc
@@ -222,11 +222,11 @@ std::string Message::ToStringInternal(const std::string& indent,
case UNIX_FD: {
CHECK(IsDBusTypeUnixFdSupported());
- FileDescriptor file_descriptor;
+ base::ScopedFD file_descriptor;
if (!reader->PopFileDescriptor(&file_descriptor))
return kBrokenMessage;
output += indent + "fd#" +
- base::IntToString(file_descriptor.value()) + "\n";
+ base::IntToString(file_descriptor.get()) + "\n";
break;
}
default:
@@ -714,15 +714,9 @@ void MessageWriter::AppendVariantOfBasic(int dbus_type, const void* value) {
CloseContainer(&variant_writer);
}
-void MessageWriter::AppendFileDescriptor(const FileDescriptor& value) {
+void MessageWriter::AppendFileDescriptor(int value) {
CHECK(IsDBusTypeUnixFdSupported());
-
- if (!value.is_valid()) {
- // NB: sending a directory potentially enables sandbox escape
- LOG(FATAL) << "Attempt to pass invalid file descriptor";
- }
- int fd = value.value();
- AppendBasic(DBUS_TYPE_UNIX_FD, &fd);
+ AppendBasic(DBUS_TYPE_UNIX_FD, &value); // This duplicates the FD.
}
//
@@ -1016,7 +1010,7 @@ bool MessageReader::PopVariantOfBasic(int dbus_type, void* value) {
return variant_reader.PopBasic(dbus_type, value);
}
-bool MessageReader::PopFileDescriptor(FileDescriptor* value) {
+bool MessageReader::PopFileDescriptor(base::ScopedFD* value) {
CHECK(IsDBusTypeUnixFdSupported());
int fd = -1;
@@ -1024,8 +1018,7 @@ bool MessageReader::PopFileDescriptor(FileDescriptor* value) {
if (!success)
return false;
- value->PutValue(fd);
- // NB: the caller must check validity before using the value
+ *value = base::ScopedFD(fd);
return true;
}
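
With PopFileDescriptor() now filling in a base::ScopedFD, the descriptor
closes itself and the old CheckValidity()/TakeValue() protocol goes away. A
minimal caller-side sketch, assuming a dbus::Response* named |response| and
the usual includes:

```cpp
dbus::MessageReader reader(response);
base::ScopedFD fd;
if (reader.PopFileDescriptor(&fd) && fd.is_valid()) {
  // Borrow the descriptor via fd.get() for raw I/O; it is closed
  // automatically when |fd| goes out of scope.
}
```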
diff --git a/dbus/message.h b/dbus/message.h
index 0aa010ccde..256a8428c5 100644
--- a/dbus/message.h
+++ b/dbus/message.h
@@ -13,9 +13,9 @@
#include <string>
#include <vector>
+#include "base/files/scoped_file.h"
#include "base/macros.h"
#include "dbus/dbus_export.h"
-#include "dbus/file_descriptor.h"
#include "dbus/object_path.h"
namespace google {
@@ -285,7 +285,10 @@ class CHROME_DBUS_EXPORT MessageWriter {
void AppendDouble(double value);
void AppendString(const std::string& value);
void AppendObjectPath(const ObjectPath& value);
- void AppendFileDescriptor(const FileDescriptor& value);
+
+ // Appends a file descriptor to the message.
+ // The FD will be duplicated so you still have to close the original FD.
+ void AppendFileDescriptor(int value);
// Opens an array. The array contents can be added to the array with
// |sub_writer|. The client code must close the array with
@@ -398,7 +401,7 @@ class CHROME_DBUS_EXPORT MessageReader {
bool PopDouble(double* value);
bool PopString(std::string* value);
bool PopObjectPath(ObjectPath* value);
- bool PopFileDescriptor(FileDescriptor* value);
+ bool PopFileDescriptor(base::ScopedFD* value);
// Sets up the given message reader to read an array at the current
// iterator position.
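
Because AppendFileDescriptor(int) duplicates the descriptor inside libdbus,
the caller keeps ownership of the original. A sketch, with the path and
method names hypothetical:

```cpp
#include <fcntl.h>

#include "base/files/scoped_file.h"
#include "base/posix/eintr_wrapper.h"
#include "dbus/message.h"

base::ScopedFD fd(HANDLE_EINTR(open("/tmp/example", O_RDONLY)));
dbus::MethodCall method_call("org.example.Interface", "SendFd");
dbus::MessageWriter writer(&method_call);
writer.AppendFileDescriptor(fd.get());  // The message holds a dup().
// |fd| still owns the original descriptor and closes it on scope exit.
```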
diff --git a/dbus/mock_bus.h b/dbus/mock_bus.h
index b50f230569..40b090b156 100644
--- a/dbus/mock_bus.h
+++ b/dbus/mock_bus.h
@@ -26,15 +26,6 @@ class MockBus : public Bus {
ObjectProxy*(const std::string& service_name,
const ObjectPath& object_path,
int options));
- MOCK_METHOD3(RemoveObjectProxy, bool(
- const std::string& service_name,
- const ObjectPath& object_path,
- const base::Closure& callback));
- MOCK_METHOD4(RemoveObjectProxyWithOptions, bool(
- const std::string& service_name,
- const ObjectPath& object_path,
- int options,
- const base::Closure& callback));
MOCK_METHOD1(GetExportedObject, ExportedObject*(
const ObjectPath& object_path));
MOCK_METHOD2(GetObjectManager, ObjectManager*(const std::string&,
diff --git a/dbus/mock_object_manager.h b/dbus/mock_object_manager.h
index e4c76ba711..2318e497ea 100644
--- a/dbus/mock_object_manager.h
+++ b/dbus/mock_object_manager.h
@@ -31,7 +31,6 @@ class MockObjectManager : public ObjectManager {
MOCK_METHOD1(GetObjectProxy, ObjectProxy*(const ObjectPath&));
MOCK_METHOD2(GetProperties, PropertySet*(const ObjectPath&,
const std::string&));
- MOCK_METHOD0(GetManagedObjects, void());
protected:
virtual ~MockObjectManager();
diff --git a/dbus/mock_object_proxy.h b/dbus/mock_object_proxy.h
index f27f6f6acc..17d2a9f0f4 100644
--- a/dbus/mock_object_proxy.h
+++ b/dbus/mock_object_proxy.h
@@ -56,6 +56,10 @@ class MockObjectProxy : public ObjectProxy {
const std::string& signal_name,
SignalCallback signal_callback,
OnConnectedCallback on_connected_callback));
+ MOCK_METHOD1(SetNameOwnerChangedCallback,
+ void(NameOwnerChangedCallback callback));
+ MOCK_METHOD1(WaitForServiceToBeAvailable,
+ void(WaitForServiceToBeAvailableCallback callback));
MOCK_METHOD0(Detach, void());
protected:
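
A test sketch for the newly mockable method, assuming a MockBus named |bus|
and gmock's Invoke with a lambda; the service name and object path are
illustrative:

```cpp
scoped_refptr<dbus::MockObjectProxy> proxy = new dbus::MockObjectProxy(
    bus.get(), "org.example.Service", dbus::ObjectPath("/org/example"));
EXPECT_CALL(*proxy, WaitForServiceToBeAvailable(testing::_))
    .WillOnce(testing::Invoke(
        [](dbus::ObjectProxy::WaitForServiceToBeAvailableCallback callback) {
          callback.Run(true);  // Report the service as already up.
        }));
```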
diff --git a/dbus/object_manager.cc b/dbus/object_manager.cc
index 178bb5ff12..08e6e048e2 100644
--- a/dbus/object_manager.cc
+++ b/dbus/object_manager.cc
@@ -167,35 +167,6 @@ void ObjectManager::CleanUp() {
match_rule_.clear();
}
-void ObjectManager::InitializeObjects() {
- DCHECK(bus_);
- DCHECK(object_proxy_);
- DCHECK(setup_success_);
-
- // |object_proxy_| is no longer valid if the Bus was shut down before this
- // call. Don't initiate any other action from the origin thread.
- if (cleanup_called_)
- return;
-
- object_proxy_->ConnectToSignal(
- kObjectManagerInterface,
- kObjectManagerInterfacesAdded,
- base::Bind(&ObjectManager::InterfacesAddedReceived,
- weak_ptr_factory_.GetWeakPtr()),
- base::Bind(&ObjectManager::InterfacesAddedConnected,
- weak_ptr_factory_.GetWeakPtr()));
-
- object_proxy_->ConnectToSignal(
- kObjectManagerInterface,
- kObjectManagerInterfacesRemoved,
- base::Bind(&ObjectManager::InterfacesRemovedReceived,
- weak_ptr_factory_.GetWeakPtr()),
- base::Bind(&ObjectManager::InterfacesRemovedConnected,
- weak_ptr_factory_.GetWeakPtr()));
-
- GetManagedObjects();
-}
-
bool ObjectManager::SetupMatchRuleAndFilter() {
DCHECK(bus_);
DCHECK(!setup_success_);
@@ -235,10 +206,39 @@ bool ObjectManager::SetupMatchRuleAndFilter() {
}
void ObjectManager::OnSetupMatchRuleAndFilterComplete(bool success) {
- LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
- << ": Failed to set up match rule.";
- if (success)
- InitializeObjects();
+ if (!success) {
+ LOG(WARNING) << service_name_ << " " << object_path_.value()
+ << ": Failed to set up match rule.";
+ return;
+ }
+
+ DCHECK(bus_);
+ DCHECK(object_proxy_);
+ DCHECK(setup_success_);
+
+ // |object_proxy_| is no longer valid if the Bus was shut down before this
+ // call. Don't initiate any other action from the origin thread.
+ if (cleanup_called_)
+ return;
+
+ object_proxy_->ConnectToSignal(
+ kObjectManagerInterface,
+ kObjectManagerInterfacesAdded,
+ base::Bind(&ObjectManager::InterfacesAddedReceived,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&ObjectManager::InterfacesAddedConnected,
+ weak_ptr_factory_.GetWeakPtr()));
+
+ object_proxy_->ConnectToSignal(
+ kObjectManagerInterface,
+ kObjectManagerInterfacesRemoved,
+ base::Bind(&ObjectManager::InterfacesRemovedReceived,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&ObjectManager::InterfacesRemovedConnected,
+ weak_ptr_factory_.GetWeakPtr()));
+
+ if (!service_name_owner_.empty())
+ GetManagedObjects();
}
// static
@@ -249,7 +249,7 @@ DBusHandlerResult ObjectManager::HandleMessageThunk(DBusConnection* connection,
return self->HandleMessage(connection, raw_message);
}
-DBusHandlerResult ObjectManager::HandleMessage(DBusConnection*,
+DBusHandlerResult ObjectManager::HandleMessage(DBusConnection* /*connection*/,
DBusMessage* raw_message) {
DCHECK(bus_);
bus_->AssertOnDBusThread();
@@ -385,10 +385,9 @@ void ObjectManager::InterfacesAddedReceived(Signal* signal) {
UpdateObject(object_path, &reader);
}
-void ObjectManager::InterfacesAddedConnected(
- const std::string& /*interface_name*/,
- const std::string& /*signal_name*/,
- bool success) {
+void ObjectManager::InterfacesAddedConnected(const std::string& /*interface_name*/,
+ const std::string& /*signal_name*/,
+ bool success) {
LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
<< ": Failed to connect to InterfacesAdded signal.";
}
diff --git a/dbus/object_manager.h b/dbus/object_manager.h
index a97495e1f9..90cf919639 100644
--- a/dbus/object_manager.h
+++ b/dbus/object_manager.h
@@ -71,8 +71,9 @@
// object_manager_->UnregisterInterface(kInterface);
// }
//
-// The D-Bus thread manager takes care of issuing the necessary call to
-// GetManagedObjects() after the implementation classes have been set up.
+// This class calls GetManagedObjects() asynchronously after the remote service
+// becomes available and additionally refreshes managed objects after the
+// service stops or restarts.
//
// The object manager interface class has one abstract method that must be
// implemented by the class to create Properties structures on demand. As well
@@ -167,7 +168,7 @@ public:
// |interface_name| as appropriate. An implementation class will only
// receive multiple calls if it has registered for multiple interfaces.
virtual void ObjectAdded(const ObjectPath& /*object_path*/,
- const std::string& /*interface_name*/) {}
+ const std::string& /*interface_name*/) { }
  // Called by ObjectManager to inform the implementation class that an
  // object with the path |object_path| has been removed. The D-Bus interface
@@ -179,7 +180,7 @@ public:
// ObjectProxy object for the given interface are cleaned up, it is safe
// to retrieve them during removal to vary processing.
virtual void ObjectRemoved(const ObjectPath& /*object_path*/,
- const std::string& /*interface_name*/) {}
+ const std::string& /*interface_name*/) { }
};
// Client code should use Bus::GetObjectManager() instead of this constructor.
@@ -238,17 +239,14 @@ public:
private:
friend class base::RefCountedThreadSafe<ObjectManager>;
- // Connects the InterfacesAdded and InterfacesRemoved signals and calls
- // GetManagedObjects. Called from OnSetupMatchRuleAndFilterComplete.
- void InitializeObjects();
-
// Called from the constructor to add a match rule for PropertiesChanged
- // signals on the DBus thread and set up a corresponding filter function.
+ // signals on the D-Bus thread and set up a corresponding filter function.
bool SetupMatchRuleAndFilter();
// Called on the origin thread once the match rule and filter have been set
- // up. |success| is false, if an error occurred during set up; it's true
- // otherwise.
+ // up. Connects the InterfacesAdded and InterfacesRemoved signals and
+ // refreshes objects if the service is available. |success| is false if an
+ // error occurred during setup and true otherwise.
void OnSetupMatchRuleAndFilterComplete(bool success);
// Called by dbus:: when a message is received. This is used to filter
diff --git a/dbus/object_proxy.cc b/dbus/object_proxy.cc
index ce0255154a..50e62a3a99 100644
--- a/dbus/object_proxy.cc
+++ b/dbus/object_proxy.cc
@@ -10,7 +10,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/strings/string_piece.h"
#include "base/strings/stringprintf.h"
#include "base/task_runner_util.h"
@@ -459,8 +459,9 @@ void ObjectProxy::WaitForServiceToBeAvailableInternal() {
}
}
-DBusHandlerResult ObjectProxy::HandleMessage(DBusConnection*,
- DBusMessage* raw_message) {
+DBusHandlerResult ObjectProxy::HandleMessage(
+ DBusConnection* /*connection*/,
+ DBusMessage* raw_message) {
bus_->AssertOnDBusThread();
if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
diff --git a/dbus/object_proxy.h b/dbus/object_proxy.h
index 033e88608a..5de390461e 100644
--- a/dbus/object_proxy.h
+++ b/dbus/object_proxy.h
@@ -137,10 +137,10 @@ class CHROME_DBUS_EXPORT ObjectProxy
// from the method (i.e. calling a method that does not return a value),
// EmptyResponseCallback() can be passed to the |callback| parameter.
//
- // If the method call is successful, a pointer to Response object will
- // be passed to the callback. If unsuccessful, the error callback will be
- // called and a pointer to ErrorResponse object will be passed to the error
- // callback if available, otherwise NULL will be passed.
+ // If the method call is successful, |callback| will be invoked with a
+ // Response object. If unsuccessful, |error_callback| will be invoked with an
+ // ErrorResponse object (if the remote object returned an error) or nullptr
+ // (if a response was not received at all).
//
// Must be called in the origin thread.
virtual void CallMethodWithErrorCallback(MethodCall* method_call,
@@ -174,7 +174,11 @@ class CHROME_DBUS_EXPORT ObjectProxy
// represented by |service_name_|.
virtual void SetNameOwnerChangedCallback(NameOwnerChangedCallback callback);
- // Runs the callback as soon as the service becomes available.
+ // Registers |callback| to run when the service becomes available. If the
+ // service is already available, or if connecting to the name-owner-changed
+ // signal fails, |callback| will be run once asynchronously. Otherwise,
+ // |callback| will be run once in the future after the service becomes
+ // available.
virtual void WaitForServiceToBeAvailable(
WaitForServiceToBeAvailableCallback callback);
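
Per the clarified contract, |callback| runs exactly once: immediately (but
asynchronously) if the service is already up or the signal hookup fails,
otherwise later when the service appears. A minimal sketch:

```cpp
void OnServiceAvailable(bool service_is_available) {
  LOG(INFO) << "service available: " << service_is_available;
}

// ... on the origin thread:
object_proxy->WaitForServiceToBeAvailable(base::Bind(&OnServiceAvailable));
```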
diff --git a/dbus/property.cc b/dbus/property.cc
index aa58436f51..93f9ed693c 100644
--- a/dbus/property.cc
+++ b/dbus/property.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <memory>
+
#include "base/bind.h"
#include "base/logging.h"
@@ -659,6 +661,134 @@ void Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
writer->CloseContainer(&variant_writer);
}
+//
+// Property<std::unordered_map<std::string, std::vector<uint8_t>>>
+// specialization.
+//
+
+template <>
+bool Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
+ PopValueFromReader(MessageReader* reader) {
+ MessageReader variant_reader(nullptr);
+ MessageReader dict_reader(nullptr);
+ if (!reader->PopVariant(&variant_reader) ||
+ !variant_reader.PopArray(&dict_reader))
+ return false;
+
+ value_.clear();
+ while (dict_reader.HasMoreData()) {
+ MessageReader entry_reader(nullptr);
+ if (!dict_reader.PopDictEntry(&entry_reader))
+ return false;
+
+ std::string key;
+ MessageReader value_variant_reader(nullptr);
+ if (!entry_reader.PopString(&key) ||
+ !entry_reader.PopVariant(&value_variant_reader))
+ return false;
+
+ const uint8_t* bytes = nullptr;
+ size_t length = 0;
+ if (!value_variant_reader.PopArrayOfBytes(&bytes, &length))
+ return false;
+
+ value_[key].assign(bytes, bytes + length);
+ }
+ return true;
+}
+
+template <>
+void Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
+ AppendSetValueToWriter(MessageWriter* writer) {
+ MessageWriter variant_writer(nullptr);
+ MessageWriter dict_writer(nullptr);
+
+ writer->OpenVariant("a{sv}", &variant_writer);
+ variant_writer.OpenArray("{sv}", &dict_writer);
+
+ for (const auto& pair : set_value_) {
+ MessageWriter entry_writer(nullptr);
+ dict_writer.OpenDictEntry(&entry_writer);
+
+ entry_writer.AppendString(pair.first);
+
+ MessageWriter value_variant_writer(nullptr);
+ entry_writer.OpenVariant("ay", &value_variant_writer);
+ value_variant_writer.AppendArrayOfBytes(pair.second.data(),
+ pair.second.size());
+ entry_writer.CloseContainer(&value_variant_writer);
+
+ dict_writer.CloseContainer(&entry_writer);
+ }
+
+ variant_writer.CloseContainer(&dict_writer);
+ writer->CloseContainer(&variant_writer);
+}
+
+//
+// Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>
+// specialization.
+//
+
+template <>
+bool Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
+ PopValueFromReader(MessageReader* reader) {
+ MessageReader variant_reader(nullptr);
+ MessageReader dict_reader(nullptr);
+ if (!reader->PopVariant(&variant_reader) ||
+ !variant_reader.PopArray(&dict_reader))
+ return false;
+
+ value_.clear();
+ while (dict_reader.HasMoreData()) {
+ MessageReader entry_reader(nullptr);
+ if (!dict_reader.PopDictEntry(&entry_reader))
+ return false;
+
+ uint16_t key;
+ MessageReader value_variant_reader(nullptr);
+ if (!entry_reader.PopUint16(&key) ||
+ !entry_reader.PopVariant(&value_variant_reader))
+ return false;
+
+ const uint8_t* bytes = nullptr;
+ size_t length = 0;
+ if (!value_variant_reader.PopArrayOfBytes(&bytes, &length))
+ return false;
+
+ value_[key].assign(bytes, bytes + length);
+ }
+ return true;
+}
+
+template <>
+void Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
+ AppendSetValueToWriter(MessageWriter* writer) {
+ MessageWriter variant_writer(nullptr);
+ MessageWriter dict_writer(nullptr);
+
+ writer->OpenVariant("a{qv}", &variant_writer);
+ variant_writer.OpenArray("{qv}", &dict_writer);
+
+ for (const auto& pair : set_value_) {
+ MessageWriter entry_writer(nullptr);
+ dict_writer.OpenDictEntry(&entry_writer);
+
+ entry_writer.AppendUint16(pair.first);
+
+ MessageWriter value_variant_writer(nullptr);
+ entry_writer.OpenVariant("ay", &value_variant_writer);
+ value_variant_writer.AppendArrayOfBytes(pair.second.data(),
+ pair.second.size());
+ entry_writer.CloseContainer(&value_variant_writer);
+
+ dict_writer.CloseContainer(&entry_writer);
+ }
+
+ variant_writer.CloseContainer(&dict_writer);
+ writer->CloseContainer(&variant_writer);
+}
+
template class Property<uint8_t>;
template class Property<bool>;
template class Property<int16_t>;
@@ -675,5 +805,7 @@ template class Property<std::vector<ObjectPath> >;
template class Property<std::vector<uint8_t>>;
template class Property<std::map<std::string, std::string>>;
template class Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
+template class Property<std::unordered_map<std::string, std::vector<uint8_t>>>;
+template class Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>;
} // namespace dbus
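
The new specializations map a D-Bus "a{sv}"/"a{qv}" dictionary whose values
are "ay" byte-array variants onto std::unordered_map. A declaration sketch
following the usual PropertySet pattern (the "ServiceData" property name is
illustrative):

```cpp
struct Properties : public dbus::PropertySet {
  dbus::Property<std::unordered_map<std::string, std::vector<uint8_t>>>
      service_data;

  Properties(dbus::ObjectProxy* proxy, const std::string& interface_name)
      : dbus::PropertySet(proxy, interface_name, PropertyChangedCallback()) {
    RegisterProperty("ServiceData", &service_data);
  }
};
```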
diff --git a/dbus/property.h b/dbus/property.h
index efbad226a6..0559ea0554 100644
--- a/dbus/property.h
+++ b/dbus/property.h
@@ -9,6 +9,7 @@
#include <map>
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -610,6 +611,28 @@ Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
extern template class CHROME_DBUS_EXPORT
Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
+template <>
+CHROME_DBUS_EXPORT bool
+Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
+ PopValueFromReader(MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void
+Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
+ AppendSetValueToWriter(MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT
+ Property<std::unordered_map<std::string, std::vector<uint8_t>>>;
+
+template <>
+CHROME_DBUS_EXPORT bool
+Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
+ PopValueFromReader(MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void
+Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
+ AppendSetValueToWriter(MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT
+ Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>;
+
#pragma GCC diagnostic pop
} // namespace dbus
diff --git a/dbus/values_util.cc b/dbus/values_util.cc
index bea7bea746..1e035c9cfe 100644
--- a/dbus/values_util.cc
+++ b/dbus/values_util.cc
@@ -67,19 +67,19 @@ bool PopDictionaryEntries(MessageReader* reader,
// Gets the D-Bus type signature for the value.
std::string GetTypeSignature(const base::Value& value) {
switch (value.GetType()) {
- case base::Value::TYPE_BOOLEAN:
+ case base::Value::Type::BOOLEAN:
return "b";
- case base::Value::TYPE_INTEGER:
+ case base::Value::Type::INTEGER:
return "i";
- case base::Value::TYPE_DOUBLE:
+ case base::Value::Type::DOUBLE:
return "d";
- case base::Value::TYPE_STRING:
+ case base::Value::Type::STRING:
return "s";
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::BINARY:
return "ay";
- case base::Value::TYPE_DICTIONARY:
+ case base::Value::Type::DICTIONARY:
return "a{sv}";
- case base::Value::TYPE_LIST:
+ case base::Value::Type::LIST:
return "av";
default:
DLOG(ERROR) << "Unexpected type " << value.GetType();
@@ -98,38 +98,37 @@ std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
case Message::BYTE: {
uint8_t value = 0;
if (reader->PopByte(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::BOOL: {
bool value = false;
if (reader->PopBool(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::INT16: {
int16_t value = 0;
if (reader->PopInt16(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::UINT16: {
uint16_t value = 0;
if (reader->PopUint16(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::INT32: {
int32_t value = 0;
if (reader->PopInt32(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::UINT32: {
uint32_t value = 0;
if (reader->PopUint32(&value)) {
- result = base::MakeUnique<base::FundamentalValue>(
- static_cast<double>(value));
+ result = base::MakeUnique<base::Value>(static_cast<double>(value));
}
break;
}
@@ -138,8 +137,7 @@ std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
if (reader->PopInt64(&value)) {
DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
value << " is not exactly representable by double";
- result = base::MakeUnique<base::FundamentalValue>(
- static_cast<double>(value));
+ result = base::MakeUnique<base::Value>(static_cast<double>(value));
}
break;
}
@@ -148,27 +146,26 @@ std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
if (reader->PopUint64(&value)) {
DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
value << " is not exactly representable by double";
- result = base::MakeUnique<base::FundamentalValue>(
- static_cast<double>(value));
+ result = base::MakeUnique<base::Value>(static_cast<double>(value));
}
break;
}
case Message::DOUBLE: {
double value = 0;
if (reader->PopDouble(&value))
- result = base::MakeUnique<base::FundamentalValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::STRING: {
std::string value;
if (reader->PopString(&value))
- result = base::MakeUnique<base::StringValue>(value);
+ result = base::MakeUnique<base::Value>(value);
break;
}
case Message::OBJECT_PATH: {
ObjectPath value;
if (reader->PopObjectPath(&value))
- result = base::MakeUnique<base::StringValue>(value.value());
+ result = base::MakeUnique<base::Value>(value.value());
break;
}
case Message::UNIX_FD: {
@@ -219,28 +216,28 @@ std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
void AppendBasicTypeValueData(MessageWriter* writer, const base::Value& value) {
switch (value.GetType()) {
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value = false;
bool success = value.GetAsBoolean(&bool_value);
DCHECK(success);
writer->AppendBool(bool_value);
break;
}
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value = 0;
bool success = value.GetAsInteger(&int_value);
DCHECK(success);
writer->AppendInt32(int_value);
break;
}
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value = 0;
bool success = value.GetAsDouble(&double_value);
DCHECK(success);
writer->AppendDouble(double_value);
break;
}
- case base::Value::TYPE_STRING: {
+ case base::Value::Type::STRING: {
std::string string_value;
bool success = value.GetAsString(&string_value);
DCHECK(success);
@@ -263,7 +260,7 @@ void AppendBasicTypeValueDataAsVariant(MessageWriter* writer,
void AppendValueData(MessageWriter* writer, const base::Value& value) {
switch (value.GetType()) {
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const base::DictionaryValue* dictionary = NULL;
value.GetAsDictionary(&dictionary);
dbus::MessageWriter array_writer(NULL);
@@ -279,7 +276,7 @@ void AppendValueData(MessageWriter* writer, const base::Value& value) {
writer->CloseContainer(&array_writer);
break;
}
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const base::ListValue* list = NULL;
value.GetAsList(&list);
dbus::MessageWriter array_writer(NULL);
@@ -290,10 +287,10 @@ void AppendValueData(MessageWriter* writer, const base::Value& value) {
writer->CloseContainer(&array_writer);
break;
}
- case base::Value::TYPE_BOOLEAN:
- case base::Value::TYPE_INTEGER:
- case base::Value::TYPE_DOUBLE:
- case base::Value::TYPE_STRING:
+ case base::Value::Type::BOOLEAN:
+ case base::Value::Type::INTEGER:
+ case base::Value::Type::DOUBLE:
+ case base::Value::Type::STRING:
AppendBasicTypeValueData(writer, value);
break;
default:
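
The caller-visible effect of the scoped-enum migration: switches on the old
TYPE_* constants become base::Value::Type values, and the FundamentalValue/
StringValue subclasses collapse into base::Value itself. A sketch:

```cpp
std::unique_ptr<base::Value> value = dbus::PopDataAsValue(&reader);
if (value && value->GetType() == base::Value::Type::STRING) {
  std::string s;
  value->GetAsString(&s);
}
```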
diff --git a/sandbox/BUILD.gn b/sandbox/BUILD.gn
index 8ca3574e18..8c0405ed8e 100644
--- a/sandbox/BUILD.gn
+++ b/sandbox/BUILD.gn
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/buildflag_header.gni")
+import("//sandbox/features.gni")
+
# Meta-target that forwards to the proper platform one.
group("sandbox") {
if (is_win) {
@@ -19,3 +22,8 @@ group("sandbox") {
]
}
}
+
+buildflag_header("sandbox_features") {
+ header = "sandbox_features.h"
+ flags = [ "USE_SECCOMP_BPF=$use_seccomp_bpf" ]
+}
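
Assuming the standard //build buildflag machinery, the generated
sandbox_features.h is consumed from C++ via BUILDFLAG():

```cpp
#include "sandbox/sandbox_features.h"

#if BUILDFLAG(USE_SECCOMP_BPF)
// seccomp-BPF-specific code paths.
#endif
```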
diff --git a/sandbox/linux/BUILD.gn b/sandbox/linux/BUILD.gn
index a5c041fad0..3e98defa5c 100644
--- a/sandbox/linux/BUILD.gn
+++ b/sandbox/linux/BUILD.gn
@@ -4,6 +4,7 @@
import("//build/config/features.gni")
import("//build/config/nacl/config.gni")
+import("//sandbox/features.gni")
import("//testing/test.gni")
if (is_android) {
@@ -41,10 +42,7 @@ group("sandbox") {
public_deps += [ ":suid_sandbox_client" ]
}
if (use_seccomp_bpf || is_nacl_nonsfi) {
- public_deps += [
- ":seccomp_bpf",
- ":seccomp_bpf_helpers",
- ]
+ public_deps += [ ":seccomp_bpf" ]
}
}
@@ -192,7 +190,6 @@ action("bpf_dsl_golden") {
rebase_path(outputs, root_build_dir) + rebase_path(inputs, root_build_dir)
}
-
test("sandbox_linux_unittests") {
deps = [
":sandbox_linux_unittests_sources",
@@ -222,6 +219,14 @@ component("seccomp_bpf") {
"bpf_dsl/syscall_set.cc",
"bpf_dsl/syscall_set.h",
"bpf_dsl/trap_registry.h",
+ "seccomp-bpf-helpers/baseline_policy.cc",
+ "seccomp-bpf-helpers/baseline_policy.h",
+ "seccomp-bpf-helpers/sigsys_handlers.cc",
+ "seccomp-bpf-helpers/sigsys_handlers.h",
+ "seccomp-bpf-helpers/syscall_parameters_restrictions.cc",
+ "seccomp-bpf-helpers/syscall_parameters_restrictions.h",
+ "seccomp-bpf-helpers/syscall_sets.cc",
+ "seccomp-bpf-helpers/syscall_sets.h",
"seccomp-bpf/die.cc",
"seccomp-bpf/die.h",
"seccomp-bpf/sandbox_bpf.cc",
@@ -251,31 +256,6 @@ component("seccomp_bpf") {
"bpf_dsl/linux_syscall_ranges.h",
"bpf_dsl/seccomp_macros.h",
"bpf_dsl/trap_registry.h",
- ]
- }
-}
-
-component("seccomp_bpf_helpers") {
- sources = [
- "seccomp-bpf-helpers/baseline_policy.cc",
- "seccomp-bpf-helpers/baseline_policy.h",
- "seccomp-bpf-helpers/sigsys_handlers.cc",
- "seccomp-bpf-helpers/sigsys_handlers.h",
- "seccomp-bpf-helpers/syscall_parameters_restrictions.cc",
- "seccomp-bpf-helpers/syscall_parameters_restrictions.h",
- "seccomp-bpf-helpers/syscall_sets.cc",
- "seccomp-bpf-helpers/syscall_sets.h",
- ]
- defines = [ "SANDBOX_IMPLEMENTATION" ]
-
- deps = [
- ":sandbox_services",
- ":seccomp_bpf",
- "//base",
- ]
-
- if (is_nacl_nonsfi) {
- sources -= [
"seccomp-bpf-helpers/baseline_policy.cc",
"seccomp-bpf-helpers/baseline_policy.h",
"seccomp-bpf-helpers/syscall_sets.cc",
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.h b/sandbox/linux/bpf_dsl/bpf_dsl.h
index 7f81344237..6f0dd4eb39 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.h
@@ -76,6 +76,11 @@
namespace sandbox {
namespace bpf_dsl {
+template <typename T>
+class Caser;
+
+class Elser;
+
// ResultExpr is an opaque reference to an immutable result expression tree.
using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
index 10477c9b31..af1b48b407 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
@@ -24,14 +24,6 @@ class BoolExprImpl;
using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
-template <typename T>
-class Arg;
-
-class Elser;
-
-template <typename T>
-class Caser;
-
} // namespace bpf_dsl
} // namespace sandbox
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
index 35ff64f498..f397321edd 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
@@ -13,7 +13,6 @@
namespace sandbox {
namespace bpf_dsl {
-class ErrorCode;
class PolicyCompiler;
namespace internal {
diff --git a/sandbox/linux/sandbox_linux.gypi b/sandbox/linux/sandbox_linux.gypi
deleted file mode 100644
index e96ae9eefd..0000000000
--- a/sandbox/linux/sandbox_linux.gypi
+++ /dev/null
@@ -1,434 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'conditions': [
- ['OS=="linux"', {
- 'compile_suid_client': 1,
- 'compile_credentials': 1,
- 'use_base_test_suite': 1,
- }, {
- 'compile_suid_client': 0,
- 'compile_credentials': 0,
- 'use_base_test_suite': 0,
- }],
- ['OS=="linux" and (target_arch=="ia32" or target_arch=="x64" or '
- 'target_arch=="mipsel")', {
- 'compile_seccomp_bpf_demo': 1,
- }, {
- 'compile_seccomp_bpf_demo': 0,
- }],
- ],
- },
- 'target_defaults': {
- 'target_conditions': [
- # All linux/ files will automatically be excluded on Android
- # so make sure we re-include them explicitly.
- ['OS == "android"', {
- 'sources/': [
- ['include', '^linux/'],
- ],
- }],
- ],
- },
- 'targets': [
- # We have two principal targets: sandbox and sandbox_linux_unittests
- # All other targets are listed as dependencies.
- # There is one notable exception: for historical reasons, chrome_sandbox is
- # the setuid sandbox and is its own target.
- {
- 'target_name': 'sandbox',
- 'type': 'none',
- 'dependencies': [
- 'sandbox_services',
- ],
- 'conditions': [
- [ 'compile_suid_client==1', {
- 'dependencies': [
- 'suid_sandbox_client',
- ],
- }],
- # Compile seccomp BPF when we support it.
- [ 'use_seccomp_bpf==1', {
- 'dependencies': [
- 'seccomp_bpf',
- 'seccomp_bpf_helpers',
- ],
- }],
- ],
- },
- {
- 'target_name': 'sandbox_linux_test_utils',
- 'type': 'static_library',
- 'dependencies': [
- '../testing/gtest.gyp:gtest',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'tests/sandbox_test_runner.cc',
- 'tests/sandbox_test_runner.h',
- 'tests/sandbox_test_runner_function_pointer.cc',
- 'tests/sandbox_test_runner_function_pointer.h',
- 'tests/test_utils.cc',
- 'tests/test_utils.h',
- 'tests/unit_tests.cc',
- 'tests/unit_tests.h',
- ],
- 'conditions': [
- [ 'use_seccomp_bpf==1', {
- 'sources': [
- 'seccomp-bpf/bpf_tester_compatibility_delegate.h',
- 'seccomp-bpf/bpf_tests.h',
- 'seccomp-bpf/sandbox_bpf_test_runner.cc',
- 'seccomp-bpf/sandbox_bpf_test_runner.h',
- ],
- 'dependencies': [
- 'seccomp_bpf',
- ]
- }],
- [ 'use_base_test_suite==1', {
- 'dependencies': [
- '../base/base.gyp:test_support_base',
- ],
- 'defines': [
- 'SANDBOX_USES_BASE_TEST_SUITE',
- ],
- }],
- ],
- },
- {
- # The main sandboxing test target.
- 'target_name': 'sandbox_linux_unittests',
- 'includes': [
- 'sandbox_linux_test_sources.gypi',
- ],
- 'type': 'executable',
- 'conditions': [
- [ 'OS == "android"', {
- 'variables': {
- 'test_type': 'gtest',
- 'test_suite_name': '<(_target_name)',
- },
- 'includes': [
- '../../build/android/test_runner.gypi',
- ],
- }]
- ]
- },
- {
- 'target_name': 'seccomp_bpf',
- 'type': '<(component)',
- 'sources': [
- 'bpf_dsl/bpf_dsl.cc',
- 'bpf_dsl/bpf_dsl.h',
- 'bpf_dsl/bpf_dsl_forward.h',
- 'bpf_dsl/bpf_dsl_impl.h',
- 'bpf_dsl/codegen.cc',
- 'bpf_dsl/codegen.h',
- 'bpf_dsl/cons.h',
- 'bpf_dsl/errorcode.h',
- 'bpf_dsl/linux_syscall_ranges.h',
- 'bpf_dsl/policy.cc',
- 'bpf_dsl/policy.h',
- 'bpf_dsl/policy_compiler.cc',
- 'bpf_dsl/policy_compiler.h',
- 'bpf_dsl/seccomp_macros.h',
- 'bpf_dsl/seccomp_macros.h',
- 'bpf_dsl/syscall_set.cc',
- 'bpf_dsl/syscall_set.h',
- 'bpf_dsl/trap_registry.h',
- 'seccomp-bpf/die.cc',
- 'seccomp-bpf/die.h',
- 'seccomp-bpf/sandbox_bpf.cc',
- 'seccomp-bpf/sandbox_bpf.h',
- 'seccomp-bpf/syscall.cc',
- 'seccomp-bpf/syscall.h',
- 'seccomp-bpf/trap.cc',
- 'seccomp-bpf/trap.h',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- 'sandbox_services',
- 'sandbox_services_headers',
- ],
- 'defines': [
- 'SANDBOX_IMPLEMENTATION',
- ],
- 'includes': [
- # Disable LTO due to compiler bug
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57703
- '../../build/android/disable_gcc_lto.gypi',
- ],
- 'include_dirs': [
- '../..',
- ],
- },
- {
- 'target_name': 'seccomp_bpf_helpers',
- 'type': '<(component)',
- 'sources': [
- 'seccomp-bpf-helpers/baseline_policy.cc',
- 'seccomp-bpf-helpers/baseline_policy.h',
- 'seccomp-bpf-helpers/sigsys_handlers.cc',
- 'seccomp-bpf-helpers/sigsys_handlers.h',
- 'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
- 'seccomp-bpf-helpers/syscall_parameters_restrictions.h',
- 'seccomp-bpf-helpers/syscall_sets.cc',
- 'seccomp-bpf-helpers/syscall_sets.h',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- 'sandbox_services',
- 'seccomp_bpf',
- ],
- 'defines': [
- 'SANDBOX_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '../..',
- ],
- },
- {
- # The setuid sandbox, for Linux
- 'target_name': 'chrome_sandbox',
- 'type': 'executable',
- 'sources': [
- 'suid/common/sandbox.h',
- 'suid/common/suid_unsafe_environment_variables.h',
- 'suid/process_util.h',
- 'suid/process_util_linux.c',
- 'suid/sandbox.c',
- ],
- 'cflags': [
- # For ULLONG_MAX
- '-std=gnu99',
- ],
- 'include_dirs': [
- '../..',
- ],
- # Do not use any sanitizer tools with this binary. http://crbug.com/382766
- 'cflags/': [
- ['exclude', '-fsanitize'],
- ],
- 'ldflags/': [
- ['exclude', '-fsanitize'],
- ],
- },
- { 'target_name': 'sandbox_services',
- 'type': '<(component)',
- 'sources': [
- 'services/init_process_reaper.cc',
- 'services/init_process_reaper.h',
- 'services/proc_util.cc',
- 'services/proc_util.h',
- 'services/resource_limits.cc',
- 'services/resource_limits.h',
- 'services/scoped_process.cc',
- 'services/scoped_process.h',
- 'services/syscall_wrappers.cc',
- 'services/syscall_wrappers.h',
- 'services/thread_helpers.cc',
- 'services/thread_helpers.h',
- 'services/yama.cc',
- 'services/yama.h',
- 'syscall_broker/broker_channel.cc',
- 'syscall_broker/broker_channel.h',
- 'syscall_broker/broker_client.cc',
- 'syscall_broker/broker_client.h',
- 'syscall_broker/broker_common.h',
- 'syscall_broker/broker_file_permission.cc',
- 'syscall_broker/broker_file_permission.h',
- 'syscall_broker/broker_host.cc',
- 'syscall_broker/broker_host.h',
- 'syscall_broker/broker_policy.cc',
- 'syscall_broker/broker_policy.h',
- 'syscall_broker/broker_process.cc',
- 'syscall_broker/broker_process.h',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- ],
- 'defines': [
- 'SANDBOX_IMPLEMENTATION',
- ],
- 'conditions': [
- ['compile_credentials==1', {
- 'sources': [
- 'services/credentials.cc',
- 'services/credentials.h',
- 'services/namespace_sandbox.cc',
- 'services/namespace_sandbox.h',
- 'services/namespace_utils.cc',
- 'services/namespace_utils.h',
- ],
- 'dependencies': [
- # for capability.h.
- 'sandbox_services_headers',
- ],
- }],
- ],
- 'include_dirs': [
- '..',
- ],
- },
- { 'target_name': 'sandbox_services_headers',
- 'type': 'none',
- 'sources': [
- 'system_headers/arm64_linux_syscalls.h',
- 'system_headers/arm64_linux_ucontext.h',
- 'system_headers/arm_linux_syscalls.h',
- 'system_headers/arm_linux_ucontext.h',
- 'system_headers/capability.h',
- 'system_headers/i386_linux_ucontext.h',
- 'system_headers/linux_futex.h',
- 'system_headers/linux_seccomp.h',
- 'system_headers/linux_syscalls.h',
- 'system_headers/linux_time.h',
- 'system_headers/linux_ucontext.h',
- 'system_headers/mips_linux_syscalls.h',
- 'system_headers/mips_linux_ucontext.h',
- 'system_headers/x86_32_linux_syscalls.h',
- 'system_headers/x86_64_linux_syscalls.h',
- ],
- 'include_dirs': [
- '..',
- ],
- },
- {
- 'target_name': 'suid_sandbox_client',
- 'type': '<(component)',
- 'sources': [
- 'suid/common/sandbox.h',
- 'suid/common/suid_unsafe_environment_variables.h',
- 'suid/client/setuid_sandbox_client.cc',
- 'suid/client/setuid_sandbox_client.h',
- 'suid/client/setuid_sandbox_host.cc',
- 'suid/client/setuid_sandbox_host.h',
- ],
- 'defines': [
- 'SANDBOX_IMPLEMENTATION',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- 'sandbox_services',
- ],
- 'include_dirs': [
- '..',
- ],
- },
- {
- 'target_name': 'bpf_dsl_golden',
- 'type': 'none',
- 'actions': [
- {
- 'action_name': 'generate',
- 'inputs': [
- 'bpf_dsl/golden/generate.py',
- 'bpf_dsl/golden/i386/ArgSizePolicy.txt',
- 'bpf_dsl/golden/i386/BasicPolicy.txt',
- 'bpf_dsl/golden/i386/ElseIfPolicy.txt',
- 'bpf_dsl/golden/i386/MaskingPolicy.txt',
- 'bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
- 'bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
- 'bpf_dsl/golden/i386/SwitchPolicy.txt',
- 'bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
- 'bpf_dsl/golden/x86-64/BasicPolicy.txt',
- 'bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
- 'bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
- 'bpf_dsl/golden/x86-64/MaskingPolicy.txt',
- 'bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
- 'bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
- 'bpf_dsl/golden/x86-64/SwitchPolicy.txt',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
- ],
- 'action': [
- 'python',
- 'linux/bpf_dsl/golden/generate.py',
- '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
- 'linux/bpf_dsl/golden/i386/ArgSizePolicy.txt',
- 'linux/bpf_dsl/golden/i386/BasicPolicy.txt',
- 'linux/bpf_dsl/golden/i386/ElseIfPolicy.txt',
- 'linux/bpf_dsl/golden/i386/MaskingPolicy.txt',
- 'linux/bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
- 'linux/bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
- 'linux/bpf_dsl/golden/i386/SwitchPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/BasicPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/MaskingPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
- 'linux/bpf_dsl/golden/x86-64/SwitchPolicy.txt',
- ],
- 'message': 'Generating header from golden files ...',
- },
- ],
- },
- ],
- 'conditions': [
- [ 'OS=="android"', {
- 'targets': [
- {
- 'target_name': 'sandbox_linux_unittests_deps',
- 'type': 'none',
- 'dependencies': [
- 'sandbox_linux_unittests',
- ],
- 'variables': {
- 'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
- 'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
- 'include_main_binary': 1,
- },
- 'includes': [
- '../../build/android/native_app_dependencies.gypi'
- ],
- }],
- }],
- [ 'OS=="android"', {
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'sandbox_linux_unittests_android_run',
- 'type': 'none',
- 'dependencies': [
- 'sandbox_linux_unittests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- '../sandbox_linux_unittests_android.isolate',
- ],
- },
- ],
- },
- ],
- ],
- }],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'sandbox_linux_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'sandbox_linux_unittests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- '../sandbox_linux_unittests.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp b/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
deleted file mode 100644
index 50e637c360..0000000000
--- a/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- '../../build/common_untrusted.gypi',
- ],
- 'conditions': [
- ['disable_nacl==0 and disable_nacl_untrusted==0', {
- 'targets': [
- {
- 'target_name': 'sandbox_linux_nacl_nonsfi',
- 'type': 'none',
- 'variables': {
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libsandbox_linux_nacl_nonsfi.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 0,
- 'build_pnacl_newlib': 0,
- 'build_nonsfi_helper': 1,
- 'compile_flags': [
- '-fgnu-inline-asm',
- ],
- 'sources': [
- # This is the subset of linux build target, needed for
- # nacl_helper_nonsfi's sandbox implementation.
- 'bpf_dsl/bpf_dsl.cc',
- 'bpf_dsl/codegen.cc',
- 'bpf_dsl/policy.cc',
- 'bpf_dsl/policy_compiler.cc',
- 'bpf_dsl/syscall_set.cc',
- 'seccomp-bpf-helpers/sigsys_handlers.cc',
- 'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
- 'seccomp-bpf/die.cc',
- 'seccomp-bpf/sandbox_bpf.cc',
- 'seccomp-bpf/syscall.cc',
- 'seccomp-bpf/trap.cc',
- 'services/credentials.cc',
- 'services/namespace_sandbox.cc',
- 'services/namespace_utils.cc',
- 'services/proc_util.cc',
- 'services/resource_limits.cc',
- 'services/syscall_wrappers.cc',
- 'services/thread_helpers.cc',
- 'suid/client/setuid_sandbox_client.cc',
- ],
- },
- 'dependencies': [
- '../../base/base_nacl.gyp:base_nacl_nonsfi',
- ],
- },
- ],
- }],
-
- ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
- 'targets': [
- {
- 'target_name': 'sandbox_linux_test_utils_nacl_nonsfi',
- 'type': 'none',
- 'variables': {
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libsandbox_linux_test_utils_nacl_nonsfi.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 0,
- 'build_pnacl_newlib': 0,
- 'build_nonsfi_helper': 1,
-
- 'sources': [
- 'seccomp-bpf/sandbox_bpf_test_runner.cc',
- 'tests/sandbox_test_runner.cc',
- 'tests/unit_tests.cc',
- ],
- },
- 'dependencies': [
- '../../testing/gtest_nacl.gyp:gtest_nacl',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/sandbox/linux/sandbox_linux_test_sources.gypi b/sandbox/linux/sandbox_linux_test_sources.gypi
deleted file mode 100644
index 612814e1d4..0000000000
--- a/sandbox/linux/sandbox_linux_test_sources.gypi
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Tests need to be compiled in the same link unit, so we have to list them
-# in a separate .gypi file.
-{
- 'dependencies': [
- 'sandbox',
- 'sandbox_linux_test_utils',
- 'sandbox_services',
- '../base/base.gyp:base',
- '../testing/gtest.gyp:gtest',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'services/proc_util_unittest.cc',
- 'services/scoped_process_unittest.cc',
- 'services/resource_limits_unittests.cc',
- 'services/syscall_wrappers_unittest.cc',
- 'services/thread_helpers_unittests.cc',
- 'services/yama_unittests.cc',
- 'syscall_broker/broker_file_permission_unittest.cc',
- 'syscall_broker/broker_process_unittest.cc',
- 'tests/main.cc',
- 'tests/scoped_temporary_file.cc',
- 'tests/scoped_temporary_file.h',
- 'tests/scoped_temporary_file_unittest.cc',
- 'tests/test_utils_unittest.cc',
- 'tests/unit_tests_unittest.cc',
- ],
- 'conditions': [
- [ 'compile_suid_client==1', {
- 'sources': [
- 'suid/client/setuid_sandbox_client_unittest.cc',
- 'suid/client/setuid_sandbox_host_unittest.cc',
- ],
- }],
- [ 'use_seccomp_bpf==1', {
- 'sources': [
- 'bpf_dsl/bpf_dsl_unittest.cc',
- 'bpf_dsl/codegen_unittest.cc',
- 'bpf_dsl/cons_unittest.cc',
- 'bpf_dsl/dump_bpf.cc',
- 'bpf_dsl/dump_bpf.h',
- 'bpf_dsl/syscall_set_unittest.cc',
- 'bpf_dsl/test_trap_registry.cc',
- 'bpf_dsl/test_trap_registry.h',
- 'bpf_dsl/test_trap_registry_unittest.cc',
- 'bpf_dsl/verifier.cc',
- 'bpf_dsl/verifier.h',
- 'integration_tests/bpf_dsl_seccomp_unittest.cc',
- 'integration_tests/seccomp_broker_process_unittest.cc',
- 'seccomp-bpf-helpers/baseline_policy_unittest.cc',
- 'seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc',
- 'seccomp-bpf/bpf_tests_unittest.cc',
- 'seccomp-bpf/sandbox_bpf_unittest.cc',
- 'seccomp-bpf/syscall_unittest.cc',
- 'seccomp-bpf/trap_unittest.cc',
- ],
- 'dependencies': [
- 'bpf_dsl_golden',
- ],
- }],
- [ 'compile_credentials==1', {
- 'sources': [
- 'integration_tests/namespace_unix_domain_socket_unittest.cc',
- 'services/credentials_unittest.cc',
- 'services/namespace_utils_unittest.cc',
- ],
- 'dependencies': [
- '../build/linux/system.gyp:libcap'
- ],
- 'conditions': [
- [ 'use_base_test_suite==1', {
- 'sources': [
- 'services/namespace_sandbox_unittest.cc',
- ]
- }]
- ],
- }],
- [ 'use_base_test_suite==1', {
- 'dependencies': [
- '../base/base.gyp:test_support_base',
- ],
- 'defines': [
- 'SANDBOX_USES_BASE_TEST_SUITE',
- ],
- }],
- ],
-}
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
index 2bf572c0b3..88a932607c 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
@@ -169,10 +169,20 @@ ResultExpr EvaluateSyscallImpl(int fs_denied_errno,
if (sysno == __NR_getpriority || sysno ==__NR_setpriority)
return RestrictGetSetpriority(current_pid);
+ if (sysno == __NR_getrandom) {
+ return RestrictGetRandom();
+ }
+
if (sysno == __NR_madvise) {
- // Only allow MADV_DONTNEED (aka MADV_FREE).
+ // Only allow MADV_DONTNEED and MADV_FREE.
const Arg<int> advice(2);
- return If(advice == MADV_DONTNEED, Allow()).Else(Error(EPERM));
+ return If(AnyOf(advice == MADV_DONTNEED
+#if defined(MADV_FREE)
+ // MADV_FREE was introduced in Linux 4.5 and started being
+ // defined in glibc 2.24.
+ , advice == MADV_FREE
+#endif
+ ), Allow()).Else(Error(EPERM));
}
#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
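
Under this policy a disallowed madvise() advice fails with EPERM instead of
crashing, so callers can probe MADV_FREE and fall back. A sketch, with |ptr|
and |length| assumed to describe a valid mapping:

```cpp
int rv = -1;
#if defined(MADV_FREE)
// Filtered by the sandbox (or unsupported before Linux 4.5) => rv != 0.
rv = madvise(ptr, length, MADV_FREE);
#endif
if (rv != 0)
  rv = madvise(ptr, length, MADV_DONTNEED);
```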
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
index f0392b1a00..ca812d8a1e 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
@@ -168,6 +168,21 @@ BPF_TEST_C(BaselinePolicy, Socketpair, BaselinePolicy) {
TestPipeOrSocketPair(base::ScopedFD(sv[0]), base::ScopedFD(sv[1]));
}
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+
+BPF_TEST_C(BaselinePolicy, GetRandom, BaselinePolicy) {
+ char buf[1];
+
+ // Many systems do not yet support getrandom(2) so ENOSYS is a valid result
+ // here.
+ int ret = HANDLE_EINTR(syscall(__NR_getrandom, buf, sizeof(buf), 0));
+ BPF_ASSERT((ret == -1 && errno == ENOSYS) || ret == 1);
+ ret = HANDLE_EINTR(syscall(__NR_getrandom, buf, sizeof(buf), GRND_NONBLOCK));
+ BPF_ASSERT((ret == -1 && (errno == ENOSYS || errno == EAGAIN)) || ret == 1);
+}
+
// Not all architectures can restrict the domain for socketpair().
#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
BPF_DEATH_TEST_C(BaselinePolicy,
@@ -349,6 +364,17 @@ BPF_DEATH_TEST_C(BaselinePolicy,
clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
}
+#if !defined(GRND_RANDOM)
+#define GRND_RANDOM 2
+#endif
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+ GetRandomOfDevRandomCrashes,
+ DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+ BaselinePolicy) {
+ syscall(__NR_getrandom, NULL, 0, GRND_RANDOM);
+}
+
#if !defined(__i386__)
BPF_DEATH_TEST_C(BaselinePolicy,
GetSockOptWrongLevelSigsys,
diff --git a/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc b/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
index 077bc61f38..e6c64defad 100644
--- a/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
@@ -11,6 +11,7 @@
#include <sys/syscall.h>
#include <unistd.h>
+#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"
@@ -49,7 +50,8 @@ void WriteToStdErr(const char* error_message, size_t size) {
while (size > 0) {
// TODO(jln): query the current policy to check if send() is available and
// use it to perform a non-blocking write.
- const int ret = HANDLE_EINTR(write(STDERR_FILENO, error_message, size));
+ const int ret = HANDLE_EINTR(
+ sandbox::sys_write(STDERR_FILENO, error_message, size));
// We can't handle any type of error here.
if (ret <= 0 || static_cast<size_t>(ret) > size) break;
size -= ret;
@@ -92,6 +94,7 @@ void PrintSyscallError(uint32_t sysno) {
rem /= 10;
sysno_base10[i] = '0' + mod;
}
+
#if defined(__mips__) && (_MIPS_SIM == _MIPS_SIM_ABI32)
static const char kSeccompErrorPrefix[] = __FILE__
":**CRASHING**:" SECCOMP_MESSAGE_COMMON_CONTENT " in syscall 4000 + ";
@@ -105,7 +108,88 @@ void PrintSyscallError(uint32_t sysno) {
WriteToStdErr(kSeccompErrorPostfix, sizeof(kSeccompErrorPostfix) - 1);
}
-} // namespace.
+// Helper to convert a number of type T to a hexadecimal string using
+// stack-allocated storage.
+template <typename T>
+class NumberToHex {
+ public:
+ explicit NumberToHex(T value) {
+ static const char kHexChars[] = "0123456789abcdef";
+
+ memset(str_, '0', sizeof(str_));
+ str_[1] = 'x';
+ str_[sizeof(str_) - 1] = '\0';
+
+ T rem = value;
+ T mod = 0;
+ for (size_t i = sizeof(str_) - 2; i >= 2; --i) {
+ mod = rem % 16;
+ rem /= 16;
+ str_[i] = kHexChars[mod];
+ }
+ }
+
+ const char* str() const { return str_; }
+
+ static size_t length() { return sizeof(str_) - 1; }
+
+ private:
+ // HEX uses two characters per byte, with a leading '0x', and a trailing NUL.
+ char str_[sizeof(T) * 2 + 3];
+};
+
+// Records the syscall number and first four arguments in a crash key, to help
+// debug the failure.
+void SetSeccompCrashKey(const struct sandbox::arch_seccomp_data& args) {
+#if !defined(OS_NACL_NONSFI)
+ NumberToHex<int> nr(args.nr);
+ NumberToHex<uint64_t> arg1(args.args[0]);
+ NumberToHex<uint64_t> arg2(args.args[1]);
+ NumberToHex<uint64_t> arg3(args.args[2]);
+ NumberToHex<uint64_t> arg4(args.args[3]);
+
+ // In order to avoid calling into libc sprintf functions from an unsafe signal
+ // context, manually construct the crash key string.
+ const char* const prefixes[] = {
+ "nr=",
+ " arg1=",
+ " arg2=",
+ " arg3=",
+ " arg4=",
+ };
+ const char* const values[] = {
+ nr.str(),
+ arg1.str(),
+ arg2.str(),
+ arg3.str(),
+ arg4.str(),
+ };
+
+ size_t crash_key_length = nr.length() + arg1.length() + arg2.length() +
+ arg3.length() + arg4.length();
+ for (auto* prefix : prefixes) {
+ crash_key_length += strlen(prefix);
+ }
+ ++crash_key_length; // For the trailing NUL byte.
+
+ char crash_key[crash_key_length];
+ memset(crash_key, '\0', crash_key_length);
+
+ size_t offset = 0;
+ for (size_t i = 0; i < arraysize(values); ++i) {
+ const char* strings[2] = { prefixes[i], values[i] };
+ for (auto* string : strings) {
+ size_t string_len = strlen(string);
+ memmove(&crash_key[offset], string, string_len);
+ offset += string_len;
+ }
+ }
+
+ base::debug::SetCrashKeyValue("seccomp-sigsys", crash_key);
+#endif
+}
+
+} // namespace
namespace sandbox {
@@ -113,6 +197,7 @@ intptr_t CrashSIGSYS_Handler(const struct arch_seccomp_data& args, void* aux) {
uint32_t syscall = SyscallNumberToOffsetFromBase(args.nr);
PrintSyscallError(syscall);
+ SetSeccompCrashKey(args);
// Encode 8-bits of the 1st two arguments too, so we can discern which socket
// type, which fcntl, ... etc., without being likely to hit a mapped
@@ -140,6 +225,7 @@ intptr_t SIGSYSCloneFailure(const struct arch_seccomp_data& args, void* aux) {
static const char kSeccompCloneError[] =
__FILE__":**CRASHING**:" SECCOMP_MESSAGE_CLONE_CONTENT "\n";
WriteToStdErr(kSeccompCloneError, sizeof(kSeccompCloneError) - 1);
+ SetSeccompCrashKey(args);
// "flags" is the first argument in the kernel's clone().
// Mark as volatile to be able to find the value on the stack in a minidump.
volatile uint64_t clone_flags = args.args[0];
@@ -160,6 +246,7 @@ intptr_t SIGSYSPrctlFailure(const struct arch_seccomp_data& args,
static const char kSeccompPrctlError[] =
__FILE__":**CRASHING**:" SECCOMP_MESSAGE_PRCTL_CONTENT "\n";
WriteToStdErr(kSeccompPrctlError, sizeof(kSeccompPrctlError) - 1);
+ SetSeccompCrashKey(args);
// Mark as volatile to be able to find the value on the stack in a minidump.
volatile uint64_t option = args.args[0];
volatile char* addr =
@@ -174,6 +261,7 @@ intptr_t SIGSYSIoctlFailure(const struct arch_seccomp_data& args,
static const char kSeccompIoctlError[] =
__FILE__":**CRASHING**:" SECCOMP_MESSAGE_IOCTL_CONTENT "\n";
WriteToStdErr(kSeccompIoctlError, sizeof(kSeccompIoctlError) - 1);
+ SetSeccompCrashKey(args);
// Make "request" volatile so that we can see it on the stack in a minidump.
volatile uint64_t request = args.args[1];
volatile char* addr = reinterpret_cast<volatile char*>(request & 0xFFFF);
@@ -190,6 +278,7 @@ intptr_t SIGSYSKillFailure(const struct arch_seccomp_data& args,
static const char kSeccompKillError[] =
__FILE__":**CRASHING**:" SECCOMP_MESSAGE_KILL_CONTENT "\n";
WriteToStdErr(kSeccompKillError, sizeof(kSeccompKillError) - 1);
+ SetSeccompCrashKey(args);
// Make "pid" volatile so that we can see it on the stack in a minidump.
volatile uint64_t my_pid = sys_getpid();
volatile char* addr = reinterpret_cast<volatile char*>(my_pid & 0xFFF);
@@ -203,6 +292,7 @@ intptr_t SIGSYSFutexFailure(const struct arch_seccomp_data& args,
static const char kSeccompFutexError[] =
__FILE__ ":**CRASHING**:" SECCOMP_MESSAGE_FUTEX_CONTENT "\n";
WriteToStdErr(kSeccompFutexError, sizeof(kSeccompFutexError) - 1);
+ SetSeccompCrashKey(args);
volatile int futex_op = args.args[1];
volatile char* addr = reinterpret_cast<volatile char*>(futex_op & 0xFFF);
*addr = '\0';
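
NumberToHex is a signal-safe, allocation-free formatter: fixed width, two
hex characters per byte, leading "0x". For illustration:

```cpp
NumberToHex<uint64_t> hex(0x7f);
// hex.str() yields "0x000000000000007f" (16 digits for an 8-byte value);
// no libc sprintf is involved, so it is usable inside the SIGSYS handler.
```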
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index 56c4cb387d..061bfb4803 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -351,4 +351,22 @@ ResultExpr RestrictClockID() {
.Default(CrashSIGSYS());
}
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+
+ResultExpr RestrictGetRandom() {
+ const Arg<unsigned int> flags(2);
+ const unsigned int kGoodFlags = GRND_NONBLOCK;
+ return If((flags & ~kGoodFlags) == 0, Allow()).Else(CrashSIGSYS());
+}
+
+ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid) {
+ const Arg<pid_t> pid(0);
+ const Arg<uintptr_t> new_limit(2);
+ // Only allow 'get' operations, and only for the current process.
+ return If(AllOf(new_limit == 0, AnyOf(pid == 0, pid == target_pid)), Allow())
+ .Else(Error(EPERM));
+}
+
} // namespace sandbox.
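
A sketch of how these helpers compose inside a bpf_dsl policy's
EvaluateSyscall(), mirroring the baseline-policy pattern above (the
surrounding policy class and |sandbox_pid_| member are hypothetical):

```cpp
if (sysno == __NR_getrandom)
  return RestrictGetRandom();  // Only no flags or GRND_NONBLOCK.
if (sysno == __NR_prlimit64)
  return RestrictPrlimitToGetrlimit(sandbox_pid_);  // Gets only, own pid.
```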
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
index b96fe20e35..c4577dc97d 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
@@ -94,6 +94,15 @@ SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetrusage();
// about the state of the host OS.
SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictClockID();
+// Restrict the flags argument to getrandom() to allow only no flags, or
+// GRND_NONBLOCK.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetRandom();
+
+// Restrict |new_limit| to NULL, and |pid| to the calling process (or 0) for
+// prlimit64(). This allows only getting rlimits on the current process.
+// Otherwise, fail gracefully; see crbug.com/160157.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid);
+
} // namespace sandbox.
#endif // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_PARAMETERS_RESTRICTIONS_H_
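For context, restrictions like RestrictGetRandom() and RestrictPrlimitToGetrlimit() are returned from a bpf_dsl policy's EvaluateSyscall(). A minimal sketch, assuming the usual bpf_dsl::Policy interface (ExamplePolicy itself is hypothetical and not part of this change):

#include <sys/syscall.h>
#include <unistd.h>

#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
#include "sandbox/linux/bpf_dsl/policy.h"
#include "sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h"

class ExamplePolicy : public sandbox::bpf_dsl::Policy {
 public:
  sandbox::bpf_dsl::ResultExpr EvaluateSyscall(int sysno) const override {
    switch (sysno) {
      case __NR_getrandom:
        return sandbox::RestrictGetRandom();
      case __NR_prlimit64:
        return sandbox::RestrictPrlimitToGetrlimit(getpid());
      default:
        return sandbox::bpf_dsl::Allow();
    }
  }
};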
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
index 804a8fea1e..c068cd2d04 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -13,6 +13,7 @@
#include <unistd.h>
#include "base/bind.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/threading/thread.h"
@@ -164,7 +165,7 @@ BPF_TEST_C(ParameterRestrictions,
// different.
base::Thread getparam_thread("sched_getparam_thread");
BPF_ASSERT(getparam_thread.Start());
- getparam_thread.message_loop()->PostTask(
+ getparam_thread.task_runner()->PostTask(
FROM_HERE, base::Bind(&SchedGetParamThread, &thread_run));
BPF_ASSERT(thread_run.TimedWait(base::TimeDelta::FromMilliseconds(5000)));
getparam_thread.Stop();
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
index c217d47e2d..1d9f95cd64 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
@@ -120,9 +120,7 @@ bool SyscallSets::IsFileSystem(int sysno) {
#if defined(__i386__) || defined(__arm__) || defined(__mips__)
case __NR_lstat64:
#endif
-#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
case __NR_memfd_create:
-#endif
case __NR_mkdirat:
case __NR_mknodat:
#if defined(__i386__)
@@ -414,6 +412,7 @@ bool SyscallSets::IsAllowedEpoll(int sysno) {
case __NR_epoll_create:
case __NR_epoll_wait:
#endif
+ case __NR_epoll_pwait:
case __NR_epoll_create1:
case __NR_epoll_ctl:
return true;
@@ -421,7 +420,6 @@ bool SyscallSets::IsAllowedEpoll(int sysno) {
#if defined(__x86_64__)
case __NR_epoll_ctl_old:
#endif
- case __NR_epoll_pwait:
#if defined(__x86_64__)
case __NR_epoll_wait_old:
#endif
@@ -550,7 +548,7 @@ bool SyscallSets::IsAllowedGeneralIo(int sysno) {
#if defined(__i386__) || defined(__arm__) || defined(__mips__)
case __NR__newselect:
#endif
-#if defined(__arm__)
+#if defined(__arm__) || defined(__mips__)
case __NR_send:
#endif
#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
diff --git a/sandbox/linux/services/credentials.cc b/sandbox/linux/services/credentials.cc
index 0c617d4b2f..50a109e2f4 100644
--- a/sandbox/linux/services/credentials.cc
+++ b/sandbox/linux/services/credentials.cc
@@ -150,6 +150,18 @@ int CapabilityToKernelValue(Credentials::Capability cap) {
return 0;
}
+void SetGidAndUidMaps(gid_t gid, uid_t uid) {
+ if (NamespaceUtils::KernelSupportsDenySetgroups()) {
+ PCHECK(NamespaceUtils::DenySetgroups());
+ }
+ DCHECK(GetRESIds(NULL, NULL));
+ const char kGidMapFile[] = "/proc/self/gid_map";
+ const char kUidMapFile[] = "/proc/self/uid_map";
+ PCHECK(NamespaceUtils::WriteToIdMapFile(kGidMapFile, gid));
+ PCHECK(NamespaceUtils::WriteToIdMapFile(kUidMapFile, uid));
+ DCHECK(GetRESIds(NULL, NULL));
+}
+
} // namespace.
// static
@@ -253,8 +265,14 @@ bool Credentials::CanCreateProcessInNewUserNS() {
return false;
#endif
- // This is roughly a fork().
- const pid_t pid = sys_clone(CLONE_NEWUSER | SIGCHLD, 0, 0, 0, 0);
+ uid_t uid;
+ gid_t gid;
+ if (!GetRESIds(&uid, &gid)) {
+ return false;
+ }
+
+ const pid_t pid =
+ base::ForkWithFlags(CLONE_NEWUSER | SIGCHLD, nullptr, nullptr);
if (pid == -1) {
CheckCloneNewUserErrno(errno);
@@ -262,20 +280,28 @@ bool Credentials::CanCreateProcessInNewUserNS() {
}
// The parent process could have had threads. In the child, these threads
- // have disappeared. Make sure to not do anything in the child, as this is a
- // fragile execution environment.
+ // have disappeared.
if (pid == 0) {
- _exit(kExitSuccess);
+ // unshare() requires the effective uid and gid to have a mapping in the
+ // parent namespace.
+ SetGidAndUidMaps(gid, uid);
+
+ // Make sure we drop CAP_SYS_ADMIN.
+ CHECK(sandbox::Credentials::DropAllCapabilities());
+
+ // Ensure we have unprivileged use of CLONE_NEWUSER. Debian
+ // Jessie explicitly forbids this case. See:
+ // add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by-default.patch
+ _exit(!!sys_unshare(CLONE_NEWUSER));
}
// Always reap the child.
int status = -1;
PCHECK(HANDLE_EINTR(waitpid(pid, &status, 0)) == pid);
- CHECK(WIFEXITED(status));
- CHECK_EQ(kExitSuccess, WEXITSTATUS(status));
- // clone(2) succeeded, we can use CLONE_NEWUSER.
- return true;
+ // clone(2) succeeded. Now return true only if the system grants
+ // unprivileged use of CLONE_NEWUSER as well.
+ return WIFEXITED(status) && WEXITSTATUS(status) == kExitSuccess;
}
bool Credentials::MoveToNewUserNS() {
@@ -296,18 +322,9 @@ bool Credentials::MoveToNewUserNS() {
return false;
}
- if (NamespaceUtils::KernelSupportsDenySetgroups()) {
- PCHECK(NamespaceUtils::DenySetgroups());
- }
-
// The current {r,e,s}{u,g}id is now an overflow id (c.f.
// /proc/sys/kernel/overflowuid). Setup the uid and gid maps.
- DCHECK(GetRESIds(NULL, NULL));
- const char kGidMapFile[] = "/proc/self/gid_map";
- const char kUidMapFile[] = "/proc/self/uid_map";
- PCHECK(NamespaceUtils::WriteToIdMapFile(kGidMapFile, gid));
- PCHECK(NamespaceUtils::WriteToIdMapFile(kUidMapFile, uid));
- DCHECK(GetRESIds(NULL, NULL));
+ SetGidAndUidMaps(gid, uid);
return true;
}
@@ -315,12 +332,16 @@ bool Credentials::DropFileSystemAccess(int proc_fd) {
CHECK_LE(0, proc_fd);
CHECK(ChrootToSafeEmptyDir());
- CHECK(!base::DirectoryExists(base::FilePath("/proc")));
+ CHECK(!HasFileSystemAccess());
CHECK(!ProcUtil::HasOpenDirectory(proc_fd));
// We never let this function fail.
return true;
}
+bool Credentials::HasFileSystemAccess() {
+ return base::DirectoryExists(base::FilePath("/proc"));
+}
+
pid_t Credentials::ForkAndDropCapabilitiesInChild() {
pid_t pid = fork();
if (pid != 0) {
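The reworked CanCreateProcessInNewUserNS() now distinguishes "user namespaces exist" from "unprivileged user namespaces are allowed": the child maps its ids, drops capabilities, then attempts a second unshare(CLONE_NEWUSER), which Debian Jessie's kernel.unprivileged_userns_clone=0 rejects. A freestanding sketch of the same probe (helper names are hypothetical; the real code uses base::ForkWithFlags and NamespaceUtils):

#include <fcntl.h>
#include <linux/capability.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cerrno>

static bool WriteFileString(const char* path, const char* data) {
  const int fd = open(path, O_WRONLY);
  if (fd < 0)
    return false;
  const ssize_t len = static_cast<ssize_t>(strlen(data));
  const bool ok = write(fd, data, len) == len;
  close(fd);
  return ok;
}

static bool DropAllCaps() {
  // Clearing every capability set mirrors Credentials::DropAllCapabilities().
  struct __user_cap_header_struct hdr = {};
  struct __user_cap_data_struct data[2] = {};
  hdr.version = _LINUX_CAPABILITY_VERSION_3;
  return syscall(SYS_capset, &hdr, data) == 0;
}

bool CanNestUserNamespaces() {
  const unsigned uid = geteuid();
  const unsigned gid = getegid();
  const pid_t pid = fork();
  if (pid == -1)
    return false;
  if (pid == 0) {
    // Step 1: enter a user namespace at all (stands in for the
    // ForkWithFlags(CLONE_NEWUSER | SIGCHLD, ...) in the real code).
    if (unshare(CLONE_NEWUSER) != 0)
      _exit(1);
    // The second unshare() needs the euid/egid mapped, mirroring
    // SetGidAndUidMaps(). /proc/self/setgroups only exists on >= 3.19.
    WriteFileString("/proc/self/setgroups", "deny");
    char map[64];
    snprintf(map, sizeof(map), "%u %u 1", gid, gid);
    if (!WriteFileString("/proc/self/gid_map", map))
      _exit(1);
    snprintf(map, sizeof(map), "%u %u 1", uid, uid);
    if (!WriteFileString("/proc/self/uid_map", map))
      _exit(1);
    if (!DropAllCaps())
      _exit(1);
    // Step 2: the actual question -- is unprivileged CLONE_NEWUSER allowed?
    _exit(unshare(CLONE_NEWUSER) != 0);
  }
  int status = -1;
  while (waitpid(pid, &status, 0) == -1 && errno == EINTR) {
  }
  return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}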
diff --git a/sandbox/linux/services/credentials.h b/sandbox/linux/services/credentials.h
index b89a6aa7cf..157c8e75e8 100644
--- a/sandbox/linux/services/credentials.h
+++ b/sandbox/linux/services/credentials.h
@@ -94,6 +94,9 @@ class SANDBOX_EXPORT Credentials {
// - DropAllCapabilities() must be called to prevent escapes.
static bool DropFileSystemAccess(int proc_fd) WARN_UNUSED_RESULT;
+ // This function returns true if the process can still access the filesystem.
+ static bool HasFileSystemAccess();
+
// Forks and drops capabilities in the child.
static pid_t ForkAndDropCapabilitiesInChild();
diff --git a/sandbox/linux/services/credentials_unittest.cc b/sandbox/linux/services/credentials_unittest.cc
index b95ba0bab2..41c04bbcc2 100644
--- a/sandbox/linux/services/credentials_unittest.cc
+++ b/sandbox/linux/services/credentials_unittest.cc
@@ -145,11 +145,12 @@ SANDBOX_TEST(Credentials, CanDetectRoot) {
// Disabled on ASAN because of crbug.com/451603.
SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessIsSafe)) {
+ CHECK(Credentials::HasFileSystemAccess());
CHECK(Credentials::DropAllCapabilities());
// Probably missing kernel support.
if (!Credentials::MoveToNewUserNS()) return;
CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
- CHECK(!base::DirectoryExists(base::FilePath("/proc")));
+ CHECK(!Credentials::HasFileSystemAccess());
CHECK(WorkingDirectoryIsRoot());
CHECK(base::IsDirectoryEmpty(base::FilePath("/")));
// We want the chroot to never have a subdirectory. A subdirectory
@@ -245,18 +246,19 @@ void SignalHandler(int sig) {
signal_handler_called = 1;
}
+// glibc (and some other libcs) caches the PID and TID in TLS. This test
+// verifies that these values are correct after DropFileSystemAccess().
// Disabled on ASAN because of crbug.com/451603.
SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessPreservesTLS)) {
// Probably missing kernel support.
if (!Credentials::MoveToNewUserNS()) return;
CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
- // In glibc, pthread_getattr_np makes an assertion about the cached PID/TID in
- // TLS.
- pthread_attr_t attr;
- EXPECT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
+ // The libc getpid implementation may return a cached PID. Ensure that
+ // it matches the PID returned from the getpid system call.
+ CHECK_EQ(sys_getpid(), getpid());
- // raise also uses the cached TID in glibc.
+ // raise uses the cached TID in glibc.
struct sigaction action = {};
action.sa_handler = &SignalHandler;
PCHECK(sigaction(SIGUSR1, &action, nullptr) == 0);
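The rewritten check reduces to comparing the kernel's answer with libc's possibly TLS-cached one. A freestanding equivalent (assuming only <sys/syscall.h>; the sandbox's own sys_getpid() wrapper amounts to the same raw call):

#include <sys/syscall.h>
#include <unistd.h>

// True iff libc's cached PID still matches what the kernel reports.
bool PidCacheIsConsistent() {
  return syscall(__NR_getpid) == getpid();
}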
diff --git a/sandbox/linux/services/syscall_wrappers.cc b/sandbox/linux/services/syscall_wrappers.cc
index 7132d2ade9..9c7727cee5 100644
--- a/sandbox/linux/services/syscall_wrappers.cc
+++ b/sandbox/linux/services/syscall_wrappers.cc
@@ -32,6 +32,10 @@ pid_t sys_gettid(void) {
return syscall(__NR_gettid);
}
+ssize_t sys_write(int fd, const char* buffer, size_t buffer_size) {
+ return syscall(__NR_write, fd, buffer, buffer_size);
+}
+
long sys_clone(unsigned long flags,
std::nullptr_t child_stack,
pid_t* ptid,
diff --git a/sandbox/linux/services/syscall_wrappers.h b/sandbox/linux/services/syscall_wrappers.h
index 057e4c87f4..1975bfbd88 100644
--- a/sandbox/linux/services/syscall_wrappers.h
+++ b/sandbox/linux/services/syscall_wrappers.h
@@ -28,6 +28,10 @@ SANDBOX_EXPORT pid_t sys_getpid(void);
SANDBOX_EXPORT pid_t sys_gettid(void);
+SANDBOX_EXPORT ssize_t sys_write(int fd,
+ const char* buffer,
+ size_t buffer_size);
+
SANDBOX_EXPORT long sys_clone(unsigned long flags);
// |regs| is not supported and must be passed as nullptr. |child_stack| must be
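The new sys_write() wrapper invokes the write syscall directly, presumably so crash-reporting paths need not go through libc, whose write() may be interposed (e.g. by a sanitizer) and is then not guaranteed async-signal-safe. A hypothetical use:

#include <unistd.h>

#include "sandbox/linux/services/syscall_wrappers.h"

// Safe to call from a signal handler: no libc I/O machinery involved.
void ReportFromSignalHandler() {
  static const char kMsg[] = "sandbox: unexpected syscall\n";
  sandbox::sys_write(STDERR_FILENO, kMsg, sizeof(kMsg) - 1);
}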
diff --git a/sandbox/linux/suid/process_util_linux.c b/sandbox/linux/suid/process_util_linux.c
index 40949bd6ac..d28d5766c3 100644
--- a/sandbox/linux/suid/process_util_linux.c
+++ b/sandbox/linux/suid/process_util_linux.c
@@ -59,6 +59,7 @@ bool AdjustOOMScore(pid_t process, int score) {
fd = openat(dirfd, "oom_adj", O_WRONLY);
if (fd < 0) {
// Nope, that doesn't work either.
+ close(dirfd);
return false;
} else {
// If we're using the old oom_adj file, the allowed range is now
diff --git a/sandbox/linux/syscall_broker/broker_file_permission.h b/sandbox/linux/syscall_broker/broker_file_permission.h
index 03300d1d74..ddc62d5629 100644
--- a/sandbox/linux/syscall_broker/broker_file_permission.h
+++ b/sandbox/linux/syscall_broker/broker_file_permission.h
@@ -61,7 +61,7 @@ class SANDBOX_EXPORT BrokerFilePermission {
// or a pointer the matched path in the whitelist if an absolute
// match.
// If not NULL |unlink_after_open| is set to point to true if the
- // caller should unlink the path after openning.
+ // caller should unlink the path after opening.
// Async signal safe if |file_to_open| is NULL.
bool CheckOpen(const char* requested_filename,
int flags,
diff --git a/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc b/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
index b58a901cde..83840779f9 100644
--- a/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
+++ b/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
@@ -46,10 +46,19 @@ SANDBOX_TEST(BrokerFilePermission, CreateGoodRecursive) {
BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
}
+// In official builds, CHECK(x) causes a SIGTRAP on the architectures where the
+// sandbox is enabled (x86, x86_64, arm64, and 32-bit arm processes running on
+// an arm64 kernel).
+#if defined(OFFICIAL_BUILD)
+#define DEATH_BY_CHECK(msg) DEATH_BY_SIGNAL(SIGTRAP)
+#else
+#define DEATH_BY_CHECK(msg) DEATH_MESSAGE(msg)
+#endif
+
SANDBOX_DEATH_TEST(
BrokerFilePermission,
CreateBad,
- DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+ DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
const char kPath[] = "/tmp/bad/";
BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
}
@@ -57,7 +66,7 @@ SANDBOX_DEATH_TEST(
SANDBOX_DEATH_TEST(
BrokerFilePermission,
CreateBadRecursive,
- DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+ DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
const char kPath[] = "/tmp/bad";
BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
}
@@ -65,7 +74,7 @@ SANDBOX_DEATH_TEST(
SANDBOX_DEATH_TEST(
BrokerFilePermission,
CreateBadNotAbs,
- DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+ DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
const char kPath[] = "tmp/bad";
BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
}
@@ -73,7 +82,7 @@ SANDBOX_DEATH_TEST(
SANDBOX_DEATH_TEST(
BrokerFilePermission,
CreateBadEmpty,
- DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+ DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
const char kPath[] = "";
BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
}
diff --git a/sandbox/linux/system_headers/arm64_linux_syscalls.h b/sandbox/linux/system_headers/arm64_linux_syscalls.h
index 8acb2d1000..59d0eab8ec 100644
--- a/sandbox/linux/system_headers/arm64_linux_syscalls.h
+++ b/sandbox/linux/system_headers/arm64_linux_syscalls.h
@@ -1059,4 +1059,8 @@
#define __NR_getrandom 278
#endif
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create 279
+#endif
+
#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
diff --git a/sandbox/linux/system_headers/mips64_linux_syscalls.h b/sandbox/linux/system_headers/mips64_linux_syscalls.h
index d003124284..90f3d1bea8 100644
--- a/sandbox/linux/system_headers/mips64_linux_syscalls.h
+++ b/sandbox/linux/system_headers/mips64_linux_syscalls.h
@@ -1263,4 +1263,12 @@
#define __NR_seccomp (__NR_Linux + 312)
#endif
+#if !defined(__NR_getrandom)
+#define __NR_getrandom (__NR_Linux + 313)
+#endif
+
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create (__NR_Linux + 314)
+#endif
+
#endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
diff --git a/sandbox/linux/system_headers/mips_linux_syscalls.h b/sandbox/linux/system_headers/mips_linux_syscalls.h
index eb1717aad9..784d6b8ae0 100644
--- a/sandbox/linux/system_headers/mips_linux_syscalls.h
+++ b/sandbox/linux/system_headers/mips_linux_syscalls.h
@@ -1425,4 +1425,12 @@
#define __NR_seccomp (__NR_Linux + 352)
#endif
+#if !defined(__NR_getrandom)
+#define __NR_getrandom (__NR_Linux + 353)
+#endif
+
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create (__NR_Linux + 354)
+#endif
+
#endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
diff --git a/sandbox/mac/BUILD.gn b/sandbox/mac/BUILD.gn
index fd53131dbb..5174b54f81 100644
--- a/sandbox/mac/BUILD.gn
+++ b/sandbox/mac/BUILD.gn
@@ -35,6 +35,8 @@ component("sandbox") {
component("seatbelt") {
sources = [
+ "sandbox_compiler.cc",
+ "sandbox_compiler.h",
"seatbelt.cc",
"seatbelt.h",
"seatbelt_export.h",
@@ -47,6 +49,8 @@ test("sandbox_mac_unittests") {
sources = [
"bootstrap_sandbox_unittest.mm",
"policy_unittest.cc",
+ "sandbox_mac_compiler_unittest.mm",
+ "sandbox_mac_compiler_v2_unittest.mm",
"xpc_message_server_unittest.cc",
]
@@ -57,6 +61,7 @@ test("sandbox_mac_unittests") {
deps = [
":sandbox",
+ ":seatbelt",
"//base",
"//base/test:run_all_unittests",
"//testing/gtest",
diff --git a/sandbox/mac/sandbox_mac.gypi b/sandbox/mac/sandbox_mac.gypi
deleted file mode 100644
index 79740e5a84..0000000000
--- a/sandbox/mac/sandbox_mac.gypi
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'seatbelt',
- 'type' : '<(component)',
- 'sources': [
- 'seatbelt.cc',
- 'seatbelt.h',
- 'seatbelt_export.h',
- ],
- 'defines': [
- 'SEATBELT_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/usr/lib/libsandbox.dylib',
- ],
- }
- },
- {
- 'target_name': 'sandbox',
- 'type': '<(component)',
- 'sources': [
- 'bootstrap_sandbox.cc',
- 'bootstrap_sandbox.h',
- 'launchd_interception_server.cc',
- 'launchd_interception_server.h',
- 'mach_message_server.cc',
- 'mach_message_server.h',
- 'message_server.h',
- 'os_compatibility.cc',
- 'os_compatibility.h',
- 'policy.cc',
- 'policy.h',
- 'pre_exec_delegate.cc',
- 'pre_exec_delegate.h',
- 'xpc.h',
- 'xpc_message_server.cc',
- 'xpc_message_server.h',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- ],
- 'include_dirs': [
- '..',
- '<(SHARED_INTERMEDIATE_DIR)',
- ],
- 'defines': [
- 'SANDBOX_IMPLEMENTATION',
- ],
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/usr/lib/libbsm.dylib',
- ],
- },
- },
- {
- 'target_name': 'sandbox_mac_unittests',
- 'type': 'executable',
- 'sources': [
- 'bootstrap_sandbox_unittest.mm',
- 'policy_unittest.cc',
- 'xpc_message_server_unittest.cc',
- ],
- 'dependencies': [
- 'sandbox',
- '../base/base.gyp:base',
- '../base/base.gyp:run_all_unittests',
- '../testing/gtest.gyp:gtest',
- ],
- 'include_dirs': [
- '..',
- ],
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
- ],
- },
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'sandbox_mac_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'sandbox_mac_unittests',
- ],
- 'includes': [ '../../build/isolate.gypi' ],
- 'sources': [ '../sandbox_mac_unittests.isolate' ],
- },
- ],
- }],
- ],
-}
diff --git a/sandbox/sandbox.gyp b/sandbox/sandbox.gyp
deleted file mode 100644
index f93fa1862a..0000000000
--- a/sandbox/sandbox.gyp
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'conditions': [
- [ 'OS=="win"', {
- 'includes': [
- 'win/sandbox_win.gypi',
- ],
- }],
- [ 'OS=="linux" or OS=="android"', {
- 'includes': [
- 'linux/sandbox_linux.gypi',
- ],
- }],
- [ 'OS=="mac" and OS!="ios"', {
- 'includes': [
- 'mac/sandbox_mac.gypi',
- ],
- }],
- [ 'OS!="win" and OS!="mac" and OS!="linux" and OS!="android"', {
- # A 'default' to accomodate the "sandbox" target.
- 'targets': [
- {
- 'target_name': 'sandbox',
- 'type': 'none',
- }
- ]
- }],
- ],
-}
diff --git a/sandbox/sandbox_linux_unittests.isolate b/sandbox/sandbox_linux_unittests.isolate
deleted file mode 100644
index 2b7c2a73af..0000000000
--- a/sandbox/sandbox_linux_unittests.isolate
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Because of a limitation in isolate_driver.py, this file needs to be in
-# the same directory as the main .gyp file.
-
-{
- 'conditions': [
- ['OS=="android" or OS=="linux"', {
- 'variables': {
- 'command': [
- '<(PRODUCT_DIR)/sandbox_linux_unittests',
- ],
- },
- }],
- ],
- 'includes': [
- # This is needed because of base/ dependencies on
- # icudtl.dat.
- '../base/base.isolate',
- ],
-}
diff --git a/sandbox/win/BUILD.gn b/sandbox/win/BUILD.gn
index 60bb499af3..1d51220c5a 100644
--- a/sandbox/win/BUILD.gn
+++ b/sandbox/win/BUILD.gn
@@ -154,31 +154,18 @@ static_library("sandbox") {
]
}
+ # Disable sanitizer coverage in the sandbox code. The sandbox code runs before
+ # sanitizer coverage can initialize. http://crbug.com/484711
+ configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ configs +=
+ [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
+
configs += [ "//build/config:precompiled_headers" ]
deps = [
"//base",
"//base:base_static",
]
- if (current_cpu == "x86") {
- deps += [ ":copy_wow_helper" ]
- }
-}
-
-if (current_cpu == "x86") {
- # Make a target that copies the wow_helper files to the out dir.
- #
- # TODO(brettw) we can probably just build this now that we have proper
- # toolchain support.
- copy("copy_wow_helper") {
- sources = [
- "wow_helper/wow_helper.exe",
- "wow_helper/wow_helper.pdb",
- ]
- outputs = [
- "$root_out_dir/{{source_file_part}}",
- ]
- }
}
test("sbox_integration_tests") {
@@ -204,6 +191,7 @@ test("sbox_integration_tests") {
"tests/common/controller.h",
"tests/common/test_utils.cc",
"tests/common/test_utils.h",
+ "tests/integration_tests/cfi_unittest.cc",
"tests/integration_tests/integration_tests.cc",
"tests/integration_tests/integration_tests_common.h",
"tests/integration_tests/integration_tests_test.cc",
@@ -211,15 +199,30 @@ test("sbox_integration_tests") {
deps = [
":sandbox",
- ":sbox_integration_test_hook_dll",
- ":sbox_integration_test_win_proc",
"//base/test:test_support",
"//testing/gtest",
]
+ data_deps = [
+ ":cfi_unittest_exe",
+ ":sbox_integration_test_hook_dll",
+ ":sbox_integration_test_win_proc",
+ ]
+
libs = [ "dxva2.lib" ]
}
+executable("cfi_unittest_exe") {
+ sources = [
+ "tests/integration_tests/cfi_unittest_exe.cc",
+ ]
+ deps = [
+ "//base",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+}
+
loadable_module("sbox_integration_test_hook_dll") {
sources = [
"tests/integration_tests/hooking_dll.cc",
diff --git a/sandbox/win/sandbox_win.gypi b/sandbox/win/sandbox_win.gypi
deleted file mode 100644
index e9673aa9a1..0000000000
--- a/sandbox/win/sandbox_win.gypi
+++ /dev/null
@@ -1,432 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'target_defaults': {
- 'variables': {
- 'sandbox_windows_target': 0,
- 'target_arch%': 'ia32',
- },
- 'target_conditions': [
- ['sandbox_windows_target==1', {
- # Files that are shared between the 32-bit and the 64-bit versions
- # of the Windows sandbox library.
- 'sources': [
- 'src/acl.cc',
- 'src/acl.h',
- 'src/broker_services.cc',
- 'src/broker_services.h',
- 'src/crosscall_client.h',
- 'src/crosscall_params.h',
- 'src/crosscall_server.cc',
- 'src/crosscall_server.h',
- 'src/eat_resolver.cc',
- 'src/eat_resolver.h',
- 'src/filesystem_dispatcher.cc',
- 'src/filesystem_dispatcher.h',
- 'src/filesystem_interception.cc',
- 'src/filesystem_interception.h',
- 'src/filesystem_policy.cc',
- 'src/filesystem_policy.h',
- 'src/handle_closer.cc',
- 'src/handle_closer.h',
- 'src/handle_closer_agent.cc',
- 'src/handle_closer_agent.h',
- 'src/interception.cc',
- 'src/interception.h',
- 'src/interception_agent.cc',
- 'src/interception_agent.h',
- 'src/interception_internal.h',
- 'src/interceptors.h',
- 'src/internal_types.h',
- 'src/ipc_tags.h',
- 'src/job.cc',
- 'src/job.h',
- 'src/named_pipe_dispatcher.cc',
- 'src/named_pipe_dispatcher.h',
- 'src/named_pipe_interception.cc',
- 'src/named_pipe_interception.h',
- 'src/named_pipe_policy.cc',
- 'src/named_pipe_policy.h',
- 'src/nt_internals.h',
- 'src/policy_broker.cc',
- 'src/policy_broker.h',
- 'src/policy_engine_opcodes.cc',
- 'src/policy_engine_opcodes.h',
- 'src/policy_engine_params.h',
- 'src/policy_engine_processor.cc',
- 'src/policy_engine_processor.h',
- 'src/policy_low_level.cc',
- 'src/policy_low_level.h',
- 'src/policy_params.h',
- 'src/policy_target.cc',
- 'src/policy_target.h',
- 'src/process_mitigations.cc',
- 'src/process_mitigations.h',
- 'src/process_mitigations_win32k_dispatcher.cc',
- 'src/process_mitigations_win32k_dispatcher.h',
- 'src/process_mitigations_win32k_interception.cc',
- 'src/process_mitigations_win32k_interception.h',
- 'src/process_mitigations_win32k_policy.cc',
- 'src/process_mitigations_win32k_policy.h',
- 'src/process_thread_dispatcher.cc',
- 'src/process_thread_dispatcher.h',
- 'src/process_thread_interception.cc',
- 'src/process_thread_interception.h',
- 'src/process_thread_policy.cc',
- 'src/process_thread_policy.h',
- 'src/registry_dispatcher.cc',
- 'src/registry_dispatcher.h',
- 'src/registry_interception.cc',
- 'src/registry_interception.h',
- 'src/registry_policy.cc',
- 'src/registry_policy.h',
- 'src/resolver.cc',
- 'src/resolver.h',
- 'src/restricted_token_utils.cc',
- 'src/restricted_token_utils.h',
- 'src/restricted_token.cc',
- 'src/restricted_token.h',
- 'src/sandbox_factory.h',
- 'src/sandbox_globals.cc',
- 'src/sandbox_nt_types.h',
- 'src/sandbox_nt_util.cc',
- 'src/sandbox_nt_util.h',
- 'src/sandbox_policy_base.cc',
- 'src/sandbox_policy_base.h',
- 'src/sandbox_policy.h',
- 'src/sandbox_rand.cc',
- 'src/sandbox_rand.h',
- 'src/sandbox_types.h',
- 'src/sandbox_utils.cc',
- 'src/sandbox_utils.h',
- 'src/sandbox.cc',
- 'src/sandbox.h',
- 'src/security_level.h',
- 'src/service_resolver.cc',
- 'src/service_resolver.h',
- 'src/sharedmem_ipc_client.cc',
- 'src/sharedmem_ipc_client.h',
- 'src/sharedmem_ipc_server.cc',
- 'src/sharedmem_ipc_server.h',
- 'src/sid.cc',
- 'src/sid.h',
- 'src/sync_dispatcher.cc',
- 'src/sync_dispatcher.h',
- 'src/sync_interception.cc',
- 'src/sync_interception.h',
- 'src/sync_policy.cc',
- 'src/sync_policy.h',
- 'src/target_interceptions.cc',
- 'src/target_interceptions.h',
- 'src/target_process.cc',
- 'src/target_process.h',
- 'src/target_services.cc',
- 'src/target_services.h',
- 'src/top_level_dispatcher.cc',
- 'src/top_level_dispatcher.h',
- 'src/win_utils.cc',
- 'src/win_utils.h',
- 'src/win2k_threadpool.cc',
- 'src/win2k_threadpool.h',
- 'src/window.cc',
- 'src/window.h',
- ],
- 'target_conditions': [
- ['target_arch=="x64"', {
- 'sources': [
- 'src/interceptors_64.cc',
- 'src/interceptors_64.h',
- 'src/resolver_64.cc',
- 'src/service_resolver_64.cc',
- ],
- }],
- ['target_arch=="ia32"', {
- 'sources': [
- 'src/resolver_32.cc',
- 'src/service_resolver_32.cc',
- 'src/sidestep_resolver.cc',
- 'src/sidestep_resolver.h',
- 'src/sidestep\ia32_modrm_map.cpp',
- 'src/sidestep\ia32_opcode_map.cpp',
- 'src/sidestep\mini_disassembler_types.h',
- 'src/sidestep\mini_disassembler.cpp',
- 'src/sidestep\mini_disassembler.h',
- 'src/sidestep\preamble_patcher_with_stub.cpp',
- 'src/sidestep\preamble_patcher.h',
- ],
- }],
- ],
- }],
- ],
- },
- 'targets': [
- {
- 'target_name': 'sandbox',
- 'type': 'static_library',
- 'variables': {
- 'sandbox_windows_target': 1,
- },
- 'dependencies': [
- '../base/base.gyp:base',
- '../base/base.gyp:base_static',
- ],
- 'export_dependent_settings': [
- '../base/base.gyp:base',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'target_conditions': [
- ['target_arch=="ia32"', {
- 'copies': [
- {
- 'destination': '<(PRODUCT_DIR)',
- 'files': [
- 'wow_helper/wow_helper.exe',
- 'wow_helper/wow_helper.pdb',
- ],
- },
- ],
- }],
- ],
- },
- {
- 'target_name': 'sbox_integration_tests',
- 'type': 'executable',
- 'dependencies': [
- 'sandbox',
- 'sbox_integration_test_hook_dll',
- 'sbox_integration_test_win_proc',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'src/address_sanitizer_test.cc',
- 'src/app_container_test.cc',
- 'src/file_policy_test.cc',
- 'src/handle_inheritance_test.cc',
- 'tests/integration_tests/integration_tests_test.cc',
- 'src/handle_closer_test.cc',
- 'src/integrity_level_test.cc',
- 'src/ipc_ping_test.cc',
- 'src/lpc_policy_test.cc',
- 'src/named_pipe_policy_test.cc',
- 'src/policy_target_test.cc',
- 'src/process_mitigations_test.cc',
- 'src/process_policy_test.cc',
- 'src/registry_policy_test.cc',
- 'src/restricted_token_test.cc',
- 'src/sync_policy_test.cc',
- 'src/sync_policy_test.h',
- 'src/unload_dll_test.cc',
- 'tests/common/controller.cc',
- 'tests/common/controller.h',
- 'tests/common/test_utils.cc',
- 'tests/common/test_utils.h',
- 'tests/integration_tests/integration_tests.cc',
- 'tests/integration_tests/integration_tests_common.h',
- ],
- 'link_settings': {
- 'libraries': [
- '-ldxva2.lib',
- ],
- },
- },
- {
- 'target_name': 'sbox_integration_test_hook_dll',
- 'type': 'shared_library',
- 'dependencies': [
- ],
- 'sources': [
- 'tests/integration_tests/hooking_dll.cc',
- 'tests/integration_tests/integration_tests_common.h',
- ],
- },
- {
- 'target_name': 'sbox_integration_test_win_proc',
- 'type': 'executable',
- 'dependencies': [
- ],
- 'sources': [
- 'tests/integration_tests/hooking_win_proc.cc',
- 'tests/integration_tests/integration_tests_common.h',
- ],
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
- },
- },
- },
- {
- 'target_name': 'sbox_validation_tests',
- 'type': 'executable',
- 'dependencies': [
- 'sandbox',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'tests/common/controller.cc',
- 'tests/common/controller.h',
- 'tests/validation_tests/unit_tests.cc',
- 'tests/validation_tests/commands.cc',
- 'tests/validation_tests/commands.h',
- 'tests/validation_tests/suite.cc',
- ],
- 'link_settings': {
- 'libraries': [
- '-lshlwapi.lib',
- ],
- },
- },
- {
- 'target_name': 'sbox_unittests',
- 'type': 'executable',
- 'dependencies': [
- 'sandbox',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'src/interception_unittest.cc',
- 'src/service_resolver_unittest.cc',
- 'src/restricted_token_unittest.cc',
- 'src/job_unittest.cc',
- 'src/sid_unittest.cc',
- 'src/policy_engine_unittest.cc',
- 'src/policy_low_level_unittest.cc',
- 'src/policy_opcodes_unittest.cc',
- 'src/ipc_unittest.cc',
- 'src/sandbox_nt_util_unittest.cc',
- 'src/threadpool_unittest.cc',
- 'src/win_utils_unittest.cc',
- 'tests/common/test_utils.cc',
- 'tests/common/test_utils.h',
- 'tests/unit_tests/unit_tests.cc',
- ],
- },
- {
- 'target_name': 'sandbox_poc',
- 'type': 'executable',
- 'dependencies': [
- 'sandbox',
- 'pocdll',
- ],
- 'sources': [
- 'sandbox_poc/main_ui_window.cc',
- 'sandbox_poc/main_ui_window.h',
- 'sandbox_poc/resource.h',
- 'sandbox_poc/sandbox.cc',
- 'sandbox_poc/sandbox.h',
- 'sandbox_poc/sandbox.ico',
- 'sandbox_poc/sandbox.rc',
- ],
- 'link_settings': {
- 'libraries': [
- '-lcomctl32.lib',
- ],
- },
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
- },
- },
- },
- {
- 'target_name': 'pocdll',
- 'type': 'shared_library',
- 'sources': [
- 'sandbox_poc/pocdll/exports.h',
- 'sandbox_poc/pocdll/fs.cc',
- 'sandbox_poc/pocdll/handles.cc',
- 'sandbox_poc/pocdll/invasive.cc',
- 'sandbox_poc/pocdll/network.cc',
- 'sandbox_poc/pocdll/pocdll.cc',
- 'sandbox_poc/pocdll/processes_and_threads.cc',
- 'sandbox_poc/pocdll/registry.cc',
- 'sandbox_poc/pocdll/spyware.cc',
- 'sandbox_poc/pocdll/utils.h',
- ],
- 'defines': [
- 'POCDLL_EXPORTS',
- ],
- 'include_dirs': [
- '../..',
- ],
- },
- ],
- 'conditions': [
- ['OS=="win" and target_arch=="ia32"', {
- 'targets': [
- {
- 'target_name': 'sandbox_win64',
- 'type': 'static_library',
- 'variables': {
- 'sandbox_windows_target': 1,
- 'target_arch': 'x64',
- },
- 'dependencies': [
- '../base/base.gyp:base_win64',
- '../base/base.gyp:base_static_win64',
- ],
- 'configurations': {
- 'Common_Base': {
- 'msvs_target_platform': 'x64',
- },
- },
- 'include_dirs': [
- '../..',
- ],
- 'defines': [
- '<@(nacl_win64_defines)',
- ]
- },
- ],
- }],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'sbox_integration_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'sbox_integration_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- '../sbox_integration_tests.isolate',
- ],
- },
- {
- 'target_name': 'sbox_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'sbox_unittests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- '../sbox_unittests.isolate',
- ],
- },
- {
- 'target_name': 'sbox_validation_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'sbox_validation_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- '../sbox_validation_tests.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/sandbox/win/src/internal_types.h b/sandbox/win/src/internal_types.h
index e1028189d8..7ea4b7d62e 100644
--- a/sandbox/win/src/internal_types.h
+++ b/sandbox/win/src/internal_types.h
@@ -13,7 +13,7 @@ const wchar_t kNtdllName[] = L"ntdll.dll";
const wchar_t kKerneldllName[] = L"kernel32.dll";
const wchar_t kKernelBasedllName[] = L"kernelbase.dll";
-// Defines the supported C++ types encoding to numeric id. Like a poor's man
+// Defines the supported C++ types encoding to numeric id. Like a simplified
// RTTI. Note that true C++ RTTI will not work because the types are not
// polymorphic anyway.
enum ArgType {
diff --git a/sandbox/win/src/sandbox.vcproj b/sandbox/win/src/sandbox.vcproj
index f206e01a1f..229441cbd5 100644
--- a/sandbox/win/src/sandbox.vcproj
+++ b/sandbox/win/src/sandbox.vcproj
@@ -64,11 +64,6 @@
<Tool
Name="VCFxCopTool"
/>
- <Tool
- Name="VCPostBuildEventTool"
- Description="Copy wow_helper to output directory"
- CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) &amp;&amp; copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
- />
</Configuration>
<Configuration
Name="Release|Win32"
@@ -118,11 +113,6 @@
<Tool
Name="VCFxCopTool"
/>
- <Tool
- Name="VCPostBuildEventTool"
- Description="Copy wow_helper to output directory"
- CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) &amp;&amp; copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
- />
</Configuration>
</Configurations>
<References>
diff --git a/sandbox/win/src/sandbox_types.h b/sandbox/win/src/sandbox_types.h
index 919086a828..ae36ef5c95 100644
--- a/sandbox/win/src/sandbox_types.h
+++ b/sandbox/win/src/sandbox_types.h
@@ -5,6 +5,7 @@
#ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
#define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
+#include "base/process/kill.h"
#include "base/process/launch.h"
namespace sandbox {
@@ -103,6 +104,8 @@ enum ResultCode : int {
SBOX_ERROR_CANNOT_RESOLVE_INTERCEPTION_THUNK = 41,
// Cannot write interception thunk to child process.
SBOX_ERROR_CANNOT_WRITE_INTERCEPTION_THUNK = 42,
+ // Cannot find the base address of the new process.
+ SBOX_ERROR_CANNOT_FIND_BASE_ADDRESS = 43,
// Placeholder for last item of the enum.
SBOX_ERROR_LAST
};
@@ -121,6 +124,10 @@ enum TerminationCodes {
SBOX_FATAL_LAST
};
+static_assert(SBOX_FATAL_MEMORY_EXCEEDED ==
+ base::win::kSandboxFatalMemoryExceeded,
+ "Value for SBOX_FATAL_MEMORY_EXCEEDED must match base.");
+
class BrokerServices;
class TargetServices;
diff --git a/sandbox/win/src/security_level.h b/sandbox/win/src/security_level.h
index d8524c1fac..ecca64d8fc 100644
--- a/sandbox/win/src/security_level.h
+++ b/sandbox/win/src/security_level.h
@@ -154,11 +154,13 @@ const MitigationFlags MITIGATION_DEP_NO_ATL_THUNK = 0x00000002;
// PROCESS_CREATION_MITIGATION_POLICY_SEHOP_ENABLE.
const MitigationFlags MITIGATION_SEHOP = 0x00000004;
-// Forces ASLR on all images in the child process. Corresponds to
+// Forces ASLR on all images in the child process. In debug builds, must be
+// enabled after startup. Corresponds to
// PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON .
const MitigationFlags MITIGATION_RELOCATE_IMAGE = 0x00000008;
-// Refuses to load DLLs that cannot support ASLR. Corresponds to
+// Refuses to load DLLs that cannot support ASLR. In debug builds, must be
+// enabled after startup. Corresponds to
// PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON_REQ_RELOCS.
const MitigationFlags MITIGATION_RELOCATE_IMAGE_REQUIRED = 0x00000010;
@@ -185,6 +187,11 @@ const MitigationFlags MITIGATION_STRICT_HANDLE_CHECKS = 0x00000100;
// Prevents the process from making Win32k calls. Corresponds to
// PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
+//
+// Applications linked to user32.dll or gdi32.dll make Win32k calls during
+// setup, even if Win32k is not otherwise used. So they also need to add a rule
+// with SUBSYS_WIN32K_LOCKDOWN and semantics FAKE_USER_GDI_INIT to allow the
+// initialization to succeed.
const MitigationFlags MITIGATION_WIN32K_DISABLE = 0x00000200;
// Prevents certain built-in third party extension points from being used.
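A sketch of the broker-side pairing the new comment describes, assuming the usual TargetPolicy interface (the helper itself is hypothetical):

#include "sandbox/win/src/sandbox_policy.h"
#include "sandbox/win/src/security_level.h"

// Lock down Win32k, but let user32/gdi32 initialization be faked so that
// processes linked against them can still start.
sandbox::ResultCode LockDownWin32k(sandbox::TargetPolicy* policy) {
  sandbox::ResultCode rc =
      policy->SetProcessMitigations(sandbox::MITIGATION_WIN32K_DISABLE);
  if (rc != sandbox::SBOX_ALL_OK)
    return rc;
  return policy->AddRule(sandbox::TargetPolicy::SUBSYS_WIN32K_LOCKDOWN,
                         sandbox::TargetPolicy::FAKE_USER_GDI_INIT, nullptr);
}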
diff --git a/sandbox/win/wow_helper.sln b/sandbox/win/wow_helper.sln
deleted file mode 100644
index 26d0da2526..0000000000
--- a/sandbox/win/wow_helper.sln
+++ /dev/null
@@ -1,19 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "wow_helper", "wow_helper\wow_helper.vcproj", "{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Release|x64 = Release|x64
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.ActiveCfg = Debug|x64
- {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.Build.0 = Debug|x64
- {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.ActiveCfg = Release|x64
- {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.Build.0 = Release|x64
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
-EndGlobal
diff --git a/sandbox/win/wow_helper/wow_helper.exe b/sandbox/win/wow_helper/wow_helper.exe
deleted file mode 100755
index f9bfb4bbdd..0000000000
--- a/sandbox/win/wow_helper/wow_helper.exe
+++ /dev/null
Binary files differ
diff --git a/sandbox/win/wow_helper/wow_helper.pdb b/sandbox/win/wow_helper/wow_helper.pdb
deleted file mode 100644
index 9cb67d001d..0000000000
--- a/sandbox/win/wow_helper/wow_helper.pdb
+++ /dev/null
Binary files differ
diff --git a/sandbox/win/wow_helper/wow_helper.vcproj b/sandbox/win/wow_helper/wow_helper.vcproj
deleted file mode 100644
index c8e7c9ebff..0000000000
--- a/sandbox/win/wow_helper/wow_helper.vcproj
+++ /dev/null
@@ -1,215 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
- ProjectType="Visual C++"
- Version="8.00"
- Name="wow_helper"
- ProjectGUID="{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
- RootNamespace="wow_helper"
- Keyword="Win32Proj"
- >
- <Platforms>
- <Platform
- Name="x64"
- />
- </Platforms>
- <ToolFiles>
- </ToolFiles>
- <Configurations>
- <Configuration
- Name="Debug|x64"
- OutputDirectory="$(ProjectDir)"
- IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
- ConfigurationType="1"
- CharacterSet="1"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- TargetEnvironment="3"
- />
- <Tool
- Name="VCCLCompilerTool"
- Optimization="0"
- AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
- PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;_DEBUG"
- MinimalRebuild="true"
- BasicRuntimeChecks="0"
- RuntimeLibrary="1"
- BufferSecurityCheck="false"
- RuntimeTypeInfo="false"
- UsePrecompiledHeader="0"
- WarningLevel="3"
- Detect64BitPortabilityProblems="true"
- DebugInformationFormat="3"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- LinkIncremental="1"
- GenerateDebugInformation="true"
- SubSystem="2"
- TargetMachine="17"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- <Configuration
- Name="Release|x64"
- OutputDirectory="$(ProjectDir)"
- IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
- ConfigurationType="1"
- CharacterSet="1"
- WholeProgramOptimization="1"
- >
- <Tool
- Name="VCPreBuildEventTool"
- />
- <Tool
- Name="VCCustomBuildTool"
- />
- <Tool
- Name="VCXMLDataGeneratorTool"
- />
- <Tool
- Name="VCWebServiceProxyGeneratorTool"
- />
- <Tool
- Name="VCMIDLTool"
- TargetEnvironment="3"
- />
- <Tool
- Name="VCCLCompilerTool"
- AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
- PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;NDEBUG"
- RuntimeLibrary="0"
- BufferSecurityCheck="false"
- RuntimeTypeInfo="false"
- UsePrecompiledHeader="0"
- WarningLevel="3"
- Detect64BitPortabilityProblems="true"
- DebugInformationFormat="3"
- />
- <Tool
- Name="VCManagedResourceCompilerTool"
- />
- <Tool
- Name="VCResourceCompilerTool"
- />
- <Tool
- Name="VCPreLinkEventTool"
- />
- <Tool
- Name="VCLinkerTool"
- LinkIncremental="1"
- GenerateDebugInformation="true"
- SubSystem="2"
- OptimizeReferences="2"
- EnableCOMDATFolding="2"
- TargetMachine="17"
- />
- <Tool
- Name="VCALinkTool"
- />
- <Tool
- Name="VCManifestTool"
- />
- <Tool
- Name="VCXDCMakeTool"
- />
- <Tool
- Name="VCBscMakeTool"
- />
- <Tool
- Name="VCFxCopTool"
- />
- <Tool
- Name="VCAppVerifierTool"
- />
- <Tool
- Name="VCWebDeploymentTool"
- />
- <Tool
- Name="VCPostBuildEventTool"
- />
- </Configuration>
- </Configurations>
- <References>
- </References>
- <Files>
- <Filter
- Name="sandbox"
- >
- <File
- RelativePath="..\src\nt_internals.h"
- >
- </File>
- <File
- RelativePath="..\src\resolver.h"
- >
- </File>
- </Filter>
- <File
- RelativePath=".\service64_resolver.cc"
- >
- </File>
- <File
- RelativePath=".\service64_resolver.h"
- >
- </File>
- <File
- RelativePath=".\target_code.cc"
- >
- </File>
- <File
- RelativePath=".\target_code.h"
- >
- </File>
- <File
- RelativePath=".\wow_helper.cc"
- >
- </File>
- </Files>
- <Globals>
- </Globals>
-</VisualStudioProject>