summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLuis Hector Chavez <lhchavez@google.com>2016-07-27 16:52:26 +0000
committerandroid-build-merger <android-build-merger@google.com>2016-07-27 16:52:26 +0000
commitd196d04a858f3380196d0595138435c559d93826 (patch)
treefb97dc88b72d681efeb9cfa1b8693a6183180ad9
parent1f19dfcad9e0088c7d8baebe7867bc1720e68901 (diff)
parent0c4f26a46430b8c503c65f5cae1d2b6876d53e30 (diff)
downloadlibchrome-d196d04a858f3380196d0595138435c559d93826.tar.gz
libchrome: Uprev the library to r405848 from Chromium
am: 0c4f26a464 Change-Id: Icbb2fa8ba540d8b0f3516bba568ddb43d2e51710
-rw-r--r--Android.mk7
-rw-r--r--SConstruct5
-rw-r--r--base/BUILD.gn125
-rw-r--r--base/OWNERS32
-rw-r--r--base/allocator/BUILD.gn32
-rw-r--r--base/allocator/allocator.gyp16
-rw-r--r--base/at_exit.cc3
-rw-r--r--base/base.gyp79
-rw-r--r--base/base.gypi32
-rw-r--r--base/base_switches.cc10
-rw-r--r--base/base_switches.h1
-rw-r--r--base/bind.h91
-rw-r--r--base/bind_helpers.h218
-rw-r--r--base/bind_internal.h621
-rw-r--r--base/bind_internal_win.h73
-rw-r--r--base/bind_unittest.cc61
-rw-r--r--base/bit_cast.h100
-rw-r--r--base/callback.h21
-rw-r--r--base/callback_helpers.cc22
-rw-r--r--base/callback_helpers.h19
-rw-r--r--base/callback_helpers_unittest.cc47
-rw-r--r--base/callback_internal.h1
-rw-r--r--base/callback_unittest.cc53
-rw-r--r--base/command_line.cc11
-rw-r--r--base/command_line.h32
-rw-r--r--base/debug/stack_trace.cc55
-rw-r--r--base/environment.cc33
-rw-r--r--base/environment.h16
-rw-r--r--base/feature_list.cc19
-rw-r--r--base/feature_list.h4
-rw-r--r--base/feature_list_unittest.cc11
-rw-r--r--base/files/file.h13
-rw-r--r--base/files/file_path.cc10
-rw-r--r--base/files/file_path_watcher_fsevents.cc63
-rw-r--r--base/files/file_path_watcher_fsevents.h10
-rw-r--r--base/files/file_path_watcher_linux.cc2
-rw-r--r--base/files/file_path_watcher_unittest.cc15
-rw-r--r--base/files/file_util.h6
-rw-r--r--base/files/file_util_posix.cc21
-rw-r--r--base/files/important_file_writer_unittest.cc6
-rw-r--r--base/files/scoped_file.cc12
-rw-r--r--base/json/json_parser.cc122
-rw-r--r--base/json/json_parser.h6
-rw-r--r--base/json/json_reader.h14
-rw-r--r--base/json/json_reader_unittest.cc1092
-rw-r--r--base/json/json_writer.cc4
-rw-r--r--base/json/json_writer_unittest.cc9
-rw-r--r--base/logging.cc56
-rw-r--r--base/logging.h26
-rw-r--r--base/logging_unittest.cc7
-rw-r--r--base/mac/bind_objc_block.h7
-rw-r--r--base/mac/foundation_util.mm2
-rw-r--r--base/mac/libdispatch_task_runner.cc82
-rw-r--r--base/mac/libdispatch_task_runner.h80
-rw-r--r--base/mac/mac_logging.mm4
-rw-r--r--base/mac/mac_util.h55
-rw-r--r--base/mac/mach_port_broker_unittest.cc3
-rw-r--r--base/mac/scoped_block.h17
-rw-r--r--base/mac/scoped_nsobject.h159
-rw-r--r--base/mac/scoped_typeref.h42
-rw-r--r--base/macros.h4
-rw-r--r--base/memory/ref_counted.h2
-rw-r--r--base/memory/scoped_vector.h6
-rw-r--r--base/memory/shared_memory.h12
-rw-r--r--base/memory/shared_memory_unittest.cc5
-rw-r--r--base/memory/weak_ptr.cc6
-rw-r--r--base/memory/weak_ptr.h64
-rw-r--r--base/memory/weak_ptr_unittest.cc44
-rw-r--r--base/memory/weak_ptr_unittest.nc12
-rw-r--r--base/message_loop/incoming_task_queue.cc93
-rw-r--r--base/message_loop/incoming_task_queue.h8
-rw-r--r--base/message_loop/message_loop.cc74
-rw-r--r--base/message_loop/message_loop.h43
-rw-r--r--base/message_loop/message_loop_task_runner_unittest.cc16
-rw-r--r--base/message_loop/message_loop_test.cc341
-rw-r--r--base/message_loop/message_loop_unittest.cc44
-rw-r--r--base/message_loop/message_pump.cc7
-rw-r--r--base/message_loop/message_pump.h9
-rw-r--r--base/message_loop/message_pump_default.cc30
-rw-r--r--base/message_loop/message_pump_glib.cc4
-rw-r--r--base/metrics/OWNERS1
-rw-r--r--base/metrics/field_trial.cc111
-rw-r--r--base/metrics/field_trial.h14
-rw-r--r--base/metrics/histogram.cc3
-rw-r--r--base/metrics/histogram_base.h4
-rw-r--r--base/metrics/histogram_base_unittest.cc2
-rw-r--r--base/metrics/histogram_delta_serialization_unittest.cc3
-rw-r--r--base/metrics/histogram_snapshot_manager.cc118
-rw-r--r--base/metrics/histogram_snapshot_manager.h71
-rw-r--r--base/metrics/histogram_snapshot_manager_unittest.cc41
-rw-r--r--base/metrics/histogram_unittest.cc2
-rw-r--r--base/metrics/persistent_histogram_allocator.cc407
-rw-r--r--base/metrics/persistent_histogram_allocator.h35
-rw-r--r--base/metrics/persistent_histogram_allocator_unittest.cc87
-rw-r--r--base/metrics/persistent_memory_allocator.cc117
-rw-r--r--base/metrics/persistent_memory_allocator.h28
-rw-r--r--base/metrics/persistent_memory_allocator_unittest.cc111
-rw-r--r--base/metrics/sparse_histogram.cc1
-rw-r--r--base/metrics/sparse_histogram_unittest.cc2
-rw-r--r--base/metrics/statistics_recorder.cc25
-rw-r--r--base/metrics/statistics_recorder.h32
-rw-r--r--base/metrics/statistics_recorder_unittest.cc66
-rw-r--r--base/metrics/user_metrics.h6
-rw-r--r--base/metrics/user_metrics_action.h13
-rw-r--r--base/move.h44
-rw-r--r--base/native_library.h16
-rw-r--r--base/native_library_posix.cc12
-rw-r--r--base/observer_list.h4
-rw-r--r--base/observer_list_threadsafe.h9
-rw-r--r--base/observer_list_unittest.cc2
-rw-r--r--base/optional.h457
-rw-r--r--base/optional_unittest.cc1301
-rw-r--r--base/pending_task.cc16
-rw-r--r--base/pending_task.h17
-rw-r--r--base/posix/global_descriptors.h7
-rw-r--r--base/posix/unix_domain_socket_linux_unittest.cc3
-rw-r--r--base/process/launch.h10
-rw-r--r--base/process/launch_posix.cc108
-rw-r--r--base/process/process.h6
-rw-r--r--base/process/process_metrics.h8
-rw-r--r--base/process/process_metrics_linux.cc10
-rw-r--r--base/run_loop.cc8
-rw-r--r--base/run_loop.h28
-rw-r--r--base/scoped_generic.h5
-rw-r--r--base/sequence_checker_unittest.cc5
-rw-r--r--base/strings/string16.h2
-rw-r--r--base/strings/string_number_conversions.cc4
-rw-r--r--base/strings/string_number_conversions.h8
-rw-r--r--base/strings/string_number_conversions_unittest.cc1
-rw-r--r--base/strings/string_util.cc1
-rw-r--r--base/synchronization/read_write_lock.h105
-rw-r--r--base/synchronization/read_write_lock_posix.cc40
-rw-r--r--base/synchronization/waitable_event.h19
-rw-r--r--base/synchronization/waitable_event_posix.cc22
-rw-r--r--base/synchronization/waitable_event_unittest.cc26
-rw-r--r--base/sys_byteorder.h30
-rw-r--r--base/sys_info.h10
-rw-r--r--base/sys_info_linux.cc24
-rw-r--r--base/sys_info_posix.cc30
-rw-r--r--base/sys_info_unittest.cc17
-rw-r--r--base/task_runner.h5
-rw-r--r--base/task_scheduler/scheduler_lock_unittest.cc7
-rw-r--r--base/task_scheduler/sequence.h4
-rw-r--r--base/task_scheduler/task_traits.h2
-rw-r--r--base/template_util.h46
-rw-r--r--base/template_util_unittest.cc77
-rw-r--r--base/test/BUILD.gn20
-rw-r--r--base/test/data/prefs/invalid.json1
-rw-r--r--base/test/data/prefs/read.json8
-rw-r--r--base/test/data/prefs/write.golden.json1
-rw-r--r--base/test/sequenced_worker_pool_owner.cc3
-rw-r--r--base/test/test_io_thread.cc3
-rw-r--r--base/test/test_switches.cc4
-rw-r--r--base/test/test_switches.h1
-rw-r--r--base/test/trace_event_analyzer_unittest.cc4
-rw-r--r--base/threading/platform_thread.h8
-rw-r--r--base/threading/platform_thread_freebsd.cc0
-rw-r--r--base/threading/platform_thread_posix.cc5
-rw-r--r--base/threading/platform_thread_unittest.cc56
-rw-r--r--base/threading/sequenced_task_runner_handle.cc1
-rw-r--r--base/threading/sequenced_task_runner_handle.h3
-rw-r--r--base/threading/simple_thread.cc25
-rw-r--r--base/threading/simple_thread_unittest.cc9
-rw-r--r--base/threading/thread.cc12
-rw-r--r--base/threading/thread_local_unittest.cc3
-rw-r--r--base/threading/thread_restrictions.h18
-rw-r--r--base/threading/thread_task_runner_handle.cc1
-rw-r--r--base/threading/thread_task_runner_handle.h3
-rw-r--r--base/threading/thread_unittest.cc3
-rw-r--r--base/threading/worker_pool_posix.cc5
-rw-r--r--base/threading/worker_pool_posix_unittest.cc3
-rw-r--r--base/threading/worker_pool_unittest.cc11
-rw-r--r--base/time/time.cc13
-rw-r--r--base/time/time.h52
-rw-r--r--base/time/time_mac.cc28
-rw-r--r--base/time/time_posix.cc24
-rw-r--r--base/time/time_unittest.cc64
-rw-r--r--base/timer/timer_unittest.cc26
-rw-r--r--base/trace_event/etw_manifest/BUILD.gn10
-rw-r--r--base/trace_event/heap_profiler_allocation_context.cc11
-rw-r--r--base/trace_event/heap_profiler_allocation_context.h6
-rw-r--r--base/trace_event/heap_profiler_allocation_context_tracker.cc4
-rw-r--r--base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc4
-rw-r--r--base/trace_event/heap_profiler_allocation_register.cc275
-rw-r--r--base/trace_event/heap_profiler_allocation_register.h392
-rw-r--r--base/trace_event/heap_profiler_allocation_register_posix.cc9
-rw-r--r--base/trace_event/malloc_dump_provider.cc2
-rw-r--r--base/trace_event/memory_allocator_dump.cc7
-rw-r--r--base/trace_event/memory_allocator_dump_unittest.cc5
-rw-r--r--base/trace_event/memory_dump_manager.cc252
-rw-r--r--base/trace_event/memory_dump_manager.h44
-rw-r--r--base/trace_event/memory_dump_manager_unittest.cc90
-rw-r--r--base/trace_event/memory_dump_provider.h6
-rw-r--r--base/trace_event/memory_dump_request_args.cc4
-rw-r--r--base/trace_event/memory_dump_request_args.h33
-rw-r--r--base/trace_event/memory_infra_background_whitelist.cc131
-rw-r--r--base/trace_event/memory_infra_background_whitelist.h33
-rw-r--r--base/trace_event/process_memory_dump.cc51
-rw-r--r--base/trace_event/process_memory_dump.h30
-rw-r--r--base/trace_event/process_memory_dump_unittest.cc142
-rw-r--r--base/trace_event/process_memory_maps_dump_provider.h0
-rw-r--r--base/trace_event/trace_config.cc320
-rw-r--r--base/trace_event/trace_config.h59
-rw-r--r--base/trace_event/trace_config_memory_test_util.h24
-rw-r--r--base/trace_event/trace_config_unittest.cc162
-rw-r--r--base/trace_event/trace_event.gypi2
-rw-r--r--base/trace_event/trace_event_argument.cc8
-rw-r--r--base/trace_event/trace_event_impl.cc2
-rw-r--r--base/trace_event/trace_event_memory_overhead.cc2
-rw-r--r--base/trace_event/trace_event_unittest.cc115
-rw-r--r--base/trace_event/trace_log.cc21
-rw-r--r--base/trace_event/trace_sampling_thread.cc4
-rw-r--r--base/tracked_objects.cc9
-rw-r--r--base/tracked_objects.h4
-rw-r--r--base/tuple.h94
-rw-r--r--base/tuple_unittest.cc81
-rw-r--r--base/values.cc133
-rw-r--r--base/values.h18
-rw-r--r--base/values_unittest.cc2
-rw-r--r--base/version.cc193
-rw-r--r--base/version.h76
-rw-r--r--base/version_unittest.cc184
-rw-r--r--base/win/scoped_handle_test_dll.cc3
-rw-r--r--components/timers/BUILD.gn14
-rw-r--r--crypto/BUILD.gn30
-rw-r--r--crypto/ec_private_key.h36
-rw-r--r--crypto/hmac_unittest.cc2
-rw-r--r--crypto/nss_key_util.cc13
-rw-r--r--crypto/nss_key_util.h4
-rw-r--r--crypto/nss_key_util_unittest.cc2
-rw-r--r--crypto/nss_util.h4
-rw-r--r--crypto/nss_util_internal.h6
-rw-r--r--crypto/nss_util_unittest.cc3
-rw-r--r--crypto/secure_hash.cc11
-rw-r--r--crypto/secure_hash.h6
-rw-r--r--crypto/signature_creator.h5
-rw-r--r--crypto/symmetric_key.cc40
-rw-r--r--crypto/symmetric_key.h21
-rw-r--r--crypto/symmetric_key_unittest.cc23
-rw-r--r--crypto/wincrypt_shim.h2
-rw-r--r--dbus/BUILD.gn5
-rw-r--r--dbus/OWNERS1
-rw-r--r--dbus/bus.cc8
-rw-r--r--dbus/file_descriptor.h6
-rw-r--r--dbus/values_util.cc53
-rw-r--r--dbus/values_util.h5
-rw-r--r--sandbox/linux/BUILD.gn21
-rw-r--r--sandbox/linux/bpf_dsl/codegen.cc14
-rw-r--r--sandbox/linux/bpf_dsl/codegen.h9
-rw-r--r--sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc37
-rw-r--r--sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc4
-rw-r--r--sandbox/win/BUILD.gn20
-rw-r--r--sandbox/win/sandbox_win.gypi28
-rw-r--r--sandbox/win/src/nt_internals.h93
-rw-r--r--sandbox/win/src/security_level.h10
-rw-r--r--testing/multiprocess_func_list.cc2
-rw-r--r--testing/multiprocess_func_list.h2
257 files changed, 8280 insertions, 4210 deletions
diff --git a/Android.mk b/Android.mk
index 7648e8852e..75812fd796 100644
--- a/Android.mk
+++ b/Android.mk
@@ -143,6 +143,7 @@ libchromeCommonSrc := \
base/synchronization/condition_variable_posix.cc \
base/synchronization/lock.cc \
base/synchronization/lock_impl_posix.cc \
+ base/synchronization/read_write_lock_posix.cc \
base/synchronization/waitable_event_posix.cc \
base/sync_socket_posix.cc \
base/sys_info.cc \
@@ -188,12 +189,13 @@ libchromeCommonSrc := \
base/trace_event/heap_profiler_heap_dump_writer.cc \
base/trace_event/heap_profiler_stack_frame_deduplicator.cc \
base/trace_event/heap_profiler_type_name_deduplicator.cc \
+ base/trace_event/malloc_dump_provider.cc \
base/trace_event/memory_allocator_dump.cc \
base/trace_event/memory_allocator_dump_guid.cc \
base/trace_event/memory_dump_manager.cc \
- base/trace_event/malloc_dump_provider.cc \
base/trace_event/memory_dump_request_args.cc \
base/trace_event/memory_dump_session_state.cc \
+ base/trace_event/memory_infra_background_whitelist.cc \
base/trace_event/process_memory_dump.cc \
base/trace_event/process_memory_maps.cc \
base/trace_event/process_memory_totals.cc \
@@ -209,6 +211,7 @@ libchromeCommonSrc := \
base/tracked_objects.cc \
base/tracking_info.cc \
base/values.cc \
+ base/version.cc \
base/vlog.cc \
libchromeLinuxSrc := \
@@ -316,6 +319,7 @@ libchromeCommonUnittestSrc := \
base/metrics/statistics_recorder_unittest.cc \
base/numerics/safe_numerics_unittest.cc \
base/observer_list_unittest.cc \
+ base/optional_unittest.cc \
base/pickle_unittest.cc \
base/posix/file_descriptor_shuffle_unittest.cc \
base/posix/unix_domain_socket_linux_unittest.cc \
@@ -392,6 +396,7 @@ libchromeCommonUnittestSrc := \
base/tracked_objects_unittest.cc \
base/tuple_unittest.cc \
base/values_unittest.cc \
+ base/version_unittest.cc \
base/vlog_unittest.cc \
testing/multiprocess_func_list.cc \
testrunner.cc \
diff --git a/SConstruct b/SConstruct
index 170d4b1ac4..72e022e6cd 100644
--- a/SConstruct
+++ b/SConstruct
@@ -147,6 +147,7 @@ base_libs = [
synchronization/condition_variable_posix.cc
synchronization/lock.cc
synchronization/lock_impl_posix.cc
+ synchronization/read_write_lock_posix.cc
synchronization/waitable_event_posix.cc
synchronization/waitable_event_watcher_posix.cc
sync_socket_posix.cc
@@ -190,7 +191,6 @@ base_libs = [
time/tick_clock.cc
time/time.cc
time/time_posix.cc
- trace_event/malloc_dump_provider.cc
trace_event/heap_profiler_allocation_context.cc
trace_event/heap_profiler_allocation_context_tracker.cc
trace_event/heap_profiler_allocation_register.cc
@@ -198,11 +198,13 @@ base_libs = [
trace_event/heap_profiler_heap_dump_writer.cc
trace_event/heap_profiler_stack_frame_deduplicator.cc
trace_event/heap_profiler_type_name_deduplicator.cc
+ trace_event/malloc_dump_provider.cc
trace_event/memory_allocator_dump.cc
trace_event/memory_allocator_dump_guid.cc
trace_event/memory_dump_manager.cc
trace_event/memory_dump_request_args.cc
trace_event/memory_dump_session_state.cc
+ trace_event/memory_infra_background_whitelist.cc
trace_event/process_memory_dump.cc
trace_event/process_memory_maps.cc
trace_event/process_memory_totals.cc
@@ -218,6 +220,7 @@ base_libs = [
tracked_objects.cc
tracking_info.cc
values.cc
+ version.cc
vlog.cc
""",
'prefix' : 'base',
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 5712663ed8..c14798959b 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -54,6 +54,7 @@ config("base_flags") {
config("base_implementation") {
defines = [ "BASE_IMPLEMENTATION" ]
+ configs = [ "//build/config/compiler:wexit_time_destructors" ]
}
if (is_win) {
@@ -107,8 +108,10 @@ if (is_nacl) {
}
}
-config("android_system_libs") {
- libs = [ "log" ] # Used by logging.cc.
+if (is_android) {
+ config("android_system_libs") {
+ libs = [ "log" ] # Used by logging.cc.
+ }
}
# Base and everything it depends on should be a static library rather than
@@ -124,12 +127,14 @@ config("android_system_libs") {
# test code (test support and anything in the test directory) which should use
# source_set as is recommended for GN targets).
component("base") {
- # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
- # hacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
- # reasons for this seem to involve obscure toolchain bugs. This should be
- # fixed and this target should always be a static_library in the
- # non-component case.
- component_never_use_source_set = !is_nacl_nonsfi
+ if (is_nacl_nonsfi) {
+ # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
+ # nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
+ # reasons for this seem to involve obscure toolchain bugs. This should be
+ # fixed and this target should always be a static_library in the
+ # non-component case.
+ static_component_type = "source_set"
+ }
sources = [
"allocator/allocator_check.cc",
@@ -228,7 +233,6 @@ component("base") {
"bind_helpers.cc",
"bind_helpers.h",
"bind_internal.h",
- "bind_internal_win.h",
"bit_cast.h",
"bits.h",
"build_time.cc",
@@ -394,6 +398,7 @@ component("base") {
"mac/call_with_eh_frame.cc",
"mac/call_with_eh_frame.h",
"mac/call_with_eh_frame_asm.S",
+ "mac/close_nocancel.cc",
"mac/cocoa_protocols.h",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
@@ -403,8 +408,6 @@ component("base") {
"mac/launch_services_util.h",
"mac/launchd.cc",
"mac/launchd.h",
- "mac/libdispatch_task_runner.cc",
- "mac/libdispatch_task_runner.h",
"mac/mac_logging.h",
"mac/mac_logging.mm",
"mac/mac_util.h",
@@ -423,6 +426,7 @@ component("base") {
"mac/scoped_authorizationref.h",
"mac/scoped_block.h",
"mac/scoped_cftyperef.h",
+ "mac/scoped_dispatch_object.h",
"mac/scoped_ioobject.h",
"mac/scoped_ioplugininterface.h",
"mac/scoped_launch_data.h",
@@ -433,6 +437,7 @@ component("base") {
"mac/scoped_nsautorelease_pool.h",
"mac/scoped_nsautorelease_pool.mm",
"mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
"mac/scoped_sending_event.h",
@@ -542,7 +547,6 @@ component("base") {
"metrics/user_metrics.cc",
"metrics/user_metrics.h",
"metrics/user_metrics_action.h",
- "move.h",
"native_library.h",
"native_library_ios.mm",
"native_library_mac.mm",
@@ -677,9 +681,8 @@ component("base") {
"sequenced_task_runner.cc",
"sequenced_task_runner.h",
"sequenced_task_runner_helpers.h",
+ "sha1.cc",
"sha1.h",
- "sha1_portable.cc",
- "sha1_win.cc",
"single_thread_task_runner.h",
"stl_util.h",
"strings/latin1_string_conversions.cc",
@@ -732,6 +735,10 @@ component("base") {
"synchronization/lock_impl.h",
"synchronization/lock_impl_posix.cc",
"synchronization/lock_impl_win.cc",
+ "synchronization/read_write_lock.h",
+ "synchronization/read_write_lock_nacl.cc",
+ "synchronization/read_write_lock_posix.cc",
+ "synchronization/read_write_lock_win.cc",
"synchronization/spin_wait.h",
"synchronization/waitable_event.h",
"synchronization/waitable_event_posix.cc",
@@ -769,13 +776,13 @@ component("base") {
"task_scheduler/scheduler_lock_impl.h",
"task_scheduler/scheduler_service_thread.cc",
"task_scheduler/scheduler_service_thread.h",
- "task_scheduler/scheduler_thread_pool.h",
- "task_scheduler/scheduler_thread_pool_impl.cc",
- "task_scheduler/scheduler_thread_pool_impl.h",
- "task_scheduler/scheduler_worker_thread.cc",
- "task_scheduler/scheduler_worker_thread.h",
- "task_scheduler/scheduler_worker_thread_stack.cc",
- "task_scheduler/scheduler_worker_thread_stack.h",
+ "task_scheduler/scheduler_worker.cc",
+ "task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_pool.h",
+ "task_scheduler/scheduler_worker_pool_impl.cc",
+ "task_scheduler/scheduler_worker_pool_impl.h",
+ "task_scheduler/scheduler_worker_stack.cc",
+ "task_scheduler/scheduler_worker_stack.h",
"task_scheduler/sequence.cc",
"task_scheduler/sequence.h",
"task_scheduler/sequence_sort_key.cc",
@@ -899,6 +906,8 @@ component("base") {
"trace_event/memory_dump_request_args.h",
"trace_event/memory_dump_session_state.cc",
"trace_event/memory_dump_session_state.h",
+ "trace_event/memory_infra_background_whitelist.cc",
+ "trace_event/memory_infra_background_whitelist.h",
"trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h",
"trace_event/process_memory_maps.cc",
@@ -1124,6 +1133,7 @@ component("base") {
"process/process_posix.cc",
"scoped_native_library.cc",
"sync_socket_posix.cc",
+ "synchronization/read_write_lock_posix.cc",
"sys_info.cc",
"sys_info_posix.cc",
"trace_event/trace_event_system_stats_monitor.cc",
@@ -1158,6 +1168,7 @@ component("base") {
"os_compat_nacl.cc",
"os_compat_nacl.h",
"rand_util_nacl.cc",
+ "synchronization/read_write_lock_nacl.cc",
]
}
@@ -1171,13 +1182,12 @@ component("base") {
sources -= [
"message_loop/message_pump_libevent.cc",
"strings/string16.cc",
-
- # Not using sha1_win.cc because it may have caused a
- # regression to page cycler moz.
- "sha1_win.cc",
]
- deps += [ "//base/trace_event/etw_manifest:chrome_events_win" ]
+ deps += [
+ "//base/trace_event/etw_manifest:chrome_events_win",
+ "//base/win:base_win_features",
+ ]
if (is_component_build) {
# Copy the VS runtime DLLs into the isolate so that they don't have to be
@@ -1256,7 +1266,6 @@ component("base") {
libs = [
"cfgmgr32.lib",
- "netapi32.lib",
"powrprof.lib",
"setupapi.lib",
"userenv.lib",
@@ -1397,6 +1406,7 @@ component("base") {
"mac/mach_logging.h",
"mac/objc_property_releaser.h",
"mac/objc_property_releaser.mm",
+ "mac/scoped_block.h",
"mac/scoped_mach_port.cc",
"mac/scoped_mach_port.h",
"mac/scoped_mach_vm.cc",
@@ -1404,8 +1414,10 @@ component("base") {
"mac/scoped_nsautorelease_pool.h",
"mac/scoped_nsautorelease_pool.mm",
"mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
+ "mac/scoped_typeref.h",
"memory/shared_memory_posix.cc",
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
@@ -1631,10 +1643,11 @@ if (is_win) {
]
deps = [
":base",
+ "//base/win:base_win_features",
]
}
- if (target_cpu == "x64") {
+ if (current_cpu == "x64") {
# Must be a shared library so that it can be unloaded during testing.
shared_library("base_profiler_test_support_library") {
sources = [
@@ -1678,6 +1691,23 @@ bundle_data("base_unittests_bundle_data") {
]
}
+if (is_ios || is_mac) {
+ source_set("base_unittests_arc") {
+ testonly = true
+ set_sources_assignment_filter([])
+ sources = [
+ "mac/bind_objc_block_unittest_arc.mm",
+ "mac/scoped_nsobject_unittest_arc.mm",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+ configs += [ "//build/config/compiler:enable_arc" ]
+ deps = [
+ ":base",
+ "//testing/gtest",
+ ]
+ }
+}
+
test("base_unittests") {
sources = [
"allocator/tcmalloc_unittest.cc",
@@ -1722,7 +1752,7 @@ test("base_unittests") {
"deferred_sequenced_task_runner_unittest.cc",
"environment_unittest.cc",
"feature_list_unittest.cc",
- "file_version_info_unittest.cc",
+ "file_version_info_win_unittest.cc",
"files/dir_reader_posix_unittest.cc",
"files/file_locking_unittest.cc",
"files/file_path_unittest.cc",
@@ -1764,7 +1794,6 @@ test("base_unittests") {
"mac/call_with_eh_frame_unittest.mm",
"mac/dispatch_source_mach_unittest.cc",
"mac/foundation_util_unittest.mm",
- "mac/libdispatch_task_runner_unittest.cc",
"mac/mac_util_unittest.mm",
"mac/mach_port_broker_unittest.cc",
"mac/objc_property_releaser_unittest.mm",
@@ -1827,6 +1856,7 @@ test("base_unittests") {
"profiler/stack_sampling_profiler_unittest.cc",
"profiler/tracked_time_unittest.cc",
"rand_util_unittest.cc",
+ "run_loop_unittest.cc",
"scoped_clear_errno_unittest.cc",
"scoped_generic_unittest.cc",
"scoped_native_library_unittest.cc",
@@ -1854,8 +1884,10 @@ test("base_unittests") {
"synchronization/cancellation_flag_unittest.cc",
"synchronization/condition_variable_unittest.cc",
"synchronization/lock_unittest.cc",
+ "synchronization/read_write_lock_unittest.cc",
"synchronization/waitable_event_unittest.cc",
"synchronization/waitable_event_watcher_unittest.cc",
+ "sys_byteorder_unittest.cc",
"sys_info_unittest.cc",
"system_monitor/system_monitor_unittest.cc",
"task/cancelable_task_tracker_unittest.cc",
@@ -1864,9 +1896,9 @@ test("base_unittests") {
"task_scheduler/priority_queue_unittest.cc",
"task_scheduler/scheduler_lock_unittest.cc",
"task_scheduler/scheduler_service_thread_unittest.cc",
- "task_scheduler/scheduler_thread_pool_impl_unittest.cc",
- "task_scheduler/scheduler_worker_thread_stack_unittest.cc",
- "task_scheduler/scheduler_worker_thread_unittest.cc",
+ "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
+ "task_scheduler/scheduler_worker_stack_unittest.cc",
+ "task_scheduler/scheduler_worker_unittest.cc",
"task_scheduler/sequence_sort_key_unittest.cc",
"task_scheduler/sequence_unittest.cc",
"task_scheduler/task_scheduler_impl_unittest.cc",
@@ -1913,7 +1945,6 @@ test("base_unittests") {
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
- "trace_event/trace_config_memory_test_util.h",
"trace_event/trace_config_unittest.cc",
"trace_event/trace_event_argument_unittest.cc",
"trace_event/trace_event_synthetic_delay_unittest.cc",
@@ -1949,6 +1980,8 @@ test("base_unittests") {
"win/wrapped_window_proc_unittest.cc",
]
+ defines = []
+
deps = [
":base",
":i18n",
@@ -1961,6 +1994,10 @@ test("base_unittests") {
"//third_party/icu",
]
+ if (is_ios || is_mac) {
+ deps += [ ":base_unittests_arc" ]
+ }
+
public_deps = [
":base_unittests_bundle_data",
]
@@ -1975,7 +2012,7 @@ test("base_unittests") {
# Allow more direct string conversions on platforms with native utf8
# strings
if (is_mac || is_ios || is_chromeos || is_chromecast) {
- defines = [ "SYSTEM_NATIVE_UTF8" ]
+ defines += [ "SYSTEM_NATIVE_UTF8" ]
}
if (is_android) {
@@ -2021,8 +2058,6 @@ test("base_unittests") {
}
if (is_linux) {
- sources -= [ "file_version_info_unittest.cc" ]
-
if (is_desktop_linux) {
sources += [ "nix/xdg_util_unittest.cc" ]
}
@@ -2060,7 +2095,7 @@ test("base_unittests") {
if (is_win) {
deps += [ "//base:scoped_handle_test_dll" ]
- if (target_cpu == "x64") {
+ if (current_cpu == "x64") {
sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
deps += [ ":base_profiler_test_support_library" ]
}
@@ -2082,6 +2117,11 @@ test("base_unittests") {
# data += [ "$root_out_dir/base_unittests.dSYM/" ]
}
}
+
+ if (use_cfi_cast) {
+ # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+ defines += [ "CFI_CAST_CHECK" ]
+ }
}
action("build_date") {
@@ -2216,6 +2256,7 @@ if (is_android) {
"android/java/src/org/chromium/base/PerfTraceEvent.java",
"android/java/src/org/chromium/base/PowerMonitor.java",
"android/java/src/org/chromium/base/PowerStatusReceiver.java",
+ "android/java/src/org/chromium/base/Promise.java",
"android/java/src/org/chromium/base/ResourceExtractor.java",
"android/java/src/org/chromium/base/SecureRandomInitializer.java",
"android/java/src/org/chromium/base/StreamUtil.java",
@@ -2280,6 +2321,7 @@ if (is_android) {
]
java_files = [
"test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
@@ -2300,6 +2342,7 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/PerfTest.java",
"test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
"test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
"test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
"test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
"test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
@@ -2333,6 +2376,7 @@ if (is_android) {
java_files = [
"android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
"android/junit/src/org/chromium/base/LogTest.java",
+ "android/junit/src/org/chromium/base/PromiseTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
@@ -2364,6 +2408,11 @@ if (is_android) {
"android/java/templates/BuildConfig.template",
]
package_name = "org/chromium/base"
+
+ defines = []
+ if (!is_java_debug) {
+ defines += [ "NDEBUG" ]
+ }
}
# GYP: //base/base.gyp:base_native_libraries_gen
diff --git a/base/OWNERS b/base/OWNERS
index 4d4a2391a4..b6cfce4dfd 100644
--- a/base/OWNERS
+++ b/base/OWNERS
@@ -1,8 +1,5 @@
-mark@chromium.org
-thakis@chromium.org
-danakj@chromium.org
-thestig@chromium.org
-
+# About src/base:
+#
# Chromium is a very mature project, most things that are generally useful are
# already here, and that things not here aren't generally useful.
#
@@ -15,11 +12,20 @@ thestig@chromium.org
# Adding a new logging macro DPVELOG_NE is not more clear than just
# writing the stuff you want to log in a regular logging statement, even
# if it makes your calling code longer. Just add it to your own code.
+#
+# If the code in question does not need to be used inside base, but will have
+# multiple consumers across the codebase, consider placing it in a new directory
+# under components/ instead.
-per-file *.isolate=maruel@chromium.org
-per-file *.isolate=tandrii@chromium.org
-per-file *.isolate=vadimsh@chromium.org
-per-file security_unittest.cc=jln@chromium.org
+mark@chromium.org
+thakis@chromium.org
+danakj@chromium.org
+thestig@chromium.org
+dcheng@chromium.org
+
+# For Bind/Callback:
+per-file bind*=tzik@chromium.org
+per-file callback*=tzik@chromium.org
# For Android-specific changes:
per-file *android*=nyquist@chromium.org
@@ -30,3 +36,11 @@ per-file *android*=yfriedman@chromium.org
# For FeatureList API:
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
+
+# For bot infrastructure:
+per-file *.isolate=maruel@chromium.org
+per-file *.isolate=tandrii@chromium.org
+per-file *.isolate=vadimsh@chromium.org
+
+# For TCMalloc tests:
+per-file security_unittest.cc=jln@chromium.org
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 96ccad239a..490b8e871b 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -71,6 +71,18 @@ config("tcmalloc_flags") {
# typedefs.
"-Wno-unused-private-field",
]
+ } else {
+ cflags = []
+ }
+
+ if (is_linux || is_android) {
+ # We enable all warnings by default, but upstream disables a few.
+ # Keep "-Wno-*" flags in sync with upstream by comparing against:
+ # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+ cflags += [
+ "-Wno-sign-compare",
+ "-Wno-unused-result",
+ ]
}
}
@@ -228,16 +240,16 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/windows/port.h",
]
- # We enable all warnings by default, but upstream disables a few.
- # Keep "-Wno-*" flags in sync with upstream by comparing against:
- # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
- cflags = [
- "-Wno-sign-compare",
- "-Wno-unused-result",
- ]
-
- configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
- configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ if (!use_experimental_allocator_shim) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
ldflags = [
# Don't let linker rip this symbol out, otherwise the heap&cpu
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
index 3844c08add..674d4d645f 100644
--- a/base/allocator/allocator.gyp
+++ b/base/allocator/allocator.gyp
@@ -301,9 +301,6 @@
'-Wno-sign-compare',
'-Wno-unused-result',
],
- 'cflags!': [
- '-fvisibility=hidden',
- ],
'link_settings': {
'ldflags': [
# Don't let linker rip this symbol out, otherwise the heap&cpu
@@ -315,6 +312,19 @@
'-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
],
},
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ 'conditions': [
+ ['use_experimental_allocator_shim==0', {
+ 'cflags!': [
+ '-fvisibility=hidden',
+ ],
+ }],
+ ],
}],
['profiling!=1', {
'sources!': [
diff --git a/base/at_exit.cc b/base/at_exit.cc
index f9aa2d1094..cfe4cf9a58 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <ostream>
+#include <utility>
#include "base/bind.h"
#include "base/callback.h"
@@ -57,7 +58,7 @@ void AtExitManager::RegisterTask(base::Closure task) {
AutoLock lock(g_top_manager->lock_);
DCHECK(!g_top_manager->processing_callbacks_);
- g_top_manager->stack_.push(task);
+ g_top_manager->stack_.push(std::move(task));
}
// static
diff --git a/base/base.gyp b/base/base.gyp
index 86d2331e8f..a534d5ccb7 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -24,6 +24,7 @@
'allocator/allocator.gyp:allocator',
'allocator/allocator.gyp:allocator_features#target',
'base_debugging_flags#target',
+ 'base_win_features#target',
'base_static',
'base_build_date#target',
'../testing/gtest.gyp:gtest_prod',
@@ -406,7 +407,7 @@
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
'feature_list_unittest.cc',
- 'file_version_info_unittest.cc',
+ 'file_version_info_win_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_locking_unittest.cc',
'files/file_path_unittest.cc',
@@ -449,7 +450,6 @@
'mac/call_with_eh_frame_unittest.mm',
'mac/dispatch_source_mach_unittest.cc',
'mac/foundation_util_unittest.mm',
- 'mac/libdispatch_task_runner_unittest.cc',
'mac/mac_util_unittest.mm',
'mac/mach_port_broker_unittest.cc',
'mac/objc_property_releaser_unittest.mm',
@@ -514,6 +514,7 @@
'profiler/stack_sampling_profiler_unittest.cc',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
+ 'run_loop_unittest.cc',
'scoped_clear_errno_unittest.cc',
'scoped_generic_unittest.cc',
'scoped_native_library_unittest.cc',
@@ -541,8 +542,10 @@
'synchronization/cancellation_flag_unittest.cc',
'synchronization/condition_variable_unittest.cc',
'synchronization/lock_unittest.cc',
+ 'synchronization/read_write_lock_unittest.cc',
'synchronization/waitable_event_unittest.cc',
'synchronization/waitable_event_watcher_unittest.cc',
+ 'sys_byteorder_unittest.cc',
'sys_info_unittest.cc',
'system_monitor/system_monitor_unittest.cc',
'task/cancelable_task_tracker_unittest.cc',
@@ -551,9 +554,9 @@
'task_scheduler/priority_queue_unittest.cc',
'task_scheduler/scheduler_lock_unittest.cc',
'task_scheduler/scheduler_service_thread_unittest.cc',
- 'task_scheduler/scheduler_thread_pool_impl_unittest.cc',
- 'task_scheduler/scheduler_worker_thread_stack_unittest.cc',
- 'task_scheduler/scheduler_worker_thread_unittest.cc',
+ 'task_scheduler/scheduler_worker_unittest.cc',
+ 'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
+ 'task_scheduler/scheduler_worker_stack_unittest.cc',
'task_scheduler/sequence_sort_key_unittest.cc',
'task_scheduler/sequence_unittest.cc',
'task_scheduler/task_scheduler_impl_unittest.cc',
@@ -636,6 +639,17 @@
'module_dir': 'base'
},
'conditions': [
+ ['cfi_vptr==1 and cfi_cast==1', {
+ 'defines': [
+ # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+ 'CFI_CAST_CHECK',
+ ],
+ }],
+ ['OS == "ios" or OS == "mac"', {
+ 'dependencies': [
+ 'base_unittests_arc',
+ ],
+ }],
['OS == "android"', {
'dependencies': [
'android/jni_generator/jni_generator.gyp:jni_generator_tests',
@@ -675,9 +689,6 @@
'defines': [
'USE_SYMBOLIZE',
],
- 'sources!': [
- 'file_version_info_unittest.cc',
- ],
'conditions': [
[ 'desktop_linux==1', {
'sources': [
@@ -867,6 +878,8 @@
'test/ios/wait_util.mm',
'test/launcher/test_launcher.cc',
'test/launcher/test_launcher.h',
+ 'test/launcher/test_launcher_tracer.cc',
+ 'test/launcher/test_launcher_tracer.h',
'test/launcher/test_result.cc',
'test/launcher/test_result.h',
'test/launcher/test_results_tracker.cc',
@@ -1019,7 +1032,7 @@
},
{
# GN version: //base/debug:debugging_flags
- # Since this generates a file, it most only be referenced in the target
+ # Since this generates a file, it must only be referenced in the target
# toolchain or there will be multiple rules that generate the header.
# When referenced from a target that might be compiled in the host
# toolchain, always refer to 'base_debugging_flags#target'.
@@ -1033,6 +1046,27 @@
},
},
{
+ # GN version: //base/win:base_win_features
+ # Since this generates a file, it must only be referenced in the target
+ # toolchain or there will be multiple rules that generate the header.
+ # When referenced from a target that might be compiled in the host
+ # toolchain, always refer to 'base_win_features#target'.
+ 'target_name': 'base_win_features',
+ 'conditions': [
+ ['OS=="win"', {
+ 'includes': [ '../build/buildflag_header.gypi' ],
+ 'variables': {
+ 'buildflag_header_path': 'base/win/base_features.h',
+ 'buildflag_flags': [
+ 'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
+ ],
+ },
+ }, {
+ 'type': 'none',
+ }],
+ ],
+ },
+ {
'type': 'none',
'target_name': 'base_build_date',
'hard_dependency': 1,
@@ -1736,5 +1770,32 @@
},
],
}],
+ ['OS == "ios" or OS == "mac"', {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_arc',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'mac/bind_objc_block_unittest_arc.mm',
+ 'mac/scoped_nsobject_unittest_arc.mm'
+ ],
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ 'target_conditions': [
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
+ ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
+ ],
+ }]
+ ],
+ },
+ ],
+ }],
],
}
diff --git a/base/base.gypi b/base/base.gypi
index dc86c59f91..cb41e79310 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -121,7 +121,6 @@
'bind_helpers.cc',
'bind_helpers.h',
'bind_internal.h',
- 'bind_internal_win.h',
'bit_cast.h',
'bits.h',
'build_time.cc',
@@ -298,8 +297,6 @@
'mac/launch_services_util.h',
'mac/launchd.cc',
'mac/launchd.h',
- 'mac/libdispatch_task_runner.cc',
- 'mac/libdispatch_task_runner.h',
'mac/mac_logging.h',
'mac/mac_logging.mm',
'mac/mac_util.h',
@@ -318,6 +315,7 @@
'mac/scoped_authorizationref.h',
'mac/scoped_block.h',
'mac/scoped_cftyperef.h',
+ 'mac/scoped_dispatch_object.h',
'mac/scoped_ioobject.h',
'mac/scoped_ioplugininterface.h',
'mac/scoped_launch_data.h',
@@ -328,6 +326,7 @@
'mac/scoped_nsautorelease_pool.h',
'mac/scoped_nsautorelease_pool.mm',
'mac/scoped_nsobject.h',
+ 'mac/scoped_nsobject.mm',
'mac/scoped_objc_class_swizzler.h',
'mac/scoped_objc_class_swizzler.mm',
'mac/scoped_sending_event.h',
@@ -429,7 +428,6 @@
'metrics/user_metrics.cc',
'metrics/user_metrics.h',
'metrics/user_metrics_action.h',
- 'move.h',
'native_library.h',
'native_library_ios.mm',
'native_library_mac.mm',
@@ -558,9 +556,8 @@
'sequenced_task_runner.cc',
'sequenced_task_runner.h',
'sequenced_task_runner_helpers.h',
+ 'sha1.cc',
'sha1.h',
- 'sha1_portable.cc',
- 'sha1_win.cc',
'single_thread_task_runner.h',
'stl_util.h',
'strings/latin1_string_conversions.cc',
@@ -610,6 +607,10 @@
'synchronization/lock_impl.h',
'synchronization/lock_impl_posix.cc',
'synchronization/lock_impl_win.cc',
+ 'synchronization/read_write_lock.h',
+ 'synchronization/read_write_lock_nacl.cc',
+ 'synchronization/read_write_lock_posix.cc',
+ 'synchronization/read_write_lock_win.cc',
'synchronization/spin_wait.h',
'synchronization/waitable_event.h',
'synchronization/waitable_event_posix.cc',
@@ -646,13 +647,13 @@
'task_scheduler/scheduler_lock_impl.h',
'task_scheduler/scheduler_service_thread.cc',
'task_scheduler/scheduler_service_thread.h',
- 'task_scheduler/scheduler_thread_pool.h',
- 'task_scheduler/scheduler_thread_pool_impl.cc',
- 'task_scheduler/scheduler_thread_pool_impl.h',
- 'task_scheduler/scheduler_worker_thread.cc',
- 'task_scheduler/scheduler_worker_thread.h',
- 'task_scheduler/scheduler_worker_thread_stack.cc',
- 'task_scheduler/scheduler_worker_thread_stack.h',
+ 'task_scheduler/scheduler_worker.cc',
+ 'task_scheduler/scheduler_worker.h',
+ 'task_scheduler/scheduler_worker_pool.h',
+ 'task_scheduler/scheduler_worker_pool_impl.cc',
+ 'task_scheduler/scheduler_worker_pool_impl.h',
+ 'task_scheduler/scheduler_worker_stack.cc',
+ 'task_scheduler/scheduler_worker_stack.h',
'task_scheduler/sequence.cc',
'task_scheduler/sequence.h',
'task_scheduler/sequence_sort_key.cc',
@@ -877,6 +878,7 @@
'process/process_posix.cc',
'rand_util_posix.cc',
'scoped_native_library.cc',
+ 'synchronization/read_write_lock_posix.cc',
'sys_info.cc',
'sys_info_posix.cc',
'third_party/dynamic_annotations/dynamic_annotations.c',
@@ -934,6 +936,7 @@
['include', '^mac/mac_logging\\.'],
['include', '^mac/mach_logging\\.'],
['include', '^mac/objc_property_releaser\\.'],
+ ['include', '^mac/scoped_block\\.'],
['include', '^mac/scoped_mach_port\\.'],
['include', '^mac/scoped_mach_vm\\.'],
['include', '^mac/scoped_nsautorelease_pool\\.'],
@@ -1002,9 +1005,6 @@
'files/file_path_watcher_stub.cc',
'message_loop/message_pump_libevent.cc',
'posix/file_descriptor_shuffle.cc',
- # Not using sha1_win.cc because it may have caused a
- # regression to page cycler moz.
- 'sha1_win.cc',
'strings/string16.cc',
],
},],
diff --git a/base/base_switches.cc b/base/base_switches.cc
index fa0bc33371..f5c6eb3f59 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -89,6 +89,16 @@ const char kProfilerTiming[] = "profiler-timing";
// chrome://profiler.
const char kProfilerTimingDisabledValue[] = "0";
+// Specifies a location for profiling output. This will only work if chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+// {pid} if present will be replaced by the pid of the process.
+// {count} if present will be incremented each time a profile is generated
+// for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
#if defined(OS_WIN)
// Disables the USB keyboard detection for blocking the OSK on Win8+.
const char kDisableUsbKeyboardDetect[] = "disable-usb-keyboard-detect";
diff --git a/base/base_switches.h b/base/base_switches.h
index b80077b6bb..0585186038 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -22,6 +22,7 @@ extern const char kFullMemoryCrashReport[];
extern const char kNoErrorDialogs[];
extern const char kProfilerTiming[];
extern const char kProfilerTimingDisabledValue[];
+extern const char kProfilingFile[];
extern const char kTestChildProcess[];
extern const char kTestDoNotInitializeIcu[];
extern const char kTraceToFile[];
diff --git a/base/bind.h b/base/bind.h
index 46dbb913be..9cf65b6776 100644
--- a/base/bind.h
+++ b/base/bind.h
@@ -21,90 +21,21 @@
// If you're reading the implementation, before proceeding further, you should
// read the top comment of base/bind_internal.h for a definition of common
// terms and concepts.
-//
-// RETURN TYPES
-//
-// Though Bind()'s result is meant to be stored in a Callback<> type, it
-// cannot actually return the exact type without requiring a large amount
-// of extra template specializations. The problem is that in order to
-// discern the correct specialization of Callback<>, Bind would need to
-// unwrap the function signature to determine the signature's arity, and
-// whether or not it is a method.
-//
-// Each unique combination of (arity, function_type, num_prebound) where
-// function_type is one of {function, method, const_method} would require
-// one specialization. We eventually have to do a similar number of
-// specializations anyways in the implementation (see the Invoker<>,
-// classes). However, it is avoidable in Bind if we return the result
-// via an indirection like we do below.
-//
-// TODO(ajwong): We might be able to avoid this now, but need to test.
-//
-// It is possible to move most of the static_assert into BindState<>, but it
-// feels a little nicer to have the asserts here so people do not need to crack
-// open bind_internal.h. On the other hand, it makes Bind() harder to read.
namespace base {
-namespace internal {
-
-// Don't use Alias Template directly here to avoid a compile error on MSVC2013.
-template <typename Functor, typename... Args>
-struct MakeUnboundRunTypeImpl {
- using Type =
- typename BindState<
- typename FunctorTraits<Functor>::RunnableType,
- typename FunctorTraits<Functor>::RunType,
- Args...>::UnboundRunType;
-};
-
-} // namespace internal
-
template <typename Functor, typename... Args>
-using MakeUnboundRunType =
- typename internal::MakeUnboundRunTypeImpl<Functor, Args...>::Type;
-
-template <typename Functor, typename... Args>
-base::Callback<MakeUnboundRunType<Functor, Args...>>
-Bind(Functor functor, Args&&... args) {
- // Type aliases for how to store and run the functor.
- using RunnableType = typename internal::FunctorTraits<Functor>::RunnableType;
- using RunType = typename internal::FunctorTraits<Functor>::RunType;
-
- // Use RunnableType::RunType instead of RunType above because our
- // checks below for bound references need to know what the actual
- // functor is going to interpret the argument as.
- using BoundRunType = typename RunnableType::RunType;
-
- using BoundArgs =
- internal::TakeTypeListItem<sizeof...(Args),
- internal::ExtractArgs<BoundRunType>>;
-
- // Do not allow binding a non-const reference parameter. Non-const reference
- // parameters are disallowed by the Google style guide. Also, binding a
- // non-const reference parameter can make for subtle bugs because the
- // invoked function will receive a reference to the stored copy of the
- // argument and not the original.
- static_assert(!internal::HasNonConstReferenceItem<BoundArgs>::value,
- "do not bind functions with nonconst ref");
-
- const bool is_method = internal::HasIsMethodTag<RunnableType>::value;
-
- // For methods, we need to be careful for parameter 1. We do not require
- // a scoped_refptr because BindState<> itself takes care of AddRef() for
- // methods. We also disallow binding of an array as the method's target
- // object.
- static_assert(!internal::BindsArrayToFirstArg<is_method, Args...>::value,
- "first bound argument to method cannot be array");
- static_assert(
- !internal::HasRefCountedParamAsRawPtr<is_method, Args...>::value,
- "a parameter is a refcounted type and needs scoped_refptr");
-
- using BindState = internal::BindState<RunnableType, RunType, Args...>;
-
- return Callback<typename BindState::UnboundRunType>(
- new BindState(internal::MakeRunnable(functor),
- std::forward<Args>(args)...));
+inline base::Callback<MakeUnboundRunType<Functor, Args...>> Bind(
+ Functor&& functor,
+ Args&&... args) {
+ using BindState = internal::MakeBindStateType<Functor, Args...>;
+ using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+ using Invoker = internal::Invoker<BindState, UnboundRunType>;
+
+ using CallbackType = Callback<UnboundRunType>;
+ return CallbackType(new BindState(std::forward<Functor>(functor),
+ std::forward<Args>(args)...),
+ &Invoker::Run);
}
} // namespace base
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index 590d788b96..93d02e37a9 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -170,145 +170,11 @@
#include "build/build_config.h"
namespace base {
-namespace internal {
-
-// Use the Substitution Failure Is Not An Error (SFINAE) trick to inspect T
-// for the existence of AddRef() and Release() functions of the correct
-// signature.
-//
-// http://en.wikipedia.org/wiki/Substitution_failure_is_not_an_error
-// http://stackoverflow.com/questions/257288/is-it-possible-to-write-a-c-template-to-check-for-a-functions-existence
-// http://stackoverflow.com/questions/4358584/sfinae-approach-comparison
-// http://stackoverflow.com/questions/1966362/sfinae-to-check-for-inherited-member-functions
-//
-// The last link in particular show the method used below.
-//
-// For SFINAE to work with inherited methods, we need to pull some extra tricks
-// with multiple inheritance. In the more standard formulation, the overloads
-// of Check would be:
-//
-// template <typename C>
-// Yes NotTheCheckWeWant(Helper<&C::TargetFunc>*);
-//
-// template <typename C>
-// No NotTheCheckWeWant(...);
-//
-// static const bool value = sizeof(NotTheCheckWeWant<T>(0)) == sizeof(Yes);
-//
-// The problem here is that template resolution will not match
-// C::TargetFunc if TargetFunc does not exist directly in C. That is, if
-// TargetFunc in inherited from an ancestor, &C::TargetFunc will not match,
-// |value| will be false. This formulation only checks for whether or
-// not TargetFunc exist directly in the class being introspected.
-//
-// To get around this, we play a dirty trick with multiple inheritance.
-// First, We create a class BaseMixin that declares each function that we
-// want to probe for. Then we create a class Base that inherits from both T
-// (the class we wish to probe) and BaseMixin. Note that the function
-// signature in BaseMixin does not need to match the signature of the function
-// we are probing for; thus it's easiest to just use void().
-//
-// Now, if TargetFunc exists somewhere in T, then &Base::TargetFunc has an
-// ambiguous resolution between BaseMixin and T. This lets us write the
-// following:
-//
-// template <typename C>
-// No GoodCheck(Helper<&C::TargetFunc>*);
-//
-// template <typename C>
-// Yes GoodCheck(...);
-//
-// static const bool value = sizeof(GoodCheck<Base>(0)) == sizeof(Yes);
-//
-// Notice here that the variadic version of GoodCheck() returns Yes here
-// instead of No like the previous one. Also notice that we calculate |value|
-// by specializing GoodCheck() on Base instead of T.
-//
-// We've reversed the roles of the variadic, and Helper overloads.
-// GoodCheck(Helper<&C::TargetFunc>*), when C = Base, fails to be a valid
-// substitution if T::TargetFunc exists. Thus GoodCheck<Base>(0) will resolve
-// to the variadic version if T has TargetFunc. If T::TargetFunc does not
-// exist, then &C::TargetFunc is not ambiguous, and the overload resolution
-// will prefer GoodCheck(Helper<&C::TargetFunc>*).
-//
-// This method of SFINAE will correctly probe for inherited names, but it cannot
-// typecheck those names. It's still a good enough sanity check though.
-//
-// Works on gcc-4.2, gcc-4.4, and Visual Studio 2008.
-//
-// TODO(ajwong): Move to ref_counted.h or template_util.h when we've vetted
-// this works well.
-//
-// TODO(ajwong): Make this check for Release() as well.
-// See http://crbug.com/82038.
-template <typename T>
-class SupportsAddRefAndRelease {
- using Yes = char[1];
- using No = char[2];
-
- struct BaseMixin {
- void AddRef();
- };
-
-// MSVC warns when you try to use Base if T has a private destructor, the
-// common pattern for refcounted types. It does this even though no attempt to
-// instantiate Base is made. We disable the warning for this definition.
-#if defined(OS_WIN)
-#pragma warning(push)
-#pragma warning(disable:4624)
-#endif
- struct Base : public T, public BaseMixin {
- };
-#if defined(OS_WIN)
-#pragma warning(pop)
-#endif
-
- template <void(BaseMixin::*)()> struct Helper {};
-
- template <typename C>
- static No& Check(Helper<&C::AddRef>*);
-
- template <typename >
- static Yes& Check(...);
-
- public:
- enum { value = sizeof(Check<Base>(0)) == sizeof(Yes) };
-};
-
-// Helpers to assert that arguments of a recounted type are bound with a
-// scoped_refptr.
-template <bool IsClasstype, typename T>
-struct UnsafeBindtoRefCountedArgHelper : std::false_type {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArgHelper<true, T>
- : std::integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
-};
template <typename T>
-struct UnsafeBindtoRefCountedArg : std::false_type {
-};
+struct IsWeakReceiver;
-template <typename T>
-struct UnsafeBindtoRefCountedArg<T*>
- : UnsafeBindtoRefCountedArgHelper<std::is_class<T>::value, T> {
-};
-
-template <typename T>
-class HasIsMethodTag {
- using Yes = char[1];
- using No = char[2];
-
- template <typename U>
- static Yes& Check(typename U::IsMethod*);
-
- template <typename U>
- static No& Check(...);
-
- public:
- enum { value = sizeof(Check<T>(0)) == sizeof(Yes) };
-};
+namespace internal {
template <typename T>
class UnretainedWrapper {
@@ -340,18 +206,12 @@ class RetainedRefWrapper {
template <typename T>
struct IgnoreResultHelper {
- explicit IgnoreResultHelper(T functor) : functor_(functor) {}
+ explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
+ explicit operator bool() const { return !!functor_; }
T functor_;
};
-template <typename T>
-struct IgnoreResultHelper<Callback<T> > {
- explicit IgnoreResultHelper(const Callback<T>& functor) : functor_(functor) {}
-
- const Callback<T>& functor_;
-};
-
// An alternate implementation is to avoid the destructive copy, and instead
// specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
// a class that is essentially a std::unique_ptr<>.
@@ -365,7 +225,7 @@ class OwnedWrapper {
explicit OwnedWrapper(T* o) : ptr_(o) {}
~OwnedWrapper() { delete ptr_; }
T* get() const { return ptr_; }
- OwnedWrapper(const OwnedWrapper& other) {
+ OwnedWrapper(OwnedWrapper&& other) {
ptr_ = other.ptr_;
other.ptr_ = NULL;
}
@@ -402,7 +262,7 @@ class PassedWrapper {
public:
explicit PassedWrapper(T&& scoper)
: is_valid_(true), scoper_(std::move(scoper)) {}
- PassedWrapper(const PassedWrapper& other)
+ PassedWrapper(PassedWrapper&& other)
: is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
T Take() const {
CHECK(is_valid_);
@@ -417,17 +277,17 @@ class PassedWrapper {
// Unwrap the stored parameters for the wrappers above.
template <typename T>
-const T& Unwrap(const T& o) {
- return o;
+T&& Unwrap(T&& o) {
+ return std::forward<T>(o);
}
template <typename T>
-T* Unwrap(UnretainedWrapper<T> unretained) {
+T* Unwrap(const UnretainedWrapper<T>& unretained) {
return unretained.get();
}
template <typename T>
-const T& Unwrap(ConstRefWrapper<T> const_ref) {
+const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
return const_ref.get();
}
@@ -437,17 +297,12 @@ T* Unwrap(const RetainedRefWrapper<T>& o) {
}
template <typename T>
-const WeakPtr<T>& Unwrap(const WeakPtr<T>& o) {
- return o;
-}
-
-template <typename T>
T* Unwrap(const OwnedWrapper<T>& o) {
return o.get();
}
template <typename T>
-T Unwrap(PassedWrapper<T>& o) {
+T Unwrap(const PassedWrapper<T>& o) {
return o.Take();
}
@@ -458,16 +313,11 @@ T Unwrap(PassedWrapper<T>& o) {
//
// The first argument should be the type of the object that will be received by
// the method.
-template <bool IsMethod, typename... Args>
-struct IsWeakMethod : public std::false_type {};
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
template <typename T, typename... Args>
-struct IsWeakMethod<true, WeakPtr<T>, Args...> : public std::true_type {};
-
-template <typename T, typename... Args>
-struct IsWeakMethod<true, ConstRefWrapper<WeakPtr<T>>, Args...>
- : public std::true_type {};
-
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
// Packs a list of types to hold them in a single type.
template <typename... Types>
@@ -550,19 +400,25 @@ struct MakeFunctionTypeImpl<R, TypeList<Args...>> {
template <typename R, typename ArgList>
using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
-// Used for ExtractArgs.
+// Used for ExtractArgs and ExtractReturnType.
template <typename Signature>
struct ExtractArgsImpl;
template <typename R, typename... Args>
struct ExtractArgsImpl<R(Args...)> {
- using Type = TypeList<Args...>;
+ using ReturnType = R;
+ using ArgsList = TypeList<Args...>;
};
// A type-level function that extracts function arguments into a TypeList.
// E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
template <typename Signature>
-using ExtractArgs = typename ExtractArgsImpl<Signature>::Type;
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
} // namespace internal
@@ -611,13 +467,7 @@ static inline internal::PassedWrapper<T> Passed(T* scoper) {
template <typename T>
static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
- return internal::IgnoreResultHelper<T>(data);
-}
-
-template <typename T>
-static inline internal::IgnoreResultHelper<Callback<T> >
-IgnoreResult(const Callback<T>& data) {
- return internal::IgnoreResultHelper<Callback<T> >(data);
+ return internal::IgnoreResultHelper<T>(std::move(data));
}
BASE_EXPORT void DoNothing();
@@ -627,6 +477,26 @@ void DeletePointer(T* obj) {
delete obj;
}
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used for a receiver of a
+// method, base::Bind cancels the method invocation if the receiver is tested as
+// false.
+// E.g. Foo::bar() is not called:
+// struct Foo : base::SupportsWeakPtr<Foo> {
+// void bar() {}
+// };
+//
+// WeakPtr<Foo> oo = nullptr;
+// base::Bind(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+template <typename T>
+struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
} // namespace base
#endif // BASE_BIND_HELPERS_H_
diff --git a/base/bind_internal.h b/base/bind_internal.h
index 6e0a425eab..3d6ca09c41 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -7,6 +7,7 @@
#include <stddef.h>
+#include <tuple>
#include <type_traits>
#include "base/bind_helpers.h"
@@ -17,10 +18,6 @@
#include "base/tuple.h"
#include "build/build_config.h"
-#if defined(OS_WIN)
-#include "base/bind_internal_win.h"
-#endif
-
namespace base {
namespace internal {
@@ -28,56 +25,80 @@ namespace internal {
//
//
// CONCEPTS:
-// Runnable -- A type (really a type class) that has a single Run() method
-// and a RunType typedef that corresponds to the type of Run().
-// A Runnable can declare that it should treated like a method
-// call by including a typedef named IsMethod. The value of
-// this typedef is NOT inspected, only the existence. When a
-// Runnable declares itself a method, Bind() will enforce special
-// refcounting + WeakPtr handling semantics for the first
-// parameter which is expected to be an object.
-// Functor -- A copyable type representing something that should be called.
-// All function pointers, Callback<>, and Runnables are functors
-// even if the invocation syntax differs.
+// Functor -- A movable type representing something that should be called.
+// All function pointers and Callback<> are functors even if the
+// invocation syntax differs.
// RunType -- A function type (as opposed to function _pointer_ type) for
-// a Run() function. Usually just a convenience typedef.
+// a Callback<>::Run(). Usually just a convenience typedef.
// (Bound)Args -- A set of types that stores the arguments.
//
// Types:
-// RunnableAdapter<> -- Wraps the various "function" pointer types into an
-// object that adheres to the Runnable interface.
// ForceVoidReturn<> -- Helper class for translating function signatures to
// equivalent forms with a "void" return type.
-// FunctorTraits<> -- Type traits used determine the correct RunType and
-// RunnableType for a Functor. This is where function
+// FunctorTraits<> -- Type traits used to determine the correct RunType and
+// invocation manner for a Functor. This is where function
// signature adapters are applied.
-// MakeRunnable<> -- Takes a Functor and returns an object in the Runnable
-// type class that represents the underlying Functor.
-// InvokeHelper<> -- Take a Runnable + arguments and actully invokes it.
+// InvokeHelper<> -- Take a Functor + arguments and actully invokes it.
// Handle the differing syntaxes needed for WeakPtr<>
-// support, and for ignoring return values. This is separate
-// from Invoker to avoid creating multiple version of
-// Invoker<>.
-// Invoker<> -- Unwraps the curried parameters and executes the Runnable.
+// support. This is separate from Invoker to avoid creating
+// multiple version of Invoker<>.
+// Invoker<> -- Unwraps the curried parameters and executes the Functor.
// BindState<> -- Stores the curried parameters, and is the main entry point
-// into the Bind() system, doing most of the type resolution.
-// There are ARITY BindState types.
+// into the Bind() system.
-// HasNonConstReferenceParam selects true_type when any of the parameters in
-// |Sig| is a non-const reference.
-// Implementation note: This non-specialized case handles zero-arity case only.
-// Non-zero-arity cases should be handled by the specialization below.
-template <typename List>
-struct HasNonConstReferenceItem : std::false_type {};
+template <typename...>
+struct make_void {
+ using type = void;
+};
-// Implementation note: Select true_type if the first parameter is a non-const
-// reference. Otherwise, skip the first parameter and check rest of parameters
-// recursively.
-template <typename T, typename... Args>
-struct HasNonConstReferenceItem<TypeList<T, Args...>>
- : std::conditional<is_non_const_reference<T>::value,
- std::true_type,
- HasNonConstReferenceItem<TypeList<Args...>>>::type {};
+// A clone of C++17 std::void_t.
+// Unlike the original version, we need |make_void| as a helper struct to avoid
+// a C++14 defect.
+// ref: http://en.cppreference.com/w/cpp/types/void_t
+// ref: http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+template <typename Callable,
+ typename Signature = decltype(&Callable::operator())>
+struct ExtractCallableRunTypeImpl;
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R(Callable::*)(Args...) const> {
+ using Type = R(Args...);
+};
+
+// Evaluated to RunType of the given callable type.
+// Example:
+// auto f = [](int, char*) { return 0.1; };
+// ExtractCallableRunType<decltype(f)>
+// is evaluated to
+// double(int, char*);
+template <typename Callable>
+using ExtractCallableRunType =
+ typename ExtractCallableRunTypeImpl<Callable>::Type;
+
+// IsConvertibleToRunType<Functor> is std::true_type if |Functor| has operator()
+// and convertible to the corresponding function pointer. Otherwise, it's
+// std::false_type.
+// Example:
+// IsConvertibleToRunType<void(*)()>::value is false.
+//
+// struct Foo {};
+// IsConvertibleToRunType<void(Foo::*)()>::value is false.
+//
+// auto f = []() {};
+// IsConvertibleToRunType<decltype(f)>::value is true.
+//
+// int i = 0;
+// auto g = [i]() {};
+// IsConvertibleToRunType<decltype(g)>::value is false.
+template <typename Functor, typename SFINAE = void>
+struct IsConvertibleToRunType : std::false_type {};
+
+template <typename Callable>
+struct IsConvertibleToRunType<Callable, void_t<decltype(&Callable::operator())>>
+ : std::is_convertible<Callable, ExtractCallableRunType<Callable>*> {};
// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
// pointer to a RefCounted type.
@@ -95,354 +116,338 @@ struct HasRefCountedTypeAsRawPtr<T, Args...>
std::true_type,
HasRefCountedTypeAsRawPtr<Args...>>::type {};
-// BindsArrayToFirstArg selects true_type when |is_method| is true and the first
-// item of |Args| is an array type.
-// Implementation note: This non-specialized case handles !is_method case and
-// zero-arity case only. Other cases should be handled by the specialization
-// below.
-template <bool is_method, typename... Args>
-struct BindsArrayToFirstArg : std::false_type {};
-
-template <typename T, typename... Args>
-struct BindsArrayToFirstArg<true, T, Args...>
- : std::is_array<typename std::remove_reference<T>::type> {};
-
-// HasRefCountedParamAsRawPtr is the same to HasRefCountedTypeAsRawPtr except
-// when |is_method| is true HasRefCountedParamAsRawPtr skips the first argument.
-// Implementation note: This non-specialized case handles !is_method case and
-// zero-arity case only. Other cases should be handled by the specialization
-// below.
-template <bool is_method, typename... Args>
-struct HasRefCountedParamAsRawPtr : HasRefCountedTypeAsRawPtr<Args...> {};
+// ForceVoidReturn<>
+//
+// Set of templates that support forcing the function return type to void.
+template <typename Sig>
+struct ForceVoidReturn;
-template <typename T, typename... Args>
-struct HasRefCountedParamAsRawPtr<true, T, Args...>
- : HasRefCountedTypeAsRawPtr<Args...> {};
+template <typename R, typename... Args>
+struct ForceVoidReturn<R(Args...)> {
+ using RunType = void(Args...);
+};
-// RunnableAdapter<>
-//
-// The RunnableAdapter<> templates provide a uniform interface for invoking
-// a function pointer, method pointer, or const method pointer. The adapter
-// exposes a Run() method with an appropriate signature. Using this wrapper
-// allows for writing code that supports all three pointer types without
-// undue repetition. Without it, a lot of code would need to be repeated 3
-// times.
-//
-// For method pointers and const method pointers the first argument to Run()
-// is considered to be the received of the method. This is similar to STL's
-// mem_fun().
-//
-// This class also exposes a RunType typedef that is the function type of the
-// Run() function.
+// FunctorTraits<>
//
-// If and only if the wrapper contains a method or const method pointer, an
-// IsMethod typedef is exposed. The existence of this typedef (NOT the value)
-// marks that the wrapper should be considered a method wrapper.
+// See description at top of file.
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+// For a callable type that is convertible to the corresponding function type.
+// This specialization is intended to allow binding captureless lambdas by
+// base::Bind(), based on the fact that captureless lambdas can be convertible
+// to the function type while capturing lambdas can't.
template <typename Functor>
-class RunnableAdapter;
+struct FunctorTraits<
+ Functor,
+ typename std::enable_if<IsConvertibleToRunType<Functor>::value>::type> {
+ using RunType = ExtractCallableRunType<Functor>;
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = false;
-// Function.
+ template <typename... RunArgs>
+ static ExtractReturnType<RunType>
+ Invoke(const Functor& functor, RunArgs&&... args) {
+ return functor(std::forward<RunArgs>(args)...);
+ }
+};
+
+// For functions.
template <typename R, typename... Args>
-class RunnableAdapter<R(*)(Args...)> {
- public:
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef R RunType(Args...);
+struct FunctorTraits<R (*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
- explicit RunnableAdapter(R(*function)(Args...))
- : function_(function) {
+ template <typename... RunArgs>
+ static R Invoke(R (*function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
}
+};
+
+#if defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__stdcall*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
template <typename... RunArgs>
- R Run(RunArgs&&... args) {
- return function_(std::forward<RunArgs>(args)...);
+ static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
}
-
- private:
- R (*function_)(Args...);
};
-// Method.
-template <typename R, typename T, typename... Args>
-class RunnableAdapter<R(T::*)(Args...)> {
- public:
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef R RunType(T*, Args...);
- using IsMethod = std::true_type;
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__fastcall*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
- explicit RunnableAdapter(R(T::*method)(Args...))
- : method_(method) {
+ template <typename... RunArgs>
+ static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
}
+};
- template <typename Receiver, typename... RunArgs>
- R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+#endif // defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...)> {
+ using RunType = R(Receiver*, Args...);
+ static constexpr bool is_method = true;
+ static constexpr bool is_nullable = true;
+
+ template <typename ReceiverPtr, typename... RunArgs>
+ static R Invoke(R (Receiver::*method)(Args...),
+ ReceiverPtr&& receiver_ptr,
+ RunArgs&&... args) {
    // Clang skips CV qualifier check on a method pointer invocation when the
    // receiver is a subclass. Store the receiver into an unqualified reference
    // to Receiver to ensure the CV check works.
    // https://llvm.org/bugs/show_bug.cgi?id=27037
- T& receiver = *receiver_ptr;
- return (receiver.*method_)(std::forward<RunArgs>(args)...);
+ Receiver& receiver = *receiver_ptr;
+ return (receiver.*method)(std::forward<RunArgs>(args)...);
}
-
- private:
- R (T::*method_)(Args...);
};
-// Const Method.
-template <typename R, typename T, typename... Args>
-class RunnableAdapter<R(T::*)(Args...) const> {
- public:
- using RunType = R(const T*, Args...);
- using IsMethod = std::true_type;
-
- explicit RunnableAdapter(R(T::*method)(Args...) const)
- : method_(method) {
- }
-
- template <typename Receiver, typename... RunArgs>
- R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+// For const methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const> {
+ using RunType = R(const Receiver*, Args...);
+ static constexpr bool is_method = true;
+ static constexpr bool is_nullable = true;
+
+ template <typename ReceiverPtr, typename... RunArgs>
+ static R Invoke(R (Receiver::*method)(Args...) const,
+ ReceiverPtr&& receiver_ptr,
+ RunArgs&&... args) {
// Clang skips CV qualifier check on a method pointer invocation when the
- // receiver is a subclass. Store the receiver into a unqualified reference
- // to T to ensure the CV check works.
+    // receiver is a subclass. Store the receiver into a const reference to
+    // Receiver to ensure the CV check works.
// https://llvm.org/bugs/show_bug.cgi?id=27037
- const T& receiver = *receiver_ptr;
- return (receiver.*method_)(std::forward<RunArgs>(args)...);
+ const Receiver& receiver = *receiver_ptr;
+ return (receiver.*method)(std::forward<RunArgs>(args)...);
}
-
- private:
- R (T::*method_)(Args...) const;
};
-
-// ForceVoidReturn<>
-//
-// Set of templates that support forcing the function return type to void.
-template <typename Sig>
-struct ForceVoidReturn;
-
-template <typename R, typename... Args>
-struct ForceVoidReturn<R(Args...)> {
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef void RunType(Args...);
-};
-
-
-// FunctorTraits<>
-//
-// See description at top of file.
+// For IgnoreResults.
template <typename T>
-struct FunctorTraits {
- using RunnableType = RunnableAdapter<T>;
- using RunType = typename RunnableType::RunType;
-};
-
-template <typename T>
-struct FunctorTraits<IgnoreResultHelper<T>> {
- using RunnableType = typename FunctorTraits<T>::RunnableType;
+struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
using RunType =
- typename ForceVoidReturn<typename RunnableType::RunType>::RunType;
-};
+ typename ForceVoidReturn<typename FunctorTraits<T>::RunType>::RunType;
-template <typename T>
-struct FunctorTraits<Callback<T>> {
- using RunnableType = Callback<T> ;
- using RunType = typename Callback<T>::RunType;
+ template <typename IgnoreResultType, typename... RunArgs>
+ static void Invoke(IgnoreResultType&& ignore_result_helper,
+ RunArgs&&... args) {
+ FunctorTraits<T>::Invoke(ignore_result_helper.functor_,
+ std::forward<RunArgs>(args)...);
+ }
};
-
-// MakeRunnable<>
-//
-// Converts a passed in functor to a RunnableType using type inference.
-
-template <typename T>
-typename FunctorTraits<T>::RunnableType MakeRunnable(const T& t) {
- return RunnableAdapter<T>(t);
-}
-
-template <typename T>
-typename FunctorTraits<T>::RunnableType
-MakeRunnable(const IgnoreResultHelper<T>& t) {
- return MakeRunnable(t.functor_);
-}
-
-template <typename T>
-const typename FunctorTraits<Callback<T>>::RunnableType&
-MakeRunnable(const Callback<T>& t) {
- DCHECK(!t.is_null());
- return t;
-}
-
+// For Callbacks.
+template <typename R, typename... Args, CopyMode copy_mode>
+struct FunctorTraits<Callback<R(Args...), copy_mode>> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename CallbackType, typename... RunArgs>
+ static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+ DCHECK(!callback.is_null());
+ return std::forward<CallbackType>(callback).Run(
+ std::forward<RunArgs>(args)...);
+ }
+};
// InvokeHelper<>
//
-// There are 3 logical InvokeHelper<> specializations: normal, void-return,
-// WeakCalls.
+// There are 2 logical InvokeHelper<> specializations: normal, WeakCalls.
//
// The normal type just calls the underlying runnable.
//
-// We need a InvokeHelper to handle void return types in order to support
-// IgnoreResult(). Normally, if the Runnable's RunType had a void return,
-// the template system would just accept "return functor.Run()" ignoring
-// the fact that a void function is being used with return. This piece of
-// sugar breaks though when the Runnable's RunType is not void. Thus, we
-// need a partial specialization to change the syntax to drop the "return"
-// from the invocation call.
-//
-// WeakCalls similarly need special syntax that is applied to the first
-// argument to check if they should no-op themselves.
-template <bool IsWeakCall, typename ReturnType, typename Runnable>
+// WeakCalls need special syntax that is applied to the first argument to check
+// if they should no-op themselves.
+template <bool is_weak_call, typename ReturnType>
struct InvokeHelper;
-template <typename ReturnType, typename Runnable>
-struct InvokeHelper<false, ReturnType, Runnable> {
- template <typename... RunArgs>
- static ReturnType MakeItSo(Runnable runnable, RunArgs&&... args) {
- return runnable.Run(std::forward<RunArgs>(args)...);
- }
-};
-
-template <typename Runnable>
-struct InvokeHelper<false, void, Runnable> {
- template <typename... RunArgs>
- static void MakeItSo(Runnable runnable, RunArgs&&... args) {
- runnable.Run(std::forward<RunArgs>(args)...);
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+ template <typename Functor, typename... RunArgs>
+ static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
+ using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+ return Traits::Invoke(std::forward<Functor>(functor),
+ std::forward<RunArgs>(args)...);
}
};
-template <typename Runnable>
-struct InvokeHelper<true, void, Runnable> {
- template <typename BoundWeakPtr, typename... RunArgs>
- static void MakeItSo(Runnable runnable,
- BoundWeakPtr weak_ptr,
- RunArgs&&... args) {
- if (!weak_ptr.get()) {
- return;
- }
- runnable.Run(weak_ptr.get(), std::forward<RunArgs>(args)...);
- }
-};
-
-#if !defined(_MSC_VER)
-
-template <typename ReturnType, typename Runnable>
-struct InvokeHelper<true, ReturnType, Runnable> {
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
// WeakCalls are only supported for functions with a void return type.
  // Otherwise, the function result would be undefined if the WeakPtr<>
// is invalidated.
static_assert(std::is_void<ReturnType>::value,
"weak_ptrs can only bind to methods without return values");
-};
-#endif
+ template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
+ static inline void MakeItSo(Functor&& functor,
+ BoundWeakPtr&& weak_ptr,
+ RunArgs&&... args) {
+ if (!weak_ptr)
+ return;
+ using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+ Traits::Invoke(std::forward<Functor>(functor),
+ std::forward<BoundWeakPtr>(weak_ptr),
+ std::forward<RunArgs>(args)...);
+ }
+};
// Invoker<>
//
// See description at the top of the file.
-template <typename BoundIndices, typename StorageType,
- typename InvokeHelperType, typename UnboundForwardRunType>
+template <typename StorageType, typename UnboundRunType>
struct Invoker;
-template <size_t... bound_indices,
- typename StorageType,
- typename InvokeHelperType,
- typename R,
- typename... UnboundArgs>
-struct Invoker<IndexSequence<bound_indices...>,
- StorageType,
- InvokeHelperType,
- R(UnboundArgs...)> {
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
- StorageType* storage = static_cast<StorageType*>(base);
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
- return InvokeHelperType::MakeItSo(
- storage->runnable_, Unwrap(get<bound_indices>(storage->bound_args_))...,
- std::forward<UnboundArgs>(unbound_args)...);
+ const StorageType* storage = static_cast<StorageType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return RunImpl(storage->functor_,
+ storage->bound_args_,
+ MakeIndexSequence<num_bound_args>(),
+ std::forward<UnboundArgs>(unbound_args)...);
}
-};
-// Used to implement MakeArgsStorage.
-template <bool is_method, typename... BoundArgs>
-struct MakeArgsStorageImpl {
- using Type = std::tuple<BoundArgs...>;
+ private:
+ template <typename Functor, typename BoundArgsTuple, size_t... indices>
+ static inline R RunImpl(Functor&& functor,
+ BoundArgsTuple&& bound,
+ IndexSequence<indices...>,
+ UnboundArgs&&... unbound_args) {
+ static constexpr bool is_method =
+ FunctorTraits<typename std::decay<Functor>::type>::is_method;
+
+ using DecayedArgsTuple = typename std::decay<BoundArgsTuple>::type;
+ static constexpr bool is_weak_call =
+ IsWeakMethod<is_method,
+ typename std::tuple_element<
+ indices,
+ DecayedArgsTuple>::type...>::value;
+
+ return InvokeHelper<is_weak_call, R>::MakeItSo(
+ std::forward<Functor>(functor),
+ Unwrap(base::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+ std::forward<UnboundArgs>(unbound_args)...);
+ }
};
-template <typename Obj, typename... BoundArgs>
-struct MakeArgsStorageImpl<true, Obj*, BoundArgs...> {
- using Type = std::tuple<scoped_refptr<Obj>, BoundArgs...>;
+// Used to implement MakeUnboundRunType.
+template <typename Functor, typename... BoundArgs>
+struct MakeUnboundRunTypeImpl {
+ using RunType =
+ typename FunctorTraits<typename std::decay<Functor>::type>::RunType;
+ using ReturnType = ExtractReturnType<RunType>;
+ using Args = ExtractArgs<RunType>;
+ using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), Args>;
+ using Type = MakeFunctionType<ReturnType, UnboundArgs>;
};
+template <typename Functor>
+typename std::enable_if<FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor& functor) {
+ return !functor;
+}
-// Constructs a tuple type to store BoundArgs into BindState.
-// This wraps the first argument into a scoped_refptr if |is_method| is true and
-// the first argument is a raw pointer.
-// Other arguments are adjusted for store and packed into a tuple.
-template <bool is_method, typename... BoundArgs>
-using MakeArgsStorage = typename MakeArgsStorageImpl<
- is_method, typename std::decay<BoundArgs>::type...>::Type;
+template <typename Functor>
+typename std::enable_if<!FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor&) {
+ return false;
+}
// BindState<>
//
-// This stores all the state passed into Bind() and is also where most
-// of the template resolution magic occurs.
-//
-// Runnable is the functor we are binding arguments to.
-// RunType is type of the Run() function that the Invoker<> should use.
-// Normally, this is the same as the RunType of the Runnable, but it can
-// be different if an adapter like IgnoreResult() has been used.
-//
-// BoundArgs contains the storage type for all the bound arguments.
-template <typename Runnable, typename RunType, typename... BoundArgs>
-struct BindState;
-
-template <typename Runnable,
- typename R,
- typename... Args,
- typename... BoundArgs>
-struct BindState<Runnable, R(Args...), BoundArgs...> final
- : public BindStateBase {
- private:
- using StorageType = BindState<Runnable, R(Args...), BoundArgs...>;
- using RunnableType = Runnable;
+// This stores all the state passed into Bind().
+template <typename Functor, typename... BoundArgs>
+struct BindState final : BindStateBase {
+ template <typename ForwardFunctor, typename... ForwardBoundArgs>
+ explicit BindState(ForwardFunctor&& functor, ForwardBoundArgs&&... bound_args)
+ : BindStateBase(&Destroy),
+ functor_(std::forward<ForwardFunctor>(functor)),
+ bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+ DCHECK(!IsNull(functor_));
+ }
- enum { is_method = HasIsMethodTag<Runnable>::value };
+ Functor functor_;
+ std::tuple<BoundArgs...> bound_args_;
- // true_type if Runnable is a method invocation and the first bound argument
- // is a WeakPtr.
- using IsWeakCall =
- IsWeakMethod<is_method, typename std::decay<BoundArgs>::type...>;
+ private:
+ ~BindState() {}
- using BoundIndices = MakeIndexSequence<sizeof...(BoundArgs)>;
- using InvokeHelperType = InvokeHelper<IsWeakCall::value, R, Runnable>;
+ static void Destroy(BindStateBase* self) {
+ delete static_cast<BindState*>(self);
+ }
+};
- using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), TypeList<Args...>>;
+// Used to implement MakeBindStateType.
+template <bool is_method, typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl;
- public:
- using UnboundRunType = MakeFunctionType<R, UnboundArgs>;
- using InvokerType =
- Invoker<BoundIndices, StorageType, InvokeHelperType, UnboundRunType>;
+template <typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl<false, Functor, BoundArgs...> {
+ static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+ "A parameter is a refcounted type and needs scoped_refptr.");
+ using Type = BindState<typename std::decay<Functor>::type,
+ typename std::decay<BoundArgs>::type...>;
+};
- template <typename... ForwardArgs>
- BindState(const Runnable& runnable, ForwardArgs&&... bound_args)
- : BindStateBase(&Destroy),
- runnable_(runnable),
- bound_args_(std::forward<ForwardArgs>(bound_args)...) {}
+template <typename Functor>
+struct MakeBindStateTypeImpl<true, Functor> {
+ using Type = BindState<typename std::decay<Functor>::type>;
+};
- RunnableType runnable_;
- MakeArgsStorage<is_method, BoundArgs...> bound_args_;
+template <typename Functor, typename Receiver, typename... BoundArgs>
+struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
+ static_assert(
+ !std::is_array<typename std::remove_reference<Receiver>::type>::value,
+ "First bound argument to a method cannot be an array.");
+ static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+ "A parameter is a refcounted type and needs scoped_refptr.");
private:
- ~BindState() {}
+ using DecayedReceiver = typename std::decay<Receiver>::type;
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
- }
+ public:
+ using Type = BindState<
+ typename std::decay<Functor>::type,
+ typename std::conditional<
+ std::is_pointer<DecayedReceiver>::value,
+ scoped_refptr<typename std::remove_pointer<DecayedReceiver>::type>,
+ DecayedReceiver>::type,
+ typename std::decay<BoundArgs>::type...>;
};
+template <typename Functor, typename... BoundArgs>
+using MakeBindStateType = typename MakeBindStateTypeImpl<
+ FunctorTraits<typename std::decay<Functor>::type>::is_method,
+ Functor,
+ BoundArgs...>::Type;
+
} // namespace internal
+
+// Returns a RunType of bound functor.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+ typename internal::MakeUnboundRunTypeImpl<Functor, BoundArgs...>::Type;
+
} // namespace base
#endif // BASE_BIND_INTERNAL_H_
diff --git a/base/bind_internal_win.h b/base/bind_internal_win.h
deleted file mode 100644
index 2def874382..0000000000
--- a/base/bind_internal_win.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Specializations of RunnableAdapter<> for Windows specific calling
-// conventions. Please see base/bind_internal.h for more info.
-
-#ifndef BASE_BIND_INTERNAL_WIN_H_
-#define BASE_BIND_INTERNAL_WIN_H_
-
-#include <utility>
-
-#include "build/build_config.h"
-
-// In the x64 architecture in Windows, __fastcall, __stdcall, etc, are all
-// the same as __cdecl which would turn the following specializations into
-// multiple definitions.
-#if !defined(ARCH_CPU_X86_64)
-
-namespace base {
-namespace internal {
-
-template <typename Functor>
-class RunnableAdapter;
-
-// __stdcall Function.
-template <typename R, typename... Args>
-class RunnableAdapter<R(__stdcall *)(Args...)> {
- public:
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef R RunType(Args...);
-
- explicit RunnableAdapter(R(__stdcall *function)(Args...))
- : function_(function) {
- }
-
- template <typename... RunArgs>
- R Run(RunArgs&&... args) {
- return function_(std::forward<RunArgs>(args)...);
- }
-
- private:
- R (__stdcall *function_)(Args...);
-};
-
-// __fastcall Function.
-template <typename R, typename... Args>
-class RunnableAdapter<R(__fastcall *)(Args...)> {
- public:
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef R RunType(Args...);
-
- explicit RunnableAdapter(R(__fastcall *function)(Args...))
- : function_(function) {
- }
-
- template <typename... RunArgs>
- R Run(RunArgs&&... args) {
- return function_(std::forward<RunArgs>(args)...);
- }
-
- private:
- R (__fastcall *function_)(Args...);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // !defined(ARCH_CPU_X86_64)
-
-#endif // BASE_BIND_INTERNAL_WIN_H_
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index de1886555d..ba5113b507 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -63,7 +63,7 @@ static const int kChildValue = 2;
class Parent {
public:
- virtual ~Parent() = default;
+ virtual ~Parent() {}
void AddRef() const {}
void Release() const {}
virtual void VirtualSet() { value = kParentValue; }
@@ -73,14 +73,14 @@ class Parent {
class Child : public Parent {
public:
- ~Child() override = default;
+ ~Child() override {}
void VirtualSet() override { value = kChildValue; }
void NonVirtualSet() { value = kChildValue; }
};
class NoRefParent {
public:
- virtual ~NoRefParent() = default;
+ virtual ~NoRefParent() {}
virtual void VirtualSet() { value = kParentValue; }
void NonVirtualSet() { value = kParentValue; }
int value;
@@ -88,7 +88,8 @@ class NoRefParent {
class NoRefChild : public NoRefParent {
public:
- ~NoRefChild() override = default;
+ ~NoRefChild() override {}
+ private:
void VirtualSet() override { value = kChildValue; }
void NonVirtualSet() { value = kChildValue; }
};
@@ -655,28 +656,6 @@ TEST_F(BindTest, ArrayArgumentBinding) {
EXPECT_EQ(3, const_array_cb.Run());
}
-// Verify SupportsAddRefAndRelease correctly introspects the class type for
-// AddRef() and Release().
-// - Class with AddRef() and Release()
-// - Class without AddRef() and Release()
-// - Derived Class with AddRef() and Release()
-// - Derived Class without AddRef() and Release()
-// - Derived Class with AddRef() and Release() and a private destructor.
-TEST_F(BindTest, SupportsAddRefAndRelease) {
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRef>::value);
- EXPECT_FALSE(internal::SupportsAddRefAndRelease<NoRef>::value);
-
- // StrictMock<T> is a derived class of T. So, we use StrictMock<HasRef> and
- // StrictMock<NoRef> to test that SupportsAddRefAndRelease works over
- // inheritance.
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<StrictMock<HasRef> >::value);
- EXPECT_FALSE(internal::SupportsAddRefAndRelease<StrictMock<NoRef> >::value);
-
- // This matters because the implementation creates a dummy class that
- // inherits from the template type.
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRefPrivateDtor>::value);
-}
-
// Unretained() wrapper support.
// - Method bound to Unretained() non-const object.
// - Const method bound to Unretained() non-const object.
@@ -1065,6 +1044,36 @@ TEST_F(BindTest, ArgumentCopiesAndMoves) {
EXPECT_EQ(0, move_assigns);
}
+TEST_F(BindTest, CapturelessLambda) {
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<int>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void(*)()>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void(NoRef::*)()>::value);
+
+ auto f = []() {};
+ EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
+
+ int i = 0;
+ auto g = [i]() {};
+ EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
+
+ auto h = [](int, double) { return 'k'; };
+ EXPECT_TRUE((std::is_same<
+ char(int, double),
+ internal::ExtractCallableRunType<decltype(h)>>::value));
+
+ EXPECT_EQ(42, Bind([] { return 42; }).Run());
+ EXPECT_EQ(42, Bind([](int i) { return i * 7; }, 6).Run());
+
+ int x = 1;
+ base::Callback<void(int)> cb =
+ Bind([](int* x, int i) { *x *= i; }, Unretained(&x));
+ cb.Run(6);
+ EXPECT_EQ(6, x);
+ cb.Run(7);
+ EXPECT_EQ(42, x);
+}
+
// Callback construction and assignment tests.
// - Construction from an InvokerStorageHolder should not cause ref/deref.
// - Assignment from other callback should only cause one ref
diff --git a/base/bit_cast.h b/base/bit_cast.h
new file mode 100644
index 0000000000..c9514bceef
--- /dev/null
+++ b/base/bit_cast.h
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIT_CAST_H_
+#define BASE_BIT_CAST_H_
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+// bit_cast<Dest,Source> is a template function that implements the equivalent
+// of "*reinterpret_cast<Dest*>(&source)". We need this in very low-level
+// functions like the protobuf library and fast math support.
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int32_t>(f);
+// // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+// // WRONG
+// float f = 3.14159265358979; // WRONG
+// int i = * reinterpret_cast<int*>(&f); // WRONG
+//
+// The address-casting method actually produces undefined behavior according to
+// the ISO C++98 specification, section 3.10 ("basic.lval"), paragraph 15.
+// (This did not substantially change in C++11.) Roughly, this section says: if
+// an object in memory has one type, and a program accesses it with a different
+// type, then the result is undefined behavior for most values of "different
+// type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f). And it is particularly true for conversions
+// between integral lvalues and floating-point lvalues.
+//
+// The purpose of this paragraph is to allow optimizing compilers to assume that
+// expressions with different types refer to different memory. Compilers are
+// known to take advantage of this. So a non-conforming program quietly
+// produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast. The problem is type punning:
+// holding an object in memory of one type and reading its bits back using a
+// different type.
+//
+// The C++ standard is more subtle and complex than this, but that is the basic
+// idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard, especially by the
+// example in section 3.9. Also, of course, bit_cast<> wraps up the nasty
+// logic in one place.
+//
+// Fortunately memcpy() is very fast. In optimized mode, compilers replace
+// calls to memcpy() with inline object code when the size argument is a
+// compile-time constant. On a 32-bit system, memcpy(d,s,4) compiles to one
+// load and one store, and memcpy(d,s,8) compiles to two loads and two stores.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "bit_cast requires source and destination to be the same size");
+
+#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
+ (defined(__clang__) && defined(_LIBCPP_VERSION)))
+ // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
+ // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
+ // However, with libc++ when GCC is the compiler the trait is buggy, see
+ // crbug.com/607158, so fall back to the less strict variant for non-clang.
+ static_assert(std::is_trivially_copyable<Dest>::value,
+ "non-trivially-copyable bit_cast is undefined");
+ static_assert(std::is_trivially_copyable<Source>::value,
+ "non-trivially-copyable bit_cast is undefined");
+#elif HAS_FEATURE(is_trivially_copyable)
+ // The compiler supports an equivalent intrinsic.
+ static_assert(__is_trivially_copyable(Dest),
+ "non-trivially-copyable bit_cast is undefined");
+ static_assert(__is_trivially_copyable(Source),
+ "non-trivially-copyable bit_cast is undefined");
+#elif COMPILER_GCC
+ // Fallback to compiler intrinsic on GCC and clang (which pretends to be
+ // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
+ // our purpose.
+ static_assert(__has_trivial_copy(Dest),
+ "non-trivially-copyable bit_cast is undefined");
+ static_assert(__has_trivial_copy(Source),
+ "non-trivially-copyable bit_cast is undefined");
+#else
+ // Do nothing, let the bots handle it.
+#endif
+
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+#endif // BASE_BIT_CAST_H_
diff --git a/base/callback.h b/base/callback.h
index abb907bef9..e087c731d1 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -345,14 +345,13 @@
// please include "base/callback_forward.h" instead.
namespace base {
-namespace internal {
-template <typename Runnable, typename RunType, typename... BoundArgsType>
-struct BindState;
-} // namespace internal
template <typename R, typename... Args, internal::CopyMode copy_mode>
class Callback<R(Args...), copy_mode>
: public internal::CallbackBase<copy_mode> {
+ private:
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+
public:
// MSVC 2013 doesn't support Type Alias of function types.
// Revisit this after we update it to newer version.
@@ -360,16 +359,9 @@ class Callback<R(Args...), copy_mode>
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
- template <typename Runnable, typename BindRunType, typename... BoundArgs>
- explicit Callback(
- internal::BindState<Runnable, BindRunType, BoundArgs...>* bind_state)
+ Callback(internal::BindStateBase* bind_state,
+ PolymorphicInvoke invoke_func)
: internal::CallbackBase<copy_mode>(bind_state) {
- // Force the assignment to a local variable of PolymorphicInvoke
- // so the compiler will typecheck that the passed in Run() method has
- // the correct type.
- PolymorphicInvoke invoke_func =
- &internal::BindState<Runnable, BindRunType, BoundArgs...>
- ::InvokerType::Run;
using InvokeFuncStorage =
typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
this->polymorphic_invoke_ =
@@ -396,9 +388,6 @@ class Callback<R(Args...), copy_mode>
reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
return f(this->bind_state_.get(), std::forward<Args>(args)...);
}
-
- private:
- using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
};
} // namespace base
diff --git a/base/callback_helpers.cc b/base/callback_helpers.cc
index ef02b2bde0..838e6c8d84 100644
--- a/base/callback_helpers.cc
+++ b/base/callback_helpers.cc
@@ -8,29 +8,33 @@
namespace base {
-ScopedClosureRunner::ScopedClosureRunner() {
-}
+ScopedClosureRunner::ScopedClosureRunner() {}
ScopedClosureRunner::ScopedClosureRunner(const Closure& closure)
- : closure_(closure) {
-}
+ : closure_(closure) {}
ScopedClosureRunner::~ScopedClosureRunner() {
if (!closure_.is_null())
closure_.Run();
}
-void ScopedClosureRunner::Reset() {
+ScopedClosureRunner::ScopedClosureRunner(ScopedClosureRunner&& other)
+ : closure_(other.Release()) {}
+
+ScopedClosureRunner& ScopedClosureRunner::operator=(
+ ScopedClosureRunner&& other) {
+ ReplaceClosure(other.Release());
+ return *this;
+}
+
+void ScopedClosureRunner::RunAndReset() {
Closure old_closure = Release();
if (!old_closure.is_null())
old_closure.Run();
}
-void ScopedClosureRunner::Reset(const Closure& closure) {
- Closure old_closure = Release();
+void ScopedClosureRunner::ReplaceClosure(const Closure& closure) {
closure_ = closure;
- if (!old_closure.is_null())
- old_closure.Run();
}
Closure ScopedClosureRunner::Release() {
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index 860803989f..782371f6e7 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -27,16 +27,27 @@ base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
return ret;
}
-// ScopedClosureRunner is akin to scoped_ptr for Closures. It ensures that the
-// Closure is executed and deleted no matter how the current scope exits.
+// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
+// that the Closure is executed no matter how the current scope exits.
class BASE_EXPORT ScopedClosureRunner {
public:
ScopedClosureRunner();
explicit ScopedClosureRunner(const Closure& closure);
~ScopedClosureRunner();
- void Reset();
- void Reset(const Closure& closure);
+ ScopedClosureRunner(ScopedClosureRunner&& other);
+
+ // Releases the current closure if it's set and replaces it with the closure
+ // from |other|.
+ ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
+
+  // Calls the current closure and resets it, so it won't be called again.
+ void RunAndReset();
+
+  // Replaces the closure with the new one, releasing the old one without calling it.
+ void ReplaceClosure(const Closure& closure);
+
+ // Releases the Closure without calling.
Closure Release() WARN_UNUSED_RESULT;
private:
diff --git a/base/callback_helpers_unittest.cc b/base/callback_helpers_unittest.cc
index 3b17a6b754..8283996379 100644
--- a/base/callback_helpers_unittest.cc
+++ b/base/callback_helpers_unittest.cc
@@ -14,7 +14,7 @@ void Increment(int* value) {
(*value)++;
}
-TEST(BindHelpersTest, TestScopedClosureRunnerExitScope) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
int run_count = 0;
{
base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
@@ -23,7 +23,7 @@ TEST(BindHelpersTest, TestScopedClosureRunnerExitScope) {
EXPECT_EQ(1, run_count);
}
-TEST(BindHelpersTest, TestScopedClosureRunnerRelease) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRelease) {
int run_count = 0;
base::Closure c;
{
@@ -36,26 +36,59 @@ TEST(BindHelpersTest, TestScopedClosureRunnerRelease) {
EXPECT_EQ(1, run_count);
}
-TEST(BindHelpersTest, TestScopedClosureRunnerReset) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
int run_count_1 = 0;
int run_count_2 = 0;
{
base::ScopedClosureRunner runner;
- runner.Reset(base::Bind(&Increment, &run_count_1));
- runner.Reset(base::Bind(&Increment, &run_count_2));
- EXPECT_EQ(1, run_count_1);
+ runner.ReplaceClosure(base::Bind(&Increment, &run_count_1));
+ runner.ReplaceClosure(base::Bind(&Increment, &run_count_2));
+ EXPECT_EQ(0, run_count_1);
EXPECT_EQ(0, run_count_2);
}
+ EXPECT_EQ(0, run_count_1);
EXPECT_EQ(1, run_count_2);
+}
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRunAndReset) {
int run_count_3 = 0;
{
base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_3));
EXPECT_EQ(0, run_count_3);
- runner.Reset();
+ runner.RunAndReset();
EXPECT_EQ(1, run_count_3);
}
EXPECT_EQ(1, run_count_3);
}
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveConstructor) {
+ int run_count = 0;
+ {
+ std::unique_ptr<base::ScopedClosureRunner> runner(
+ new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+ base::ScopedClosureRunner runner2(std::move(*runner));
+ runner.reset();
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveAssignment) {
+ int run_count_1 = 0;
+ int run_count_2 = 0;
+ {
+ base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_1));
+ {
+ base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count_2));
+ runner = std::move(runner2);
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(0, run_count_2);
+ }
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(0, run_count_2);
+ }
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(1, run_count_2);
+}
+
} // namespace
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 3e8e10f75c..0fe0b2d9e1 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -64,6 +64,7 @@ class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
// Returns true if Callback is null (doesn't refer to anything).
bool is_null() const { return bind_state_.get() == NULL; }
+ explicit operator bool() const { return !is_null(); }
// Returns the Callback into an uninitialized state.
void Reset();
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index 176ea0650a..ce453a1075 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -14,63 +14,38 @@
namespace base {
-namespace {
-
-struct FakeInvoker {
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef void RunType(internal::BindStateBase*);
- static void Run(internal::BindStateBase*) {
- }
-};
-
-} // namespace
-
-namespace internal {
+void NopInvokeFunc(internal::BindStateBase*) {}
// White-box testpoints to inject into a Callback<> object for checking
// comparators and emptiness APIs. Use a BindState that is specialized
// based on a type we declared in the anonymous namespace above to remove any
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
-template <>
-struct BindState<void(), void(), FakeInvoker>
- : public BindStateBase {
- public:
- BindState() : BindStateBase(&Destroy) {}
- using InvokerType = FakeInvoker;
+struct FakeBindState1 : internal::BindStateBase {
+ FakeBindState1() : BindStateBase(&Destroy) {}
private:
- ~BindState() {}
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
+ ~FakeBindState1() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState1*>(self);
}
};
-template <>
-struct BindState<void(), void(), FakeInvoker, FakeInvoker>
- : public BindStateBase {
- public:
- BindState() : BindStateBase(&Destroy) {}
- using InvokerType = FakeInvoker;
+struct FakeBindState2 : internal::BindStateBase {
+ FakeBindState2() : BindStateBase(&Destroy) {}
private:
- ~BindState() {}
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
+ ~FakeBindState2() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState2*>(self);
}
};
-} // namespace internal
namespace {
-using FakeBindState1 = internal::BindState<void(), void(), FakeInvoker>;
-using FakeBindState2 =
- internal::BindState<void(), void(), FakeInvoker, FakeInvoker>;
-
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1()),
- callback_b_(new FakeBindState2()) {
+ : callback_a_(new FakeBindState1(), &NopInvokeFunc),
+ callback_b_(new FakeBindState2(), &NopInvokeFunc) {
}
~CallbackTest() override {}
@@ -113,7 +88,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1());
+ Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
diff --git a/base/command_line.cc b/base/command_line.cc
index 8b45c36e79..099bb185a4 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -194,6 +194,17 @@ void CommandLine::set_slash_is_not_a_switch() {
DCHECK_EQ(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/"), 0);
switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
}
+
+// static
+void CommandLine::InitUsingArgvForTesting(int argc, const char* const* argv) {
+ DCHECK(!current_process_commandline_);
+ current_process_commandline_ = new CommandLine(NO_PROGRAM);
+ // On Windows we need to convert the command line arguments to string16.
+ base::CommandLine::StringVector argv_vector;
+ for (int i = 0; i < argc; ++i)
+ argv_vector.push_back(UTF8ToUTF16(argv[i]));
+ current_process_commandline_->InitFromArgv(argv_vector);
+}
#endif
// static
diff --git a/base/command_line.h b/base/command_line.h
index 3de8873e26..3d29f8fee7 100644
--- a/base/command_line.h
+++ b/base/command_line.h
@@ -33,15 +33,15 @@ class BASE_EXPORT CommandLine {
public:
#if defined(OS_WIN)
// The native command line string type.
- typedef base::string16 StringType;
+ using StringType = string16;
#elif defined(OS_POSIX)
- typedef std::string StringType;
+ using StringType = std::string;
#endif
- typedef StringType::value_type CharType;
- typedef std::vector<StringType> StringVector;
- typedef std::map<std::string, StringType> SwitchMap;
- typedef std::map<base::StringPiece, const StringType*> StringPieceSwitchMap;
+ using CharType = StringType::value_type;
+ using StringVector = std::vector<StringType>;
+ using SwitchMap = std::map<std::string, StringType>;
+ using StringPieceSwitchMap = std::map<StringPiece, const StringType*>;
// A constructor for CommandLines that only carry switches and arguments.
enum NoProgram { NO_PROGRAM };
@@ -69,6 +69,13 @@ class BASE_EXPORT CommandLine {
// object and the behavior will be the same as Posix systems (only hyphens
// begin switches, everything else will be an arg).
static void set_slash_is_not_a_switch();
+
+ // Normally when the CommandLine singleton is initialized it gets the command
+ // line via the GetCommandLineW API and then uses the shell32 API
+ // CommandLineToArgvW to parse the command line and convert it back to
+  // argc and argv. Tests that don't want this dependency on shell32 and need
+ // to honor the arguments passed in should use this function.
+ static void InitUsingArgvForTesting(int argc, const char* const* argv);
#endif
// Initialize the current process CommandLine singleton. On Windows, ignores
@@ -83,6 +90,7 @@ class BASE_EXPORT CommandLine {
// you want to reset the base library to its initial state (for example, in an
// outer library that needs to be able to terminate, and be re-initialized).
// If Init is called only once, as in main(), Reset() is not necessary.
+ // Do not call this in tests. Use base::test::ScopedCommandLine instead.
static void Reset();
// Get the singleton CommandLine representing the current process's
@@ -94,7 +102,7 @@ class BASE_EXPORT CommandLine {
static bool InitializedForCurrentProcess();
#if defined(OS_WIN)
- static CommandLine FromString(const base::string16& command_line);
+ static CommandLine FromString(const string16& command_line);
#endif
// Initialize from an argv vector.
@@ -152,15 +160,15 @@ class BASE_EXPORT CommandLine {
// The second override provides an optimized version to avoid inlining codegen
// at every callsite to find the length of the constant and construct a
// StringPiece.
- bool HasSwitch(const base::StringPiece& switch_string) const;
+ bool HasSwitch(const StringPiece& switch_string) const;
bool HasSwitch(const char switch_constant[]) const;
// Returns the value associated with the given switch. If the switch has no
// value or isn't present, this method returns the empty string.
// Switch names must be lowercase.
- std::string GetSwitchValueASCII(const base::StringPiece& switch_string) const;
- FilePath GetSwitchValuePath(const base::StringPiece& switch_string) const;
- StringType GetSwitchValueNative(const base::StringPiece& switch_string) const;
+ std::string GetSwitchValueASCII(const StringPiece& switch_string) const;
+ FilePath GetSwitchValuePath(const StringPiece& switch_string) const;
+ StringType GetSwitchValueNative(const StringPiece& switch_string) const;
// Get a copy of all switches, along with their values.
const SwitchMap& GetSwitches() const { return switches_; }
@@ -203,7 +211,7 @@ class BASE_EXPORT CommandLine {
#if defined(OS_WIN)
// Initialize by parsing the given command line string.
// The program name is assumed to be the first item in the string.
- void ParseFromString(const base::string16& command_line);
+ void ParseFromString(const string16& command_line);
#endif
private:
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 1c96a569d9..ac0ead76be 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -11,6 +11,12 @@
#include "base/macros.h"
+#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
+#include <pthread.h>
+#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
+#endif
+
namespace base {
namespace debug {
@@ -41,6 +47,44 @@ std::string StackTrace::ToString() const {
#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if defined(OS_ANDROID)
+
+static uintptr_t GetStackEnd() {
+ // Bionic reads proc/maps on every call to pthread_getattr_np() when called
+ // from the main thread. So we need to cache end of stack in that case to get
+ // acceptable performance.
+ // For all other threads pthread_getattr_np() is fast enough as it just reads
+ // values from its pthread_t argument.
+ static uintptr_t main_stack_end = 0;
+
+ bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+
+ if (is_main_thread && main_stack_end) {
+ return main_stack_end;
+ }
+
+ uintptr_t stack_begin = 0;
+ size_t stack_size = 0;
+ pthread_attr_t attributes;
+ int error = pthread_getattr_np(pthread_self(), &attributes);
+ if (!error) {
+ error = pthread_attr_getstack(
+ &attributes,
+ reinterpret_cast<void**>(&stack_begin),
+ &stack_size);
+ pthread_attr_destroy(&attributes);
+ }
+ DCHECK(!error);
+
+ uintptr_t stack_end = stack_begin + stack_size;
+ if (is_main_thread) {
+ main_stack_end = stack_end;
+ }
+ return stack_end;
+}
+
+#endif // defined(OS_ANDROID)
+
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial) {
@@ -49,6 +93,10 @@ size_t TraceStackFramePointers(const void** out_trace,
// be valid.
uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+#if defined(OS_ANDROID)
+ uintptr_t stack_end = GetStackEnd();
+#endif
+
size_t depth = 0;
while (depth < max_depth) {
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
@@ -58,6 +106,13 @@ size_t TraceStackFramePointers(const void** out_trace,
sp -= sizeof(uintptr_t);
#endif
+#if defined(OS_ANDROID)
+    // Both sp[0] and sp[1] must be valid.
+ if (sp + 2 * sizeof(uintptr_t) > stack_end) {
+ break;
+ }
+#endif
+
if (skip_initial != 0) {
skip_initial--;
} else {
diff --git a/base/environment.cc b/base/environment.cc
index 9eef42967b..534a7a8812 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -8,6 +8,7 @@
#include <vector>
+#include "base/memory/ptr_util.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
@@ -25,7 +26,7 @@ namespace {
class EnvironmentImpl : public Environment {
public:
- bool GetVar(const char* variable_name, std::string* result) override {
+ bool GetVar(StringPiece variable_name, std::string* result) override {
if (GetVarImpl(variable_name, result))
return true;
@@ -44,19 +45,19 @@ class EnvironmentImpl : public Environment {
return GetVarImpl(alternate_case_var.c_str(), result);
}
- bool SetVar(const char* variable_name,
+ bool SetVar(StringPiece variable_name,
const std::string& new_value) override {
return SetVarImpl(variable_name, new_value);
}
- bool UnSetVar(const char* variable_name) override {
+ bool UnSetVar(StringPiece variable_name) override {
return UnSetVarImpl(variable_name);
}
private:
- bool GetVarImpl(const char* variable_name, std::string* result) {
+ bool GetVarImpl(StringPiece variable_name, std::string* result) {
#if defined(OS_POSIX)
- const char* env_value = getenv(variable_name);
+ const char* env_value = getenv(variable_name.data());
if (!env_value)
return false;
// Note that the variable may be defined but empty.
@@ -64,8 +65,8 @@ class EnvironmentImpl : public Environment {
*result = env_value;
return true;
#elif defined(OS_WIN)
- DWORD value_length = ::GetEnvironmentVariable(
- UTF8ToWide(variable_name).c_str(), NULL, 0);
+ DWORD value_length =
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
if (value_length == 0)
return false;
if (result) {
@@ -80,10 +81,10 @@ class EnvironmentImpl : public Environment {
#endif
}
- bool SetVarImpl(const char* variable_name, const std::string& new_value) {
+ bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
#if defined(OS_POSIX)
// On success, zero is returned.
- return !setenv(variable_name, new_value.c_str(), 1);
+ return !setenv(variable_name.data(), new_value.c_str(), 1);
#elif defined(OS_WIN)
// On success, a nonzero value is returned.
return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
@@ -91,13 +92,13 @@ class EnvironmentImpl : public Environment {
#endif
}
- bool UnSetVarImpl(const char* variable_name) {
+ bool UnSetVarImpl(StringPiece variable_name) {
#if defined(OS_POSIX)
// On success, zero is returned.
- return !unsetenv(variable_name);
+ return !unsetenv(variable_name.data());
#elif defined(OS_WIN)
// On success, a nonzero value is returned.
- return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), NULL);
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
#endif
}
};
@@ -134,12 +135,12 @@ const char kHome[] = "HOME";
Environment::~Environment() {}
// static
-Environment* Environment::Create() {
- return new EnvironmentImpl();
+std::unique_ptr<Environment> Environment::Create() {
+ return MakeUnique<EnvironmentImpl>();
}
-bool Environment::HasVar(const char* variable_name) {
- return GetVar(variable_name, NULL);
+bool Environment::HasVar(StringPiece variable_name) {
+ return GetVar(variable_name, nullptr);
}
#if defined(OS_WIN)
diff --git a/base/environment.h b/base/environment.h
index 12eeaf7ebb..3a4ed04e4b 100644
--- a/base/environment.h
+++ b/base/environment.h
@@ -11,6 +11,7 @@
#include "base/base_export.h"
#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
#include "build/build_config.h"
namespace base {
@@ -27,23 +28,22 @@ class BASE_EXPORT Environment {
public:
virtual ~Environment();
- // Static factory method that returns the implementation that provide the
- // appropriate platform-specific instance.
- static Environment* Create();
+ // Returns the appropriate platform-specific instance.
+ static std::unique_ptr<Environment> Create();
// Gets an environment variable's value and stores it in |result|.
// Returns false if the key is unset.
- virtual bool GetVar(const char* variable_name, std::string* result) = 0;
+ virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
- // Syntactic sugar for GetVar(variable_name, NULL);
- virtual bool HasVar(const char* variable_name);
+ // Syntactic sugar for GetVar(variable_name, nullptr);
+ virtual bool HasVar(StringPiece variable_name);
// Returns true on success, otherwise returns false.
- virtual bool SetVar(const char* variable_name,
+ virtual bool SetVar(StringPiece variable_name,
const std::string& new_value) = 0;
// Returns true on success, otherwise returns false.
- virtual bool UnSetVar(const char* variable_name) = 0;
+ virtual bool UnSetVar(StringPiece variable_name) = 0;
};
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 46732108dd..435165e10c 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -23,6 +23,9 @@ namespace {
// have more control over initialization timing. Leaky.
FeatureList* g_instance = nullptr;
+// Tracks whether the FeatureList instance was initialized via an accessor.
+bool g_initialized_from_accessor = false;
+
// Some characters are not allowed to appear in feature names or the associated
// field trial names, as they are used as special characters for command-line
// serialization. This function checks that the strings are ASCII (since they
@@ -35,10 +38,7 @@ bool IsValidFeatureOrFieldTrialName(const std::string& name) {
} // namespace
-FeatureList::FeatureList()
- : initialized_(false),
- initialized_from_command_line_(false) {
-}
+FeatureList::FeatureList() {}
FeatureList::~FeatureList() {}
@@ -133,7 +133,11 @@ void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
// static
bool FeatureList::IsEnabled(const Feature& feature) {
- return GetInstance()->IsFeatureEnabled(feature);
+ if (!g_instance) {
+ g_initialized_from_accessor = true;
+ return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+ }
+ return g_instance->IsFeatureEnabled(feature);
}
// static
@@ -158,6 +162,10 @@ bool FeatureList::InitializeInstance(const std::string& enable_features,
// For example, we initialize an instance in chrome/browser/
// chrome_browser_main.cc and do not override it in content/browser/
// browser_main_loop.cc.
+ // If the singleton was previously initialized from within an accessor, we
+ // want to prevent callers from reinitializing the singleton and masking the
+ // accessor call(s) which likely returned incorrect information.
+ CHECK(!g_initialized_from_accessor);
bool instance_existed_before = false;
if (g_instance) {
if (g_instance->initialized_from_command_line_)
@@ -192,6 +200,7 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
void FeatureList::ClearInstanceForTesting() {
delete g_instance;
g_instance = nullptr;
+ g_initialized_from_accessor = false;
}
void FeatureList::FinalizeInitialization() {
diff --git a/base/feature_list.h b/base/feature_list.h
index 2a47427fb2..e9ed00a124 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -247,10 +247,10 @@ class BASE_EXPORT FeatureList {
// Whether this object has been fully initialized. This gets set to true as a
// result of FinalizeInitialization().
- bool initialized_;
+ bool initialized_ = false;
// Whether this object has been initialized from command line.
- bool initialized_from_command_line_;
+ bool initialized_from_command_line_ = false;
DISALLOW_COPY_AND_ASSIGN(FeatureList);
};
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index a7e7b71183..9d1dcb72f3 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -457,4 +457,15 @@ TEST_F(FeatureListTest, InitializeInstance) {
EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
}
+TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
+ ClearFeatureListInstance();
+ // This test case simulates the calling pattern found in code which does not
+ // explicitly initialize the features list.
+ // All IsEnabled() calls should return the default value in this scenario.
+ EXPECT_EQ(nullptr, FeatureList::GetInstance());
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+ EXPECT_EQ(nullptr, FeatureList::GetInstance());
+ EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
} // namespace base
diff --git a/base/files/file.h b/base/files/file.h
index 7ab5ca5859..ae2bd1b61b 100644
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -13,7 +13,7 @@
#include "base/files/file_path.h"
#include "base/files/file_tracing.h"
#include "base/files/scoped_file.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -29,10 +29,13 @@
namespace base {
#if defined(OS_WIN)
-typedef HANDLE PlatformFile;
+using PlatformFile = HANDLE;
+
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
#elif defined(OS_POSIX)
-typedef int PlatformFile;
+using PlatformFile = int;
+const PlatformFile kInvalidPlatformFile = -1;
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
typedef struct stat stat_wrapper_t;
#else
@@ -51,8 +54,6 @@ typedef struct stat64 stat_wrapper_t;
// to the OS is not considered const, even if there is no apparent change to
// member variables.
class BASE_EXPORT File {
- MOVE_ONLY_TYPE_FOR_CPP_03(File)
-
public:
// FLAG_(OPEN|CREATE).* are mutually exclusive. You should specify exactly one
// of the five (possibly combining with other flags) when opening or creating
@@ -331,6 +332,8 @@ class BASE_EXPORT File {
Error error_details_;
bool created_;
bool async_;
+
+ DISALLOW_COPY_AND_ASSIGN(File);
};
} // namespace base
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 4adfa279dc..29f12a80aa 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -539,7 +539,7 @@ bool FilePath::IsAbsolute() const {
bool FilePath::EndsWithSeparator() const {
if (empty())
return false;
- return IsSeparator(path_[path_.size() - 1]);
+ return IsSeparator(path_.back());
}
FilePath FilePath::AsEndingWithSeparator() const {
@@ -695,6 +695,10 @@ bool FilePath::ReadFromPickle(PickleIterator* iter) {
int FilePath::CompareIgnoreCase(StringPieceType string1,
StringPieceType string2) {
+ static decltype(::CharUpperW)* const char_upper_api =
+ reinterpret_cast<decltype(::CharUpperW)*>(
+ ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "CharUpperW"));
+ CHECK(char_upper_api);
// Perform character-wise upper case comparison rather than using the
// fully Unicode-aware CompareString(). For details see:
// http://blogs.msdn.com/michkap/archive/2005/10/17/481600.aspx
@@ -704,9 +708,9 @@ int FilePath::CompareIgnoreCase(StringPieceType string1,
StringPieceType::const_iterator string2end = string2.end();
for ( ; i1 != string1end && i2 != string2end; ++i1, ++i2) {
wchar_t c1 =
- (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
+ (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
wchar_t c2 =
- (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
+ (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
if (c1 < c2)
return -1;
if (c1 > c2)
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index 824e3d8ad6..e9d25080e7 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -4,16 +4,18 @@
#include "base/files/file_path_watcher_fsevents.h"
+#include <dispatch/dispatch.h>
+
#include <list>
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/mac/libdispatch_task_runner.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
namespace base {
@@ -23,19 +25,6 @@ namespace {
// The latency parameter passed to FSEventsStreamCreate().
const CFAbsoluteTime kEventLatencySeconds = 0.3;
-class FSEventsTaskRunner : public mac::LibDispatchTaskRunner {
- public:
- FSEventsTaskRunner()
- : mac::LibDispatchTaskRunner("org.chromium.FilePathWatcherFSEvents") {
- }
-
- protected:
- ~FSEventsTaskRunner() override {}
-};
-
-static LazyInstance<FSEventsTaskRunner>::Leaky g_task_runner =
- LAZY_INSTANCE_INITIALIZER;
-
// Resolve any symlinks in the path.
FilePath ResolvePath(const FilePath& path) {
const unsigned kMaxLinksToResolve = 255;
@@ -79,7 +68,12 @@ FilePath ResolvePath(const FilePath& path) {
} // namespace
-FilePathWatcherFSEvents::FilePathWatcherFSEvents() : fsevent_stream_(NULL) {
+FilePathWatcherFSEvents::FilePathWatcherFSEvents()
+ : queue_(dispatch_queue_create(
+ base::StringPrintf(
+ "org.chromium.base.FilePathWatcher.%p", this).c_str(),
+ DISPATCH_QUEUE_SERIAL)),
+ fsevent_stream_(nullptr) {
}
bool FilePathWatcherFSEvents::Watch(const FilePath& path,
@@ -98,9 +92,14 @@ bool FilePathWatcherFSEvents::Watch(const FilePath& path,
callback_ = callback;
FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
- g_task_runner.Get().PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::StartEventStream, this,
- start_event, path));
+ // The block runtime would implicitly capture the reference, not the object
+ // it's referencing. Copy the path into a local, so that the value is
+ // captured by the block's scope.
+ const FilePath path_copy(path);
+
+ dispatch_async(queue_, ^{
+ StartEventStream(start_event, path_copy);
+ });
return true;
}
@@ -108,10 +107,12 @@ void FilePathWatcherFSEvents::Cancel() {
set_cancelled();
callback_.Reset();
- // Switch to the dispatch queue thread to tear down the event stream.
- g_task_runner.Get().PostTask(
- FROM_HERE,
- Bind(&FilePathWatcherFSEvents::CancelOnMessageLoopThread, this));
+ // Switch to the dispatch queue to tear down the event stream. As the queue
+ // is owned by this object, and this method is called from the destructor,
+ // execute the block synchronously.
+ dispatch_sync(queue_, ^{
+ CancelOnMessageLoopThread();
+ });
}
// static
@@ -124,8 +125,6 @@ void FilePathWatcherFSEvents::FSEventsCallback(
const FSEventStreamEventId event_ids[]) {
FilePathWatcherFSEvents* watcher =
reinterpret_cast<FilePathWatcherFSEvents*>(event_watcher);
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
-
bool root_changed = watcher->ResolveTargetPath();
std::vector<FilePath> paths;
FSEventStreamEventId root_change_at = FSEventStreamGetLatestEventId(stream);
@@ -144,10 +143,9 @@ void FilePathWatcherFSEvents::FSEventsCallback(
if (root_changed) {
// Resetting the event stream from within the callback fails (FSEvents spews
// bad file descriptor errors), so post a task to do the reset.
- g_task_runner.Get().PostTask(
- FROM_HERE,
- Bind(&FilePathWatcherFSEvents::UpdateEventStream, watcher,
- root_change_at));
+ dispatch_async(watcher->queue_, ^{
+ watcher->UpdateEventStream(root_change_at);
+ });
}
watcher->OnFilePathsChanged(paths);
@@ -165,7 +163,6 @@ FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
void FilePathWatcherFSEvents::OnFilePathsChanged(
const std::vector<FilePath>& paths) {
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
DCHECK(!resolved_target_.empty());
task_runner()->PostTask(
FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
@@ -194,7 +191,6 @@ void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
// For all other implementations, the "message loop thread" is the IO thread,
// as returned by task_runner(). This implementation, however, needs to
// cancel pending work on the Dispatch Queue thread.
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
if (fsevent_stream_) {
DestroyEventStream();
@@ -205,8 +201,6 @@ void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
void FilePathWatcherFSEvents::UpdateEventStream(
FSEventStreamEventId start_event) {
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
-
// It can happen that the watcher gets canceled while tasks that call this
// function are still in flight, so abort if this situation is detected.
if (resolved_target_.empty())
@@ -237,8 +231,7 @@ void FilePathWatcherFSEvents::UpdateEventStream(
start_event,
kEventLatencySeconds,
kFSEventStreamCreateFlagWatchRoot);
- FSEventStreamSetDispatchQueue(fsevent_stream_,
- g_task_runner.Get().GetDispatchQueue());
+ FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
if (!FSEventStreamStart(fsevent_stream_)) {
task_runner()->PostTask(
@@ -247,7 +240,6 @@ void FilePathWatcherFSEvents::UpdateEventStream(
}
bool FilePathWatcherFSEvents::ResolveTargetPath() {
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
FilePath resolved = ResolvePath(target_).StripTrailingSeparators();
bool changed = resolved != resolved_target_;
resolved_target_ = resolved;
@@ -274,7 +266,6 @@ void FilePathWatcherFSEvents::DestroyEventStream() {
void FilePathWatcherFSEvents::StartEventStream(FSEventStreamEventId start_event,
const FilePath& path) {
- DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
DCHECK(resolved_target_.empty());
target_ = path;
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
index 1ebe4636e4..cfbe020b51 100644
--- a/base/files/file_path_watcher_fsevents.h
+++ b/base/files/file_path_watcher_fsevents.h
@@ -12,6 +12,7 @@
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
+#include "base/mac/scoped_dispatch_object.h"
#include "base/macros.h"
namespace base {
@@ -76,16 +77,19 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
// (Only accessed from the message_loop() thread.)
FilePathWatcher::Callback callback_;
+ // The dispatch queue on which the the event stream is scheduled.
+ ScopedDispatchObject<dispatch_queue_t> queue_;
+
// Target path to watch (passed to callback).
- // (Only accessed from the libdispatch thread.)
+ // (Only accessed from the libdispatch queue.)
FilePath target_;
// Target path with all symbolic links resolved.
- // (Only accessed from the libdispatch thread.)
+ // (Only accessed from the libdispatch queue.)
FilePath resolved_target_;
// Backend stream we receive event callbacks from (strong reference).
- // (Only accessed from the libdispatch thread.)
+ // (Only accessed from the libdispatch queue.)
FSEventStreamRef fsevent_stream_;
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
index ae293fe86a..87bddd3dea 100644
--- a/base/files/file_path_watcher_linux.cc
+++ b/base/files/file_path_watcher_linux.cc
@@ -677,7 +677,7 @@ bool FilePathWatcherImpl::HasValidWatchVector() const {
if (watches_[i].subdir.empty())
return false;
}
- return watches_[watches_.size() - 1].subdir.empty();
+ return watches_.back().subdir.empty();
}
} // namespace
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index c85a50a42c..a40e4858b4 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -196,7 +196,11 @@ class FilePathWatcherTest : public testing::Test {
bool WaitForEvents() WARN_UNUSED_RESULT {
collector_->Reset();
- loop_.Run();
+ // Make sure we timeout if we don't get notified.
+ loop_.PostDelayedTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure(),
+ TestTimeouts::action_timeout());
+ RunLoop().Run();
return collector_->Success();
}
@@ -215,7 +219,8 @@ bool FilePathWatcherTest::SetupWatch(const FilePath& target,
FilePathWatcher* watcher,
TestDelegateBase* delegate,
bool recursive_watch) {
- base::WaitableEvent completion(false, false);
+ base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
bool result;
file_thread_.task_runner()->PostTask(
FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
@@ -889,9 +894,9 @@ TEST_F(FilePathWatcherTest, DirAttributesChanged) {
// We should not get notified in this case as it hasn't affected our ability
// to access the file.
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
- loop_.PostDelayedTask(FROM_HERE,
- MessageLoop::QuitWhenIdleClosure(),
- TestTimeouts::tiny_timeout());
+ loop_.task_runner()->PostDelayedTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure(),
+ TestTimeouts::tiny_timeout());
ASSERT_FALSE(WaitForEvents());
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
diff --git a/base/files/file_util.h b/base/files/file_util.h
index 8fd9fffeb3..420dcaee61 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -37,6 +37,7 @@
namespace base {
+class Environment;
class Time;
//-----------------------------------------------------------------------------
@@ -199,6 +200,11 @@ BASE_EXPORT bool GetPosixFilePermissions(const FilePath& path, int* mode);
// the permission of a file which the symlink points to.
BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
+// Returns true iff |executable| can be found in any directory specified by the
+// environment variable in |env|.
+BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
+ const FilePath::StringType& executable);
+
#endif // OS_POSIX
// Returns true if the given directory is empty
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index 599759a1c6..85a1b41d46 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -22,6 +22,7 @@
#include <time.h>
#include <unistd.h>
+#include "base/environment.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/scoped_file.h"
@@ -30,6 +31,7 @@
#include "base/memory/singleton.h"
#include "base/posix/eintr_wrapper.h"
#include "base/stl_util.h"
+#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/sys_string_conversions.h"
@@ -455,6 +457,25 @@ bool SetPosixFilePermissions(const FilePath& path,
return true;
}
+bool ExecutableExistsInPath(Environment* env,
+ const FilePath::StringType& executable) {
+ std::string path;
+ if (!env->GetVar("PATH", &path)) {
+ LOG(ERROR) << "No $PATH variable. Assuming no " << executable << ".";
+ return false;
+ }
+
+ for (const StringPiece& cur_path :
+ SplitStringPiece(path, ":", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+ FilePath file(cur_path);
+ int permissions;
+ if (GetPosixFilePermissions(file.Append(executable), &permissions) &&
+ (permissions & FILE_PERMISSION_EXECUTE_BY_USER))
+ return true;
+ }
+ return false;
+}
+
#if !defined(OS_MACOSX)
// This is implemented in file_util_mac.mm for Mac.
bool GetTempDir(FilePath* path) {
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index ba1d4d3f93..43e051ebcf 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -157,7 +157,7 @@ TEST_F(ImportantFileWriterTest, ScheduleWrite) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_FALSE(writer.HasPendingWrite());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -173,7 +173,7 @@ TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_FALSE(writer.HasPendingWrite());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -190,7 +190,7 @@ TEST_F(ImportantFileWriterTest, BatchingWrites) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("baz", GetFileContent(writer.path()));
}
diff --git a/base/files/scoped_file.cc b/base/files/scoped_file.cc
index 8971280776..8ce45b8ba3 100644
--- a/base/files/scoped_file.cc
+++ b/base/files/scoped_file.cc
@@ -8,8 +8,10 @@
#include "build/build_config.h"
#if defined(OS_POSIX)
+#include <errno.h>
#include <unistd.h>
+#include "base/debug/alias.h"
#include "base/posix/eintr_wrapper.h"
#endif
@@ -27,7 +29,15 @@ void ScopedFDCloseTraits::Free(int fd) {
// Chrome relies on being able to "drop" such access.
// It's especially problematic on Linux with the setuid sandbox, where
// a single open directory would bypass the entire security model.
- PCHECK(0 == IGNORE_EINTR(close(fd)));
+ int ret = IGNORE_EINTR(close(fd));
+
+ // TODO(davidben): Remove this once it's been determined whether
+ // https://crbug.com/603354 is caused by EBADF or a network filesystem
+ // returning some other error.
+ int close_errno = errno;
+ base::debug::Alias(&close_errno);
+
+ PCHECK(0 == ret);
}
#endif // OS_POSIX
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index c1bcf4a927..d97eccc96c 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -192,9 +192,9 @@ class StackMarker {
JSONParser::JSONParser(int options)
: options_(options),
- start_pos_(NULL),
- pos_(NULL),
- end_pos_(NULL),
+ start_pos_(nullptr),
+ pos_(nullptr),
+ end_pos_(nullptr),
index_(0),
stack_depth_(0),
line_number_(0),
@@ -213,7 +213,7 @@ std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
// be used, so do not bother copying the input because StringPiece will not
// be used anywhere.
if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
- input_copy = WrapUnique(new std::string(input.as_string()));
+ input_copy = MakeUnique<std::string>(input.as_string());
start_pos_ = input_copy->data();
} else {
start_pos_ = input.data();
@@ -255,12 +255,14 @@ std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
// hidden root.
if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
if (root->IsType(Value::TYPE_DICTIONARY)) {
- return WrapUnique(new DictionaryHiddenRootValue(std::move(input_copy),
- std::move(root)));
- } else if (root->IsType(Value::TYPE_LIST)) {
- return WrapUnique(
- new ListHiddenRootValue(std::move(input_copy), std::move(root)));
- } else if (root->IsType(Value::TYPE_STRING)) {
+ return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
+ std::move(root));
+ }
+ if (root->IsType(Value::TYPE_LIST)) {
+ return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
+ std::move(root));
+ }
+ if (root->IsType(Value::TYPE_STRING)) {
// A string type could be a JSONStringValue, but because there's no
// corresponding HiddenRootValue, the memory will be lost. Deep copy to
// preserve it.
@@ -291,16 +293,12 @@ int JSONParser::error_column() const {
// StringBuilder ///////////////////////////////////////////////////////////////
-JSONParser::StringBuilder::StringBuilder()
- : pos_(NULL),
- length_(0),
- string_(NULL) {
-}
+JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
JSONParser::StringBuilder::StringBuilder(const char* pos)
: pos_(pos),
length_(0),
- string_(NULL) {
+ string_(nullptr) {
}
void JSONParser::StringBuilder::Swap(StringBuilder* other) {
@@ -489,20 +487,20 @@ Value* JSONParser::ParseToken(Token token) {
return ConsumeLiteral();
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return NULL;
+ return nullptr;
}
}
Value* JSONParser::ConsumeDictionary() {
if (*pos_ != '{') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return NULL;
+ return nullptr;
}
StackMarker depth_check(&stack_depth_);
if (depth_check.IsTooDeep()) {
ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
- return NULL;
+ return nullptr;
}
std::unique_ptr<DictionaryValue> dict(new DictionaryValue);
@@ -512,13 +510,13 @@ Value* JSONParser::ConsumeDictionary() {
while (token != T_OBJECT_END) {
if (token != T_STRING) {
ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1);
- return NULL;
+ return nullptr;
}
// First consume the key.
StringBuilder key;
if (!ConsumeStringRaw(&key)) {
- return NULL;
+ return nullptr;
}
// Read the separator.
@@ -526,7 +524,7 @@ Value* JSONParser::ConsumeDictionary() {
token = GetNextToken();
if (token != T_OBJECT_PAIR_SEPARATOR) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
// The next token is the value. Ownership transfers to |dict|.
@@ -534,7 +532,7 @@ Value* JSONParser::ConsumeDictionary() {
Value* value = ParseNextToken();
if (!value) {
// ReportError from deeper level.
- return NULL;
+ return nullptr;
}
dict->SetWithoutPathExpansion(key.AsString(), value);
@@ -546,11 +544,11 @@ Value* JSONParser::ConsumeDictionary() {
token = GetNextToken();
if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
- return NULL;
+ return nullptr;
}
} else if (token != T_OBJECT_END) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
- return NULL;
+ return nullptr;
}
}
@@ -560,13 +558,13 @@ Value* JSONParser::ConsumeDictionary() {
Value* JSONParser::ConsumeList() {
if (*pos_ != '[') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return NULL;
+ return nullptr;
}
StackMarker depth_check(&stack_depth_);
if (depth_check.IsTooDeep()) {
ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
- return NULL;
+ return nullptr;
}
std::unique_ptr<ListValue> list(new ListValue);
@@ -577,7 +575,7 @@ Value* JSONParser::ConsumeList() {
Value* item = ParseToken(token);
if (!item) {
// ReportError from deeper level.
- return NULL;
+ return nullptr;
}
list->Append(item);
@@ -589,11 +587,11 @@ Value* JSONParser::ConsumeList() {
token = GetNextToken();
if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
- return NULL;
+ return nullptr;
}
} else if (token != T_ARRAY_END) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
}
@@ -603,17 +601,16 @@ Value* JSONParser::ConsumeList() {
Value* JSONParser::ConsumeString() {
StringBuilder string;
if (!ConsumeStringRaw(&string))
- return NULL;
+ return nullptr;
// Create the Value representation, using a hidden root, if configured
// to do so, and if the string can be represented by StringPiece.
- if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN)) {
+ if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN))
return new JSONStringValue(string.AsStringPiece());
- } else {
- if (string.CanBeStringPiece())
- string.Convert();
- return new StringValue(string.AsString());
- }
+
+ if (string.CanBeStringPiece())
+ string.Convert();
+ return new StringValue(string.AsString());
}
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -638,11 +635,23 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
return false;
}
- // If this character is an escape sequence...
- if (next_char == '\\') {
- // The input string will be adjusted (either by combining the two
- // characters of an encoded escape sequence, or with a UTF conversion),
- // so using StringPiece isn't possible -- force a conversion.
+ if (next_char == '"') {
+ --index_; // Rewind by one because of CBU8_NEXT.
+ out->Swap(&string);
+ return true;
+ }
+
+ // If this character is not an escape sequence...
+ if (next_char != '\\') {
+ if (next_char < kExtendedASCIIStart)
+ string.Append(static_cast<char>(next_char));
+ else
+ DecodeUTF8(next_char, &string);
+ } else {
+ // And if it is an escape sequence, the input string will be adjusted
+ // (either by combining the two characters of an encoded escape sequence,
+ // or with a UTF conversion), so using StringPiece isn't possible -- force
+ // a conversion.
string.Convert();
if (!CanConsume(1)) {
@@ -724,15 +733,6 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
return false;
}
- } else if (next_char == '"') {
- --index_; // Rewind by one because of CBU8_NEXT.
- out->Swap(&string);
- return true;
- } else {
- if (next_char < kExtendedASCIIStart)
- string.Append(static_cast<char>(next_char));
- else
- DecodeUTF8(next_char, &string);
}
}
@@ -837,7 +837,7 @@ Value* JSONParser::ConsumeNumber() {
if (!ReadInt(false)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
end_index = index_;
@@ -845,12 +845,12 @@ Value* JSONParser::ConsumeNumber() {
if (*pos_ == '.') {
if (!CanConsume(1)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
NextChar();
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
end_index = index_;
}
@@ -862,7 +862,7 @@ Value* JSONParser::ConsumeNumber() {
NextChar();
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
end_index = index_;
}
@@ -882,7 +882,7 @@ Value* JSONParser::ConsumeNumber() {
break;
default:
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
pos_ = exit_pos;
@@ -900,7 +900,7 @@ Value* JSONParser::ConsumeNumber() {
return new FundamentalValue(num_double);
}
- return NULL;
+ return nullptr;
}
bool JSONParser::ReadInt(bool allow_leading_zeros) {
@@ -930,7 +930,7 @@ Value* JSONParser::ConsumeLiteral() {
if (!CanConsume(kTrueLen - 1) ||
!StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
NextNChars(kTrueLen - 1);
return new FundamentalValue(true);
@@ -941,7 +941,7 @@ Value* JSONParser::ConsumeLiteral() {
if (!CanConsume(kFalseLen - 1) ||
!StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
NextNChars(kFalseLen - 1);
return new FundamentalValue(false);
@@ -952,14 +952,14 @@ Value* JSONParser::ConsumeLiteral() {
if (!CanConsume(kNullLen - 1) ||
!StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return NULL;
+ return nullptr;
}
NextNChars(kNullLen - 1);
return Value::CreateNullValue().release();
}
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return NULL;
+ return nullptr;
}
}
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
index 5bdec588e0..7539fa99ca 100644
--- a/base/json/json_parser.h
+++ b/base/json/json_parser.h
@@ -50,7 +50,9 @@ class BASE_EXPORT JSONParser {
~JSONParser();
// Parses the input string according to the set options and returns the
- // result as a Value owned by the caller.
+ // result as a Value.
+ // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+ // convert to a FooValue at the same time.
std::unique_ptr<Value> Parse(StringPiece input);
// Returns the error code.
@@ -219,7 +221,7 @@ class BASE_EXPORT JSONParser {
const std::string& description);
// base::JSONParserOptions that control parsing.
- int options_;
+ const int options_;
// Pointer to the start of the input data.
const char* start_pos_;
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index f647724f94..a954821a28 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -91,17 +91,17 @@ class BASE_EXPORT JSONReader {
~JSONReader();
- // Reads and parses |json|, returning a Value. The caller owns the returned
- // instance. If |json| is not a properly formed JSON string, returns NULL.
+ // Reads and parses |json|, returning a Value.
+ // If |json| is not a properly formed JSON string, returns nullptr.
+ // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+ // convert to a FooValue at the same time.
static std::unique_ptr<Value> Read(StringPiece json);
- // Reads and parses |json|, returning a Value owned by the caller. The
- // parser respects the given |options|. If the input is not properly formed,
- // returns NULL.
+ // Same as Read() above, but the parser respects the given |options|.
static std::unique_ptr<Value> Read(StringPiece json, int options);
// Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
- // are optional. If specified and NULL is returned, they will be populated
+ // are optional. If specified and nullptr is returned, they will be populated
// an error code and a formatted error message (including error location if
// appropriate). Otherwise, they will be unmodified.
static std::unique_ptr<Value> ReadAndReturnError(
@@ -116,7 +116,7 @@ class BASE_EXPORT JSONReader {
// Returns an empty string if error_code is JSON_NO_ERROR.
static std::string ErrorCodeToString(JsonParseError error_code);
- // Parses an input string into a Value that is owned by the caller.
+ // Non-static version of Read() above.
std::unique_ptr<Value> ReadToValue(StringPiece json);
// Returns the error code if the last call to ReadToValue() failed.
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index c54dafa6ca..84732c4d75 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <memory>
+
#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
#include "base/base_paths.h"
#include "base/path_service.h"
@@ -23,525 +25,549 @@
namespace base {
TEST(JSONReaderTest, Reading) {
- // some whitespace checking
- std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
-
- // Invalid JSON string
- root = JSONReader().ReadToValue("nu");
- EXPECT_FALSE(root.get());
-
- // Simple bool
- root = JSONReader().ReadToValue("true ");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
-
- // Embedded comment
- root = JSONReader().ReadToValue("/* comment */null");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
- root = JSONReader().ReadToValue("40 /* comment */");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
- root = JSONReader().ReadToValue("true // comment");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
- root = JSONReader().ReadToValue("/* comment */\"sample string\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- std::string value;
- EXPECT_TRUE(root->GetAsString(&value));
- EXPECT_EQ("sample string", value);
- root = JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]");
- ASSERT_TRUE(root.get());
- ListValue* list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(2u, list->GetSize());
- int int_val = 0;
- EXPECT_TRUE(list->GetInteger(0, &int_val));
- EXPECT_EQ(1, int_val);
- EXPECT_TRUE(list->GetInteger(1, &int_val));
- EXPECT_EQ(3, int_val);
- root = JSONReader().ReadToValue("[1, /*a*/2, 3]");
- ASSERT_TRUE(root.get());
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(3u, list->GetSize());
- root = JSONReader().ReadToValue("/* comment **/42");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(42, int_val);
- root = JSONReader().ReadToValue(
- "/* comment **/\n"
- "// */ 43\n"
- "44");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(44, int_val);
-
- // Test number formats
- root = JSONReader().ReadToValue("43");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(43, int_val);
-
- // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
- root = JSONReader().ReadToValue("043");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("0x43");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("00");
- EXPECT_FALSE(root.get());
-
- // Test 0 (which needs to be special cased because of the leading zero
- // clause).
- root = JSONReader().ReadToValue("0");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
- int_val = 1;
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(0, int_val);
-
- // Numbers that overflow ints should succeed, being internally promoted to
- // storage as doubles
- root = JSONReader().ReadToValue("2147483648");
- ASSERT_TRUE(root.get());
- double double_val;
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(2147483648.0, double_val);
- root = JSONReader().ReadToValue("-2147483649");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
-
- // Parse a double
- root = JSONReader().ReadToValue("43.1");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(43.1, double_val);
-
- root = JSONReader().ReadToValue("4.3e-1");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(.43, double_val);
-
- root = JSONReader().ReadToValue("2.1e0");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(2.1, double_val);
-
- root = JSONReader().ReadToValue("2.1e+0001");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(21.0, double_val);
-
- root = JSONReader().ReadToValue("0.01");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(0.01, double_val);
-
- root = JSONReader().ReadToValue("1.00");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(1.0, double_val);
-
- // Fractional parts must have a digit before and after the decimal point.
- root = JSONReader().ReadToValue("1.");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue(".1");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("1.e10");
- EXPECT_FALSE(root.get());
-
- // Exponent must have a digit following the 'e'.
- root = JSONReader().ReadToValue("1e");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("1E");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("1e1.");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("1e1.0");
- EXPECT_FALSE(root.get());
-
- // INF/-INF/NaN are not valid
- root = JSONReader().ReadToValue("1e1000");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("-1e1000");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("NaN");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("nan");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("inf");
- EXPECT_FALSE(root.get());
-
- // Invalid number formats
- root = JSONReader().ReadToValue("4.3.1");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("4e3.1");
- EXPECT_FALSE(root.get());
-
- // Test string parser
- root = JSONReader().ReadToValue("\"hello world\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("hello world", str_val);
-
- // Empty string
- root = JSONReader().ReadToValue("\"\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("", str_val);
-
- // Test basic string escapes
- root = JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
-
- // Test hex and unicode escapes including the null character.
- root = JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
-
- // Test invalid strings
- root = JSONReader().ReadToValue("\"no closing quote");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"\\z invalid escape char\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"\\xAQ invalid hex code\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("not enough hex chars\\x1\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"not enough escape chars\\u123\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"extra backslash at end of input\\\"");
- EXPECT_FALSE(root.get());
-
- // Basic array
- root = JSONReader::Read("[true, false, null]");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(3U, list->GetSize());
-
- // Test with trailing comma. Should be parsed the same as above.
- std::unique_ptr<Value> root2 =
- JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(root->Equals(root2.get()));
-
- // Empty array
- root = JSONReader::Read("[]");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(0U, list->GetSize());
-
- // Nested arrays
- root = JSONReader::Read("[[true], [], [false, [], [null]], null]");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(4U, list->GetSize());
-
- // Lots of trailing commas.
- root2 = JSONReader::Read("[[true], [], [false, [], [null, ] , ], null,]",
- JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(root->Equals(root2.get()));
-
- // Invalid, missing close brace.
- root = JSONReader::Read("[[true], [], [false, [], [null]], null");
- EXPECT_FALSE(root.get());
-
- // Invalid, too many commas
- root = JSONReader::Read("[true,, null]");
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
-
- // Invalid, no commas
- root = JSONReader::Read("[true null]");
- EXPECT_FALSE(root.get());
-
- // Invalid, trailing comma
- root = JSONReader::Read("[true,]");
- EXPECT_FALSE(root.get());
-
- // Valid if we set |allow_trailing_comma| to true.
- root = JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(1U, list->GetSize());
- Value* tmp_value = NULL;
- ASSERT_TRUE(list->Get(0, &tmp_value));
- EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
- bool bool_value = false;
- EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
- EXPECT_TRUE(bool_value);
-
- // Don't allow empty elements, even if |allow_trailing_comma| is
- // true.
- root = JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
-
- // Test objects
- root = JSONReader::Read("{}");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+ {
+ // some whitespace checking
+ std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ }
- root = JSONReader::Read(
- "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
- DictionaryValue* dict_val = static_cast<DictionaryValue*>(root.get());
- double_val = 0.0;
- EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
- EXPECT_DOUBLE_EQ(9.87654321, double_val);
- Value* null_val = NULL;
- ASSERT_TRUE(dict_val->Get("null", &null_val));
- EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
- str_val.clear();
- EXPECT_TRUE(dict_val->GetString("S", &str_val));
- EXPECT_EQ("str", str_val);
-
- root2 = JSONReader::Read(
- "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2.get());
- EXPECT_TRUE(root->Equals(root2.get()));
-
- // Test newline equivalence.
- root2 = JSONReader::Read(
- "{\n"
- " \"number\":9.87654321,\n"
- " \"null\":null,\n"
- " \"\\x53\":\"str\",\n"
- "}\n",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2.get());
- EXPECT_TRUE(root->Equals(root2.get()));
-
- root2 = JSONReader::Read(
- "{\r\n"
- " \"number\":9.87654321,\r\n"
- " \"null\":null,\r\n"
- " \"\\x53\":\"str\",\r\n"
- "}\r\n",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2.get());
- EXPECT_TRUE(root->Equals(root2.get()));
-
- // Test nesting
- root = JSONReader::Read(
- "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
- dict_val = static_cast<DictionaryValue*>(root.get());
- DictionaryValue* inner_dict = NULL;
- ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
- ListValue* inner_array = NULL;
- ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
- EXPECT_EQ(1U, inner_array->GetSize());
- bool_value = true;
- EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
- EXPECT_FALSE(bool_value);
- inner_dict = NULL;
- EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
-
- root2 = JSONReader::Read(
- "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
- JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(root->Equals(root2.get()));
-
- // Test keys with periods
- root = JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
- dict_val = static_cast<DictionaryValue*>(root.get());
- int integer_value = 0;
- EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
- EXPECT_EQ(3, integer_value);
- EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
- EXPECT_EQ(2, integer_value);
- inner_dict = NULL;
- ASSERT_TRUE(dict_val->GetDictionaryWithoutPathExpansion("d.e.f",
- &inner_dict));
- EXPECT_EQ(1U, inner_dict->size());
- EXPECT_TRUE(inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j",
- &integer_value));
- EXPECT_EQ(1, integer_value);
-
- root = JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
- dict_val = static_cast<DictionaryValue*>(root.get());
- EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
- EXPECT_EQ(2, integer_value);
- EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
- EXPECT_EQ(1, integer_value);
-
- // Invalid, no closing brace
- root = JSONReader::Read("{\"a\": true");
- EXPECT_FALSE(root.get());
-
- // Invalid, keys must be quoted
- root = JSONReader::Read("{foo:true}");
- EXPECT_FALSE(root.get());
-
- // Invalid, trailing comma
- root = JSONReader::Read("{\"a\":true,}");
- EXPECT_FALSE(root.get());
-
- // Invalid, too many commas
- root = JSONReader::Read("{\"a\":true,,\"b\":false}");
- EXPECT_FALSE(root.get());
- root =
- JSONReader::Read("{\"a\":true,,\"b\":false}", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
-
- // Invalid, no separator
- root = JSONReader::Read("{\"a\" \"b\"}");
- EXPECT_FALSE(root.get());
-
- // Invalid, lone comma.
- root = JSONReader::Read("{,}");
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root = JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
- root =
- JSONReader::Read("{\"a\":true,,\"b\":false}", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_FALSE(root.get());
-
- // Test stack overflow
- std::string evil(1000000, '[');
- evil.append(std::string(1000000, ']'));
- root = JSONReader::Read(evil);
- EXPECT_FALSE(root.get());
-
- // A few thousand adjacent lists is fine.
- std::string not_evil("[");
- not_evil.reserve(15010);
- for (int i = 0; i < 5000; ++i) {
- not_evil.append("[],");
- }
- not_evil.append("[]]");
- root = JSONReader::Read(not_evil);
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
- list = static_cast<ListValue*>(root.get());
- EXPECT_EQ(5001U, list->GetSize());
-
- // Test utf8 encoded input
- root = JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
-
- root = JSONReader().ReadToValue(
- "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
- EXPECT_TRUE(root->GetAsDictionary(&dict_val));
- EXPECT_TRUE(dict_val->GetString("path", &str_val));
- EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
-
- // Test invalid utf8 encoded input
- root = JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"123\xc0\x81\"");
- EXPECT_FALSE(root.get());
- root = JSONReader().ReadToValue("\"abc\xc0\xae\"");
- EXPECT_FALSE(root.get());
-
- // Test utf16 encoded strings.
- root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("\xe2\x82\xac""3,14", str_val);
-
- root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
-
- // Test invalid utf16 strings.
- const char* const cases[] = {
- "\"\\u123\"", // Invalid scalar.
- "\"\\ud83d\"", // Invalid scalar.
- "\"\\u$%@!\"", // Invalid scalar.
- "\"\\uzz89\"", // Invalid scalar.
- "\"\\ud83d\\udca\"", // Invalid lower surrogate.
- "\"\\ud83d\\ud83d\"", // Invalid lower surrogate.
- "\"\\ud83foo\"", // No lower surrogate.
- "\"\\ud83\\foo\"" // No lower surrogate.
- };
- for (size_t i = 0; i < arraysize(cases); ++i) {
- root = JSONReader().ReadToValue(cases[i]);
- EXPECT_FALSE(root.get()) << cases[i];
+ {
+ // Invalid JSON string
+ EXPECT_FALSE(JSONReader().ReadToValue("nu"));
+ }
+
+ {
+ // Simple bool
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
}
- // Test literal root objects.
- root = JSONReader::Read("null");
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ {
+ // Embedded comment
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ root = JSONReader().ReadToValue("40 /* comment */");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ root = JSONReader().ReadToValue("true // comment");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+ root = JSONReader().ReadToValue("/* comment */\"sample string\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string value;
+ EXPECT_TRUE(root->GetAsString(&value));
+ EXPECT_EQ("sample string", value);
+ std::unique_ptr<ListValue> list = ListValue::From(
+ JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(2u, list->GetSize());
+ int int_val = 0;
+ EXPECT_TRUE(list->GetInteger(0, &int_val));
+ EXPECT_EQ(1, int_val);
+ EXPECT_TRUE(list->GetInteger(1, &int_val));
+ EXPECT_EQ(3, int_val);
+ list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(3u, list->GetSize());
+ root = JSONReader().ReadToValue("/* comment **/42");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(42, int_val);
+ root = JSONReader().ReadToValue(
+ "/* comment **/\n"
+ "// */ 43\n"
+ "44");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(44, int_val);
+ }
+
+ {
+ // Test number formats
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ int int_val = 0;
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(43, int_val);
+ }
+
+ {
+ // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
+ EXPECT_FALSE(JSONReader().ReadToValue("043"));
+ EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
+ EXPECT_FALSE(JSONReader().ReadToValue("00"));
+ }
- root = JSONReader::Read("true");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->GetAsBoolean(&bool_value));
- EXPECT_TRUE(bool_value);
+ {
+ // Test 0 (which needs to be special cased because of the leading zero
+ // clause).
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ int int_val = 1;
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(0, int_val);
+ }
- root = JSONReader::Read("10");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->GetAsInteger(&integer_value));
- EXPECT_EQ(10, integer_value);
+ {
+ // Numbers that overflow ints should succeed, being internally promoted to
+ // storage as doubles
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
+ ASSERT_TRUE(root);
+ double double_val;
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(2147483648.0, double_val);
+ root = JSONReader().ReadToValue("-2147483649");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
+ }
- root = JSONReader::Read("\"root\"");
- ASSERT_TRUE(root.get());
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("root", str_val);
+ {
+ // Parse a double
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(43.1, double_val);
+
+ root = JSONReader().ReadToValue("4.3e-1");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(.43, double_val);
+
+ root = JSONReader().ReadToValue("2.1e0");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(2.1, double_val);
+
+ root = JSONReader().ReadToValue("2.1e+0001");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(21.0, double_val);
+
+ root = JSONReader().ReadToValue("0.01");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(0.01, double_val);
+
+ root = JSONReader().ReadToValue("1.00");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(1.0, double_val);
+ }
+
+ {
+ // Fractional parts must have a digit before and after the decimal point.
+ EXPECT_FALSE(JSONReader().ReadToValue("1."));
+ EXPECT_FALSE(JSONReader().ReadToValue(".1"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
+ }
+
+ {
+ // Exponent must have a digit following the 'e'.
+ EXPECT_FALSE(JSONReader().ReadToValue("1e"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1E"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
+ }
+
+ {
+ // INF/-INF/NaN are not valid
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
+ EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
+ EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
+ EXPECT_FALSE(JSONReader().ReadToValue("nan"));
+ EXPECT_FALSE(JSONReader().ReadToValue("inf"));
+ }
+
+ {
+ // Invalid number formats
+ EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
+ EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
+ }
+
+ {
+ // Test string parser
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("hello world", str_val);
+ }
+
+ {
+ // Empty string
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("", str_val);
+ }
+
+ {
+ // Test basic string escapes
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
+ }
+
+ {
+ // Test hex and unicode escapes including the null character.
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
+ }
+
+ {
+ // Test invalid strings
+ EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
+ EXPECT_FALSE(
+ JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
+ }
+
+ {
+ // Basic array
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader::Read("[true, false, null]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(3U, list->GetSize());
+
+ // Test with trailing comma. Should be parsed the same as above.
+ std::unique_ptr<Value> root2 =
+ JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(list->Equals(root2.get()));
+ }
+
+ {
+ // Empty array
+ std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(0U, list->GetSize());
+ }
+
+ {
+ // Nested arrays
+ std::unique_ptr<ListValue> list = ListValue::From(
+ JSONReader::Read("[[true], [], [false, [], [null]], null]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(4U, list->GetSize());
+
+ // Lots of trailing commas.
+ std::unique_ptr<Value> root2 =
+ JSONReader::Read("[[true], [], [false, [], [null, ] , ], null,]",
+ JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(list->Equals(root2.get()));
+ }
+
+ {
+ // Invalid, missing close brace.
+ EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
+
+ // Invalid, too many commas
+ EXPECT_FALSE(JSONReader::Read("[true,, null]"));
+ EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
+
+ // Invalid, no commas
+ EXPECT_FALSE(JSONReader::Read("[true null]"));
+
+ // Invalid, trailing comma
+ EXPECT_FALSE(JSONReader::Read("[true,]"));
+ }
+
+ {
+ // Valid if we set |allow_trailing_comma| to true.
+ std::unique_ptr<ListValue> list = ListValue::From(
+ JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(1U, list->GetSize());
+ Value* tmp_value = nullptr;
+ ASSERT_TRUE(list->Get(0, &tmp_value));
+ EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
+ bool bool_value = false;
+ EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
+ EXPECT_TRUE(bool_value);
+ }
+
+ {
+ // Don't allow empty elements, even if |allow_trailing_comma| is
+ // true.
+ EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
+ }
+
+ {
+ // Test objects
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader::Read("{}"));
+ ASSERT_TRUE(dict_val);
+
+ dict_val = DictionaryValue::From(JSONReader::Read(
+ "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
+ ASSERT_TRUE(dict_val);
+ double double_val = 0.0;
+ EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
+ EXPECT_DOUBLE_EQ(9.87654321, double_val);
+ Value* null_val = nullptr;
+ ASSERT_TRUE(dict_val->Get("null", &null_val));
+ EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
+ std::string str_val;
+ EXPECT_TRUE(dict_val->GetString("S", &str_val));
+ EXPECT_EQ("str", str_val);
+
+ std::unique_ptr<Value> root2 = JSONReader::Read(
+ "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+ // Test newline equivalence.
+ root2 = JSONReader::Read(
+ "{\n"
+ " \"number\":9.87654321,\n"
+ " \"null\":null,\n"
+ " \"\\x53\":\"str\",\n"
+ "}\n",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+ root2 = JSONReader::Read(
+ "{\r\n"
+ " \"number\":9.87654321,\r\n"
+ " \"null\":null,\r\n"
+ " \"\\x53\":\"str\",\r\n"
+ "}\r\n",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+ }
+
+ {
+ // Test nesting
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader::Read(
+ "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
+ ASSERT_TRUE(dict_val);
+ DictionaryValue* inner_dict = nullptr;
+ ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
+ ListValue* inner_array = nullptr;
+ ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
+ EXPECT_EQ(1U, inner_array->GetSize());
+ bool bool_value = true;
+ EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
+ EXPECT_FALSE(bool_value);
+ inner_dict = nullptr;
+ EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
+
+ std::unique_ptr<Value> root2 = JSONReader::Read(
+ "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
+ JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+ }
+
+ {
+ // Test keys with periods
+ std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
+ JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
+ ASSERT_TRUE(dict_val);
+ int integer_value = 0;
+ EXPECT_TRUE(
+ dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+ EXPECT_EQ(3, integer_value);
+ EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
+ EXPECT_EQ(2, integer_value);
+ DictionaryValue* inner_dict = nullptr;
+ ASSERT_TRUE(
+ dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
+ EXPECT_EQ(1U, inner_dict->size());
+ EXPECT_TRUE(
+ inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
+ EXPECT_EQ(1, integer_value);
+
+ dict_val =
+ DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
+ ASSERT_TRUE(dict_val);
+ EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
+ EXPECT_EQ(2, integer_value);
+ EXPECT_TRUE(
+ dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+ EXPECT_EQ(1, integer_value);
+ }
+
+ {
+ // Invalid, no closing brace
+ EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
+
+ // Invalid, keys must be quoted
+ EXPECT_FALSE(JSONReader::Read("{foo:true}"));
+
+ // Invalid, trailing comma
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
+
+ // Invalid, too many commas
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+ JSON_ALLOW_TRAILING_COMMAS));
+
+ // Invalid, no separator
+ EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
+
+ // Invalid, lone comma.
+ EXPECT_FALSE(JSONReader::Read("{,}"));
+ EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(
+ JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+ JSON_ALLOW_TRAILING_COMMAS));
+ }
+
+ {
+ // Test stack overflow
+ std::string evil(1000000, '[');
+ evil.append(std::string(1000000, ']'));
+ EXPECT_FALSE(JSONReader::Read(evil));
+ }
+
+ {
+ // A few thousand adjacent lists is fine.
+ std::string not_evil("[");
+ not_evil.reserve(15010);
+ for (int i = 0; i < 5000; ++i)
+ not_evil.append("[],");
+ not_evil.append("[]]");
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader::Read(not_evil));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(5001U, list->GetSize());
+ }
+
+ {
+ // Test utf8 encoded input
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
+
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader().ReadToValue(
+ "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
+ ASSERT_TRUE(dict_val);
+ EXPECT_TRUE(dict_val->GetString("path", &str_val));
+ EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
+ }
+
+ {
+ // Test invalid utf8 encoded input
+ EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
+ }
+
+ {
+ // Test utf16 encoded strings.
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(
+ "\xe2\x82\xac"
+ "3,14",
+ str_val);
+
+ root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ str_val.clear();
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
+ }
+
+ {
+ // Test invalid utf16 strings.
+ const char* const cases[] = {
+ "\"\\u123\"", // Invalid scalar.
+ "\"\\ud83d\"", // Invalid scalar.
+ "\"\\u$%@!\"", // Invalid scalar.
+ "\"\\uzz89\"", // Invalid scalar.
+ "\"\\ud83d\\udca\"", // Invalid lower surrogate.
+ "\"\\ud83d\\ud83d\"", // Invalid lower surrogate.
+ "\"\\ud83foo\"", // No lower surrogate.
+ "\"\\ud83\\foo\"" // No lower surrogate.
+ };
+ std::unique_ptr<Value> root;
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ root = JSONReader().ReadToValue(cases[i]);
+ EXPECT_FALSE(root) << cases[i];
+ }
+ }
+
+ {
+ // Test literal root objects.
+ std::unique_ptr<Value> root = JSONReader::Read("null");
+ EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+
+ root = JSONReader::Read("true");
+ ASSERT_TRUE(root);
+ bool bool_value;
+ EXPECT_TRUE(root->GetAsBoolean(&bool_value));
+ EXPECT_TRUE(bool_value);
+
+ root = JSONReader::Read("10");
+ ASSERT_TRUE(root);
+ int integer_value;
+ EXPECT_TRUE(root->GetAsInteger(&integer_value));
+ EXPECT_EQ(10, integer_value);
+
+ root = JSONReader::Read("\"root\"");
+ ASSERT_TRUE(root);
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("root", str_val);
+ }
}
#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
@@ -552,12 +578,11 @@ TEST(JSONReaderTest, ReadFromFile) {
ASSERT_TRUE(base::PathExists(path));
std::string input;
- ASSERT_TRUE(ReadFileToString(
- path.Append(FILE_PATH_LITERAL("bom_feff.json")), &input));
+ ASSERT_TRUE(ReadFileToString(path.AppendASCII("bom_feff.json"), &input));
JSONReader reader;
std::unique_ptr<Value> root(reader.ReadToValue(input));
- ASSERT_TRUE(root.get()) << reader.GetErrorMessage();
+ ASSERT_TRUE(root) << reader.GetErrorMessage();
EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
}
#endif // !__ANDROID__ && !__ANDROID_HOST__
@@ -587,25 +612,25 @@ TEST(JSONReaderTest, StringOptimizations) {
" ]"
"}",
JSON_DETACHABLE_CHILDREN);
- ASSERT_TRUE(root.get());
+ ASSERT_TRUE(root);
- DictionaryValue* root_dict = NULL;
+ DictionaryValue* root_dict = nullptr;
ASSERT_TRUE(root->GetAsDictionary(&root_dict));
- DictionaryValue* dict = NULL;
- ListValue* list = NULL;
+ DictionaryValue* dict = nullptr;
+ ListValue* list = nullptr;
ASSERT_TRUE(root_dict->GetDictionary("test", &dict));
ASSERT_TRUE(root_dict->GetList("list", &list));
- EXPECT_TRUE(dict->Remove("foo", &dict_literal_0));
- EXPECT_TRUE(dict->Remove("bar", &dict_literal_1));
- EXPECT_TRUE(dict->Remove("baz", &dict_string_0));
- EXPECT_TRUE(dict->Remove("moo", &dict_string_1));
+ ASSERT_TRUE(dict->Remove("foo", &dict_literal_0));
+ ASSERT_TRUE(dict->Remove("bar", &dict_literal_1));
+ ASSERT_TRUE(dict->Remove("baz", &dict_string_0));
+ ASSERT_TRUE(dict->Remove("moo", &dict_string_1));
ASSERT_EQ(2u, list->GetSize());
- EXPECT_TRUE(list->Remove(0, &list_value_0));
- EXPECT_TRUE(list->Remove(0, &list_value_1));
+ ASSERT_TRUE(list->Remove(0, &list_value_0));
+ ASSERT_TRUE(list->Remove(0, &list_value_1));
}
bool b = false;
@@ -634,19 +659,14 @@ TEST(JSONReaderTest, StringOptimizations) {
// parser implementation against buffer overflow. Best run with DCHECKs so
// that the one in NextChar fires.
TEST(JSONReaderTest, InvalidSanity) {
- const char* const invalid_json[] = {
- "/* test *",
- "{\"foo\"",
- "{\"foo\":",
- " [",
- "\"\\u123g\"",
- "{\n\"eh:\n}",
+ const char* const kInvalidJson[] = {
+ "/* test *", "{\"foo\"", "{\"foo\":", " [", "\"\\u123g\"", "{\n\"eh:\n}",
};
- for (size_t i = 0; i < arraysize(invalid_json); ++i) {
+ for (size_t i = 0; i < arraysize(kInvalidJson); ++i) {
JSONReader reader;
- LOG(INFO) << "Sanity test " << i << ": <" << invalid_json[i] << ">";
- EXPECT_FALSE(reader.ReadToValue(invalid_json[i]));
+ LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">";
+ EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i]));
EXPECT_NE(JSONReader::JSON_NO_ERROR, reader.error_code());
EXPECT_NE("", reader.GetErrorMessage());
}
diff --git a/base/json/json_writer.cc b/base/json/json_writer.cc
index 19bc0da972..0b658eed59 100644
--- a/base/json/json_writer.cc
+++ b/base/json/json_writer.cc
@@ -127,9 +127,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
bool first_value_has_been_output = false;
bool result = node.GetAsList(&list);
DCHECK(result);
- for (ListValue::const_iterator it = list->begin(); it != list->end();
- ++it) {
- const Value* value = *it;
+ for (const auto& value : *list) {
if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
continue;
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
index 37ad268684..233ac5e867 100644
--- a/base/json/json_writer_unittest.cc
+++ b/base/json/json_writer_unittest.cc
@@ -129,14 +129,11 @@ TEST(JSONWriterTest, BinaryValues) {
EXPECT_EQ("[5,2]", output_js);
DictionaryValue binary_dict;
- binary_dict.Set(
- "a", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("a", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("b", 5);
- binary_dict.Set(
- "c", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("c", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("d", 2);
- binary_dict.Set(
- "e", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("e", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
diff --git a/base/logging.cc b/base/logging.cc
index 3b659624f2..ca310f4410 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -13,8 +13,6 @@
#if defined(OS_WIN)
#include <io.h>
#include <windows.h>
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
typedef HANDLE FileHandle;
typedef HANDLE MutexHandle;
// Windows warns on using write(). It prefers _write().
@@ -293,13 +291,24 @@ bool InitializeLogFileHandle() {
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+ // We are intentionally not using FilePath or FileUtil here to reduce the
+ // dependencies of the logging implementation. For e.g. FilePath and
+ // FileUtil depend on shell32 and user32.dll. This is not acceptable for
+ // some consumers of base logging like chrome_elf, etc.
+ // Please don't change the code below to use FilePath.
// try the current directory
- base::FilePath file_path;
- if (!base::GetCurrentDirectory(&file_path))
+ wchar_t system_buffer[MAX_PATH];
+ system_buffer[0] = 0;
+ DWORD len = ::GetCurrentDirectory(arraysize(system_buffer),
+ system_buffer);
+ if (len == 0 || len > arraysize(system_buffer))
return false;
- *g_log_file_name = file_path.Append(
- FILE_PATH_LITERAL("debug.log")).value();
+ *g_log_file_name = system_buffer;
+ // Append a trailing backslash if needed.
+ if (g_log_file_name->back() != L'\\')
+ *g_log_file_name += L"\\";
+ *g_log_file_name += L"debug.log";
g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
@@ -348,23 +357,21 @@ bool BaseInitLoggingImpl(const LoggingSettings& settings) {
// Can log only to the system debug log.
CHECK_EQ(settings.logging_dest & ~LOG_TO_SYSTEM_DEBUG_LOG, 0);
#endif
- if (base::CommandLine::InitializedForCurrentProcess()) {
- base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
- // Don't bother initializing |g_vlog_info| unless we use one of the
- // vlog switches.
- if (command_line->HasSwitch(switches::kV) ||
- command_line->HasSwitch(switches::kVModule)) {
- // NOTE: If |g_vlog_info| has already been initialized, it might be in use
- // by another thread. Don't delete the old VLogInfo, just create a second
- // one. We keep track of both to avoid memory leak warnings.
- CHECK(!g_vlog_info_prev);
- g_vlog_info_prev = g_vlog_info;
-
- g_vlog_info =
- new VlogInfo(command_line->GetSwitchValueASCII(switches::kV),
- command_line->GetSwitchValueASCII(switches::kVModule),
- &g_min_log_level);
- }
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+ // Don't bother initializing |g_vlog_info| unless we use one of the
+ // vlog switches.
+ if (command_line->HasSwitch(switches::kV) ||
+ command_line->HasSwitch(switches::kVModule)) {
+ // NOTE: If |g_vlog_info| has already been initialized, it might be in use
+ // by another thread. Don't delete the old VLogInfo, just create a second
+ // one. We keep track of both to avoid memory leak warnings.
+ CHECK(!g_vlog_info_prev);
+ g_vlog_info_prev = g_vlog_info;
+
+ g_vlog_info =
+ new VlogInfo(command_line->GetSwitchValueASCII(switches::kV),
+ command_line->GetSwitchValueASCII(switches::kVModule),
+ &g_min_log_level);
}
g_logging_destination = settings.logging_dest;
@@ -460,8 +467,7 @@ template std::string* MakeCheckOpString<unsigned int, unsigned long>(
template std::string* MakeCheckOpString<std::string, std::string>(
const std::string&, const std::string&, const char* name);
-template <>
-void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t&) {
+void MakeCheckOpValueString(std::ostream* os, std::nullptr_t) {
(*os) << "nullptr";
}
diff --git a/base/logging.h b/base/logging.h
index bebf52605c..2bfc972601 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -11,11 +11,13 @@
#include <cstring>
#include <sstream>
#include <string>
-#include <typeinfo>
+#include <type_traits>
+#include <utility>
#include "base/base_export.h"
#include "base/debug/debugger.h"
#include "base/macros.h"
+#include "base/template_util.h"
#include "build/build_config.h"
//
@@ -551,14 +553,26 @@ class CheckOpResult {
// This formats a value for a failing CHECK_XX statement. Ordinarily,
// it uses the definition for operator<<, with a few special cases below.
template <typename T>
-inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+inline typename std::enable_if<
+ base::internal::SupportsOstreamOperator<const T&>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
-// We need an explicit specialization for std::nullptr_t.
-template <>
-BASE_EXPORT void MakeCheckOpValueString(std::ostream* os,
- const std::nullptr_t& p);
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ std::is_enum<T>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << static_cast<typename base::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
// Build the error message string. This is separate from the "Impl"
// function template because it is not performance critical and so can
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 7254265b17..8a20c54fb4 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -251,6 +251,13 @@ TEST_F(LoggingTest, Dcheck) {
DCHECK_NE(p_not_null, nullptr);
DCHECK_NE(nullptr, p_not_null);
EXPECT_EQ(0, log_sink_call_count);
+
+ // Test DCHECK on a scoped enum.
+ enum class Animal { DOG, CAT };
+ DCHECK_EQ(Animal::DOG, Animal::DOG);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(Animal::DOG, Animal::CAT);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
}
TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/base/mac/bind_objc_block.h b/base/mac/bind_objc_block.h
index c31f26e5a3..2434d444f5 100644
--- a/base/mac/bind_objc_block.h
+++ b/base/mac/bind_objc_block.h
@@ -45,8 +45,11 @@ R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
// note above).
template<typename R, typename... Args>
base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
- return base::Bind(&base::internal::RunBlock<R, Args...>,
- base::mac::ScopedBlock<R(^)(Args...)>(Block_copy(block)));
+ return base::Bind(
+ &base::internal::RunBlock<R, Args...>,
+ base::mac::ScopedBlock<R (^)(Args...)>(
+ base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
+ block)));
}
} // namespace base
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index d872fc35d3..4f6fa60afd 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -155,7 +155,7 @@ FilePath GetAppBundlePath(const FilePath& exec_name) {
exec_name.GetComponents(&components);
// It's an error if we don't get any components.
- if (!components.size())
+ if (components.empty())
return FilePath();
// Don't prepend '/' to the first component.
diff --git a/base/mac/libdispatch_task_runner.cc b/base/mac/libdispatch_task_runner.cc
deleted file mode 100644
index 9d18f97e3f..0000000000
--- a/base/mac/libdispatch_task_runner.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/libdispatch_task_runner.h"
-
-#include <stdint.h>
-
-#include "base/callback.h"
-
-namespace base {
-namespace mac {
-
-LibDispatchTaskRunner::LibDispatchTaskRunner(const char* name)
- : queue_(dispatch_queue_create(name, NULL)),
- queue_finalized_(false, false) {
- dispatch_set_context(queue_, this);
- dispatch_set_finalizer_f(queue_, &LibDispatchTaskRunner::Finalizer);
-}
-
-bool LibDispatchTaskRunner::PostDelayedTask(
- const tracked_objects::Location& /* from_here */,
- const Closure& task,
- base::TimeDelta delay) {
- if (!queue_)
- return false;
-
- // The block runtime would implicitly copy the reference, not the object
- // it's referencing. Copy the closure into block storage so it's available
- // to run.
- __block const Closure task_copy = task;
- void(^run_task)(void) = ^{
- task_copy.Run();
- };
-
- int64_t delay_nano =
- delay.InMicroseconds() * base::Time::kNanosecondsPerMicrosecond;
- if (delay_nano > 0) {
- dispatch_time_t time = dispatch_time(DISPATCH_TIME_NOW, delay_nano);
- dispatch_after(time, queue_, run_task);
- } else {
- dispatch_async(queue_, run_task);
- }
- return true;
-}
-
-bool LibDispatchTaskRunner::RunsTasksOnCurrentThread() const {
- return queue_ == dispatch_get_current_queue();
-}
-
-bool LibDispatchTaskRunner::PostNonNestableDelayedTask(
- const tracked_objects::Location& from_here,
- const Closure& task,
- base::TimeDelta delay) {
- return PostDelayedTask(from_here, task, delay);
-}
-
-void LibDispatchTaskRunner::Shutdown() {
- dispatch_release(queue_);
- queue_ = NULL;
- queue_finalized_.Wait();
-}
-
-dispatch_queue_t LibDispatchTaskRunner::GetDispatchQueue() const {
- return queue_;
-}
-
-LibDispatchTaskRunner::~LibDispatchTaskRunner() {
- if (queue_) {
- dispatch_set_context(queue_, NULL);
- dispatch_set_finalizer_f(queue_, NULL);
- dispatch_release(queue_);
- }
-}
-
-void LibDispatchTaskRunner::Finalizer(void* context) {
- LibDispatchTaskRunner* self = static_cast<LibDispatchTaskRunner*>(context);
- self->queue_finalized_.Signal();
-}
-
-} // namespace mac
-} // namespace base
diff --git a/base/mac/libdispatch_task_runner.h b/base/mac/libdispatch_task_runner.h
deleted file mode 100644
index b479bc7aa2..0000000000
--- a/base/mac/libdispatch_task_runner.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
-#define BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
-
-#include <dispatch/dispatch.h>
-
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/waitable_event.h"
-
-namespace base {
-namespace mac {
-
-// This is an implementation of the TaskRunner interface that runs closures on
-// a thread managed by Apple's libdispatch. This has the benefit of being able
-// to PostTask() and friends to a dispatch queue, while being reusable as a
-// dispatch_queue_t.
-//
-// One would use this class if an object lives exclusively on one thread but
-// needs a dispatch_queue_t for use in a system API. This ensures all dispatch
-// callbacks happen on the same thread as Closure tasks.
-//
-// A LibDispatchTaskRunner will continue to run until all references to the
-// underlying dispatch queue are released.
-//
-// Important Notes:
-// - There is no MessageLoop running on this thread, and ::current() returns
-// NULL.
-// - No nested loops can be run, and all tasks are run non-nested.
-// - Work scheduled via libdispatch runs at the same priority as and is
-// interleaved with posted tasks, though FIFO order is guaranteed.
-//
-class BASE_EXPORT LibDispatchTaskRunner : public base::SingleThreadTaskRunner {
- public:
- // Starts a new serial dispatch queue with a given name.
- explicit LibDispatchTaskRunner(const char* name);
-
- // base::TaskRunner:
- bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- base::TimeDelta delay) override;
- bool RunsTasksOnCurrentThread() const override;
-
- // base::SequencedTaskRunner:
- bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- base::TimeDelta delay) override;
-
- // This blocks the calling thread until all work on the dispatch queue has
- // been run and the queue has been destroyed. Destroying a queue requires
- // ALL retained references to it to be released. Any new tasks posted to
- // this thread after shutdown are dropped.
- void Shutdown();
-
- // Returns the dispatch queue associated with this task runner, for use with
- // system APIs that take dispatch queues. The caller is responsible for
- // retaining the result.
- //
- // All properties (context, finalizer, etc.) are managed by this class, and
- // clients should only use the result of this for dispatch_async().
- dispatch_queue_t GetDispatchQueue() const;
-
- protected:
- ~LibDispatchTaskRunner() override;
-
- private:
- static void Finalizer(void* context);
-
- dispatch_queue_t queue_;
-
- // The event on which Shutdown waits until Finalizer runs.
- base::WaitableEvent queue_finalized_;
-};
-
-} // namespace mac
-} // namespace base
-
-#endif // BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
diff --git a/base/mac/mac_logging.mm b/base/mac/mac_logging.mm
index 381ad30614..f0d3c07da8 100644
--- a/base/mac/mac_logging.mm
+++ b/base/mac/mac_logging.mm
@@ -32,8 +32,8 @@ OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
OSStatusLogMessage::~OSStatusLogMessage() {
#if defined(OS_IOS)
- // TODO(ios): Consider using NSError with NSOSStatusErrorDomain to try to
- // get a description of the failure.
+ // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
+ // to try to get a description of the failure.
stream() << ": " << status_;
#else
stream() << ": "
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index c72c5f1433..84948f7ce8 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -113,59 +113,38 @@ BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
// "OrLater" variants to those that check for a specific version, unless you
// know for sure that you need to check for a specific version.
-// Mountain Lion is Mac OS X 10.8, Darwin 12.
-BASE_EXPORT bool IsOSMountainLion();
-BASE_EXPORT bool IsOSMountainLionOrEarlier();
-BASE_EXPORT bool IsOSMountainLionOrLater();
-
-// Mavericks is Mac OS X 10.9, Darwin 13.
+// Mavericks is OS X 10.9, Darwin 13.
BASE_EXPORT bool IsOSMavericks();
-BASE_EXPORT bool IsOSMavericksOrEarlier();
-BASE_EXPORT bool IsOSMavericksOrLater();
-// Yosemite is Mac OS X 10.10, Darwin 14.
+// Yosemite is OS X 10.10, Darwin 14.
BASE_EXPORT bool IsOSYosemite();
BASE_EXPORT bool IsOSYosemiteOrEarlier();
BASE_EXPORT bool IsOSYosemiteOrLater();
-// El Capitan is Mac OS X 10.11, Darwin 15.
+// El Capitan is OS X 10.11, Darwin 15.
BASE_EXPORT bool IsOSElCapitan();
+BASE_EXPORT bool IsOSElCapitanOrEarlier();
BASE_EXPORT bool IsOSElCapitanOrLater();
+// Sierra is macOS 10.12, Darwin 16.
+BASE_EXPORT bool IsOSSierra();
+BASE_EXPORT bool IsOSSierraOrLater();
+
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
-BASE_EXPORT bool IsOSLaterThanElCapitan_DontCallThis();
+BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
// Inline functions that are redundant due to version ranges being mutually-
// exclusive.
-inline bool IsOSMountainLionOrEarlier() { return !IsOSMavericksOrLater(); }
-inline bool IsOSMavericksOrEarlier() { return !IsOSYosemiteOrLater(); }
inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
+inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
// When the deployment target is set, the code produced cannot run on earlier
// OS releases. That enables some of the IsOS* family to be implemented as
// constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
// contains the value of the deployment target.
-#if defined(MAC_OS_X_VERSION_10_8) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_8
-inline bool IsOSMountainLionOrLater() { return true; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_8) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_8
-inline bool IsOSMountainLion() { return false; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_9) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_9
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_9
-inline bool IsOSMavericksOrLater() { return true; }
-#endif
-
#if defined(MAC_OS_X_VERSION_10_9) && \
MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_9
#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9
@@ -194,7 +173,19 @@ inline bool IsOSElCapitanOrLater() { return true; }
MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
inline bool IsOSElCapitan() { return false; }
-inline bool IsOSLaterThanElCapitan_DontCallThis() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
+inline bool IsOSSierraOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
+inline bool IsOSSierra() { return false; }
+inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
#endif
// Retrieve the system's model identifier string from the IOKit registry:
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
index c15afb68fd..bff8eb6a9b 100644
--- a/base/mac/mach_port_broker_unittest.cc
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -23,7 +23,8 @@ class MachPortBrokerTest : public testing::Test,
public:
MachPortBrokerTest()
: broker_(kBootstrapPortName),
- event_(true, false),
+ event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
received_process_(kNullProcessHandle) {
broker_.AddObserver(this);
}
diff --git a/base/mac/scoped_block.h b/base/mac/scoped_block.h
index bc2688f13a..8199677f15 100644
--- a/base/mac/scoped_block.h
+++ b/base/mac/scoped_block.h
@@ -9,6 +9,12 @@
#include "base/mac/scoped_typeref.h"
+#if defined(__has_feature) && __has_feature(objc_arc)
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) (__bridge TYPE)(VALUE)
+#else
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) VALUE
+#endif
+
namespace base {
namespace mac {
@@ -17,8 +23,13 @@ namespace internal {
template <typename B>
struct ScopedBlockTraits {
static B InvalidValue() { return nullptr; }
- static B Retain(B block) { return Block_copy(block); }
- static void Release(B block) { Block_release(block); }
+ static B Retain(B block) {
+ return BASE_MAC_BRIDGE_CAST(
+ B, Block_copy(BASE_MAC_BRIDGE_CAST(const void*, block)));
+ }
+ static void Release(B block) {
+ Block_release(BASE_MAC_BRIDGE_CAST(const void*, block));
+ }
};
} // namespace internal
@@ -32,4 +43,6 @@ using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
} // namespace mac
} // namespace base
+#undef BASE_MAC_BRIDGE_CAST
+
#endif // BASE_MAC_SCOPED_BLOCK_H_
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
index 4b26acf758..cc54aa0ca8 100644
--- a/base/mac/scoped_nsobject.h
+++ b/base/mac/scoped_nsobject.h
@@ -12,10 +12,13 @@
// singled out because it is most typically included from other header files.
#import <Foundation/NSObject.h>
+#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/mac/scoped_typeref.h"
+#if !defined(__has_feature) || !__has_feature(objc_arc)
@class NSAutoreleasePool;
+#endif
namespace base {
@@ -38,14 +41,39 @@ namespace base {
// scoped_nsautorelease_pool.h instead.
// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
// time with a template specialization (see below).
+//
+// If Automatic Reference Counting (aka ARC) is enabled then the ownership
+// policy is not controllable by the user as ARC make it really difficult to
+// transfer ownership (the reference passed to scoped_nsobject constructor is
+// sunk by ARC and __attribute((ns_consumed)) appears to not work correctly
+// with Objective-C++ see https://llvm.org/bugs/show_bug.cgi?id=27887). Due to
+// that, the policy is always to |RETAIN| when using ARC.
namespace internal {
+BASE_EXPORT id ScopedNSProtocolTraitsRetain(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT id ScopedNSProtocolTraitsAutoRelease(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT void ScopedNSProtocolTraitsRelease(__unsafe_unretained id obj);
+
+// Traits for ScopedTypeRef<>. As this class may be compiled from file with
+// Automatic Reference Counting enable or not all methods have annotation to
+// enforce the same code generation in both case (in particular, the Retain
+// method uses ns_returns_not_retained to prevent ARC to insert a -release
+// call on the returned value and thus defeating the -retain).
template <typename NST>
struct ScopedNSProtocolTraits {
- static NST InvalidValue() { return nil; }
- static NST Retain(NST nst) { return [nst retain]; }
- static void Release(NST nst) { [nst release]; }
+ static NST InvalidValue() __attribute((ns_returns_not_retained)) {
+ return nil;
+ }
+ static NST Retain(__unsafe_unretained NST nst)
+ __attribute((ns_returns_not_retained)) {
+ return ScopedNSProtocolTraitsRetain(nst);
+ }
+ static void Release(__unsafe_unretained NST nst) {
+ ScopedNSProtocolTraitsRelease(nst);
+ }
};
} // namespace internal
@@ -54,11 +82,49 @@ template <typename NST>
class scoped_nsprotocol
: public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
public:
- using ScopedTypeRef<NST,
- internal::ScopedNSProtocolTraits<NST>>::ScopedTypeRef;
+ using Traits = internal::ScopedNSProtocolTraits<NST>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsprotocol(
+ NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : ScopedTypeRef<NST, Traits>(object, policy) {}
+#else
+ explicit scoped_nsprotocol(NST object = Traits::InvalidValue())
+ : ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
+#endif
+
+ scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsprotocol(const scoped_nsprotocol<NSR>& that_as_subclass)
+ : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
+
+ scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
+ ScopedTypeRef<NST, Traits>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ ScopedTypeRef<NST, Traits>::reset(object, policy);
+ }
+#else
+ void reset(NST object = Traits::InvalidValue()) {
+ ScopedTypeRef<NST, Traits>::reset(object, base::scoped_policy::RETAIN);
+ }
+#endif
// Shift reference to the autorelease pool to be released later.
- NST autorelease() { return [this->release() autorelease]; }
+ NST autorelease() __attribute((ns_returns_not_retained)) {
+ return internal::ScopedNSProtocolTraitsAutoRelease(this->release());
+ }
};
// Free functions
@@ -80,17 +146,92 @@ bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
template <typename NST>
class scoped_nsobject : public scoped_nsprotocol<NST*> {
public:
- using scoped_nsprotocol<NST*>::scoped_nsprotocol;
-
+ using Traits = typename scoped_nsprotocol<NST*>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<NST*>(object, policy) {}
+#else
+ explicit scoped_nsobject(NST* object = Traits::InvalidValue())
+ : scoped_nsprotocol<NST*>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<NST>& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<NST*>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<NST>&& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
+ scoped_nsprotocol<NST*>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<NST*>::reset(object, policy);
+ }
+#else
+ void reset(NST* object = Traits::InvalidValue()) {
+ scoped_nsprotocol<NST*>::reset(object);
+ }
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
"Use ScopedNSAutoreleasePool instead");
+#endif
};
// Specialization to make scoped_nsobject<id> work.
template<>
class scoped_nsobject<id> : public scoped_nsprotocol<id> {
public:
- using scoped_nsprotocol<id>::scoped_nsprotocol;
+ using Traits = typename scoped_nsprotocol<id>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<id>(object, policy) {}
+#else
+ explicit scoped_nsobject(id object = Traits::InvalidValue())
+ : scoped_nsprotocol<id>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<id>& that)
+ : scoped_nsprotocol<id>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<id>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
+ scoped_nsprotocol<id>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<id>::reset(object, policy);
+ }
+#else
+ void reset(id object = Traits::InvalidValue()) {
+ scoped_nsprotocol<id>::reset(object);
+ }
+#endif
};
} // namespace base
diff --git a/base/mac/scoped_typeref.h b/base/mac/scoped_typeref.h
index eed5afb539..b8d8a14262 100644
--- a/base/mac/scoped_typeref.h
+++ b/base/mac/scoped_typeref.h
@@ -53,8 +53,8 @@ class ScopedTypeRef {
public:
typedef T element_type;
- ScopedTypeRef(
- T object = Traits::InvalidValue(),
+ explicit ScopedTypeRef(
+ __unsafe_unretained T object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: object_(object) {
if (object_ && policy == base::scoped_policy::RETAIN)
@@ -67,12 +67,10 @@ class ScopedTypeRef {
object_ = Traits::Retain(object_);
}
- // Without this, passing a ScopedTypeRef<A,TraitsX> to construct a
- // ScopedTypeRef<A,TraitsY> would automatically cast down to an A, and then
- // ASSUME ownership of A, when a retain is what was needed.
- template<typename OtherTraits>
- ScopedTypeRef(const ScopedTypeRef<T, OtherTraits>& that_with_other_traits)
- : object_(that_with_other_traits.get()) {
+ // This allows passing an object to a function that takes its superclass.
+ template <typename R, typename RTraits>
+ explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+ : object_(that_as_subclass.get()) {
if (object_)
object_ = Traits::Retain(object_);
}
@@ -99,9 +97,9 @@ class ScopedTypeRef {
return &object_;
}
- void reset(T object = Traits::InvalidValue(),
+ void reset(__unsafe_unretained T object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy =
- base::scoped_policy::ASSUME) {
+ base::scoped_policy::ASSUME) {
if (object && policy == base::scoped_policy::RETAIN)
object = Traits::Retain(object);
if (object_)
@@ -109,24 +107,16 @@ class ScopedTypeRef {
object_ = object;
}
- bool operator==(T that) const {
- return object_ == that;
- }
+ bool operator==(__unsafe_unretained T that) const { return object_ == that; }
- bool operator!=(T that) const {
- return object_ != that;
- }
+ bool operator!=(__unsafe_unretained T that) const { return object_ != that; }
- operator T() const {
- return object_;
- }
+ operator T() const __attribute((ns_returns_not_retained)) { return object_; }
- T get() const {
- return object_;
- }
+ T get() const __attribute((ns_returns_not_retained)) { return object_; }
void swap(ScopedTypeRef& that) {
- T temp = that.object_;
+ __unsafe_unretained T temp = that.object_;
that.object_ = object_;
object_ = temp;
}
@@ -134,14 +124,14 @@ class ScopedTypeRef {
// ScopedTypeRef<>::release() is like std::unique_ptr<>::release. It is NOT
// a wrapper for Release(). To force a ScopedTypeRef<> object to call
// Release(), use ScopedTypeRef<>::reset().
- T release() WARN_UNUSED_RESULT {
- T temp = object_;
+ T release() __attribute((ns_returns_not_retained)) WARN_UNUSED_RESULT {
+ __unsafe_unretained T temp = object_;
object_ = Traits::InvalidValue();
return temp;
}
private:
- T object_;
+ __unsafe_unretained T object_;
};
} // namespace base
diff --git a/base/macros.h b/base/macros.h
index 2a825371b3..4c62300e32 100644
--- a/base/macros.h
+++ b/base/macros.h
@@ -34,8 +34,8 @@
// We define this macro conditionally as it may be defined by another libraries.
#if !defined(DISALLOW_COPY_AND_ASSIGN)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
#endif
// A macro to disallow all the implicit constructors, namely the
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 96231af233..b026d9ab03 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -111,7 +111,7 @@ class BASE_EXPORT RefCountedThreadSafeBase {
//
// A base class for reference counted classes. Otherwise, known as a cheap
-// knock-off of WebKit's RefCounted<T> class. To use this guy just extend your
+// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
// class from it like so:
//
// class MyFoo : public base::RefCounted<MyFoo> {
diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h
index adbab8cf49..f3581eaa9b 100644
--- a/base/memory/scoped_vector.h
+++ b/base/memory/scoped_vector.h
@@ -11,7 +11,7 @@
#include <vector>
#include "base/logging.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/stl_util.h"
// ScopedVector wraps a vector deleting the elements from its
@@ -21,8 +21,6 @@
// we have support for moveable types inside containers).
template <class T>
class ScopedVector {
- MOVE_ONLY_TYPE_FOR_CPP_03(ScopedVector)
-
public:
typedef typename std::vector<T*>::allocator_type allocator_type;
typedef typename std::vector<T*>::size_type size_type;
@@ -142,6 +140,8 @@ class ScopedVector {
private:
std::vector<T*> v_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedVector);
};
#endif // BASE_MEMORY_SCOPED_VECTOR_H_
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index 257b9aec2e..e1c9fa70bd 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -24,6 +24,10 @@
#include "base/files/scoped_file.h"
#endif
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
namespace base {
class FilePath;
@@ -55,12 +59,6 @@ struct BASE_EXPORT SharedMemoryCreateOptions {
// If true, the file can be shared read-only to a process.
bool share_read_only;
-
-#if defined(OS_WIN)
- // If true, creates a file mapping without a name or proper ACLs. This is a
- // stop-gap measure during investigation of https://crbug.com/585013.
- bool create_without_name_or_permissions = false;
-#endif
};
// Platform abstraction for shared memory. Provides a C++ wrapper
@@ -269,7 +267,7 @@ class BASE_EXPORT SharedMemory {
// before being mapped.
bool external_section_;
std::wstring name_;
- HANDLE mapped_file_;
+ win::ScopedHandle mapped_file_;
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// The OS primitive that backs the shared memory region.
SharedMemoryHandle shm_;
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index 8251f60840..f29865c21a 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -592,11 +592,6 @@ TEST(SharedMemoryTest, UnsafeImageSection) {
EXPECT_FALSE(shared_memory_open.Map(1));
EXPECT_EQ(nullptr, shared_memory_open.memory());
- SharedMemory shared_memory_handle_dup(
- SharedMemoryHandle(section_handle.Get(), ::GetCurrentProcessId()), true);
- EXPECT_FALSE(shared_memory_handle_dup.Map(1));
- EXPECT_EQ(nullptr, shared_memory_handle_dup.memory());
-
SharedMemory shared_memory_handle_local(
SharedMemoryHandle(section_handle.Take(), ::GetCurrentProcessId()), true);
EXPECT_FALSE(shared_memory_handle_local.Map(1));
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index 16d3dff10a..4e77b04973 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -34,14 +34,16 @@ WeakReference::Flag::~Flag() {
WeakReference::WeakReference() {
}
-WeakReference::WeakReference(const WeakReference& other) = default;
-
WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
}
WeakReference::~WeakReference() {
}
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
bool WeakReference::is_valid() const { return flag_.get() && flag_->IsValid(); }
WeakReferenceOwner::WeakReferenceOwner() {
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 3b8bcb1b07..3544439dd3 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -109,10 +109,14 @@ class BASE_EXPORT WeakReference {
};
WeakReference();
- WeakReference(const WeakReference& other);
explicit WeakReference(const Flag* flag);
~WeakReference();
+ WeakReference(WeakReference&& other);
+ WeakReference(const WeakReference& other);
+ WeakReference& operator=(WeakReference&& other) = default;
+ WeakReference& operator=(const WeakReference& other) = default;
+
bool is_valid() const;
private:
@@ -145,6 +149,11 @@ class BASE_EXPORT WeakPtrBase {
WeakPtrBase();
~WeakPtrBase();
+ WeakPtrBase(const WeakPtrBase& other) = default;
+ WeakPtrBase(WeakPtrBase&& other) = default;
+ WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+ WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
protected:
explicit WeakPtrBase(const WeakReference& ref);
@@ -205,10 +214,13 @@ class WeakPtr : public internal::WeakPtrBase {
WeakPtr(std::nullptr_t) : ptr_(nullptr) {}
// Allow conversion from U to T provided U "is a" T. Note that this
- // is separate from the (implicit) copy constructor.
+ // is separate from the (implicit) copy and move constructors.
template <typename U>
WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
}
+ template <typename U>
+ WeakPtr(WeakPtr<U>&& other)
+ : WeakPtrBase(std::move(other)), ptr_(other.ptr_) {}
T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
@@ -226,36 +238,10 @@ class WeakPtr : public internal::WeakPtrBase {
ptr_ = nullptr;
}
- // Implement "Safe Bool Idiom"
- // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Safe_bool
- //
- // Allow WeakPtr<element_type> to be used in boolean expressions such as
- // if (weak_ptr_instance)
- // But do not become convertible to a real bool (which is dangerous).
- // Implementation requires:
- // typedef Testable
- // operator Testable() const
- // operator==
- // operator!=
- //
- // == and != operators must be declared explicitly or dissallowed, as
- // otherwise "ptr1 == ptr2" will compile but do the wrong thing (i.e., convert
- // to Testable and then do the comparison).
- //
- // C++11 provides for "explicit operator bool()", however it is currently
- // banned due to MSVS2013. https://chromium-cpp.appspot.com/#core-blacklist
- private:
- typedef T* WeakPtr::*Testable;
-
- public:
- operator Testable() const { return get() ? &WeakPtr::ptr_ : nullptr; }
+ // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+ explicit operator bool() const { return get() != nullptr; }
private:
- // Explicitly declare comparison operators as required by the "Safe Bool
- // Idiom", but keep them private.
- template <class U> bool operator==(WeakPtr<U> const&) const;
- template <class U> bool operator!=(WeakPtr<U> const&) const;
-
friend class internal::SupportsWeakPtrBase;
template <typename U> friend class WeakPtr;
friend class SupportsWeakPtr<T>;
@@ -271,6 +257,24 @@ class WeakPtr : public internal::WeakPtrBase {
T* ptr_;
};
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr == nullptr;
+}
+
// A class may be composed of a WeakPtrFactory and thereby
// control how it exposes weak pointers to itself. This is helpful if you only
// need weak pointers within the implementation of a class. This class is also
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index df6c24f8a8..ebcf33c57e 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -69,7 +69,8 @@ class BackgroundThread : public Thread {
~BackgroundThread() override { Stop(); }
void CreateArrowFromTarget(Arrow** arrow, Target* target) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromTarget, arrow,
target, &completion));
@@ -77,7 +78,8 @@ class BackgroundThread : public Thread {
}
void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromArrow, arrow,
other, &completion));
@@ -85,7 +87,8 @@ class BackgroundThread : public Thread {
}
void DeleteTarget(Target* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
@@ -93,7 +96,8 @@ class BackgroundThread : public Thread {
}
void CopyAndAssignArrow(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrow, object,
&completion));
@@ -101,7 +105,8 @@ class BackgroundThread : public Thread {
}
void CopyAndAssignArrowBase(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrowBase,
object, &completion));
@@ -109,7 +114,8 @@ class BackgroundThread : public Thread {
}
void DeleteArrow(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
@@ -117,7 +123,8 @@ class BackgroundThread : public Thread {
}
Target* DeRef(const Arrow* arrow) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Target* result = nullptr;
task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
arrow, &result, &completion));
@@ -196,6 +203,16 @@ TEST(WeakPtrFactoryTest, Comparison) {
EXPECT_EQ(ptr.get(), ptr2.get());
}
+TEST(WeakPtrFactoryTest, Move) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ WeakPtr<int> ptr2 = factory.GetWeakPtr();
+ WeakPtr<int> ptr3 = std::move(ptr2);
+ EXPECT_NE(ptr.get(), ptr2.get());
+ EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
TEST(WeakPtrFactoryTest, OutOfScope) {
WeakPtr<int> ptr;
EXPECT_EQ(nullptr, ptr.get());
@@ -301,6 +318,19 @@ TEST(WeakPtrFactoryTest, BooleanTesting) {
}
}
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+
+ WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+ EXPECT_NE(nullptr, ptr_to_an_instance);
+ EXPECT_NE(ptr_to_an_instance, nullptr);
+
+ WeakPtr<int> null_ptr;
+ EXPECT_EQ(null_ptr, nullptr);
+ EXPECT_EQ(nullptr, null_ptr);
+}
+
TEST(WeakPtrTest, InvalidateWeakPtrs) {
int data;
WeakPtrFactory<int> factory(&data);
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
index 32deca9f17..9b1226b794 100644
--- a/base/memory/weak_ptr_unittest.nc
+++ b/base/memory/weak_ptr_unittest.nc
@@ -59,7 +59,7 @@ void WontCompile() {
SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
void WontCompile() {
Producer f;
@@ -73,14 +73,14 @@ void WontCompile() {
WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
void WontCompile() {
Producer f;
WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
@@ -94,14 +94,14 @@ void WontCompile() {
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNRELATED_HELPER) // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNRELATED_HELPER) // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
@@ -129,7 +129,7 @@ void WontCompile() {
WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
}
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: member 'AsWeakPtr' found in multiple base classes of different types"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
void WontCompile() {
MultiplyDerivedProducer f;
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index 97df54f5bd..bca1d52762 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -69,8 +69,7 @@ bool IncomingTaskQueue::AddToIncomingQueue(
<< " seconds from here: " << from_here.ToString();
PendingTask pending_task(
- from_here, task, CalculateDelayedRuntime(delay), nestable);
- AutoLock locked(incoming_queue_lock_);
+ from_here, task, CalculateDelayedRuntime(delay), nestable);
#if defined(OS_WIN)
// We consider the task needs a high resolution timer if the delay is
// more than 0 and less than 32ms. This caps the relative error to
@@ -78,7 +77,6 @@ bool IncomingTaskQueue::AddToIncomingQueue(
// resolution on Windows is between 10 and 15ms.
if (delay > TimeDelta() &&
delay.InMilliseconds() < (2 * Time::kMinLowResolutionThresholdMs)) {
- ++high_res_task_count_;
pending_task.is_high_res = true;
}
#endif
@@ -107,7 +105,7 @@ int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
// incoming queue becomes nonempty we need to schedule it again.
message_loop_scheduled_ = false;
} else {
- incoming_queue_.Swap(work_queue);
+ incoming_queue_.swap(*work_queue);
}
// Reset the count of high resolution tasks since our queue is now empty.
int high_res_tasks = high_res_task_count_;
@@ -116,17 +114,25 @@ int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
}
void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
- AutoLock lock(incoming_queue_lock_);
+ base::subtle::AutoWriteLock lock(message_loop_lock_);
message_loop_ = NULL;
}
void IncomingTaskQueue::StartScheduling() {
- AutoLock lock(incoming_queue_lock_);
- DCHECK(!is_ready_for_scheduling_);
- DCHECK(!message_loop_scheduled_);
- is_ready_for_scheduling_ = true;
- if (!incoming_queue_.empty())
- ScheduleWork();
+ bool schedule_work;
+ {
+ AutoLock lock(incoming_queue_lock_);
+ DCHECK(!is_ready_for_scheduling_);
+ DCHECK(!message_loop_scheduled_);
+ is_ready_for_scheduling_ = true;
+ schedule_work = !incoming_queue_.empty();
+ }
+ if (schedule_work) {
+ DCHECK(message_loop_);
+ // Don't need to lock |message_loop_lock_| here because this function is
+ // called by MessageLoop on its thread.
+ message_loop_->ScheduleWork();
+ }
}
IncomingTaskQueue::~IncomingTaskQueue() {
@@ -139,44 +145,55 @@ bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
// directly, as it could starve handling of foreign threads. Put every task
// into this queue.
- // This should only be called while the lock is taken.
- incoming_queue_lock_.AssertAcquired();
+ // Ensures |message_loop_| isn't destroyed while running.
+ base::subtle::AutoReadLock hold_message_loop(message_loop_lock_);
if (!message_loop_) {
pending_task->task.Reset();
return false;
}
- // Initialize the sequence number. The sequence number is used for delayed
- // tasks (to facilitate FIFO sorting when two tasks have the same
- // delayed_run_time value) and for identifying the task in about:tracing.
- pending_task->sequence_num = next_sequence_num_++;
-
- message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
- *pending_task);
+ bool schedule_work = false;
+ {
+ AutoLock hold(incoming_queue_lock_);
- bool was_empty = incoming_queue_.empty();
- incoming_queue_.push(*pending_task);
- pending_task->task.Reset();
+#if defined(OS_WIN)
+ if (pending_task->is_high_res)
+ ++high_res_task_count_;
+#endif
- if (is_ready_for_scheduling_ &&
- (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
- ScheduleWork();
+ // Initialize the sequence number. The sequence number is used for delayed
+ // tasks (to facilitate FIFO sorting when two tasks have the same
+ // delayed_run_time value) and for identifying the task in about:tracing.
+ pending_task->sequence_num = next_sequence_num_++;
+
+ message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
+ *pending_task);
+
+ bool was_empty = incoming_queue_.empty();
+ incoming_queue_.push(std::move(*pending_task));
+
+ if (is_ready_for_scheduling_ &&
+ (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
+ schedule_work = true;
+ // After we've scheduled the message loop, we do not need to do so again
+ // until we know it has processed all of the work in our queue and is
+ // waiting for more work again. The message loop will always attempt to
+ // reload from the incoming queue before waiting again so we clear this
+ // flag in ReloadWorkQueue().
+ message_loop_scheduled_ = true;
+ }
}
- return true;
-}
+ // Wake up the message loop and schedule work. This is done outside
+ // |incoming_queue_lock_| because signaling the message loop may cause this
+ // thread to be switched. If |incoming_queue_lock_| is held, any other thread
+ // that wants to post a task will be blocked until this thread switches back
+ // in and releases |incoming_queue_lock_|.
+ if (schedule_work)
+ message_loop_->ScheduleWork();
-void IncomingTaskQueue::ScheduleWork() {
- DCHECK(is_ready_for_scheduling_);
- // Wake up the message loop.
- message_loop_->ScheduleWork();
- // After we've scheduled the message loop, we do not need to do so again
- // until we know it has processed all of the work in our queue and is
- // waiting for more work again. The message loop will always attempt to
- // reload from the incoming queue before waiting again so we clear this flag
- // in ReloadWorkQueue().
- message_loop_scheduled_ = true;
+ return true;
}
} // namespace internal
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index 608eca0bbd..aff71d20bf 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -10,6 +10,7 @@
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/read_write_lock.h"
#include "base/time/time.h"
namespace base {
@@ -75,9 +76,14 @@ class BASE_EXPORT IncomingTaskQueue
// so that ReloadWorkQueue() completes in constant time.
int high_res_task_count_;
- // The lock that protects access to the members of this class.
+ // The lock that protects access to the members of this class, except
+ // |message_loop_|.
base::Lock incoming_queue_lock_;
+ // Lock that protects |message_loop_| to prevent it from being deleted while a
+ // task is being posted.
+ base::subtle::ReadWriteLock message_loop_lock_;
+
// An incoming queue of tasks that are acquired under a mutex for processing
// on this instance's thread. These tasks have not yet been been pushed to
// |message_loop_|.
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index a86e8e826b..54369a9b27 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -18,6 +18,7 @@
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -303,9 +304,9 @@ void MessageLoop::RunUntilIdle() {
void MessageLoop::QuitWhenIdle() {
DCHECK_EQ(this, current());
if (run_loop_) {
- run_loop_->quit_when_idle_received_ = true;
+ run_loop_->QuitWhenIdle();
} else {
- NOTREACHED() << "Must be inside Run to call Quit";
+ NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
}
}
@@ -389,16 +390,14 @@ MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
in_high_res_mode_(false),
#endif
nestable_tasks_allowed_(true),
-#if defined(OS_WIN)
- os_modal_loop_(false),
-#endif // OS_WIN
pump_factory_(pump_factory),
message_histogram_(NULL),
run_loop_(NULL),
incoming_task_queue_(new internal::IncomingTaskQueue(this)),
unbound_task_runner_(
new internal::MessageLoopTaskRunner(incoming_task_queue_)),
- task_runner_(unbound_task_runner_) {
+ task_runner_(unbound_task_runner_),
+ thread_id_(kInvalidThreadId) {
// If type is TYPE_CUSTOM non-null pump_factory must be given.
DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
}
@@ -417,6 +416,22 @@ void MessageLoop::BindToCurrentThread() {
unbound_task_runner_->BindToCurrentThread();
unbound_task_runner_ = nullptr;
SetThreadTaskRunnerHandle();
+ {
+ // Save the current thread's ID for potential use by other threads
+ // later from GetThreadName().
+ thread_id_ = PlatformThread::CurrentId();
+ subtle::MemoryBarrier();
+ }
+}
+
+std::string MessageLoop::GetThreadName() const {
+ if (thread_id_ == kInvalidThreadId) {
+ // |thread_id_| may already have been initialized but this thread might not
+ // have received the update yet.
+ subtle::MemoryBarrier();
+ DCHECK_NE(kInvalidThreadId, thread_id_);
+ }
+ return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
}
void MessageLoop::SetTaskRunner(
@@ -449,7 +464,8 @@ bool MessageLoop::ProcessNextDelayedNonNestableTask() {
if (deferred_non_nestable_work_queue_.empty())
return false;
- PendingTask pending_task = deferred_non_nestable_work_queue_.front();
+ PendingTask pending_task =
+ std::move(deferred_non_nestable_work_queue_.front());
deferred_non_nestable_work_queue_.pop();
RunTask(pending_task);
@@ -482,7 +498,7 @@ void MessageLoop::RunTask(const PendingTask& pending_task) {
nestable_tasks_allowed_ = true;
}
-bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
+bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
if (pending_task.nestable || run_loop_->run_depth_ == 1) {
RunTask(pending_task);
// Show that we ran a task (Note: a new one might arrive as a
@@ -492,25 +508,25 @@ bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
// We couldn't run the task now because we're in a nested message loop
// and the task isn't nestable.
- deferred_non_nestable_work_queue_.push(pending_task);
+ deferred_non_nestable_work_queue_.push(std::move(pending_task));
return false;
}
-void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
+void MessageLoop::AddToDelayedWorkQueue(PendingTask pending_task) {
// Move to the delayed work queue.
- delayed_work_queue_.push(pending_task);
+ delayed_work_queue_.push(std::move(pending_task));
}
bool MessageLoop::DeletePendingTasks() {
bool did_work = !work_queue_.empty();
while (!work_queue_.empty()) {
- PendingTask pending_task = work_queue_.front();
+ PendingTask pending_task = std::move(work_queue_.front());
work_queue_.pop();
if (!pending_task.delayed_run_time.is_null()) {
// We want to delete delayed tasks in the same order in which they would
// normally be deleted in case of any funny dependencies between delayed
// tasks.
- AddToDelayedWorkQueue(pending_task);
+ AddToDelayedWorkQueue(std::move(pending_task));
}
}
did_work |= !deferred_non_nestable_work_queue_.empty();
@@ -549,6 +565,12 @@ void MessageLoop::ScheduleWork() {
pump_->ScheduleWork();
}
+#if defined(OS_WIN)
+bool MessageLoop::MessagePumpWasSignaled() {
+ return pump_->WasSignaled();
+}
+#endif
+
//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.
@@ -557,13 +579,12 @@ void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL) // NaCl build has no metrics code.
if (enable_histogrammer_ && !message_histogram_
&& StatisticsRecorder::IsActive()) {
- DCHECK(!thread_name_.empty());
+ std::string thread_name = GetThreadName();
+ DCHECK(!thread_name.empty());
message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
- "MsgLoop:" + thread_name_,
- kLeastNonZeroMessageId, kMaxMessageId,
+ "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
kNumberOfDistinctMessagesDisplayed,
- HistogramBase::kHexRangePrintingFlag,
- event_descriptions_);
+ HistogramBase::kHexRangePrintingFlag, event_descriptions_);
}
#endif
}
@@ -593,15 +614,17 @@ bool MessageLoop::DoWork() {
// Execute oldest task.
do {
- PendingTask pending_task = work_queue_.front();
+ PendingTask pending_task = std::move(work_queue_.front());
work_queue_.pop();
if (!pending_task.delayed_run_time.is_null()) {
- AddToDelayedWorkQueue(pending_task);
+ int sequence_num = pending_task.sequence_num;
+ TimeTicks delayed_run_time = pending_task.delayed_run_time;
+ AddToDelayedWorkQueue(std::move(pending_task));
// If we changed the topmost task, then it is time to reschedule.
- if (delayed_work_queue_.top().task.Equals(pending_task.task))
- pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
+ if (delayed_work_queue_.top().sequence_num == sequence_num)
+ pump_->ScheduleDelayedWork(delayed_run_time);
} else {
- if (DeferOrRunPendingTask(pending_task))
+ if (DeferOrRunPendingTask(std::move(pending_task)))
return true;
}
} while (!work_queue_.empty());
@@ -633,13 +656,14 @@ bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
}
}
- PendingTask pending_task = delayed_work_queue_.top();
+ PendingTask pending_task =
+ std::move(const_cast<PendingTask&>(delayed_work_queue_.top()));
delayed_work_queue_.pop();
if (!delayed_work_queue_.empty())
*next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
- return DeferOrRunPendingTask(pending_task);
+ return DeferOrRunPendingTask(std::move(pending_task));
}
bool MessageLoop::DoIdleWork() {
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index 1e8b0bbc13..ac522cf133 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -291,12 +291,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Returns the type passed to the constructor.
Type type() const { return type_; }
- // Optional call to connect the thread name with this loop.
- void set_thread_name(const std::string& thread_name) {
- DCHECK(thread_name_.empty()) << "Should not rename this thread!";
- thread_name_ = thread_name;
- }
- const std::string& thread_name() const { return thread_name_; }
+ // Returns the name of the thread this message loop is bound to.
+ // This function is only valid when this message loop is running and
+ // BindToCurrentThread has already been called.
+ std::string GetThreadName() const;
// Gets the TaskRunner associated with this message loop.
const scoped_refptr<SingleThreadTaskRunner>& task_runner() {
@@ -375,16 +373,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
-#if defined(OS_WIN)
- void set_os_modal_loop(bool os_modal_loop) {
- os_modal_loop_ = os_modal_loop;
- }
-
- bool os_modal_loop() const {
- return os_modal_loop_;
- }
-#endif // OS_WIN
-
// Can only be called from the thread that owns the MessageLoop.
bool is_running() const;
@@ -402,6 +390,15 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Runs the specified PendingTask.
void RunTask(const PendingTask& pending_task);
+#if defined(OS_WIN)
+ // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+ // has been investigated.
+ // This should be used for diagnostic only. If message pump wake-up mechanism
+ // is based on auto-reset event this call would reset the event to unset
+ // state.
+ bool MessagePumpWasSignaled();
+#endif
+
//----------------------------------------------------------------------------
protected:
std::unique_ptr<MessagePump> pump_;
@@ -453,10 +450,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Calls RunTask or queues the pending_task on the deferred task list if it
// cannot be run right now. Returns true if the task was run.
- bool DeferOrRunPendingTask(const PendingTask& pending_task);
+ bool DeferOrRunPendingTask(PendingTask pending_task);
// Adds the pending task to delayed_work_queue_.
- void AddToDelayedWorkQueue(const PendingTask& pending_task);
+ void AddToDelayedWorkQueue(PendingTask pending_task);
// Delete tasks that haven't run yet without running them. Used in the
// destructor to make sure all the task's destructors get called. Returns
@@ -523,17 +520,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// insider a (accidentally induced?) nested message pump.
bool nestable_tasks_allowed_;
-#if defined(OS_WIN)
- // Should be set to true before calling Windows APIs like TrackPopupMenu, etc.
- // which enter a modal message loop.
- bool os_modal_loop_;
-#endif
-
// pump_factory_.Run() is called to create a message pump for this loop
// if type_ is TYPE_CUSTOM and pump_ is null.
MessagePumpFactoryCallback pump_factory_;
- std::string thread_name_;
// A profiling histogram showing the counts of various messages and events.
HistogramBase* message_histogram_;
@@ -552,6 +542,9 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
scoped_refptr<SingleThreadTaskRunner> task_runner_;
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+ // Id of the thread this message loop is bound to.
+ PlatformThreadId thread_id_;
+
template <class T, class R> friend class base::subtle::DeleteHelperInternal;
template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 044350acd1..cabd25013b 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -11,6 +11,8 @@
#include "base/debug/leak_annotations.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_task_runner.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -24,7 +26,8 @@ class MessageLoopTaskRunnerTest : public testing::Test {
MessageLoopTaskRunnerTest()
: current_loop_(new MessageLoop()),
task_thread_("task_thread"),
- thread_sync_(true, false) {}
+ thread_sync_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void DeleteCurrentMessageLoop() { current_loop_.reset(); }
@@ -35,7 +38,7 @@ class MessageLoopTaskRunnerTest : public testing::Test {
task_thread_.Start();
// Allow us to pause the |task_thread_|'s MessageLoop.
- task_thread_.message_loop()->PostTask(
+ task_thread_.message_loop()->task_runner()->PostTask(
FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
Unretained(this)));
}
@@ -257,7 +260,8 @@ class MessageLoopTaskRunnerThreadingTest : public testing::Test {
}
void Quit() const {
- loop_.PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
+ loop_.task_runner()->PostTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure());
}
void AssertOnIOThread() const {
@@ -313,21 +317,21 @@ class MessageLoopTaskRunnerThreadingTest : public testing::Test {
TEST_F(MessageLoopTaskRunnerThreadingTest, Release) {
EXPECT_TRUE(io_thread_->task_runner()->ReleaseSoon(FROM_HERE, this));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
DeletedOnFile* deleted_on_file = new DeletedOnFile(this);
EXPECT_TRUE(
file_thread_->task_runner()->DeleteSoon(FROM_HERE, deleted_on_file));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
EXPECT_TRUE(file_thread_->task_runner()->PostTask(
FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
Unretained(this))));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
diff --git a/base/message_loop/message_loop_test.cc b/base/message_loop/message_loop_test.cc
index 4e45acbbf0..1ab946f9e2 100644
--- a/base/message_loop/message_loop_test.cc
+++ b/base/message_loop/message_loop_test.cc
@@ -12,6 +12,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
@@ -96,25 +97,25 @@ void RunTest_PostTask(MessagePumpFactory factory) {
// Add tests to message loop
scoped_refptr<Foo> foo(new Foo());
std::string a("a"), b("b"), c("c"), d("d");
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test0, foo.get()));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1ConstRef, foo.get(), a));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1Ptr, foo.get(), &b));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1Int, foo.get(), 100));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test2Ptr, foo.get(), &a, &c));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test2Mixed, foo.get(), a, &d));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&Foo::Test0, foo.get()));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
// After all tests, post a message that will shut down the message loop
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(foo->test_count(), 105);
EXPECT_EQ(foo->result(), "abacad");
@@ -131,12 +132,11 @@ void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
int num_tasks = 1;
Time run_time;
- loop.PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
- kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
Time time_before_run = Time::Now();
- loop.Run();
+ RunLoop().Run();
Time time_after_run = Time::Now();
EXPECT_EQ(0, num_tasks);
@@ -151,18 +151,16 @@ void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromMilliseconds(200));
// If we get a large pause in execution (due to a context switch) here, this
// test could fail.
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time2 < run_time1);
@@ -185,14 +183,12 @@ void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time1 < run_time2);
@@ -210,14 +206,13 @@ void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time;
- loop.PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time time_before_run = Time::Now();
- loop.Run();
+ RunLoop().Run();
Time time_after_run = Time::Now();
EXPECT_EQ(0, num_tasks);
@@ -240,14 +235,14 @@ void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
// Clutter the ML with tasks.
for (int i = 1; i < num_tasks; ++i)
- loop.PostTask(FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
+ loop.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
- loop.PostDelayedTask(
+ loop.task_runner()->PostDelayedTask(
FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(1));
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time2 > run_time1);
@@ -265,18 +260,16 @@ void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
int num_tasks = 1;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromSeconds(1000));
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time start_time = Time::Now();
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
// Ensure that we ran in far less time than the slower timer.
@@ -309,7 +302,7 @@ class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
~RecordDeletionProbe() {
*was_deleted_ = true;
if (post_on_delete_.get())
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
}
@@ -323,13 +316,13 @@ void RunTest_EnsureDeletion(MessagePumpFactory factory) {
{
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
- loop.PostTask(
+ loop.task_runner()->PostTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &a_was_deleted)));
+ new RecordDeletionProbe(NULL, &a_was_deleted)));
// TODO(ajwong): Do we really need 1000ms here?
- loop.PostDelayedTask(
+ loop.task_runner()->PostDelayedTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &b_was_deleted)),
+ new RecordDeletionProbe(NULL, &b_was_deleted)),
TimeDelta::FromMilliseconds(1000));
}
EXPECT_TRUE(a_was_deleted);
@@ -348,7 +341,7 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
- loop.PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
}
EXPECT_TRUE(a_was_deleted);
EXPECT_TRUE(b_was_deleted);
@@ -358,11 +351,11 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
void NestingFunc(int* depth) {
if (*depth > 0) {
*depth -= 1;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&NestingFunc, depth));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, depth));
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
MessageLoop::current()->QuitWhenIdle();
}
@@ -372,9 +365,9 @@ void RunTest_Nesting(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
int depth = 100;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&NestingFunc, &depth));
- MessageLoop::current()->Run();
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, &depth));
+ RunLoop().Run();
EXPECT_EQ(depth, 0);
}
@@ -410,9 +403,10 @@ void RunNestedLoop(TestNestingObserver* observer,
RunLoop nested_loop;
// Verify that by the time the first task is run the observer has seen the
// message loop begin.
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&ExpectOneBeginNestedLoop, observer));
- MessageLoop::current()->PostTask(FROM_HERE, nested_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop.QuitClosure());
nested_loop.Run();
// Quitting message loops doesn't change the begin count.
@@ -431,9 +425,10 @@ void RunTest_NestingObserver(MessagePumpFactory factory) {
outer_loop.AddNestingObserver(&nesting_observer);
// Post a task that runs a nested message loop.
- outer_loop.PostTask(FROM_HERE, Bind(&RunNestedLoop, &nesting_observer,
- outer_loop.QuitWhenIdleClosure()));
- outer_loop.Run();
+ outer_loop.task_runner()->PostTask(FROM_HERE,
+ Bind(&RunNestedLoop, &nesting_observer,
+ outer_loop.QuitWhenIdleClosure()));
+ RunLoop().Run();
outer_loop.RemoveNestingObserver(&nesting_observer);
}
@@ -523,7 +518,7 @@ void RecursiveFunc(TaskList* order, int cookie, int depth,
if (depth > 0) {
if (is_reentrant)
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
}
@@ -541,17 +536,14 @@ void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&RecursiveFunc, &order, 1, 2, false));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&RecursiveFunc, &order, 2, 2, false));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(14U, order.Size());
@@ -588,20 +580,16 @@ void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 3),
- TimeDelta::FromMilliseconds(5));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&QuitFunc, &order, 4),
- TimeDelta::FromMilliseconds(5));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(16U, order.Size());
@@ -628,14 +616,14 @@ void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
TaskList order;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&QuitFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(14U, order.Size());
@@ -670,7 +658,7 @@ void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
Bind(&OrderedFunc, &order, 2));
MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(6U, order.Size());
@@ -723,7 +711,7 @@ void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
FROM_HERE,
Bind(&QuitFunc, &order, 6));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(12U, order.Size());
@@ -762,20 +750,20 @@ void RunTest_QuitNow(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_EQ(6U, order.Size());
int task_index = 0;
@@ -798,13 +786,13 @@ void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -828,14 +816,14 @@ void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -859,15 +847,15 @@ void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
RunLoop nested_run_loop;
RunLoop bogus_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
- FROM_HERE, bogus_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ bogus_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -894,35 +882,35 @@ void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
RunLoop nested_loop3;
RunLoop nested_loop4;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 5));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 6));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop1.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop1.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 7));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop2.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop2.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 8));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop3.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop3.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 9));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop4.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop4.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 10));
outer_run_loop.Run();
@@ -961,10 +949,10 @@ void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
run_loop.Quit();
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -980,14 +968,14 @@ void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 1));
- MessageLoop::current()->PostTask(
- FROM_HERE, run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -1007,20 +995,20 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->PostTask(
- FROM_HERE, run_loop.QuitClosure()); // has no affect
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, run_loop.QuitClosure()); // has no affect
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 4));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
RunLoop outer_run_loop;
outer_run_loop.Run();
@@ -1040,9 +1028,8 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -1060,8 +1047,8 @@ void RunTest_RecursivePosts(MessagePumpFactory factory) {
const int kNumTimes = 1 << 17;
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
- loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
- loop.Run();
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
+ RunLoop().Run();
}
} // namespace test
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index bc4176fdeb..52337e31a8 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -18,6 +18,7 @@
#include "base/pending_task.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/platform_thread.h"
@@ -416,9 +417,8 @@ void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -581,6 +581,9 @@ RUN_MESSAGE_LOOP_TESTS(UI, &TypeUIMessagePumpFactory);
RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
#if defined(OS_WIN)
+// Additional set of tests for GPU version of UI message loop.
+RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
+
TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
@@ -636,8 +639,8 @@ TEST(MessageLoopTest, TaskObserver) {
MessageLoop loop;
loop.AddTaskObserver(&observer);
- loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
- loop.Run();
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
+ RunLoop().Run();
loop.RemoveTaskObserver(&observer);
EXPECT_EQ(kNumPosts, observer.num_tasks_started());
@@ -812,11 +815,10 @@ TEST(MessageLoopTest, DestructionObserverTest) {
MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
loop->AddDestructionObserver(&observer);
- loop->PostDelayedTask(
- FROM_HERE,
- Bind(&DestructionObserverProbe::Run,
- new DestructionObserverProbe(&task_destroyed,
- &destruction_observer_called)),
+ loop->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&DestructionObserverProbe::Run,
+ new DestructionObserverProbe(
+ &task_destroyed, &destruction_observer_called)),
kDelay);
delete loop;
EXPECT_TRUE(observer.task_destroyed_before_message_loop());
@@ -837,12 +839,12 @@ TEST(MessageLoopTest, ThreadMainTaskRunner) {
&Foo::Test1ConstRef, foo.get(), a));
// Post quit task;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(foo->test_count(), 1);
EXPECT_EQ(foo->result(), "a");
@@ -961,7 +963,7 @@ TEST(MessageLoopTest, OriginalRunnerWorks) {
scoped_refptr<Foo> foo(new Foo());
original_runner->PostTask(FROM_HERE,
Bind(&Foo::Test1ConstRef, foo.get(), "a"));
- loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_EQ(1, foo->test_count());
}
@@ -976,4 +978,20 @@ TEST(MessageLoopTest, DeleteUnboundLoop) {
EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
}
+TEST(MessageLoopTest, ThreadName) {
+ {
+ std::string kThreadName("foo");
+ MessageLoop loop;
+ PlatformThread::SetName(kThreadName);
+ EXPECT_EQ(kThreadName, loop.GetThreadName());
+ }
+
+ {
+ std::string kThreadName("bar");
+ base::Thread thread(kThreadName);
+ ASSERT_TRUE(thread.StartAndWaitForTesting());
+ EXPECT_EQ(kThreadName, thread.message_loop()->GetThreadName());
+ }
+}
+
} // namespace base
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
index 3d85b9b564..2f740f2423 100644
--- a/base/message_loop/message_pump.cc
+++ b/base/message_loop/message_pump.cc
@@ -15,4 +15,11 @@ MessagePump::~MessagePump() {
void MessagePump::SetTimerSlack(TimerSlack) {
}
+#if defined(OS_WIN)
+bool MessagePump::WasSignaled() {
+ NOTREACHED();
+ return false;
+}
+#endif
+
} // namespace base
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
index c53be80410..af8ed41f27 100644
--- a/base/message_loop/message_pump.h
+++ b/base/message_loop/message_pump.h
@@ -124,6 +124,15 @@ class BASE_EXPORT MessagePump : public NonThreadSafe {
// Sets the timer slack to the specified value.
virtual void SetTimerSlack(TimerSlack timer_slack);
+
+#if defined(OS_WIN)
+ // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+ // has been investigated.
+ // This should be used for diagnostic only. If message pump wake-up mechanism
+ // is based on auto-reset event this call would reset the event to unset
+ // state.
+ virtual bool WasSignaled();
+#endif
};
} // namespace base
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
index ed15395d56..3449aec860 100644
--- a/base/message_loop/message_pump_default.cc
+++ b/base/message_loop/message_pump_default.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_pump_default.h"
+#include <algorithm>
+
#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -16,8 +18,8 @@ namespace base {
MessagePumpDefault::MessagePumpDefault()
: keep_running_(true),
- event_(false, false) {
-}
+ event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
MessagePumpDefault::~MessagePumpDefault() {
}
@@ -54,7 +56,31 @@ void MessagePumpDefault::Run(Delegate* delegate) {
} else {
TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
if (delay > TimeDelta()) {
+#if defined(OS_WIN)
+ // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
+ // logic into TimedWait implementation in waitable_event_win.cc.
+
+ // crbug.com/487724: on Windows, waiting for less than 1 ms results in
+ // returning from TimedWait promptly and spinning
+ // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
+ // run a delayed task. |min_delay| is the minimum possible wait to
+ // to avoid the spinning.
+ constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
+ do {
+ delay = std::max(delay, min_delay);
+ if (event_.TimedWait(delay))
+ break;
+
+ // TimedWait can time out earlier than the specified |delay| on
+ // Windows. It doesn't make sense to run the outer loop in that case
+ // because there isn't going to be any new work. It is less overhead
+ // to just go back to wait.
+ // In practice this inner wait loop might have up to 3 iterations.
+ delay = delayed_work_time_ - TimeTicks::Now();
+ } while (delay > TimeDelta());
+#else
event_.TimedWait(delay);
+#endif
} else {
// It looks like delayed_work_time_ indicates a time in the past, so we
// need to call DoDelayedWork now.
diff --git a/base/message_loop/message_pump_glib.cc b/base/message_loop/message_pump_glib.cc
index f06f60d8cf..fd23745f4e 100644
--- a/base/message_loop/message_pump_glib.cc
+++ b/base/message_loop/message_pump_glib.cc
@@ -52,7 +52,7 @@ int GetTimeIntervalMilliseconds(const TimeTicks& from) {
// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
// (i.e., you can call Run from them), but Prepare and Check cannot.
// Finalize is called when the source is destroyed.
-// NOTE: It is common for subsytems to want to process pending events while
+// NOTE: It is common for subsystems to want to process pending events while
// doing intensive work, for example the flash plugin. They usually use the
// following pattern (recommended by the GTK docs):
// while (gtk_events_pending()) {
@@ -350,7 +350,7 @@ void MessagePumpGlib::ScheduleWork() {
void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
// We need to wake up the loop in case the poll timeout needs to be
- // adjusted. This will cause us to try to do work, but that's ok.
+ // adjusted. This will cause us to try to do work, but that's OK.
delayed_work_time_ = delayed_work_time;
ScheduleWork();
}
diff --git a/base/metrics/OWNERS b/base/metrics/OWNERS
index 3fd7c0dbc2..feb8271f7e 100644
--- a/base/metrics/OWNERS
+++ b/base/metrics/OWNERS
@@ -1,3 +1,2 @@
asvitkine@chromium.org
isherman@chromium.org
-jar@chromium.org
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 3b398cd20e..600b94ed48 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include "base/build_time.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
@@ -45,8 +44,14 @@ Time CreateTimeFromParams(int year, int month, int day_of_month) {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
+ Time out_time;
+ if (!Time::FromLocalExploded(exploded, &out_time)) {
+ // TODO(maksims): implement failure handling.
+ // We might just return |out_time|, which is Time(0).
+ NOTIMPLEMENTED();
+ }
- return Time::FromLocalExploded(exploded);
+ return out_time;
}
// Returns the boundary value for comparing against the FieldTrial's added
@@ -107,38 +112,6 @@ bool ParseFieldTrialsString(const std::string& trials_string,
return true;
}
-void CheckTrialGroup(const std::string& trial_name,
- const std::string& trial_group,
- std::map<std::string, std::string>* seen_states) {
- if (ContainsKey(*seen_states, trial_name)) {
- CHECK_EQ((*seen_states)[trial_name], trial_group) << trial_name;
- } else {
- (*seen_states)[trial_name] = trial_group;
- }
-}
-
-// A second copy of FieldTrialList::seen_states_ that is meant to outlive the
-// FieldTrialList object to determine if the inconsistency happens because there
-// might be multiple FieldTrialList objects.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-base::LazyInstance<std::map<std::string, std::string>>::Leaky g_seen_states =
- LAZY_INSTANCE_INITIALIZER;
-
-// A debug token generated during FieldTrialList construction. Used to diagnose
-// crbug.com/359406.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-int32_t g_debug_token = -1;
-
-// Whether to append the debug token to the child process --force-fieldtrials
-// command line. Used to diagnose crbug.com/359406.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-bool g_append_debug_token_to_trial_string = false;
-
-// Tracks whether |g_seen_states| is used. Defaults to false, because unit tests
-// will create multiple FieldTrialList instances. Also controls whether
-// |g_debug_token| is included in the field trial state string.
-bool g_use_global_check_states = false;
-
} // namespace
// statics
@@ -242,9 +215,7 @@ void FieldTrial::SetForced() {
// static
void FieldTrial::EnableBenchmarking() {
- // TODO(asvitkine): Change this back to 0u after the trial in FieldTrialList
- // constructor is removed.
- DCHECK_EQ(1u, FieldTrialList::GetFieldTrialCount());
+ DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
enable_benchmarking_ = true;
}
@@ -276,9 +247,6 @@ FieldTrial::FieldTrial(const std::string& trial_name,
DCHECK_GT(total_probability, 0);
DCHECK(!trial_name_.empty());
DCHECK(!default_group_name_.empty());
-
- if (g_debug_token == -1)
- g_debug_token = RandInt(1, INT32_MAX);
}
FieldTrial::~FieldTrial() {}
@@ -344,8 +312,7 @@ FieldTrialList::FieldTrialList(
: entropy_provider_(entropy_provider),
observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
- // TODO(asvitkine): Turn into a DCHECK after http://crbug.com/359406 is fixed.
- CHECK(!global_);
+ DCHECK(!global_);
DCHECK(!used_without_global_);
global_ = this;
@@ -353,30 +320,6 @@ FieldTrialList::FieldTrialList(
Time::Exploded exploded;
two_years_from_build_time.LocalExplode(&exploded);
kNoExpirationYear = exploded.year;
-
- // Run a 50/50 experiment that enables |g_use_global_check_states| only for
- // half the users, to investigate if this instrumentation is causing the
- // crashes to disappear for http://crbug.com/359406. Done here instead of a
- // server-side trial because this needs to be done early during FieldTrialList
- // initialization.
- //
- // Note: |g_use_global_check_states| is set via EnableGlobalStateChecks()
- // prior to the FieldTrialList being created. We only want to do the trial
- // check once the first time FieldTrialList is created, so use a static
- // |first_time| variable to track this.
- //
- // TODO(asvitkine): Remove after http://crbug.com/359406 is fixed.
- static bool first_time = true;
- if (first_time && g_use_global_check_states) {
- first_time = false;
- base::FieldTrial* trial =
- FactoryGetFieldTrial("UMA_CheckStates", 100, "NoChecks",
- kNoExpirationYear, 1, 1,
- FieldTrial::SESSION_RANDOMIZED, nullptr);
- trial->AppendGroup("Checks", 50);
- if (trial->group_name() == "NoChecks")
- g_use_global_check_states = false;
- }
}
FieldTrialList::~FieldTrialList() {
@@ -391,18 +334,6 @@ FieldTrialList::~FieldTrialList() {
}
// static
-void FieldTrialList::EnableGlobalStateChecks() {
- CHECK(!g_use_global_check_states);
- g_use_global_check_states = true;
- g_append_debug_token_to_trial_string = true;
-}
-
-// static
-int32_t FieldTrialList::GetDebugToken() {
- return g_debug_token;
-}
-
-// static
FieldTrial* FieldTrialList::FactoryGetFieldTrial(
const std::string& trial_name,
FieldTrial::Probability total_probability,
@@ -534,12 +465,6 @@ void FieldTrialList::StatesToString(std::string* output) {
output->append(it->group_name);
output->append(1, kPersistentStringSeparator);
}
- if (g_append_debug_token_to_trial_string) {
- output->append("DebugToken");
- output->append(1, kPersistentStringSeparator);
- output->append(IntToString(g_debug_token));
- output->append(1, kPersistentStringSeparator);
- }
}
// static
@@ -562,14 +487,6 @@ void FieldTrialList::AllStatesToString(std::string* output) {
output->append(1, kPersistentStringSeparator);
trial.group_name.AppendToString(output);
output->append(1, kPersistentStringSeparator);
-
- // TODO(asvitkine): Remove these when http://crbug.com/359406 is fixed.
- CheckTrialGroup(trial.trial_name.as_string(), trial.group_name.as_string(),
- &global_->seen_states_);
- if (g_use_global_check_states) {
- CheckTrialGroup(trial.trial_name.as_string(),
- trial.group_name.as_string(), &g_seen_states.Get());
- }
}
}
@@ -694,16 +611,6 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (!field_trial->enable_field_trial_)
return;
- // TODO(asvitkine): Remove this block when http://crbug.com/359406 is fixed.
- {
- AutoLock auto_lock(global_->lock_);
- CheckTrialGroup(field_trial->trial_name(),
- field_trial->group_name_internal(), &global_->seen_states_);
- if (g_use_global_check_states) {
- CheckTrialGroup(field_trial->trial_name(),
- field_trial->group_name_internal(), &g_seen_states.Get());
- }
- }
global_->observer_list_->Notify(
FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
field_trial->trial_name(), field_trial->group_name_internal());
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index fc6237a513..28a4606a88 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -347,20 +347,6 @@ class BASE_EXPORT FieldTrialList {
// Destructor Release()'s references to all registered FieldTrial instances.
~FieldTrialList();
- // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
- // Remove when that bug is fixed. This enables using a global map that checks
- // the state of field trials between possible FieldTrialList instances. If
- // enabled, a CHECK will be hit if it's seen that a field trial is given a
- // different state then what was specified to a renderer process launch
- // command line.
- static void EnableGlobalStateChecks();
-
- // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
- // Remove when that bug is fixed. This returns a unique token generated during
- // FieldTrialList construction. This is used to verify that this value stays
- // consistent between renderer process invocations.
- static int32_t GetDebugToken();
-
// Get a FieldTrial instance from the factory.
//
// |name| is used to register the instance with the FieldTrialList class,
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 0e363e3f51..0d6287c0b1 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -64,8 +64,7 @@ bool ReadHistogramArguments(PickleIterator* iter,
}
// We use the arguments to find or create the local version of the histogram
- // in this process, so we need to clear the IPC flag.
- DCHECK(*flags & HistogramBase::kIPCSerializationSourceFlag);
+ // in this process, so we need to clear any IPC flag.
*flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return true;
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index 58a9ab2edc..d240099110 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -135,10 +135,6 @@ class BASE_EXPORT HistogramBase {
COUNT_LOW_ERROR = 0x8,
NEVER_EXCEEDED_VALUE = 0x10,
-
- // This value is used only in HistogramSnapshotManager for marking
- // internally when new inconsistencies are found.
- NEW_INCONSISTENCY_FOUND = 0x8000000
};
explicit HistogramBase(const std::string& name);
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
index 5ce39cabe1..1eb8fd4608 100644
--- a/base/metrics/histogram_base_unittest.cc
+++ b/base/metrics/histogram_base_unittest.cc
@@ -29,7 +29,7 @@ class HistogramBaseTest : public testing::Test {
// It is necessary to fully destruct any existing StatisticsRecorder
// before creating a new one.
statistics_recorder_.reset();
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
HistogramBase* GetCreationReportHistogram(const std::string& name) {
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
index 80a7009671..719bc70970 100644
--- a/base/metrics/histogram_delta_serialization_unittest.cc
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -14,7 +14,8 @@
namespace base {
TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
- StatisticsRecorder statistic_recorder;
+ std::unique_ptr<StatisticsRecorder> statistic_recorder(
+ StatisticsRecorder::CreateTemporaryForTesting());
HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
std::vector<std::string> deltas;
// Nothing was changed yet.
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index dc6cb8a86e..340505e519 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -16,91 +16,20 @@ namespace base {
HistogramSnapshotManager::HistogramSnapshotManager(
HistogramFlattener* histogram_flattener)
- : preparing_deltas_(false),
- histogram_flattener_(histogram_flattener) {
+ : histogram_flattener_(histogram_flattener) {
DCHECK(histogram_flattener_);
}
HistogramSnapshotManager::~HistogramSnapshotManager() {
}
-void HistogramSnapshotManager::StartDeltas() {
- // Ensure that start/finish calls do not get nested.
- DCHECK(!preparing_deltas_);
- preparing_deltas_ = true;
-
- DCHECK(owned_histograms_.empty());
-
-#if DCHECK_IS_ON()
- for (const auto& hash_and_info : known_histograms_) {
- DCHECK(!hash_and_info.second.histogram);
- DCHECK(!hash_and_info.second.accumulated_samples);
- DCHECK(!(hash_and_info.second.inconsistencies &
- HistogramBase::NEW_INCONSISTENCY_FOUND));
- }
-#endif
-}
-
void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
PrepareSamples(histogram, histogram->SnapshotDelta());
}
-void HistogramSnapshotManager::PrepareDeltaTakingOwnership(
- std::unique_ptr<HistogramBase> histogram) {
- PrepareSamples(histogram.get(), histogram->SnapshotDelta());
- owned_histograms_.push_back(std::move(histogram));
-}
-
-void HistogramSnapshotManager::PrepareAbsolute(const HistogramBase* histogram) {
- PrepareSamples(histogram, histogram->SnapshotSamples());
-}
-
-void HistogramSnapshotManager::PrepareAbsoluteTakingOwnership(
- std::unique_ptr<const HistogramBase> histogram) {
- PrepareSamples(histogram.get(), histogram->SnapshotSamples());
- owned_histograms_.push_back(std::move(histogram));
-}
-
-void HistogramSnapshotManager::PrepareFinalDeltaTakingOwnership(
- std::unique_ptr<const HistogramBase> histogram) {
- PrepareSamples(histogram.get(), histogram->SnapshotFinalDelta());
- owned_histograms_.push_back(std::move(histogram));
-}
-
-void HistogramSnapshotManager::FinishDeltas() {
- DCHECK(preparing_deltas_);
-
- // Iterate over all known histograms to see what should be recorded.
- for (auto& hash_and_info : known_histograms_) {
- SampleInfo* sample_info = &hash_and_info.second;
-
- // First, record any histograms in which corruption was detected.
- if (sample_info->inconsistencies & HistogramBase::NEW_INCONSISTENCY_FOUND) {
- sample_info->inconsistencies &= ~HistogramBase::NEW_INCONSISTENCY_FOUND;
- histogram_flattener_->UniqueInconsistencyDetected(
- static_cast<HistogramBase::Inconsistency>(
- sample_info->inconsistencies));
- }
-
- // Second, record actual accumulated deltas.
- if (sample_info->accumulated_samples) {
- // TODO(bcwhite): Investigate using redundant_count() below to avoid
- // additional pass through all the samples to calculate real total.
- if (sample_info->accumulated_samples->TotalCount() > 0) {
- histogram_flattener_->RecordDelta(*sample_info->histogram,
- *sample_info->accumulated_samples);
- }
- delete sample_info->accumulated_samples;
- sample_info->accumulated_samples = nullptr;
- }
-
- // The Histogram pointer must be cleared at this point because the owner
- // is only required to keep it alive until FinishDeltas() completes.
- sample_info->histogram = nullptr;
- }
-
- owned_histograms_.clear();
- preparing_deltas_ = false;
+void HistogramSnapshotManager::PrepareFinalDelta(
+ const HistogramBase* histogram) {
+ PrepareSamples(histogram, histogram->SnapshotFinalDelta());
}
void HistogramSnapshotManager::PrepareSamples(
@@ -108,28 +37,32 @@ void HistogramSnapshotManager::PrepareSamples(
std::unique_ptr<HistogramSamples> samples) {
DCHECK(histogram_flattener_);
- // Get information known about this histogram.
+ // Get information known about this histogram. If it did not previously
+ // exist, one will be created and initialized.
SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
- if (sample_info->histogram) {
- DCHECK_EQ(sample_info->histogram->histogram_name(),
- histogram->histogram_name()) << "hash collision";
- } else {
- // First time this histogram has been seen; datafill.
- sample_info->histogram = histogram;
- }
// Crash if we detect that our histograms have been overwritten. This may be
// a fair distance from the memory smasher, but we hope to correlate these
// crashes with other events, such as plugins, or usage patterns, etc.
uint32_t corruption = histogram->FindCorruption(*samples);
if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
+ // Extract fields useful during debug.
+ const BucketRanges* ranges =
+ static_cast<const Histogram*>(histogram)->bucket_ranges();
+ std::vector<HistogramBase::Sample> ranges_copy;
+ for (size_t i = 0; i < ranges->size(); ++i)
+ ranges_copy.push_back(ranges->range(i));
+ HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
+ const char* histogram_name = histogram->histogram_name().c_str();
+ int32_t flags = histogram->flags();
// The checksum should have caught this, so crash separately if it didn't.
CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
CHECK(false); // Crash for the bucket order corruption.
// Ensure that compiler keeps around pointers to |histogram| and its
// internal |bucket_ranges_| for any minidumps.
- base::debug::Alias(
- static_cast<const Histogram*>(histogram)->bucket_ranges());
+ base::debug::Alias(&ranges_ptr);
+ base::debug::Alias(&histogram_name);
+ base::debug::Alias(&flags);
}
// Checksum corruption might not have caused order corruption.
CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
@@ -146,19 +79,14 @@ void HistogramSnapshotManager::PrepareSamples(
const uint32_t old_corruption = sample_info->inconsistencies;
if (old_corruption == (corruption | old_corruption))
return; // We've already seen this corruption for this histogram.
- sample_info->inconsistencies |=
- corruption | HistogramBase::NEW_INCONSISTENCY_FOUND;
- // TODO(bcwhite): Can we clear the inconsistency for future collection?
+ sample_info->inconsistencies |= corruption;
+ histogram_flattener_->UniqueInconsistencyDetected(
+ static_cast<HistogramBase::Inconsistency>(corruption));
return;
}
- if (!sample_info->accumulated_samples) {
- // This histogram has not been seen before; add it as a new entry.
- sample_info->accumulated_samples = samples.release();
- } else {
- // There are previous values from this histogram; add them together.
- sample_info->accumulated_samples->Add(*samples);
- }
+ if (samples->TotalCount() > 0)
+ histogram_flattener_->RecordDelta(*histogram, *samples);
}
void HistogramSnapshotManager::InspectLoggedSamplesInconsistency(
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index 83bd5feaa8..26fb93fd20 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -37,16 +37,12 @@ class BASE_EXPORT HistogramSnapshotManager {
// |required_flags| is used to select histograms to be recorded.
// Only histograms that have all the flags specified by the argument will be
// chosen. If all histograms should be recorded, set it to
- // |Histogram::kNoFlags|. Though any "forward" iterator will work, the
- // histograms over which it iterates *must* remain valid until this method
- // returns; the iterator cannot deallocate histograms once it iterates past
- // them and FinishDeltas() has been called after. StartDeltas() must be
- // called before.
+ // |Histogram::kNoFlags|.
template <class ForwardHistogramIterator>
- void PrepareDeltasWithoutStartFinish(ForwardHistogramIterator begin,
- ForwardHistogramIterator end,
- HistogramBase::Flags flags_to_set,
- HistogramBase::Flags required_flags) {
+ void PrepareDeltas(ForwardHistogramIterator begin,
+ ForwardHistogramIterator end,
+ HistogramBase::Flags flags_to_set,
+ HistogramBase::Flags required_flags) {
for (ForwardHistogramIterator it = begin; it != end; ++it) {
(*it)->SetFlags(flags_to_set);
if (((*it)->flags() & required_flags) == required_flags)
@@ -54,59 +50,21 @@ class BASE_EXPORT HistogramSnapshotManager {
}
}
- // As above but also calls StartDeltas() and FinishDeltas().
- template <class ForwardHistogramIterator>
- void PrepareDeltas(ForwardHistogramIterator begin,
- ForwardHistogramIterator end,
- HistogramBase::Flags flags_to_set,
- HistogramBase::Flags required_flags) {
- StartDeltas();
- PrepareDeltasWithoutStartFinish(begin, end, flags_to_set, required_flags);
- FinishDeltas();
- }
-
// When the collection is not so simple as can be done using a single
// iterator, the steps can be performed separately. Call PerpareDelta()
- // as many times as necessary with a single StartDeltas() before and
- // a single FinishDeltas() after. All passed histograms must live
- // until FinishDeltas() completes. PrepareAbsolute() works the same
- // but assumes there were no previous logged values and no future deltas
- // will be created (and thus can work on read-only histograms).
- // PrepareFinalDelta() works like PrepareDelta() except that it does
- // not update the previous logged values and can thus be used with
- // read-only files.
- // Use Prepare*TakingOwnership() if it is desireable to have this class
- // automatically delete the histogram once it is "finished".
- void StartDeltas();
+ // as many times as necessary. PrepareFinalDelta() works like PrepareDelta()
+ // except that it does not update the previous logged values and can thus
+ // be used with read-only files.
void PrepareDelta(HistogramBase* histogram);
- void PrepareDeltaTakingOwnership(std::unique_ptr<HistogramBase> histogram);
- void PrepareAbsolute(const HistogramBase* histogram);
- void PrepareAbsoluteTakingOwnership(
- std::unique_ptr<const HistogramBase> histogram);
- void PrepareFinalDeltaTakingOwnership(
- std::unique_ptr<const HistogramBase> histogram);
- void FinishDeltas();
+ void PrepareFinalDelta(const HistogramBase* histogram);
private:
FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
// During a snapshot, samples are acquired and aggregated. This structure
- // contains all the information collected for a given histogram. Once a
- // snapshot operation is finished, it is generally emptied except for
- // information that must persist from one report to the next, such as
- // the "inconsistencies".
+ // contains all the information for a given histogram that persists between
+ // collections.
struct SampleInfo {
- // A histogram associated with this sample; it may be one of many if
- // several have been aggregated into the same "accumulated" sample set.
- // Ownership of the histogram remains elsewhere and this pointer is
- // cleared by FinishDeltas().
- const HistogramBase* histogram = nullptr;
-
- // The current snapshot-delta values being accumulated.
- // TODO(bcwhite): Change this to a scoped_ptr once all build architectures
- // support such as the value of a std::map.
- HistogramSamples* accumulated_samples = nullptr;
-
// The set of inconsistencies (flags) already seen for the histogram.
// See HistogramBase::Inconsistency for values.
uint32_t inconsistencies = 0;
@@ -126,13 +84,6 @@ class BASE_EXPORT HistogramSnapshotManager {
// by the hash of the histogram name.
std::map<uint64_t, SampleInfo> known_histograms_;
- // Collection of histograms of which ownership has been passed to this
- // object. They will be deleted by FinishDeltas().
- std::vector<std::unique_ptr<const HistogramBase>> owned_histograms_;
-
- // Indicates if deltas are currently being prepared.
- bool preparing_deltas_;
-
// |histogram_flattener_| handles the logistics of recording the histogram
// deltas.
HistogramFlattener* histogram_flattener_; // Weak.
diff --git a/base/metrics/histogram_snapshot_manager_unittest.cc b/base/metrics/histogram_snapshot_manager_unittest.cc
index 8ec03daa8d..3c13e1a5a9 100644
--- a/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -24,8 +24,9 @@ class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
void RecordDelta(const HistogramBase& histogram,
const HistogramSamples& snapshot) override {
recorded_delta_histogram_names_.push_back(histogram.histogram_name());
- ASSERT_FALSE(ContainsKey(recorded_delta_histogram_sum_,
- histogram.histogram_name()));
+ // Use CHECK instead of ASSERT to get full stack-trace and thus origin.
+ CHECK(!ContainsKey(recorded_delta_histogram_sum_,
+ histogram.histogram_name()));
// Keep pointer to snapshot for testing. This really isn't ideal but the
// snapshot-manager keeps the snapshot alive until it's "forgotten".
recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
@@ -68,11 +69,12 @@ class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
class HistogramSnapshotManagerTest : public testing::Test {
protected:
HistogramSnapshotManagerTest()
- : histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
+ : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
+ histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
~HistogramSnapshotManagerTest() override {}
- StatisticsRecorder statistics_recorder_;
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
HistogramSnapshotManager histogram_snapshot_manager_;
};
@@ -124,35 +126,4 @@ TEST_F(HistogramSnapshotManagerTest,
EXPECT_EQ("UmaStabilityHistogram", histograms[0]);
}
-TEST_F(HistogramSnapshotManagerTest, CheckMerge) {
- UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
- UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
-
- base::HistogramBase* h1 = base::LinearHistogram::FactoryGet(
- "UmaHistogram", 1, 4, 5, 0);
- ASSERT_TRUE(h1);
- base::HistogramBase* h2 = base::LinearHistogram::FactoryGet(
- "UmaStabilityHistogram", 1, 2, 3, 0);
- ASSERT_TRUE(h2);
-
- histogram_snapshot_manager_.StartDeltas();
- histogram_snapshot_manager_.PrepareDelta(h1);
- histogram_snapshot_manager_.PrepareDelta(h1); // Delta will be zero.
- histogram_snapshot_manager_.PrepareDelta(h2);
- h1->Add(2);
- h2->Add(1);
- histogram_snapshot_manager_.PrepareDelta(h2);
- histogram_snapshot_manager_.PrepareDelta(h1);
- histogram_snapshot_manager_.FinishDeltas();
- {
- const std::vector<std::string> histograms =
- histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
- EXPECT_EQ(2U, histograms.size());
- EXPECT_EQ(3, histogram_flattener_delta_recorder_.
- GetRecordedDeltaHistogramSum("UmaHistogram"));
- EXPECT_EQ(2, histogram_flattener_delta_recorder_.
- GetRecordedDeltaHistogramSum("UmaStabilityHistogram"));
- }
-}
-
} // namespace base
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 668ac1ba7e..5c2ca6883a 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -56,7 +56,7 @@ class HistogramTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 9608fba725..5af3486645 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -6,7 +6,10 @@
#include <memory>
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
+#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
@@ -16,12 +19,9 @@
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
#include "base/synchronization/lock.h"
-// TODO(bcwhite): Order these methods to match the header file. The current
-// order is only temporary in order to aid review of the transition from
-// a non-class implementation.
-
namespace base {
namespace {
@@ -263,6 +263,188 @@ PersistentHistogramAllocator::PersistentHistogramAllocator(
PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+ Reference ref) {
+ // Unfortunately, the histogram "pickle" methods cannot be used as part of
+ // the persistance because the deserialization methods always create local
+ // count data (while these must reference the persistent counts) and always
+ // add it to the local list of known histograms (while these may be simple
+ // references to histograms in other processes).
+ PersistentHistogramData* histogram_data =
+ memory_allocator_->GetAsObject<PersistentHistogramData>(
+ ref, kTypeIdHistogram);
+ size_t length = memory_allocator_->GetAllocSize(ref);
+ if (!histogram_data ||
+ reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
+ NOTREACHED();
+ return nullptr;
+ }
+ return CreateHistogram(histogram_data);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+ HistogramType histogram_type,
+ const std::string& name,
+ int minimum,
+ int maximum,
+ const BucketRanges* bucket_ranges,
+ int32_t flags,
+ Reference* ref_ptr) {
+ // If the allocator is corrupt, don't waste time trying anything else.
+ // This also allows differentiating on the dashboard between allocations
+ // failed due to a corrupt allocator and the number of process instances
+ // with one, the latter being idicated by "newly corrupt", below.
+ if (memory_allocator_->IsCorrupt()) {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+ return nullptr;
+ }
+
+ // Create the metadata necessary for a persistent sparse histogram. This
+ // is done first because it is a small subset of what is required for
+ // other histograms.
+ PersistentMemoryAllocator::Reference histogram_ref =
+ memory_allocator_->Allocate(
+ offsetof(PersistentHistogramData, name) + name.length() + 1,
+ kTypeIdHistogram);
+ PersistentHistogramData* histogram_data =
+ memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+ kTypeIdHistogram);
+ if (histogram_data) {
+ memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+ histogram_data->histogram_type = histogram_type;
+ histogram_data->flags = flags | HistogramBase::kIsPersistent;
+ }
+
+ // Create the remaining metadata necessary for regular histograms.
+ if (histogram_type != SPARSE_HISTOGRAM) {
+ size_t bucket_count = bucket_ranges->bucket_count();
+ size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+ if (counts_bytes == 0) {
+ // |bucket_count| was out-of-range.
+ NOTREACHED();
+ return nullptr;
+ }
+
+ size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+ PersistentMemoryAllocator::Reference counts_ref =
+ memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+ PersistentMemoryAllocator::Reference ranges_ref =
+ memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+ HistogramBase::Sample* ranges_data =
+ memory_allocator_->GetAsObject<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray);
+
+ // Only continue here if all allocations were successful. If they weren't,
+ // there is no way to free the space but that's not really a problem since
+ // the allocations only fail because the space is full or corrupt and so
+ // any future attempts will also fail.
+ if (counts_ref && ranges_data && histogram_data) {
+ for (size_t i = 0; i < bucket_ranges->size(); ++i)
+ ranges_data[i] = bucket_ranges->range(i);
+
+ histogram_data->minimum = minimum;
+ histogram_data->maximum = maximum;
+ // |bucket_count| must fit within 32-bits or the allocation of the counts
+ // array would have failed for being too large; the allocator supports
+ // less than 4GB total size.
+ histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+ histogram_data->ranges_ref = ranges_ref;
+ histogram_data->ranges_checksum = bucket_ranges->checksum();
+ histogram_data->counts_ref = counts_ref;
+ } else {
+ histogram_data = nullptr; // Clear this for proper handling below.
+ }
+ }
+
+ if (histogram_data) {
+ // Create the histogram using resources in persistent memory. This ends up
+ // resolving the "ref" values stored in histogram_data instad of just
+ // using what is already known above but avoids duplicating the switch
+ // statement here and serves as a double-check that everything is
+ // correct before commiting the new histogram to persistent space.
+ std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+ DCHECK(histogram);
+ if (ref_ptr != nullptr)
+ *ref_ptr = histogram_ref;
+
+ // By storing the reference within the allocator to this histogram, the
+ // next import (which will happen before the next histogram creation)
+ // will know to skip it.
+ // See also the comment in ImportHistogramsToStatisticsRecorder().
+ subtle::NoBarrier_Store(&last_created_, histogram_ref);
+ return histogram;
+ }
+
+ CreateHistogramResultType result;
+ if (memory_allocator_->IsCorrupt()) {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
+ result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
+ } else if (memory_allocator_->IsFull()) {
+ result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
+ } else {
+ result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
+ }
+ RecordCreateHistogramResult(result);
+ NOTREACHED() << "error=" << result;
+
+ return nullptr;
+}
+
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+ bool registered) {
+ // If the created persistent histogram was registered then it needs to
+ // be marked as "iterable" in order to be found by other processes.
+ if (registered)
+ memory_allocator_->MakeIterable(ref);
+ // If it wasn't registered then a race condition must have caused
+ // two to be created. The allocator does not support releasing the
+ // acquired memory so just change the type to be empty.
+ else
+ memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+}
+
+void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
+ HistogramBase* histogram) {
+ DCHECK(histogram);
+
+ HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+ if (!existing) {
+ // The above should never fail but if it does, no real harm is done.
+ // The data won't be merged but it also won't be recorded as merged
+ // so a future try, if successful, will get what was missed. If it
+ // continues to fail, some metric data will be lost but that is better
+ // than crashing.
+ NOTREACHED();
+ return;
+ }
+
+ // Merge the delta from the passed object to the one in the SR.
+ existing->AddSamples(*histogram->SnapshotDelta());
+}
+
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+ const HistogramBase* histogram) {
+ DCHECK(histogram);
+
+ HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+ if (!existing) {
+ // The above should never fail but if it does, no real harm is done.
+ // Some metric data will be lost but that is better than crashing.
+ NOTREACHED();
+ return;
+ }
+
+ // Merge the delta from the passed object to the one in the SR.
+ existing->AddSamples(*histogram->SnapshotFinalDelta());
+}
+
+PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
+ uint64_t id,
+ const void* user) {
+ return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+}
+
void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
memory_allocator_->CreateTrackingHistograms(name);
}
@@ -317,15 +499,6 @@ PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
return histogram_pointer;
}
-// static
-void PersistentHistogramAllocator::RecordCreateHistogramResult(
- CreateHistogramResultType result) {
- HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
- if (result_histogram)
- result_histogram->Add(result);
-}
-
-// static
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData* histogram_data_ptr) {
if (!histogram_data_ptr) {
@@ -450,151 +623,42 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
return histogram;
}
-std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
- Reference ref) {
- // Unfortunately, the histogram "pickle" methods cannot be used as part of
- // the persistance because the deserialization methods always create local
- // count data (while these must reference the persistent counts) and always
- // add it to the local list of known histograms (while these may be simple
- // references to histograms in other processes).
- PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(
- ref, kTypeIdHistogram);
- size_t length = memory_allocator_->GetAllocSize(ref);
- if (!histogram_data ||
- reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
- NOTREACHED();
+HistogramBase*
+PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
+ const HistogramBase* histogram) {
+ // This should never be called on the global histogram allocator as objects
+ // created there are already within the global statistics recorder.
+ DCHECK_NE(g_allocator, this);
+ DCHECK(histogram);
+
+ HistogramBase* existing =
+ StatisticsRecorder::FindHistogram(histogram->histogram_name());
+ if (existing)
+ return existing;
+
+ // Adding the passed histogram to the SR would cause a problem if the
+ // allocator that holds it eventually goes away. Instead, create a new
+ // one from a serialized version.
+ base::Pickle pickle;
+ if (!histogram->SerializeInfo(&pickle))
return nullptr;
- }
- return CreateHistogram(histogram_data);
-}
-
-void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
- bool registered) {
- // If the created persistent histogram was registered then it needs to
- // be marked as "iterable" in order to be found by other processes.
- if (registered)
- memory_allocator_->MakeIterable(ref);
- // If it wasn't registered then a race condition must have caused
- // two to be created. The allocator does not support releasing the
- // acquired memory so just change the type to be empty.
- else
- memory_allocator_->SetType(ref, 0);
-}
-
-PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
- uint64_t id,
- const void* user) {
- return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
-}
-
-std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
- HistogramType histogram_type,
- const std::string& name,
- int minimum,
- int maximum,
- const BucketRanges* bucket_ranges,
- int32_t flags,
- Reference* ref_ptr) {
- // If the allocator is corrupt, don't waste time trying anything else.
- // This also allows differentiating on the dashboard between allocations
- // failed due to a corrupt allocator and the number of process instances
- // with one, the latter being idicated by "newly corrupt", below.
- if (memory_allocator_->IsCorrupt()) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+ PickleIterator iter(pickle);
+ existing = DeserializeHistogramInfo(&iter);
+ if (!existing)
return nullptr;
- }
-
- // Create the metadata necessary for a persistent sparse histogram. This
- // is done first because it is a small subset of what is required for
- // other histograms.
- PersistentMemoryAllocator::Reference histogram_ref =
- memory_allocator_->Allocate(
- offsetof(PersistentHistogramData, name) + name.length() + 1,
- kTypeIdHistogram);
- PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
- kTypeIdHistogram);
- if (histogram_data) {
- memcpy(histogram_data->name, name.c_str(), name.size() + 1);
- histogram_data->histogram_type = histogram_type;
- histogram_data->flags = flags | HistogramBase::kIsPersistent;
- }
-
- // Create the remaining metadata necessary for regular histograms.
- if (histogram_type != SPARSE_HISTOGRAM) {
- size_t bucket_count = bucket_ranges->bucket_count();
- size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
- if (counts_bytes == 0) {
- // |bucket_count| was out-of-range.
- NOTREACHED();
- return nullptr;
- }
-
- size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
- PersistentMemoryAllocator::Reference counts_ref =
- memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
- PersistentMemoryAllocator::Reference ranges_ref =
- memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
- HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(
- ranges_ref, kTypeIdRangesArray);
-
- // Only continue here if all allocations were successful. If they weren't,
- // there is no way to free the space but that's not really a problem since
- // the allocations only fail because the space is full or corrupt and so
- // any future attempts will also fail.
- if (counts_ref && ranges_data && histogram_data) {
- for (size_t i = 0; i < bucket_ranges->size(); ++i)
- ranges_data[i] = bucket_ranges->range(i);
-
- histogram_data->minimum = minimum;
- histogram_data->maximum = maximum;
- // |bucket_count| must fit within 32-bits or the allocation of the counts
- // array would have failed for being too large; the allocator supports
- // less than 4GB total size.
- histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
- histogram_data->ranges_ref = ranges_ref;
- histogram_data->ranges_checksum = bucket_ranges->checksum();
- histogram_data->counts_ref = counts_ref;
- } else {
- histogram_data = nullptr; // Clear this for proper handling below.
- }
- }
- if (histogram_data) {
- // Create the histogram using resources in persistent memory. This ends up
- // resolving the "ref" values stored in histogram_data instad of just
- // using what is already known above but avoids duplicating the switch
- // statement here and serves as a double-check that everything is
- // correct before commiting the new histogram to persistent space.
- std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
- DCHECK(histogram);
- if (ref_ptr != nullptr)
- *ref_ptr = histogram_ref;
-
- // By storing the reference within the allocator to this histogram, the
- // next import (which will happen before the next histogram creation)
- // will know to skip it.
- // See also the comment in ImportHistogramsToStatisticsRecorder().
- subtle::NoBarrier_Store(&last_created_, histogram_ref);
- return histogram;
- }
-
- CreateHistogramResultType result;
- if (memory_allocator_->IsCorrupt()) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
- result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
- } else if (memory_allocator_->IsFull()) {
- result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
- } else {
- result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
- }
- RecordCreateHistogramResult(result);
- NOTREACHED() << "error=" << result;
+ // Make sure there is no "serialization" flag set.
+ DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+ // Record the newly created histogram in the SR.
+ return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
+}
- return nullptr;
+// static
+void PersistentHistogramAllocator::RecordCreateHistogramResult(
+ CreateHistogramResultType result) {
+ HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
+ if (result_histogram)
+ result_histogram->Add(result);
}
GlobalHistogramAllocator::~GlobalHistogramAllocator() {}
@@ -620,6 +684,37 @@ void GlobalHistogramAllocator::CreateWithLocalMemory(
WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
}
+#if !defined(OS_NACL)
+// static
+void GlobalHistogramAllocator::CreateWithFile(
+ const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name) {
+ bool exists = PathExists(file_path);
+ File file(
+ file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
+ File::FLAG_READ | File::FLAG_WRITE);
+
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ if (exists) {
+ mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
+ } else {
+ mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
+ MemoryMappedFile::READ_WRITE_EXTEND);
+ }
+ if (!mmfile->IsValid() ||
+ !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+ NOTREACHED();
+ return;
+ }
+
+ Set(WrapUnique(new GlobalHistogramAllocator(
+ WrapUnique(new FilePersistentMemoryAllocator(
+ std::move(mmfile), size, id, name, false)))));
+}
+#endif
+
// static
void GlobalHistogramAllocator::CreateWithSharedMemory(
std::unique_ptr<SharedMemory> memory,
@@ -713,6 +808,10 @@ void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
persistent_location_ = location;
}
+const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
+ return persistent_location_;
+}
+
bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
// NACL doesn't support file operations, including ImportantFileWriter.
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 8df45f2813..ee1fba5f62 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -19,6 +19,7 @@
namespace base {
+class FilePath;
class PersistentSampleMapRecords;
class PersistentSparseHistogramDataManager;
@@ -248,6 +249,19 @@ class BASE_EXPORT PersistentHistogramAllocator {
// True, forgetting it otherwise.
void FinalizeHistogram(Reference ref, bool registered);
+ // Merges the data in a persistent histogram with one held globally by the
+ // StatisticsRecorder, updating the "logged" samples within the passed
+ // object so that repeated merges are allowed. Don't call this on a "global"
+ // allocator because histograms created there will already be in the SR.
+ void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
+
+ // As above but merge the "final" delta. No update of "logged" samples is
+ // done which means it can operate on read-only objects. It's essential,
+ // however, not to call this more than once or those final samples will
+ // get recorded again.
+ void MergeHistogramFinalDeltaToStatisticsRecorder(
+ const HistogramBase* histogram);
+
// Returns the object that manages the persistent-sample-map records for a
// given |id|. Only one |user| of this data is allowed at a time. This does
// an automatic Acquire() on the records. The user must call Release() on
@@ -332,6 +346,12 @@ class BASE_EXPORT PersistentHistogramAllocator {
std::unique_ptr<HistogramBase> CreateHistogram(
PersistentHistogramData* histogram_data_ptr);
+ // Gets or creates an object in the global StatisticsRecorder matching
+ // the |histogram| passed. Null is returned if one was not found and
+ // one could not be created.
+ HistogramBase* GetOrCreateStatisticsRecorderHistogram(
+ const HistogramBase* histogram);
+
// Record the result of a histogram creation.
static void RecordCreateHistogramResult(CreateHistogramResultType result);
@@ -370,6 +390,17 @@ class BASE_EXPORT GlobalHistogramAllocator
// specified |size| taken from the heap.
static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+#if !defined(OS_NACL)
+ // Create a global allocator by memory-mapping a |file|. If the file does
+ // not exist, it will be created with the specified |size|. If the file does
+ // exist, the allocator will use and add to its contents, ignoring the passed
+ // size in favor of the existing size.
+ static void CreateWithFile(const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name);
+#endif
+
// Create a global allocator using a block of shared |memory| of the
// specified |size|. The allocator takes ownership of the shared memory
// and releases it upon destruction, though the memory will continue to
@@ -408,6 +439,10 @@ class BASE_EXPORT GlobalHistogramAllocator
// in order to persist the data for a later use.
void SetPersistentLocation(const FilePath& location);
+ // Retrieves a previously set pathname to which the contents of this allocator
+ // are to be saved.
+ const FilePath& GetPersistentLocation() const;
+
// Writes the internal data to a previously set location. This is generally
// called when a process is exiting from a section of code that may not know
// the filesystem. The data is written in an atomic manner. The return value
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index 24a0753fe4..b680662250 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -4,11 +4,13 @@
#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/statistics_recorder.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -17,7 +19,10 @@ class PersistentHistogramAllocatorTest : public testing::Test {
protected:
const int32_t kAllocatorMemorySize = 64 << 10; // 64 KiB
- PersistentHistogramAllocatorTest() { CreatePersistentHistogramAllocator(); }
+ PersistentHistogramAllocatorTest()
+ : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
+ CreatePersistentHistogramAllocator();
+ }
~PersistentHistogramAllocatorTest() override {
DestroyPersistentHistogramAllocator();
}
@@ -39,6 +44,7 @@ class PersistentHistogramAllocatorTest : public testing::Test {
GlobalHistogramAllocator::ReleaseForTesting();
}
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
std::unique_ptr<char[]> allocator_memory_;
PersistentMemoryAllocator* allocator_ = nullptr;
@@ -121,4 +127,83 @@ TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
EXPECT_FALSE(recovered);
}
+TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
+ const char temp_name[] = "CreateWithFileTest";
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
+ const size_t temp_size = 64 << 10; // 64 KiB
+
+ // Test creation of a new file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+ // Test re-open of a possibly-existing file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+ // Test re-open of an known-existing file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+ // Final release so file and temp-dir can be removed.
+ GlobalHistogramAllocator::ReleaseForTesting();
+}
+
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
+ size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+
+ // Create a local StatisticsRecorder in which the newly created histogram
+ // will be recorded.
+ std::unique_ptr<StatisticsRecorder> local_sr =
+ StatisticsRecorder::CreateTemporaryForTesting();
+ EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
+ EXPECT_TRUE(histogram);
+ EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+ histogram->Add(3);
+ histogram->Add(1);
+ histogram->Add(4);
+ histogram->Add(1);
+ histogram->Add(6);
+
+ // Destroy the local SR and ensure that we're back to the initial state.
+ local_sr.reset();
+ EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+
+ // Create a second allocator and have it access the memory of the first.
+ std::unique_ptr<HistogramBase> recovered;
+ PersistentHistogramAllocator recovery(
+ WrapUnique(new PersistentMemoryAllocator(
+ allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+ PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+ recovered = histogram_iter.GetNext();
+ ASSERT_TRUE(recovered);
+
+ // Merge the recovered histogram to the SR. It will always be a new object.
+ recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+ EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
+ HistogramBase* found =
+ StatisticsRecorder::FindHistogram(recovered->histogram_name());
+ ASSERT_TRUE(found);
+ EXPECT_NE(recovered.get(), found);
+
+ // Ensure that the data got merged, too.
+ std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+ EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+ EXPECT_EQ(2, snapshot->GetCount(1));
+ EXPECT_EQ(1, snapshot->GetCount(4));
+ EXPECT_EQ(1, snapshot->GetCount(6));
+}
+
} // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index bc873fefa0..dfa408f44d 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -7,6 +7,12 @@
#include <assert.h>
#include <algorithm>
+#if defined(OS_WIN)
+#include "winbase.h"
+#elif defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
@@ -14,10 +20,8 @@
namespace {
-// Required range of memory segment sizes. It has to fit in an unsigned 32-bit
-// number and should be a power of 2 in order to accomodate almost any page
-// size.
-const uint32_t kSegmentMinSize = 1 << 10; // 1 KiB
+// Limit of memory segment size. It has to fit in an unsigned 32-bit number
+// and should be a power of 2 in order to accomodate almost any page size.
const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB
// A constant (random) value placed in the shared metadata to identify
@@ -80,8 +84,8 @@ const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
struct PersistentMemoryAllocator::BlockHeader {
uint32_t size; // Number of bytes in this block, including header.
uint32_t cookie; // Constant value indicating completed allocation.
- uint32_t type_id; // A number provided by caller indicating data type.
- std::atomic<uint32_t> next; // Pointer to the next block when iterating.
+ std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
+ std::atomic<uint32_t> next; // Pointer to the next block when iterating.
};
// The shared metadata exists once at the top of the memory segment to
@@ -190,7 +194,7 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// "strong" compare-exchange is used because failing unnecessarily would
// mean repeating some fairly costly validations above.
if (last_record_.compare_exchange_strong(last, next)) {
- *type_return = block->type_id;
+ *type_return = block->type_id.load(std::memory_order_relaxed);
break;
}
}
@@ -239,7 +243,6 @@ bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
bool readonly) {
return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
(size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
- (size >= kSegmentMinSize || readonly) &&
(size % kAllocAlignment == 0 || readonly) &&
(page_size == 0 || size % page_size == 0 || readonly));
}
@@ -298,10 +301,9 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
first_block->size != 0 ||
first_block->cookie != 0 ||
- first_block->type_id != 0 ||
+ first_block->type_id.load(std::memory_order_relaxed) != 0 ||
first_block->next != 0) {
// ...or something malicious has been playing with the metadata.
- NOTREACHED();
SetCorrupt();
}
@@ -339,12 +341,22 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
}
if (!readonly) {
// The allocator is attaching to a previously initialized segment of
- // memory. Make sure the embedded data matches what has been passed.
- if (shared_meta()->size != mem_size_ ||
- shared_meta()->page_size != mem_page_) {
- NOTREACHED();
+ // memory. If the initialization parameters differ, make the best of it
+ // by reducing the local construction parameters to match those of
+ // the actual memory area. This ensures that the local object never
+ // tries to write outside of the original bounds.
+ // Because the fields are const to ensure that no code other than the
+ // constructor makes changes to them as well as to give optimization
+ // hints to the compiler, it's necessary to const-cast them for changes
+ // here.
+ if (shared_meta()->size < mem_size_)
+ *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
+ if (shared_meta()->page_size < mem_page_)
+ *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
+
+ // Ensure that settings are still valid after the above adjustments.
+ if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
SetCorrupt();
- }
}
}
}
@@ -416,15 +428,20 @@ uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
return 0;
- return block->type_id;
+ return block->type_id.load(std::memory_order_relaxed);
}
-void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) {
+bool PersistentMemoryAllocator::ChangeType(Reference ref,
+ uint32_t to_type_id,
+ uint32_t from_type_id) {
DCHECK(!readonly_);
volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
- return;
- block->type_id = type_id;
+ return false;
+
+ // This is a "strong" exchange because there is no loop that can retry in
+ // the wake of spurious failures possible with "weak" exchanges.
+ return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
}
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
@@ -538,7 +555,7 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
// writing beyond the allocated space and into unallocated space.
if (block->size != 0 ||
block->cookie != kBlockCookieFree ||
- block->type_id != 0 ||
+ block->type_id.load(std::memory_order_relaxed) != 0 ||
block->next.load(std::memory_order_relaxed) != 0) {
SetCorrupt();
return kReferenceNull;
@@ -546,7 +563,7 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
block->size = size;
block->cookie = kBlockCookieAllocated;
- block->type_id = type_id;
+ block->type_id.store(type_id, std::memory_order_relaxed);
return freeptr;
}
}
@@ -678,8 +695,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return nullptr;
if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
return nullptr;
- if (type_id != 0 && block->type_id != type_id)
+ if (type_id != 0 &&
+ block->type_id.load(std::memory_order_relaxed) != type_id) {
return nullptr;
+ }
}
// Return pointer to block data.
@@ -716,11 +735,44 @@ LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
size_t size,
uint64_t id,
base::StringPiece name)
- : PersistentMemoryAllocator(memset(new char[size], 0, size),
+ : PersistentMemoryAllocator(AllocateLocalMemory(size),
size, 0, id, name, false) {}
LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
- delete [] mem_base_;
+ DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
+}
+
+// static
+void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+#if defined(OS_WIN)
+ void* address =
+ ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ DPCHECK(address);
+ return address;
+#elif defined(OS_POSIX)
+ // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+ // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+ void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_SHARED, -1, 0);
+ DPCHECK(MAP_FAILED != address);
+ return address;
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+// static
+void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
+ size_t size) {
+#if defined(OS_WIN)
+ BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
+ DPCHECK(success);
+#elif defined(OS_POSIX)
+ int result = ::munmap(memory, size);
+ DPCHECK(0 == result);
+#else
+#error This architecture is not (yet) supported.
+#endif
}
@@ -744,30 +796,35 @@ SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
// static
bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
const SharedMemory& memory) {
- return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, true);
+ return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
}
+#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------
FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
std::unique_ptr<MemoryMappedFile> file,
+ size_t max_size,
uint64_t id,
- base::StringPiece name)
+ base::StringPiece name,
+ bool read_only)
: PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
- file->length(),
+ max_size != 0 ? max_size : file->length(),
0,
id,
name,
- true),
+ read_only),
mapped_file_(std::move(file)) {}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
// static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
- const MemoryMappedFile& file) {
- return IsMemoryAcceptable(file.data(), file.length(), 0, true);
+ const MemoryMappedFile& file,
+ bool read_only) {
+ return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}
+#endif // !defined(OS_NACL)
} // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index 56edd2ca24..2fc0d2d0da 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -241,9 +241,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
// Access the internal "type" of an object. This generally isn't necessary
// but can be used to "clear" the type and so effectively mark it as deleted
- // even though the memory stays valid and allocated.
+ // even though the memory stays valid and allocated. Changing the type is
+ // an atomic compare/exchange and so requires knowing the existing value.
+ // It will return false if the existing type is not what is expected.
uint32_t GetType(Reference ref) const;
- void SetType(Reference ref, uint32_t type_id);
+ bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
// Reserve space in the memory segment of the desired |size| and |type_id|.
// A return value of zero indicates the allocation failed, otherwise the
@@ -354,6 +356,14 @@ class BASE_EXPORT LocalPersistentMemoryAllocator
~LocalPersistentMemoryAllocator() override;
private:
+ // Allocates a block of local memory of the specified |size|, ensuring that
+ // the memory will not be physically allocated until accessed and will read
+ // as zero when that happens.
+ static void* AllocateLocalMemory(size_t size);
+
+ // Deallocates a block of local |memory| of the specified |size|.
+ static void DeallocateLocalMemory(void* memory, size_t size);
+
DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
@@ -385,28 +395,34 @@ class BASE_EXPORT SharedPersistentMemoryAllocator
};
+#if !defined(OS_NACL) // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
-// from it. The allocator takes ownership of the file object. Only read access
-// is provided due to limitions of the MemoryMappedFile class.
+// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
+ // A |max_size| of zero will use the length of the file as the maximum
+ // size. The |file| object must have been already created with sufficient
+ // permissions (read, read/write, or read/write/extend).
FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+ size_t max_size,
uint64_t id,
- base::StringPiece name);
+ base::StringPiece name,
+ bool read_only);
~FilePersistentMemoryAllocator() override;
// Ensure that the file isn't so invalid that it won't crash when passing it
// to the allocator. This doesn't guarantee the file is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
- static bool IsFileAcceptable(const MemoryMappedFile& file);
+ static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
+#endif // !defined(OS_NACL)
} // namespace base
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index 8a664483c6..a0fd5f8ebe 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -53,7 +53,7 @@ class PersistentMemoryAllocatorTest : public testing::Test {
};
PersistentMemoryAllocatorTest() {
- kAllocAlignment = PersistentMemoryAllocator::kAllocAlignment;
+ kAllocAlignment = GetAllocAlignment();
mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
}
@@ -80,6 +80,10 @@ class PersistentMemoryAllocatorTest : public testing::Test {
return count;
}
+ static uint32_t GetAllocAlignment() {
+ return PersistentMemoryAllocator::kAllocAlignment;
+ }
+
protected:
std::unique_ptr<char[]> mem_segment_;
std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -177,9 +181,9 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Check that an objcet's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2));
- allocator_->SetType(block2, 3);
+ allocator_->ChangeType(block2, 3, 2);
EXPECT_EQ(3U, allocator_->GetType(block2));
- allocator_->SetType(block2, 2);
+ allocator_->ChangeType(block2, 2, 3);
EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment.
@@ -506,7 +510,8 @@ TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
//----- SharedPersistentMemoryAllocator ----------------------------------------
TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
- SharedMemoryHandle shared_handle;
+ SharedMemoryHandle shared_handle_1;
+ SharedMemoryHandle shared_handle_2;
PersistentMemoryAllocator::MemoryInfo meminfo1;
Reference r123, r456, r789;
@@ -520,19 +525,20 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.SetType(r456, 654);
+ local.ChangeType(r456, 654, 456);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
EXPECT_FALSE(local.IsCorrupt());
- ASSERT_TRUE(local.shared_memory()->ShareToProcess(
- GetCurrentProcessHandle(),
- &shared_handle));
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_1));
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_2));
}
// Read-only test.
- std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle,
+ std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
/*readonly=*/true));
ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
@@ -558,7 +564,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
EXPECT_EQ(meminfo1.free, meminfo2.free);
// Read/write test.
- std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle,
+ std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
/*readonly=*/false));
ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
@@ -591,6 +597,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
}
+#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------
TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
@@ -607,7 +614,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.SetType(r456, 654);
+ local.ChangeType(r456, 654, 456);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
@@ -624,7 +631,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
const size_t mmlength = mmfile->length();
EXPECT_GE(meminfo1.total, mmlength);
- FilePersistentMemoryAllocator file(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
EXPECT_TRUE(file.IsReadonly());
EXPECT_EQ(TEST_ID, file.Id());
EXPECT_FALSE(file.IsFull());
@@ -648,10 +655,63 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
EXPECT_EQ(0U, meminfo2.free);
}
+TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.path().AppendASCII("extend_test");
+ MemoryMappedFile::Region region = {0, 16 << 10}; // 16KiB maximum size.
+
+ // Start with a small but valid file of persistent data.
+ ASSERT_FALSE(PathExists(file_path));
+ {
+ LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+ local.Allocate(1, 1);
+ local.Allocate(11, 11);
+
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)local.data(), local.used());
+ }
+ ASSERT_TRUE(PathExists(file_path));
+ int64_t before_size;
+ ASSERT_TRUE(GetFileSize(file_path, &before_size));
+
+ // Map it as an extendable read/write file and append to it.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
+ "", false);
+ EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
+
+ allocator.Allocate(111, 111);
+ EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
+ }
+
+ // Validate that append worked.
+ int64_t after_size;
+ ASSERT_TRUE(GetFileSize(file_path, &after_size));
+ EXPECT_LT(before_size, after_size);
+
+ // Verify that it's still an acceptable file.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
+ EXPECT_TRUE(
+ FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
+ }
+}
+
TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+ const uint32_t kAllocAlignment =
+ PersistentMemoryAllocatorTest::GetAllocAlignment();
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path_base = temp_dir.path().AppendASCII("persistent_memory_");
LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
local.MakeIterable(local.Allocate(1, 1));
@@ -673,13 +733,23 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
}
ASSERT_TRUE(PathExists(file_path));
+ // Request read/write access for some sizes that are a multiple of the
+ // allocator's alignment size. The allocator is strict about file size
+ // being a multiple of its internal alignment when doing read/write access.
+ const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
+ const uint32_t file_flags =
+ File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
+ const MemoryMappedFile::Access map_access =
+ read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
+
mmfile.reset(new MemoryMappedFile());
- mmfile->Initialize(file_path);
+ mmfile->Initialize(File(file_path, file_flags), map_access);
EXPECT_EQ(filesize, mmfile->length());
- if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
// Make sure construction doesn't crash. It will, however, cause
// error messages warning about about a corrupted memory segment.
- FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
// Also make sure that iteration doesn't crash.
PersistentMemoryAllocator::Iterator iter(&allocator);
uint32_t type_id;
@@ -693,6 +763,7 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
(void)type;
(void)size;
}
+
// Ensure that short files are detected as corrupt and full files are not.
EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
} else {
@@ -713,12 +784,13 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
ASSERT_TRUE(PathExists(file_path));
mmfile.reset(new MemoryMappedFile());
- mmfile->Initialize(file_path);
+ mmfile->Initialize(File(file_path, file_flags), map_access);
EXPECT_EQ(filesize, mmfile->length());
- if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
// Make sure construction doesn't crash. It will, however, cause
// error messages warning about about a corrupted memory segment.
- FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
EXPECT_TRUE(allocator.IsCorrupt()); // Garbage data so it should be.
} else {
// For filesize >= minsize, the file must be acceptable. This
@@ -728,5 +800,6 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
}
}
}
+#endif // !defined(OS_NACL)
} // namespace base
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 4b7085a0e0..3c1222d2ae 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -208,7 +208,6 @@ HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
return NULL;
}
- DCHECK(flags & HistogramBase::kIPCSerializationSourceFlag);
flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return SparseHistogram::FactoryGet(histogram_name, flags);
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index fbff977522..eab7790276 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -48,7 +48,7 @@ class SparseHistogramTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index cad3fd078e..42ed5a9545 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -10,6 +10,7 @@
#include "base/debug/leak_annotations.h"
#include "base/json/string_escape.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
@@ -287,7 +288,7 @@ void StatisticsRecorder::GetBucketRanges(
return;
for (const auto& entry : *ranges_) {
- for (const auto& range_entry : *entry.second) {
+ for (auto* range_entry : *entry.second) {
output->push_back(range_entry);
}
}
@@ -337,6 +338,14 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
}
// static
+void StatisticsRecorder::InitLogOnShutdown() {
+ if (lock_ == nullptr)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
+}
+
+// static
void StatisticsRecorder::GetSnapshot(const std::string& query,
Histograms* snapshot) {
if (lock_ == NULL)
@@ -421,6 +430,12 @@ void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
}
// static
+std::unique_ptr<StatisticsRecorder>
+StatisticsRecorder::CreateTemporaryForTesting() {
+ return WrapUnique(new StatisticsRecorder());
+}
+
+// static
void StatisticsRecorder::UninitializeForTesting() {
// Stop now if it's never been initialized.
if (lock_ == NULL || histograms_ == NULL)
@@ -475,8 +490,14 @@ StatisticsRecorder::StatisticsRecorder() {
callbacks_ = new CallbackMap;
ranges_ = new RangesMap;
- if (VLOG_IS_ON(1))
+ InitLogOnShutdownWithoutLock();
+}
+
+void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
+ if (!vlog_initialized_ && VLOG_IS_ON(1)) {
+ vlog_initialized_ = true;
AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
+ }
}
// static
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index 6c436c292e..c3c6aceffd 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -26,6 +26,8 @@
#include "base/metrics/histogram_base.h"
#include "base/strings/string_piece.h"
+class SubprocessMetricsProviderTest;
+
namespace base {
class BucketRanges;
@@ -165,12 +167,26 @@ class BASE_EXPORT StatisticsRecorder {
// Returns the number of known histograms.
static size_t GetHistogramCount();
+ // Initializes logging histograms with --v=1. Safe to call multiple times.
+ // Is called from ctor but for browser it seems that it is more useful to
+ // start logging after statistics recorder, so we need to init log-on-shutdown
+ // later.
+ static void InitLogOnShutdown();
+
// Removes a histogram from the internal set of known ones. This can be
// necessary during testing persistent histograms where the underlying
// memory is being released.
static void ForgetHistogramForTesting(base::StringPiece name);
- // Reset any global instance of the statistics-recorder that was created
+ // Creates a local StatisticsRecorder object for testing purposes. All new
+ // histograms will be registered in it until it is destructed or pushed
+ // aside for the lifetime of yet another SR object. The destruction of the
+ // returned object will re-activate the previous one. Always release SR
+ // objects in the opposite order to which they're created.
+ static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
+ WARN_UNUSED_RESULT;
+
+ // Resets any global instance of the statistics-recorder that was created
// by a call to Initialize().
static void UninitializeForTesting();
@@ -185,15 +201,7 @@ class BASE_EXPORT StatisticsRecorder {
typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
- friend class HistogramBaseTest;
- friend class HistogramSnapshotManagerTest;
- friend class HistogramTest;
- friend class JsonPrefStoreTest;
- friend class SharedHistogramTest;
- friend class SparseHistogramTest;
friend class StatisticsRecorderTest;
- FRIEND_TEST_ALL_PREFIXES(HistogramDeltaSerializationTest,
- DeserializeHistogramAndAddSamples);
// Imports histograms from global persistent memory. The global lock must
// not be held during this call.
@@ -204,6 +212,10 @@ class BASE_EXPORT StatisticsRecorder {
// call the constructor to get a clean StatisticsRecorder.
StatisticsRecorder();
+ // Initialize implementation but without lock. Caller should guard
+ // StatisticsRecorder by itself if needed (it isn't in unit tests).
+ void InitLogOnShutdownWithoutLock();
+
// These are copies of everything that existed when the (test) Statistics-
// Recorder was created. The global ones have to be moved aside to create a
// clean environment.
@@ -211,6 +223,8 @@ class BASE_EXPORT StatisticsRecorder {
std::unique_ptr<CallbackMap> existing_callbacks_;
std::unique_ptr<RangesMap> existing_ranges_;
+ bool vlog_initialized_ = false;
+
static void Reset();
static void DumpHistogramsToVlog(void* instance);
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 813fbd13f5..65e2c98f52 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -11,12 +11,34 @@
#include "base/bind.h"
#include "base/json/json_reader.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/sparse_histogram.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
+namespace {
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+ LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
+
+ ~LogStateSaver() {
+ logging::SetMinLogLevel(old_min_log_level_);
+ logging::SetLogAssertHandler(nullptr);
+ }
+
+ private:
+ int old_min_log_level_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+} // namespace
+
namespace base {
class StatisticsRecorderTest : public testing::TestWithParam<bool> {
@@ -47,7 +69,7 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
StatisticsRecorder::UninitializeForTesting();
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
@@ -78,12 +100,24 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
return count;
}
+ void InitLogOnShutdown() {
+ DCHECK(statistics_recorder_);
+ statistics_recorder_->InitLogOnShutdownWithoutLock();
+ }
+
+ bool VLogInitialized() {
+ DCHECK(statistics_recorder_);
+ return statistics_recorder_->vlog_initialized_;
+ }
+
const bool use_persistent_histogram_allocator_;
std::unique_ptr<StatisticsRecorder> statistics_recorder_;
std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
private:
+ LogStateSaver log_state_saver_;
+
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
};
@@ -592,4 +626,34 @@ TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
}
+TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
+ UninitializeStatisticsRecorder();
+ logging::SetMinLogLevel(logging::LOG_WARNING);
+ InitializeStatisticsRecorder();
+ EXPECT_FALSE(VLOG_IS_ON(1));
+ EXPECT_FALSE(VLogInitialized());
+ InitLogOnShutdown();
+ EXPECT_FALSE(VLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitializedExplicitly) {
+ UninitializeStatisticsRecorder();
+ logging::SetMinLogLevel(logging::LOG_WARNING);
+ InitializeStatisticsRecorder();
+ EXPECT_FALSE(VLOG_IS_ON(1));
+ EXPECT_FALSE(VLogInitialized());
+ logging::SetMinLogLevel(logging::LOG_VERBOSE);
+ EXPECT_TRUE(VLOG_IS_ON(1));
+ InitLogOnShutdown();
+ EXPECT_TRUE(VLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
+ UninitializeStatisticsRecorder();
+ logging::SetMinLogLevel(logging::LOG_VERBOSE);
+ InitializeStatisticsRecorder();
+ EXPECT_TRUE(VLOG_IS_ON(1));
+ EXPECT_TRUE(VLogInitialized());
+}
+
} // namespace base
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
index c80bac038d..93701e8fd2 100644
--- a/base/metrics/user_metrics.h
+++ b/base/metrics/user_metrics.h
@@ -26,9 +26,9 @@ namespace base {
// not good: "SSLDialogShown", "PageLoaded", "DiskFull"
// We use this to gather anonymized information about how users are
// interacting with the browser.
-// WARNING: In calls to this function, UserMetricsAction and a
-// string literal parameter must be on the same line, e.g.
-// RecordAction(UserMetricsAction("my extremely long action name"));
+// WARNING: In calls to this function, UserMetricsAction should be followed by a
+// string literal parameter and not a variable e.g.
+// RecordAction(UserMetricsAction("my action name"));
// This ensures that our processing scripts can associate this action's hash
// with its metric name. Therefore, it will be possible to retrieve the metric
// name from the hash later on.
diff --git a/base/metrics/user_metrics_action.h b/base/metrics/user_metrics_action.h
index 8c195b3e80..3eca3ddb8b 100644
--- a/base/metrics/user_metrics_action.h
+++ b/base/metrics/user_metrics_action.h
@@ -10,13 +10,12 @@ namespace base {
// UserMetricsAction exists purely to standardize on the parameters passed to
// UserMetrics. That way, our toolset can scan the source code reliable for
// constructors and extract the associated string constants.
-// WARNING: When using UserMetricsAction, UserMetricsAction and a string literal
-// parameter must be on the same line, e.g.
-// RecordAction(UserMetricsAction("my extremely long action name"));
-// or
-// RenderThread::Get()->RecordAction(
-// UserMetricsAction("my extremely long action name"));
-// because otherwise our processing scripts won't pick up on new actions.
+// WARNING: When using UserMetricsAction you should use a string literal
+// parameter e.g.
+// RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
// Please see tools/metrics/actions/extract_actions.py for details.
struct UserMetricsAction {
const char* str_;
diff --git a/base/move.h b/base/move.h
deleted file mode 100644
index 42242b420e..0000000000
--- a/base/move.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MOVE_H_
-#define BASE_MOVE_H_
-
-// TODO(dcheng): Remove this header.
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "build/build_config.h"
-
-// TODO(crbug.com/566182): DEPRECATED!
-// Use DISALLOW_COPY_AND_ASSIGN instead, or if your type will be used in
-// Callbacks, use DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND instead.
-#define MOVE_ONLY_TYPE_FOR_CPP_03(type) \
- DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)
-
-// A macro to disallow the copy constructor and copy assignment functions.
-// This should be used in the private: declarations for a class.
-//
-// Use this macro instead of DISALLOW_COPY_AND_ASSIGN if you want to pass
-// ownership of the type through a base::Callback without heap-allocating it
-// into a scoped_ptr. The class must define a move constructor and move
-// assignment operator to make this work.
-//
-// This version of the macro adds a cryptic MoveOnlyTypeForCPP03 typedef for the
-// base::Callback implementation to use. See IsMoveOnlyType template and its
-// usage in base/callback_internal.h for more details.
-// TODO(crbug.com/566182): Remove this macro and use DISALLOW_COPY_AND_ASSIGN
-// everywhere instead.
-#define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type) \
- private: \
- type(const type&) = delete; \
- void operator=(const type&) = delete; \
- \
- public: \
- typedef void MoveOnlyTypeForCPP03; \
- \
- private:
-
-#endif // BASE_MOVE_H_
diff --git a/base/native_library.h b/base/native_library.h
index 1e764da89a..b4f3a3cd1b 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -11,8 +11,7 @@
#include <string>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -26,7 +25,7 @@ namespace base {
class FilePath;
#if defined(OS_WIN)
-typedef HMODULE NativeLibrary;
+using NativeLibrary = HMODULE;
#elif defined(OS_MACOSX)
enum NativeLibraryType {
BUNDLE,
@@ -46,9 +45,9 @@ struct NativeLibraryStruct {
void* dylib;
};
};
-typedef NativeLibraryStruct* NativeLibrary;
+using NativeLibrary = NativeLibraryStruct*;
#elif defined(OS_POSIX)
-typedef void* NativeLibrary;
+using NativeLibrary = void*;
#endif // OS_*
struct BASE_EXPORT NativeLibraryLoadError {
@@ -87,13 +86,14 @@ BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
// Gets a function pointer from a native library.
BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name);
+ StringPiece name);
// Returns the full platform specific name for a native library.
+// |name| must be ASCII.
// For example:
// "mylib" returns "mylib.dll" on Windows, "libmylib.so" on Linux,
-// "mylib.dylib" on Mac.
-BASE_EXPORT string16 GetNativeLibraryName(const string16& name);
+// "libmylib.dylib" on Mac.
+BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
} // namespace base
diff --git a/base/native_library_posix.cc b/base/native_library_posix.cc
index 3179a93833..2dc434b7be 100644
--- a/base/native_library_posix.cc
+++ b/base/native_library_posix.cc
@@ -8,6 +8,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
@@ -21,7 +22,7 @@ std::string NativeLibraryLoadError::ToString() const {
NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error) {
// dlopen() opens the file off disk.
- base::ThreadRestrictions::AssertIOAllowed();
+ ThreadRestrictions::AssertIOAllowed();
// We deliberately do not use RTLD_DEEPBIND. For the history why, please
// refer to the bug tracker. Some useful bug reports to read include:
@@ -45,13 +46,14 @@ void UnloadNativeLibrary(NativeLibrary library) {
// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name) {
- return dlsym(library, name);
+ StringPiece name) {
+ return dlsym(library, name.data());
}
// static
-string16 GetNativeLibraryName(const string16& name) {
- return ASCIIToUTF16("lib") + name + ASCIIToUTF16(".so");
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return "lib" + name.as_string() + ".so";
}
} // namespace base
diff --git a/base/observer_list.h b/base/observer_list.h
index 31564212e0..afe1f46cd6 100644
--- a/base/observer_list.h
+++ b/base/observer_list.h
@@ -236,8 +236,8 @@ class ObserverList : public ObserverListBase<ObserverType> {
#define FOR_EACH_OBSERVER(ObserverType, observer_list, func) \
do { \
if ((observer_list).might_have_observers()) { \
- base::ObserverListBase<ObserverType>::Iterator it_inside_observer_macro( \
- &observer_list); \
+ typename base::ObserverListBase<ObserverType>::Iterator \
+ it_inside_observer_macro(&observer_list); \
ObserverType* obs; \
while ((obs = it_inside_observer_macro.GetNext()) != nullptr) \
obs->func; \
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index 6821795705..fe783542f4 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -7,6 +7,7 @@
#include <algorithm>
#include <map>
+#include <tuple>
#include "base/bind.h"
#include "base/location.h"
@@ -177,8 +178,8 @@ class ObserverListThreadSafe
void Notify(const tracked_objects::Location& from_here,
Method m,
const Params&... params) {
- internal::UnboundMethod<ObserverType, Method, Tuple<Params...>> method(
- m, MakeTuple(params...));
+ internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
+ m, std::make_tuple(params...));
AutoLock lock(list_lock_);
for (const auto& entry : observer_lists_) {
@@ -186,8 +187,8 @@ class ObserverListThreadSafe
context->task_runner->PostTask(
from_here,
Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
- Method, Tuple<Params...>>,
- this, context, method));
+ Method, std::tuple<Params...>>,
+ this, context, method));
}
}
diff --git a/base/observer_list_unittest.cc b/base/observer_list_unittest.cc
index 2e51e45521..097a2ed28b 100644
--- a/base/observer_list_unittest.cc
+++ b/base/observer_list_unittest.cc
@@ -111,7 +111,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
loop_->task_runner()->PostTask(
FROM_HERE,
base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
- loop_->Run();
+ RunLoop().Run();
//LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
// count_observes_ << ", " << count_addtask_;
delete loop_;
diff --git a/base/optional.h b/base/optional.h
new file mode 100644
index 0000000000..b468964ae3
--- /dev/null
+++ b/base/optional.h
@@ -0,0 +1,457 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OPTIONAL_H_
+#define BASE_OPTIONAL_H_
+
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+ constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+namespace internal {
+
+template <typename T, bool = base::is_trivially_destructible<T>::value>
+struct OptionalStorage {
+ // When T is not trivially destructible we must call its
+ // destructor before deallocating its memory.
+ ~OptionalStorage() {
+ if (!is_null_)
+ buffer_.template data_as<T>()->~T();
+ }
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+template <typename T>
+struct OptionalStorage<T, true> {
+ // When T is trivially destructible (i.e. its destructor does nothing)
+ // there is no need to call it.
+ // Since |base::AlignedMemory| is just an array its destructor
+ // is trivial. Explicitly defaulting the destructor means it's not
+ // user-provided. All of this together make this destructor trivial.
+ ~OptionalStorage() = default;
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+} // namespace internal
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - The constructor and emplace method using initializer_list are not
+// implemented because 'initializer_list' is banned from Chromium.
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+template <typename T>
+class Optional {
+ public:
+ using value_type = T;
+
+ constexpr Optional() = default;
+ Optional(base::nullopt_t) : Optional() {}
+
+ Optional(const Optional& other) {
+ if (!other.storage_.is_null_)
+ Init(other.value());
+ }
+
+ Optional(Optional&& other) {
+ if (!other.storage_.is_null_)
+ Init(std::move(other.value()));
+ }
+
+ Optional(const T& value) { Init(value); }
+
+ Optional(T&& value) { Init(std::move(value)); }
+
+ template <class... Args>
+ explicit Optional(base::in_place_t, Args&&... args) {
+ emplace(std::forward<Args>(args)...);
+ }
+
+ ~Optional() = default;
+
+ Optional& operator=(base::nullopt_t) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ Optional& operator=(const Optional& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(other.value());
+ return *this;
+ }
+
+ Optional& operator=(Optional&& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(std::move(other.value()));
+ return *this;
+ }
+
+ template <class U>
+ typename std::enable_if<std::is_same<std::decay<U>, T>::value,
+ Optional&>::type
+ operator=(U&& value) {
+ InitOrAssign(std::forward<U>(value));
+ return *this;
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T* operator->() const {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
+ // meant to be 'constexpr const'.
+ T* operator->() {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ constexpr const T& operator*() const& { return value(); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
+ // meant to be 'constexpr const'.
+ T& operator*() & { return value(); }
+
+ constexpr const T&& operator*() const&& { return std::move(value()); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
+ // meant to be 'constexpr const'.
+ T&& operator*() && { return std::move(value()); }
+
+ constexpr explicit operator bool() const { return !storage_.is_null_; }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
+ // meant to be 'constexpr const'.
+ T& value() & {
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T& value() const& {
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compilers that assume it was
+ // meant to be 'constexpr const'.
+ T&& value() && {
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T&& value() const&& {
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
+ }
+
+ template <class U>
+ constexpr T value_or(U&& default_value) const& {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_copy_constructible<T>::value,
+ // "T must be copy constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : value();
+ }
+
+ template <class U>
+ T value_or(U&& default_value) && {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_move_constructible<T>::value,
+ // "T must be move constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : std::move(value());
+ }
+
+ void swap(Optional& other) {
+ if (storage_.is_null_ && other.storage_.is_null_)
+ return;
+
+ if (storage_.is_null_ != other.storage_.is_null_) {
+ if (storage_.is_null_) {
+ Init(std::move(*other.storage_.buffer_.template data_as<T>()));
+ other.FreeIfNeeded();
+ } else {
+ other.Init(std::move(*storage_.buffer_.template data_as<T>()));
+ FreeIfNeeded();
+ }
+ return;
+ }
+
+ DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
+ using std::swap;
+ swap(**this, *other);
+ }
+
+ template <class... Args>
+ void emplace(Args&&... args) {
+ FreeIfNeeded();
+ Init(std::forward<Args>(args)...);
+ }
+
+ private:
+ void Init(const T& value) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(value);
+ storage_.is_null_ = false;
+ }
+
+ void Init(T&& value) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::move(value));
+ storage_.is_null_ = false;
+ }
+
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
+ storage_.is_null_ = false;
+ }
+
+ void InitOrAssign(const T& value) {
+ if (storage_.is_null_)
+ Init(value);
+ else
+ *storage_.buffer_.template data_as<T>() = value;
+ }
+
+ void InitOrAssign(T&& value) {
+ if (storage_.is_null_)
+ Init(std::move(value));
+ else
+ *storage_.buffer_.template data_as<T>() = std::move(value);
+ }
+
+ void FreeIfNeeded() {
+ if (storage_.is_null_)
+ return;
+ storage_.buffer_.template data_as<T>()->~T();
+ storage_.is_null_ = true;
+ }
+
+ internal::OptionalStorage<T> storage_;
+};
+
+template <class T>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(rhs < lhs);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs < lhs;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs < rhs);
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, const T& value) {
+ return opt != nullopt ? *opt == value : false;
+}
+
+template <class T>
+constexpr bool operator==(const T& value, const Optional<T>& opt) {
+ return opt == value;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, const T& value) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator!=(const T& value, const Optional<T>& opt) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, const T& value) {
+ return opt != nullopt ? *opt < value : true;
+}
+
+template <class T>
+constexpr bool operator<(const T& value, const Optional<T>& opt) {
+ return opt != nullopt ? value < *opt : false;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, const T& value) {
+ return !(opt > value);
+}
+
+template <class T>
+constexpr bool operator<=(const T& value, const Optional<T>& opt) {
+ return !(value > opt);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, const T& value) {
+ return value < opt;
+}
+
+template <class T>
+constexpr bool operator>(const T& value, const Optional<T>& opt) {
+ return opt < value;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, const T& value) {
+ return !(opt < value);
+}
+
+template <class T>
+constexpr bool operator>=(const T& value, const Optional<T>& opt) {
+ return !(value < opt);
+}
+
+template <class T>
+constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
+ return Optional<typename std::decay<T>::type>(std::forward<T>(value));
+}
+
+template <class T>
+void swap(Optional<T>& lhs, Optional<T>& rhs) {
+ lhs.swap(rhs);
+}
+
+} // namespace base
+
+namespace std {
+
+template <class T>
+struct hash<base::Optional<T>> {
+ size_t operator()(const base::Optional<T>& opt) const {
+ return opt == base::nullopt ? 0 : std::hash<T>()(*opt);
+ }
+};
+
+} // namespace std
+
+#endif // BASE_OPTIONAL_H_
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
new file mode 100644
index 0000000000..d6bf263691
--- /dev/null
+++ b/base/optional_unittest.cc
@@ -0,0 +1,1301 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/optional.h"
+
+#include <set>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Object used to test complex object with Optional<T> in addition to the move
+// semantics.
+class TestObject {
+ public:
+ enum class State {
+ DEFAULT_CONSTRUCTED,
+ VALUE_CONSTRUCTED,
+ COPY_CONSTRUCTED,
+ MOVE_CONSTRUCTED,
+ MOVED_FROM,
+ COPY_ASSIGNED,
+ MOVE_ASSIGNED,
+ SWAPPED,
+ };
+
+ TestObject() : foo_(0), bar_(0.0), state_(State::DEFAULT_CONSTRUCTED) {}
+
+ TestObject(int foo, double bar)
+ : foo_(foo), bar_(bar), state_(State::VALUE_CONSTRUCTED) {}
+
+ TestObject(const TestObject& other)
+ : foo_(other.foo_), bar_(other.bar_), state_(State::COPY_CONSTRUCTED) {}
+
+ TestObject(TestObject&& other)
+ : foo_(std::move(other.foo_)),
+ bar_(std::move(other.bar_)),
+ state_(State::MOVE_CONSTRUCTED) {
+ other.state_ = State::MOVED_FROM;
+ }
+
+ TestObject& operator=(const TestObject& other) {
+ foo_ = other.foo_;
+ bar_ = other.bar_;
+ state_ = State::COPY_ASSIGNED;
+ return *this;
+ }
+
+ TestObject& operator=(TestObject&& other) {
+ foo_ = other.foo_;
+ bar_ = other.bar_;
+ state_ = State::MOVE_ASSIGNED;
+ other.state_ = State::MOVED_FROM;
+ return *this;
+ }
+
+ void Swap(TestObject* other) {
+ using std::swap;
+ swap(foo_, other->foo_);
+ swap(bar_, other->bar_);
+ state_ = State::SWAPPED;
+ other->state_ = State::SWAPPED;
+ }
+
+ bool operator==(const TestObject& other) const {
+ return foo_ == other.foo_ && bar_ == other.bar_;
+ }
+
+ int foo() const { return foo_; }
+ State state() const { return state_; }
+
+ private:
+ int foo_;
+ double bar_;
+ State state_;
+};
+
+// Implementing Swappable concept.
+void swap(TestObject& lhs, TestObject& rhs) {
+ lhs.Swap(&rhs);
+}
+
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
+} // anonymous namespace
+
+static_assert(is_trivially_destructible<Optional<int>>::value,
+ "OptionalIsTriviallyDestructible");
+
+static_assert(
+ !is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+ "OptionalIsTriviallyDestructible");
+
+TEST(OptionalTest, DefaultConstructor) {
+ {
+ Optional<float> o;
+ EXPECT_FALSE(o);
+ }
+
+ {
+ Optional<std::string> o;
+ EXPECT_FALSE(o);
+ }
+
+ {
+ Optional<TestObject> o;
+ EXPECT_FALSE(o);
+ }
+}
+
+TEST(OptionalTest, CopyConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> other(first);
+
+ EXPECT_TRUE(other);
+ EXPECT_EQ(other.value(), 0.1f);
+ EXPECT_EQ(first, other);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> other(first);
+
+ EXPECT_TRUE(other);
+ EXPECT_EQ(other.value(), "foo");
+ EXPECT_EQ(first, other);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> other(first);
+
+ EXPECT_TRUE(!!other);
+ EXPECT_TRUE(other.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(first == other);
+ }
+}
+
+TEST(OptionalTest, ValueConstructor) {
+ {
+ Optional<float> o(0.1f);
+ EXPECT_TRUE(o);
+ EXPECT_EQ(o.value(), 0.1f);
+ }
+
+ {
+ Optional<std::string> o("foo");
+ EXPECT_TRUE(o);
+ EXPECT_EQ(o.value(), "foo");
+ }
+
+ {
+ Optional<TestObject> o(TestObject(3, 0.1));
+ EXPECT_TRUE(!!o);
+ EXPECT_TRUE(o.value() == TestObject(3, 0.1));
+ }
+}
+
+TEST(OptionalTest, MoveConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> second(std::move(first));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ(second.value(), 0.1f);
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> second(std::move(first));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ("foo", second.value());
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> second(std::move(first));
+
+ EXPECT_TRUE(!!second);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+ EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+ EXPECT_TRUE(!!first);
+ EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ }
+}
+
+TEST(OptionalTest, MoveValueConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> second(std::move(first.value()));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ(second.value(), 0.1f);
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> second(std::move(first.value()));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ("foo", second.value());
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> second(std::move(first.value()));
+
+ EXPECT_TRUE(!!second);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+ EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+ EXPECT_TRUE(!!first);
+ EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ }
+}
+
+TEST(OptionalTest, ConstructorForwardArguments) {
+ {
+ Optional<float> a(base::in_place, 0.1f);
+ EXPECT_TRUE(a);
+ EXPECT_EQ(0.1f, a.value());
+ }
+
+ {
+ Optional<std::string> a(base::in_place, "foo");
+ EXPECT_TRUE(a);
+ EXPECT_EQ("foo", a.value());
+ }
+
+ {
+ Optional<TestObject> a(base::in_place, 0, 0.1);
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(TestObject(0, 0.1) == a.value());
+ }
+}
+
+TEST(OptionalTest, NulloptConstructor) {
+ Optional<int> a = base::nullopt;
+ EXPECT_FALSE(a);
+}
+
+TEST(OptionalTest, AssignValue) {
+ {
+ Optional<float> a;
+ EXPECT_FALSE(a);
+ a = 0.1f;
+ EXPECT_TRUE(a);
+
+ Optional<float> b(0.1f);
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<std::string> a;
+ EXPECT_FALSE(a);
+ a = std::string("foo");
+ EXPECT_TRUE(a);
+
+ Optional<std::string> b(std::string("foo"));
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a;
+ EXPECT_FALSE(!!a);
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(!!a);
+
+ Optional<TestObject> b(TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<TestObject> a = TestObject(4, 1.0);
+ EXPECT_TRUE(!!a);
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(!!a);
+
+ Optional<TestObject> b(TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, AssignObject) {
+ {
+ Optional<float> a;
+ Optional<float> b(0.1f);
+ a = b;
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(a.value(), 0.1f);
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<std::string> a;
+ Optional<std::string> b("foo");
+ a = b;
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(a.value(), "foo");
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = b;
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = b;
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, AssignObject_rvalue) {
+ {
+ Optional<float> a;
+ Optional<float> b(0.1f);
+ a = std::move(b);
+
+ EXPECT_TRUE(a);
+ EXPECT_TRUE(b);
+ EXPECT_EQ(0.1f, a.value());
+ }
+
+ {
+ Optional<std::string> a;
+ Optional<std::string> b("foo");
+ a = std::move(b);
+
+ EXPECT_TRUE(a);
+ EXPECT_TRUE(b);
+ EXPECT_EQ("foo", a.value());
+ }
+
+ {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = std::move(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, a->state());
+ EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+ }
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = std::move(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+ EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, a->state());
+ EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+ }
+}
+
+TEST(OptionalTest, AssignNull) {
+ {
+ Optional<float> a(0.1f);
+ Optional<float> b(0.2f);
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ Optional<std::string> b("bar");
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ Optional<TestObject> b(TestObject(4, 1.0));
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, OperatorStar) {
+ {
+ Optional<float> a(0.1f);
+ EXPECT_EQ(a.value(), *a);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ EXPECT_EQ(a.value(), *a);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ EXPECT_EQ(a.value(), *a);
+ }
+}
+
+TEST(OptionalTest, OperatorStar_rvalue) {
+ EXPECT_EQ(0.1f, *Optional<float>(0.1f));
+ EXPECT_EQ(std::string("foo"), *Optional<std::string>("foo"));
+ EXPECT_TRUE(TestObject(3, 0.1) == *Optional<TestObject>(TestObject(3, 0.1)));
+}
+
+TEST(OptionalTest, OperatorArrow) {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ EXPECT_EQ(a->foo(), 3);
+}
+
+TEST(OptionalTest, Value_rvalue) {
+ EXPECT_EQ(0.1f, Optional<float>(0.1f).value());
+ EXPECT_EQ(std::string("foo"), Optional<std::string>("foo").value());
+ EXPECT_TRUE(TestObject(3, 0.1) ==
+ Optional<TestObject>(TestObject(3, 0.1)).value());
+}
+
+TEST(OptionalTest, ValueOr) {
+ {
+ Optional<float> a;
+ EXPECT_EQ(0.0f, a.value_or(0.0f));
+
+ a = 0.1f;
+ EXPECT_EQ(0.1f, a.value_or(0.0f));
+
+ a = base::nullopt;
+ EXPECT_EQ(0.0f, a.value_or(0.0f));
+ }
+
+ {
+ Optional<std::string> a;
+ EXPECT_EQ("bar", a.value_or("bar"));
+
+ a = std::string("foo");
+ EXPECT_EQ(std::string("foo"), a.value_or("bar"));
+
+ a = base::nullopt;
+ EXPECT_EQ(std::string("bar"), a.value_or("bar"));
+ }
+
+ {
+ Optional<TestObject> a;
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(3, 0.1));
+
+ a = base::nullopt;
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+ }
+}
+
+TEST(OptionalTest, Swap_bothNoValue) {
+ Optional<TestObject> a, b;
+ a.swap(b);
+
+ EXPECT_FALSE(a);
+ EXPECT_FALSE(b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_inHasValue) {
+ Optional<TestObject> a(TestObject(1, 0.3));
+ Optional<TestObject> b;
+ a.swap(b);
+
+ EXPECT_FALSE(a);
+
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_outHasValue) {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(1, 0.3));
+ a.swap(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_FALSE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_bothValue) {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ Optional<TestObject> b(TestObject(1, 0.3));
+ a.swap(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+ EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+ EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Emplace) {
+ {
+ Optional<float> a(0.1f);
+ a.emplace(0.3f);
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(0.3f, a.value());
+ }
+
+ {
+ Optional<std::string> a("foo");
+ a.emplace("bar");
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ("bar", a.value());
+ }
+
+ {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ a.emplace(TestObject(1, 0.2));
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(TestObject(1, 0.2) == a.value());
+ }
+}
+
+TEST(OptionalTest, Equals_TwoEmpty) {
+ Optional<int> a;
+ Optional<int> b;
+
+ EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoEquals) {
+ Optional<int> a(1);
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_OneEmpty) {
+ Optional<int> a;
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoDifferent) {
+ Optional<int> a(0);
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEmpty) {
+ Optional<int> a;
+ Optional<int> b;
+
+ EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEquals) {
+ Optional<int> a(1);
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_OneEmpty) {
+ Optional<int> a;
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoDifferent) {
+ Optional<int> a(0);
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, Less_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l < r);
+}
+
+TEST(OptionalTest, Less_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothValues) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_TRUE(l < r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l < r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l < r);
+ }
+}
+
+TEST(OptionalTest, LessEq_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_FALSE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothValues) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_TRUE(l <= r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l <= r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l <= r);
+ }
+}
+
+TEST(OptionalTest, Greater_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_TRUE(l > r);
+}
+
+TEST(OptionalTest, Greater_BothValue) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_FALSE(l > r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l > r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l > r);
+ }
+}
+
+TEST(OptionalTest, GreaterEq_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_BothValue) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_FALSE(l >= r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l >= r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l >= r);
+ }
+}
+
+TEST(OptionalTest, OptNullEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt == base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt == base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt == opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt == opt);
+ }
+}
+
+TEST(OptionalTest, OptNullNotEq) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt != base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt != base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptNotEq) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt != opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt != opt);
+ }
+}
+
+TEST(OptionalTest, OptNullLower) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt < base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt < base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptLower) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt < opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt < opt);
+ }
+}
+
+TEST(OptionalTest, OptNullLowerEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt <= base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt <= base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptLowerEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt <= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt <= opt);
+ }
+}
+
+TEST(OptionalTest, OptNullGreater) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt > base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt > base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptGreater) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt > opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt > opt);
+ }
+}
+
+TEST(OptionalTest, OptNullGreaterEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt >= base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt >= base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptGreaterEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt >= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt >= opt);
+ }
+}
+
+TEST(OptionalTest, ValueEq_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(opt == 1);
+}
+
+TEST(OptionalTest, ValueEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt == 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt == 1);
+ }
+}
+
+TEST(OptionalTest, EqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 == opt);
+}
+
+TEST(OptionalTest, EqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 == opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(1 == opt);
+ }
+}
+
+TEST(OptionalTest, ValueNotEq_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt != 1);
+}
+
+TEST(OptionalTest, ValueNotEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt != 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt != 1);
+ }
+}
+
+TEST(OptionalTest, NotEqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(1 != opt);
+}
+
+TEST(OptionalTest, NotEqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(1 != opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(1 != opt);
+ }
+}
+
+TEST(OptionalTest, ValueLess_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt < 1);
+}
+
+TEST(OptionalTest, ValueLess_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt < 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt < 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(opt < 1);
+ }
+}
+
+TEST(OptionalTest, LessValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 < opt);
+}
+
+TEST(OptionalTest, LessValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 < opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(1 < opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(1 < opt);
+ }
+}
+
+TEST(OptionalTest, ValueLessEq_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt <= 1);
+}
+
+TEST(OptionalTest, ValueLessEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt <= 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt <= 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(opt <= 1);
+ }
+}
+
+TEST(OptionalTest, LessEqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 <= opt);
+}
+
+TEST(OptionalTest, LessEqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 <= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(1 <= opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(1 <= opt);
+ }
+}
+
+TEST(OptionalTest, ValueGreater_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(opt > 1);
+}
+
+TEST(OptionalTest, ValueGreater_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt > 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt > 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(opt > 1);
+ }
+}
+
+TEST(OptionalTest, GreaterValue_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(1 > opt);
+}
+
+TEST(OptionalTest, GreaterValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(1 > opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(1 > opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(1 > opt);
+ }
+}
+
+TEST(OptionalTest, ValueGreaterEq_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(opt >= 1);
+}
+
+TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt >= 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt >= 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(opt >= 1);
+ }
+}
+
+TEST(OptionalTest, GreaterEqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(1 >= opt);
+}
+
+TEST(OptionalTest, GreaterEqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(1 >= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(1 >= opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(1 >= opt);
+ }
+}
+
+TEST(OptionalTest, NotEquals) {
+ {
+ Optional<float> a(0.1f);
+ Optional<float> b(0.2f);
+ EXPECT_NE(a, b);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ Optional<std::string> b("bar");
+ EXPECT_NE(a, b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ Optional<TestObject> b(TestObject(4, 1.0));
+ EXPECT_TRUE(a != b);
+ }
+}
+
+TEST(OptionalTest, NotEqualsNull) {
+ {
+ Optional<float> a(0.1f);
+ Optional<float> b(0.1f);
+ b = base::nullopt;
+ EXPECT_NE(a, b);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ Optional<std::string> b("foo");
+ b = base::nullopt;
+ EXPECT_NE(a, b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ b = base::nullopt;
+ EXPECT_TRUE(a != b);
+ }
+}
+
+TEST(OptionalTest, MakeOptional) {
+ {
+ Optional<float> o = base::make_optional(32.f);
+ EXPECT_TRUE(o);
+ EXPECT_EQ(32.f, *o);
+
+ float value = 3.f;
+ o = base::make_optional(std::move(value));
+ EXPECT_TRUE(o);
+ EXPECT_EQ(3.f, *o);
+ }
+
+ {
+ Optional<std::string> o = base::make_optional(std::string("foo"));
+ EXPECT_TRUE(o);
+ EXPECT_EQ("foo", *o);
+
+ std::string value = "bar";
+ o = base::make_optional(std::move(value));
+ EXPECT_TRUE(o);
+ EXPECT_EQ(std::string("bar"), *o);
+ }
+
+ {
+ Optional<TestObject> o = base::make_optional(TestObject(3, 0.1));
+ EXPECT_TRUE(!!o);
+ EXPECT_TRUE(TestObject(3, 0.1) == *o);
+
+ TestObject value = TestObject(0, 0.42);
+ o = base::make_optional(std::move(value));
+ EXPECT_TRUE(!!o);
+ EXPECT_TRUE(TestObject(0, 0.42) == *o);
+ EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
+ EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
+
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
+ base::make_optional(std::move(value))->state());
+ }
+}
+
+TEST(OptionalTest, NonMemberSwap_bothNoValue) {
+ Optional<TestObject> a, b;
+ base::swap(a, b);
+
+ EXPECT_FALSE(!!a);
+ EXPECT_FALSE(!!b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_inHasValue) {
+ Optional<TestObject> a(TestObject(1, 0.3));
+ Optional<TestObject> b;
+ base::swap(a, b);
+
+ EXPECT_FALSE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_outHasValue) {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(1, 0.3));
+ base::swap(a, b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_FALSE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_bothValue) {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ Optional<TestObject> b(TestObject(1, 0.3));
+ base::swap(a, b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+ EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+ EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Hash_OptionalReflectsInternal) {
+ {
+ std::hash<int> int_hash;
+ std::hash<Optional<int>> opt_int_hash;
+
+ EXPECT_EQ(int_hash(1), opt_int_hash(Optional<int>(1)));
+ }
+
+ {
+ std::hash<std::string> str_hash;
+ std::hash<Optional<std::string>> opt_str_hash;
+
+ EXPECT_EQ(str_hash(std::string("foobar")),
+ opt_str_hash(Optional<std::string>(std::string("foobar"))));
+ }
+}
+
+TEST(OptionalTest, Hash_NullOptEqualsNullOpt) {
+ std::hash<Optional<int>> opt_int_hash;
+ std::hash<Optional<std::string>> opt_str_hash;
+
+ EXPECT_EQ(opt_str_hash(Optional<std::string>()),
+ opt_int_hash(Optional<int>()));
+}
+
+TEST(OptionalTest, Hash_UseInSet) {
+ std::set<Optional<int>> setOptInt;
+
+ EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+
+ setOptInt.insert(Optional<int>(3));
+ EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+ EXPECT_NE(setOptInt.end(), setOptInt.find(3));
+}
+
+} // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index d21f7c7f0d..73834bd460 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -9,9 +9,9 @@
namespace base {
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- const base::Closure& task)
+ base::Closure task)
: base::TrackingInfo(posted_from, TimeTicks()),
- task(task),
+ task(std::move(task)),
posted_from(posted_from),
sequence_num(0),
nestable(true),
@@ -19,22 +19,24 @@ PendingTask::PendingTask(const tracked_objects::Location& posted_from,
}
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- const base::Closure& task,
+ base::Closure task,
TimeTicks delayed_run_time,
bool nestable)
: base::TrackingInfo(posted_from, delayed_run_time),
- task(task),
+ task(std::move(task)),
posted_from(posted_from),
sequence_num(0),
nestable(nestable),
is_high_res(false) {
}
-PendingTask::PendingTask(const PendingTask& other) = default;
+PendingTask::PendingTask(PendingTask&& other) = default;
PendingTask::~PendingTask() {
}
+PendingTask& PendingTask::operator=(PendingTask&& other) = default;
+
bool PendingTask::operator<(const PendingTask& other) const {
// Since the top of a priority queue is defined as the "greatest" element, we
// need to invert the comparison here. We want the smaller time to be at the
@@ -51,8 +53,4 @@ bool PendingTask::operator<(const PendingTask& other) const {
return (sequence_num - other.sequence_num) > 0;
}
-void TaskQueue::Swap(TaskQueue* queue) {
- c.swap(queue->c); // Calls std::deque::swap.
-}
-
} // namespace base
diff --git a/base/pending_task.h b/base/pending_task.h
index fd0b883026..5761653397 100644
--- a/base/pending_task.h
+++ b/base/pending_task.h
@@ -19,14 +19,16 @@ namespace base {
// for use by classes that queue and execute tasks.
struct BASE_EXPORT PendingTask : public TrackingInfo {
PendingTask(const tracked_objects::Location& posted_from,
- const Closure& task);
+ Closure task);
PendingTask(const tracked_objects::Location& posted_from,
- const Closure& task,
+ Closure task,
TimeTicks delayed_run_time,
bool nestable);
- PendingTask(const PendingTask& other);
+ PendingTask(PendingTask&& other);
~PendingTask();
+ PendingTask& operator=(PendingTask&& other);
+
// Used to support sorting.
bool operator<(const PendingTask& other) const;
@@ -46,15 +48,10 @@ struct BASE_EXPORT PendingTask : public TrackingInfo {
bool is_high_res;
};
-// Wrapper around std::queue specialized for PendingTask which adds a Swap
-// helper method.
-class BASE_EXPORT TaskQueue : public std::queue<PendingTask> {
- public:
- void Swap(TaskQueue* queue);
-};
+using TaskQueue = std::queue<PendingTask>;
// PendingTasks are sorted by their |delayed_run_time| property.
-typedef std::priority_queue<base::PendingTask> DelayedTaskQueue;
+using DelayedTaskQueue = std::priority_queue<base::PendingTask>;
} // namespace base
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
index 1761e2518f..edb299de5c 100644
--- a/base/posix/global_descriptors.h
+++ b/base/posix/global_descriptors.h
@@ -57,7 +57,12 @@ class BASE_EXPORT GlobalDescriptors {
#else
// 3 used by __android_log_write().
// 4 used by... something important on Android M.
- static const int kBaseDescriptor = 5;
+ // 5 used by... something important on Android L... on low-end devices.
+ // TODO(amistry): On Android, this mechanism is only used for tests since the
+ // content child launcher spawns a process by creating a new Activity using
+ // the Android APIs. For tests, come up with a way that doesn't require using
+ // a pre-defined fd.
+ static const int kBaseDescriptor = 6;
#endif
// Return the singleton instance of GlobalDescriptors.
diff --git a/base/posix/unix_domain_socket_linux_unittest.cc b/base/posix/unix_domain_socket_linux_unittest.cc
index e4b63c0cd4..3f5173cfc2 100644
--- a/base/posix/unix_domain_socket_linux_unittest.cc
+++ b/base/posix/unix_domain_socket_linux_unittest.cc
@@ -52,7 +52,8 @@ TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
message_fds.clear();
// Check that the thread didn't get blocked.
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
message_thread.task_runner()->PostTask(
FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
diff --git a/base/process/launch.h b/base/process/launch.h
index adfa093cfa..b8c02597a6 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -72,7 +72,7 @@ struct BASE_EXPORT LaunchOptions {
bool start_hidden;
// If non-null, inherit exactly the list of handles in this vector (these
- // handles must be inheritable). This is only supported on Vista and higher.
+ // handles must be inheritable).
HandlesToInheritVector* handles_to_inherit;
// If true, the new process inherits handles from the parent. In production
@@ -80,7 +80,7 @@ struct BASE_EXPORT LaunchOptions {
// binaries, because open handles from other libraries and subsystems will
// leak to the child process, causing errors such as open socket hangs.
// Note: If |handles_to_inherit| is non-null, this flag is ignored and only
- // those handles will be inherited (on Vista and higher).
+ // those handles will be inherited.
bool inherit_handles;
// If non-null, runs as if the user represented by the token had launched it.
@@ -257,12 +257,6 @@ BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
std::string* output);
-// A restricted version of |GetAppOutput()| which (a) clears the environment,
-// and (b) stores at most |max_output| bytes; also, it doesn't search the path
-// for the command.
-BASE_EXPORT bool GetAppOutputRestricted(const CommandLine& cl,
- std::string* output, size_t max_output);
-
// A version of |GetAppOutput()| which also returns the exit code of the
// executed command. Returns true if the application runs and exits cleanly. If
// this is the case the exit code of the application is available in
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index af14c910e7..4fb1018276 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -152,7 +152,7 @@ int sys_rt_sigaction(int sig, const struct kernel_sigaction* act,
// This function is intended to be used in between fork() and execve() and will
// reset all signal handlers to the default.
// The motivation for going through all of them is that sa_restorer can leak
-// from parents and help defeat ASLR on buggy kernels. We reset it to NULL.
+// from parents and help defeat ASLR on buggy kernels. We reset it to null.
// See crbug.com/177956.
void ResetChildSignalHandlersToDefaults(void) {
for (int signum = 1; ; ++signum) {
@@ -162,7 +162,7 @@ void ResetChildSignalHandlersToDefaults(void) {
#else
struct kernel_sigaction act = {0};
#endif
- int sigaction_get_ret = sys_rt_sigaction(signum, NULL, &act);
+ int sigaction_get_ret = sys_rt_sigaction(signum, nullptr, &act);
if (sigaction_get_ret && errno == EINVAL) {
#if !defined(NDEBUG)
// Linux supports 32 real-time signals from 33 to 64.
@@ -181,14 +181,14 @@ void ResetChildSignalHandlersToDefaults(void) {
// The kernel won't allow to re-set SIGKILL or SIGSTOP.
if (signum != SIGSTOP && signum != SIGKILL) {
act.k_sa_handler = reinterpret_cast<void*>(SIG_DFL);
- act.k_sa_restorer = NULL;
- if (sys_rt_sigaction(signum, &act, NULL)) {
+ act.k_sa_restorer = nullptr;
+ if (sys_rt_sigaction(signum, &act, nullptr)) {
RAW_LOG(FATAL, "sigaction (set) failed.");
}
}
#if !defined(NDEBUG)
// Now ask the kernel again and check that no restorer will leak.
- if (sys_rt_sigaction(signum, NULL, &act) || act.k_sa_restorer) {
+ if (sys_rt_sigaction(signum, nullptr, &act) || act.k_sa_restorer) {
RAW_LOG(FATAL, "Cound not fix sa_restorer.");
}
#endif // !defined(NDEBUG)
@@ -310,10 +310,10 @@ Process LaunchProcess(const std::vector<std::string>& argv,
for (size_t i = 0; i < argv.size(); i++) {
argv_cstr[i] = const_cast<char*>(argv[i].c_str());
}
- argv_cstr[argv.size()] = NULL;
+ argv_cstr[argv.size()] = nullptr;
std::unique_ptr<char* []> new_environ;
- char* const empty_environ = NULL;
+ char* const empty_environ = nullptr;
char* const* old_environ = GetEnvironment();
if (options.clear_environ)
old_environ = &empty_environ;
@@ -435,7 +435,7 @@ Process LaunchProcess(const std::vector<std::string>& argv,
// Set process' controlling terminal.
if (HANDLE_EINTR(setsid()) != -1) {
if (HANDLE_EINTR(
- ioctl(options.ctrl_terminal_fd, TIOCSCTTY, NULL)) == -1) {
+ ioctl(options.ctrl_terminal_fd, TIOCSCTTY, nullptr)) == -1) {
RAW_LOG(WARNING, "ioctl(TIOCSCTTY), ctrl terminal not set");
}
} else {
@@ -516,14 +516,6 @@ void RaiseProcessToHighPriority() {
// setpriority() or sched_getscheduler, but these all require extra rights.
}
-// Return value used by GetAppOutputInternal to encapsulate the various exit
-// scenarios from the function.
-enum GetAppOutputInternalResult {
- EXECUTE_FAILURE,
- EXECUTE_SUCCESS,
- GOT_MAX_OUTPUT,
-};
-
// Executes the application specified by |argv| and wait for it to exit. Stores
// the output (stdout) in |output|. If |do_search_path| is set, it searches the
// path for the application; in that case, |envp| must be null, and it will use
@@ -531,21 +523,14 @@ enum GetAppOutputInternalResult {
// specify the path of the application, and |envp| will be used as the
// environment. If |include_stderr| is true, includes stderr otherwise redirects
// it to /dev/null.
-// If we successfully start the application and get all requested output, we
-// return GOT_MAX_OUTPUT, or if there is a problem starting or exiting
-// the application we return RUN_FAILURE. Otherwise we return EXECUTE_SUCCESS.
-// The GOT_MAX_OUTPUT return value exists so a caller that asks for limited
-// output can treat this as a success, despite having an exit code of SIG_PIPE
-// due to us closing the output pipe.
-// In the case of EXECUTE_SUCCESS, the application exit code will be returned
-// in |*exit_code|, which should be checked to determine if the application
-// ran successfully.
-static GetAppOutputInternalResult GetAppOutputInternal(
+// The return value of the function indicates success or failure. In the case of
+// success, the application exit code will be returned in |*exit_code|, which
+// should be checked to determine if the application ran successfully.
+static bool GetAppOutputInternal(
const std::vector<std::string>& argv,
char* const envp[],
bool include_stderr,
std::string* output,
- size_t max_output,
bool do_search_path,
int* exit_code) {
// Doing a blocking wait for another command to finish counts as IO.
@@ -567,13 +552,13 @@ static GetAppOutputInternalResult GetAppOutputInternal(
DCHECK(!do_search_path ^ !envp);
if (pipe(pipe_fd) < 0)
- return EXECUTE_FAILURE;
+ return false;
switch (pid = fork()) {
case -1: // error
close(pipe_fd[0]);
close(pipe_fd[1]);
- return EXECUTE_FAILURE;
+ return false;
case 0: // child
{
// DANGER: no calls to malloc or locks are allowed from now on:
@@ -610,7 +595,7 @@ static GetAppOutputInternalResult GetAppOutputInternal(
for (size_t i = 0; i < argv.size(); i++)
argv_cstr[i] = const_cast<char*>(argv[i].c_str());
- argv_cstr[argv.size()] = NULL;
+ argv_cstr[argv.size()] = nullptr;
if (do_search_path)
execvp(argv_cstr[0], argv_cstr.get());
else
@@ -625,33 +610,21 @@ static GetAppOutputInternalResult GetAppOutputInternal(
close(pipe_fd[1]);
output->clear();
- char buffer[256];
- size_t output_buf_left = max_output;
- ssize_t bytes_read = 1; // A lie to properly handle |max_output == 0|
- // case in the logic below.
-
- while (output_buf_left > 0) {
- bytes_read = HANDLE_EINTR(read(pipe_fd[0], buffer,
- std::min(output_buf_left, sizeof(buffer))));
+
+ while (true) {
+ char buffer[256];
+ ssize_t bytes_read =
+ HANDLE_EINTR(read(pipe_fd[0], buffer, sizeof(buffer)));
if (bytes_read <= 0)
break;
output->append(buffer, bytes_read);
- output_buf_left -= static_cast<size_t>(bytes_read);
}
close(pipe_fd[0]);
// Always wait for exit code (even if we know we'll declare
// GOT_MAX_OUTPUT).
Process process(pid);
- bool success = process.WaitForExit(exit_code);
-
- // If we stopped because we read as much as we wanted, we return
- // GOT_MAX_OUTPUT (because the child may exit due to |SIGPIPE|).
- if (!output_buf_left && bytes_read > 0)
- return GOT_MAX_OUTPUT;
- else if (success)
- return EXECUTE_SUCCESS;
- return EXECUTE_FAILURE;
+ return process.WaitForExit(exit_code);
}
}
}
@@ -661,44 +634,27 @@ bool GetAppOutput(const CommandLine& cl, std::string* output) {
}
bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
- // Run |execve()| with the current environment and store "unlimited" data.
+ // Run |execve()| with the current environment.
int exit_code;
- GetAppOutputInternalResult result = GetAppOutputInternal(
- argv, NULL, false, output, std::numeric_limits<std::size_t>::max(), true,
- &exit_code);
- return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
+ bool result =
+ GetAppOutputInternal(argv, nullptr, false, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
}
bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
- // Run |execve()| with the current environment and store "unlimited" data.
- int exit_code;
- GetAppOutputInternalResult result = GetAppOutputInternal(
- cl.argv(), NULL, true, output, std::numeric_limits<std::size_t>::max(),
- true, &exit_code);
- return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
-}
-
-// TODO(viettrungluu): Conceivably, we should have a timeout as well, so we
-// don't hang if what we're calling hangs.
-bool GetAppOutputRestricted(const CommandLine& cl,
- std::string* output, size_t max_output) {
- // Run |execve()| with the empty environment.
- char* const empty_environ = NULL;
+ // Run |execve()| with the current environment.
int exit_code;
- GetAppOutputInternalResult result = GetAppOutputInternal(
- cl.argv(), &empty_environ, false, output, max_output, false, &exit_code);
- return result == GOT_MAX_OUTPUT || (result == EXECUTE_SUCCESS &&
- exit_code == EXIT_SUCCESS);
+ bool result =
+ GetAppOutputInternal(cl.argv(), nullptr, true, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
}
bool GetAppOutputWithExitCode(const CommandLine& cl,
std::string* output,
int* exit_code) {
- // Run |execve()| with the current environment and store "unlimited" data.
- GetAppOutputInternalResult result = GetAppOutputInternal(
- cl.argv(), NULL, false, output, std::numeric_limits<std::size_t>::max(),
- true, exit_code);
- return result == EXECUTE_SUCCESS;
+ // Run |execve()| with the current environment.
+ return GetAppOutputInternal(cl.argv(), nullptr, false, output, true,
+ exit_code);
}
#endif // !defined(OS_NACL_NONSFI)
diff --git a/base/process/process.h b/base/process/process.h
index 75f6a009df..70c8260193 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -6,7 +6,7 @@
#define BASE_PROCESS_PROCESS_H_
#include "base/base_export.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/process/process_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -31,8 +31,6 @@ namespace base {
// the process dies, and it may be reused by the system, which means that it may
// end up pointing to the wrong process.
class BASE_EXPORT Process {
- MOVE_ONLY_TYPE_FOR_CPP_03(Process)
-
public:
explicit Process(ProcessHandle handle = kNullProcessHandle);
@@ -136,6 +134,8 @@ class BASE_EXPORT Process {
#else
ProcessHandle process_;
#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Process);
};
#if defined(OS_CHROMEOS)
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 8d4e51b517..57cb3abec0 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -272,6 +272,14 @@ struct BASE_EXPORT SystemMemoryInfoKB {
int total;
int free;
+#if defined(OS_LINUX)
+ // This provides an estimate of available memory as described here:
+ // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ // NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
+ // be 0 in earlier kernel versions.
+ int available;
+#endif
+
#if !defined(OS_MACOSX)
int swap_total;
int swap_free;
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index 89a26090da..3d27656d6a 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -534,6 +534,9 @@ const size_t kDiskWeightedIOTime = 13;
SystemMemoryInfoKB::SystemMemoryInfoKB() {
total = 0;
free = 0;
+#if defined(OS_LINUX)
+ available = 0;
+#endif
buffers = 0;
cached = 0;
active_anon = 0;
@@ -564,6 +567,9 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("total", total);
res->SetInteger("free", free);
+#if defined(OS_LINUX)
+ res->SetInteger("available", available);
+#endif
res->SetInteger("buffers", buffers);
res->SetInteger("cached", cached);
res->SetInteger("active_anon", active_anon);
@@ -621,6 +627,10 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->total;
else if (tokens[0] == "MemFree:")
target = &meminfo->free;
+#if defined(OS_LINUX)
+ else if (tokens[0] == "MemAvailable:")
+ target = &meminfo->available;
+#endif
else if (tokens[0] == "Buffers:")
target = &meminfo->buffers;
else if (tokens[0] == "Cached:")
diff --git a/base/run_loop.cc b/base/run_loop.cc
index 4e425c9339..a2322f8495 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -51,10 +51,18 @@ void RunLoop::Quit() {
}
}
+void RunLoop::QuitWhenIdle() {
+ quit_when_idle_received_ = true;
+}
+
base::Closure RunLoop::QuitClosure() {
return base::Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr());
}
+base::Closure RunLoop::QuitWhenIdleClosure() {
+ return base::Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr());
+}
+
bool RunLoop::BeforeRun() {
DCHECK(!run_called_);
run_called_ = true;
diff --git a/base/run_loop.h b/base/run_loop.h
index 61b0fe1653..635018f434 100644
--- a/base/run_loop.h
+++ b/base/run_loop.h
@@ -44,26 +44,32 @@ class BASE_EXPORT RunLoop {
bool running() const { return running_; }
- // Quit an earlier call to Run(). There can be other nested RunLoops servicing
- // the same task queue (MessageLoop); Quitting one RunLoop has no bearing on
- // the others. Quit can be called before, during or after Run. If called
- // before Run, Run will return immediately when called. Calling Quit after the
- // RunLoop has already finished running has no effect.
+ // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
+ // earlier call to Run() when there aren't any tasks or messages in the queue.
//
- // WARNING: You must NEVER assume that a call to Quit will terminate the
- // targetted message loop. If a nested message loop continues running, the
- // target may NEVER terminate. It is very easy to livelock (run forever) in
- // such a case.
+ // There can be other nested RunLoops servicing the same task queue
+ // (MessageLoop); Quitting one RunLoop has no bearing on the others. Quit()
+ // and QuitWhenIdle() can be called before, during or after Run(). If called
+ // before Run(), Run() will return immediately when called. Calling Quit() or
+ // QuitWhenIdle() after the RunLoop has already finished running has no
+ // effect.
+ //
+ // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
+ // terminate the targetted message loop. If a nested message loop continues
+ // running, the target may NEVER terminate. It is very easy to livelock (run
+ // forever) in such a case.
void Quit();
+ void QuitWhenIdle();
- // Convenience method to get a closure that safely calls Quit (has no effect
- // if the RunLoop instance is gone).
+ // Convenience methods to get a closure that safely calls Quit() or
+ // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
//
// Example:
// RunLoop run_loop;
// PostTask(run_loop.QuitClosure());
// run_loop.Run();
base::Closure QuitClosure();
+ base::Closure QuitWhenIdleClosure();
private:
friend class MessageLoop;
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
index d41f19512c..84de6b7d50 100644
--- a/base/scoped_generic.h
+++ b/base/scoped_generic.h
@@ -11,7 +11,6 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
-#include "base/move.h"
namespace base {
@@ -54,8 +53,6 @@ namespace base {
// typedef ScopedGeneric<int, FooScopedTraits> ScopedFoo;
template<typename T, typename Traits>
class ScopedGeneric {
- DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(ScopedGeneric)
-
private:
// This must be first since it's used inline below.
//
@@ -160,6 +157,8 @@ class ScopedGeneric {
const ScopedGeneric<T2, Traits2>& p2) const;
Data data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGeneric);
};
template<class T, class Traits>
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
index 1e89a5f4b4..196bb1cc79 100644
--- a/base/sequence_checker_unittest.cc
+++ b/base/sequence_checker_unittest.cc
@@ -95,9 +95,8 @@ class SequenceCheckerTest : public testing::Test {
void PostDeleteToOtherThread(
std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
- other_thread()->message_loop()->DeleteSoon(
- FROM_HERE,
- sequence_checked_object.release());
+ other_thread()->message_loop()->task_runner()->DeleteSoon(
+ FROM_HERE, sequence_checked_object.release());
}
// Destroys the SequencedWorkerPool instance, blocking until it is fully shut
diff --git a/base/strings/string16.h b/base/strings/string16.h
index 82dd0fab4f..30f4e3eec0 100644
--- a/base/strings/string16.h
+++ b/base/strings/string16.h
@@ -48,6 +48,8 @@ typedef std::char_traits<wchar_t> string16_char_traits;
#elif defined(WCHAR_T_IS_UTF32)
+#include <wchar.h> // for mbstate_t
+
namespace base {
typedef uint16_t char16;
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index fb5633a62d..09aeb444d6 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -12,9 +12,9 @@
#include <limits>
#include "base/logging.h"
-#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
-#include "base/strings/utf_string_conversions.h"
+#include "base/scoped_clear_errno.h"
namespace base {
diff --git a/base/strings/string_number_conversions.h b/base/strings/string_number_conversions.h
index 4a50284c87..a95544e88f 100644
--- a/base/strings/string_number_conversions.h
+++ b/base/strings/string_number_conversions.h
@@ -25,6 +25,14 @@
// Please do not add "convenience" functions for converting strings to integers
// that return the value and ignore success/failure. That encourages people to
// write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in any UI unless it's NOT localized on purpose.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would lead
+// numbers to be formatted in a non-native way.
// ----------------------------------------------------------------------------
namespace base {
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index 2e7441993d..91191e07e1 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -13,6 +13,7 @@
#include <cmath>
#include <limits>
+#include "base/bit_cast.h"
#include "base/format_macros.h"
#include "base/macros.h"
#include "base/strings/stringprintf.h"
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
index 6bbc215807..cb668ed7ff 100644
--- a/base/strings/string_util.cc
+++ b/base/strings/string_util.cc
@@ -23,7 +23,6 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/singleton.h"
-#include "base/strings/string_split.h"
#include "base/strings/utf_string_conversion_utils.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/icu/icu_utf.h"
diff --git a/base/synchronization/read_write_lock.h b/base/synchronization/read_write_lock.h
new file mode 100644
index 0000000000..4c59b7b116
--- /dev/null
+++ b/base/synchronization/read_write_lock.h
@@ -0,0 +1,105 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+#define BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_NACL)
+#include "base/synchronization/lock.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#else
+# error No reader-writer lock defined for this platform.
+#endif
+
+namespace base {
+namespace subtle {
+
+// An OS-independent wrapper around reader-writer locks. There's no magic here.
+//
+// You are strongly encouraged to use base::Lock instead of this, unless you
+// can demonstrate contention and show that this would lead to an improvement.
+// This lock does not make any guarantees of fairness, which can lead to writer
+// starvation under certain access patterns. You should carefully consider your
+// writer access patterns before using this lock.
+class BASE_EXPORT ReadWriteLock {
+ public:
+ ReadWriteLock();
+ ~ReadWriteLock();
+
+ // Reader lock functions.
+ void ReadAcquire();
+ void ReadRelease();
+
+ // Writer lock functions.
+ void WriteAcquire();
+ void WriteRelease();
+
+ private:
+#if defined(OS_WIN)
+ using NativeHandle = SRWLOCK;
+#elif defined(OS_NACL)
+ using NativeHandle = Lock;
+#elif defined(OS_POSIX)
+ using NativeHandle = pthread_rwlock_t;
+#endif
+
+ NativeHandle native_handle_;
+
+#if defined(OS_NACL)
+ // Even though NaCl has a pthread_rwlock implementation, the build rules don't
+ // make it universally available. So instead, implement a slower and trivial
+ // reader-writer lock using a regular mutex.
+ // TODO(amistry): Remove this and use the posix implementation when it's
+ // available in all build configurations.
+ uint32_t readers_ = 0;
+ // base::Lock does checking to ensure the lock is acquired and released on the
+ // same thread. This is not the case for this lock, so use pthread mutexes
+ // directly here.
+ pthread_mutex_t writer_lock_ = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ReadWriteLock);
+};
+
+class AutoReadLock {
+ public:
+ explicit AutoReadLock(ReadWriteLock& lock) : lock_(lock) {
+ lock_.ReadAcquire();
+ }
+ ~AutoReadLock() {
+ lock_.ReadRelease();
+ }
+
+ private:
+ ReadWriteLock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoReadLock);
+};
+
+class AutoWriteLock {
+ public:
+ explicit AutoWriteLock(ReadWriteLock& lock) : lock_(lock) {
+ lock_.WriteAcquire();
+ }
+ ~AutoWriteLock() {
+ lock_.WriteRelease();
+ }
+
+ private:
+ ReadWriteLock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoWriteLock);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
diff --git a/base/synchronization/read_write_lock_posix.cc b/base/synchronization/read_write_lock_posix.cc
new file mode 100644
index 0000000000..e5de091f06
--- /dev/null
+++ b/base/synchronization/read_write_lock_posix.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace subtle {
+
+ReadWriteLock::ReadWriteLock() : native_handle_(PTHREAD_RWLOCK_INITIALIZER) {}
+
+ReadWriteLock::~ReadWriteLock() {
+ int result = pthread_rwlock_destroy(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadAcquire() {
+ int result = pthread_rwlock_rdlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadRelease() {
+ int result = pthread_rwlock_unlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteAcquire() {
+ int result = pthread_rwlock_wrlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteRelease() {
+ int result = pthread_rwlock_unlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index b5d91d00b5..3863e98455 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -43,11 +43,18 @@ class TimeDelta;
// be better off just using an Windows event directly.
class BASE_EXPORT WaitableEvent {
public:
- // If manual_reset is true, then to set the event state to non-signaled, a
- // consumer must call the Reset method. If this parameter is false, then the
- // system automatically resets the event state to non-signaled after a single
- // waiting thread has been released.
- WaitableEvent(bool manual_reset, bool initially_signaled);
+ // Indicates whether a WaitableEvent should automatically reset the event
+ // state after a single waiting thread has been released or remain signaled
+ // until Reset() is manually invoked.
+ enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+ // Indicates whether a new WaitableEvent should start in a signaled state or
+ // not.
+ enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+ // Constructs a WaitableEvent with policy and initial state as detailed in
+ // the above enums.
+ WaitableEvent(ResetPolicy reset_policy, InitialState initial_state);
#if defined(OS_WIN)
// Create a WaitableEvent from an Event HANDLE which has already been
@@ -150,7 +157,7 @@ class BASE_EXPORT WaitableEvent {
struct WaitableEventKernel :
public RefCountedThreadSafe<WaitableEventKernel> {
public:
- WaitableEventKernel(bool manual_reset, bool initially_signaled);
+ WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
bool Dequeue(Waiter* waiter, void* tag);
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 64d4376fe5..b32c882711 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -39,12 +39,11 @@ namespace base {
// -----------------------------------------------------------------------------
// This is just an abstract base class for waking the two types of waiters
// -----------------------------------------------------------------------------
-WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
- : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
-}
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+ InitialState initial_state)
+ : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
-WaitableEvent::~WaitableEvent() {
-}
+WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
base::AutoLock locked(kernel_->lock_);
@@ -348,14 +347,13 @@ size_t WaitableEvent::EnqueueMany
// -----------------------------------------------------------------------------
// Private functions...
-WaitableEvent::WaitableEventKernel::WaitableEventKernel(bool manual_reset,
- bool initially_signaled)
- : manual_reset_(manual_reset),
- signaled_(initially_signaled) {
-}
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+ ResetPolicy reset_policy,
+ InitialState initial_state)
+ : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+ signaled_(initial_state == InitialState::SIGNALED) {}
-WaitableEvent::WaitableEventKernel::~WaitableEventKernel() {
-}
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
// -----------------------------------------------------------------------------
// Wake all waiting waiters. Called with lock held.
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index 2930409b59..ac5c9f1255 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -15,7 +15,8 @@
namespace base {
TEST(WaitableEventTest, ManualBasics) {
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EXPECT_FALSE(event.IsSignaled());
@@ -33,7 +34,8 @@ TEST(WaitableEventTest, ManualBasics) {
}
TEST(WaitableEventTest, AutoBasics) {
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EXPECT_FALSE(event.IsSignaled());
@@ -55,8 +57,10 @@ TEST(WaitableEventTest, AutoBasics) {
TEST(WaitableEventTest, WaitManyShortcut) {
WaitableEvent* ev[5];
- for (unsigned i = 0; i < 5; ++i)
- ev[i] = new WaitableEvent(false, false);
+ for (unsigned i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
ev[3]->Signal();
EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
@@ -94,7 +98,9 @@ class WaitableEventSignaler : public PlatformThread::Delegate {
// Tests that a WaitableEvent can be safely deleted when |Wait| is done without
// additional synchronization.
TEST(WaitableEventTest, WaitAndDelete) {
- WaitableEvent* ev = new WaitableEvent(false, false);
+ WaitableEvent* ev =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev);
PlatformThreadHandle thread;
@@ -110,8 +116,10 @@ TEST(WaitableEventTest, WaitAndDelete) {
// without additional synchronization.
TEST(WaitableEventTest, WaitMany) {
WaitableEvent* ev[5];
- for (unsigned i = 0; i < 5; ++i)
- ev[i] = new WaitableEvent(false, false);
+ for (unsigned i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev[2]);
PlatformThreadHandle thread;
@@ -135,7 +143,9 @@ TEST(WaitableEventTest, WaitMany) {
#define MAYBE_TimedWait TimedWait
#endif
TEST(WaitableEventTest, MAYBE_TimedWait) {
- WaitableEvent* ev = new WaitableEvent(false, false);
+ WaitableEvent* ev =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
TimeDelta thread_delay = TimeDelta::FromMilliseconds(10);
WaitableEventSignaler signaler(thread_delay, ev);
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
index ddb3f5bcda..8d9066c702 100644
--- a/base/sys_byteorder.h
+++ b/base/sys_byteorder.h
@@ -15,27 +15,35 @@
#include "build/build_config.h"
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
namespace base {
// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
inline uint16_t ByteSwap(uint16_t x) {
- return ((x & 0x00ff) << 8) | ((x & 0xff00) >> 8);
+#if defined(COMPILER_MSVC)
+ return _byteswap_ushort(x);
+#else
+ return __builtin_bswap16(x);
+#endif
}
inline uint32_t ByteSwap(uint32_t x) {
- return ((x & 0x000000fful) << 24) | ((x & 0x0000ff00ul) << 8) |
- ((x & 0x00ff0000ul) >> 8) | ((x & 0xff000000ul) >> 24);
+#if defined(COMPILER_MSVC)
+ return _byteswap_ulong(x);
+#else
+ return __builtin_bswap32(x);
+#endif
}
inline uint64_t ByteSwap(uint64_t x) {
- return ((x & 0x00000000000000ffull) << 56) |
- ((x & 0x000000000000ff00ull) << 40) |
- ((x & 0x0000000000ff0000ull) << 24) |
- ((x & 0x00000000ff000000ull) << 8) |
- ((x & 0x000000ff00000000ull) >> 8) |
- ((x & 0x0000ff0000000000ull) >> 24) |
- ((x & 0x00ff000000000000ull) >> 40) |
- ((x & 0xff00000000000000ull) >> 56);
+#if defined(COMPILER_MSVC)
+ return _byteswap_uint64(x);
+#else
+ return __builtin_bswap64(x);
+#endif
}
// Converts the bytes in |x| from host order (endianness) to little endian, and
diff --git a/base/sys_info.h b/base/sys_info.h
index 5686dcbb49..b10747703d 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -50,6 +50,10 @@ class BASE_EXPORT SysInfo {
// or -1 on failure.
static int64_t AmountOfFreeDiskSpace(const FilePath& path);
+ // Return the total disk space in bytes on the volume containing |path|, or -1
+ // on failure.
+ static int64_t AmountOfTotalDiskSpace(const FilePath& path);
+
// Returns system uptime.
static TimeDelta Uptime();
@@ -93,12 +97,6 @@ class BASE_EXPORT SysInfo {
// allocate.
static size_t VMAllocationGranularity();
-#if defined(OS_POSIX) && !defined(OS_MACOSX)
- // Returns the maximum SysV shared memory segment size, or zero if there is no
- // limit.
- static uint64_t MaxSharedMemorySize();
-#endif // defined(OS_POSIX) && !defined(OS_MACOSX)
-
#if defined(OS_CHROMEOS)
typedef std::map<std::string, std::string> LsbReleaseMap;
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 300ef2c0c8..298d245ecf 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -33,28 +33,9 @@ int64_t AmountOfPhysicalMemory() {
return AmountOfMemory(_SC_PHYS_PAGES);
}
-uint64_t MaxSharedMemorySize() {
- std::string contents;
- base::ReadFileToString(base::FilePath("/proc/sys/kernel/shmmax"), &contents);
- DCHECK(!contents.empty());
- if (!contents.empty() && contents.back() == '\n') {
- contents.erase(contents.length() - 1);
- }
-
- uint64_t limit;
- if (!base::StringToUint64(contents, &limit)) {
- limit = 0;
- }
- DCHECK_GT(limit, 0u);
- return limit;
-}
-
base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfPhysicalMemory>>::Leaky
g_lazy_physical_memory = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<
- base::internal::LazySysInfoValue<uint64_t, MaxSharedMemorySize>>::Leaky
- g_lazy_max_shared_memory = LAZY_INSTANCE_INITIALIZER;
} // namespace
@@ -71,11 +52,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
}
// static
-uint64_t SysInfo::MaxSharedMemorySize() {
- return g_lazy_max_shared_memory.Get().value();
-}
-
-// static
std::string SysInfo::CPUModelName() {
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
const char kCpuModelPrefix[] = "Hardware";
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index 85ae039118..5d1c450139 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -73,6 +73,20 @@ base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
+bool GetDiskSpaceInfo(const base::FilePath& path,
+ int64_t* available_bytes,
+ int64_t* total_bytes) {
+ struct statvfs stats;
+ if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+ return false;
+
+ if (available_bytes)
+ *available_bytes = static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+ if (total_bytes)
+ *total_bytes = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+ return true;
+}
+
} // namespace
namespace base {
@@ -92,10 +106,20 @@ int64_t SysInfo::AmountOfVirtualMemory() {
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
base::ThreadRestrictions::AssertIOAllowed();
- struct statvfs stats;
- if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+ int64_t available;
+ if (!GetDiskSpaceInfo(path, &available, nullptr))
+ return -1;
+ return available;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+ base::ThreadRestrictions::AssertIOAllowed();
+
+ int64_t total;
+ if (!GetDiskSpaceInfo(path, nullptr, &total))
return -1;
- return static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+ return total;
}
#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index 3f284ba868..0231df6379 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -16,13 +16,6 @@
typedef PlatformTest SysInfoTest;
using base::FilePath;
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
-TEST_F(SysInfoTest, MaxSharedMemorySize) {
- // We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GT(base::SysInfo::MaxSharedMemorySize(), 0u);
-}
-#endif
-
TEST_F(SysInfoTest, NumProcs) {
// We aren't actually testing that it's correct, just that it's sane.
EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
@@ -40,7 +33,15 @@ TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GT(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+ EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+ << tmp_path.value();
+}
+
+TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ FilePath tmp_path;
+ ASSERT_TRUE(base::GetTempDir(&tmp_path));
+ EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
<< tmp_path.value();
}
diff --git a/base/task_runner.h b/base/task_runner.h
index 6dd82ccaca..9593835eeb 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -9,13 +9,10 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
+#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
namespace base {
struct TaskRunnerTraits;
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
index 6267559d1e..daa50257f1 100644
--- a/base/task_scheduler/scheduler_lock_unittest.cc
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -56,8 +56,11 @@ class BasicLockAcquireAndWaitThread : public SimpleThread {
explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
: SimpleThread("BasicLockAcquireAndWaitThread"),
lock_(lock),
- lock_acquire_event_(false, false),
- main_thread_continue_event_(false, false) {}
+ lock_acquire_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ main_thread_continue_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
+ }
void WaitForLockAcquisition() {
lock_acquire_event_.Wait();
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
index 37cb8d5b9a..3fa037fa35 100644
--- a/base/task_scheduler/sequence.h
+++ b/base/task_scheduler/sequence.h
@@ -26,8 +26,8 @@ namespace internal {
// Note: there is a known refcounted-ownership cycle in the Scheduler
// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
// This is okay so long as the other owners of Sequence (PriorityQueue and
-// SchedulerWorkerThread in alternance and
-// SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork()
+// SchedulerWorker in alternation and
+// SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork()
// temporarily) keep running it (and taking Tasks from it as a result). A
// dangling reference cycle would only occur should they release their reference
// to it while it's not empty. In other words, it is only correct for them to
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
index 523fd137b6..0c0d304dcf 100644
--- a/base/task_scheduler/task_traits.h
+++ b/base/task_scheduler/task_traits.h
@@ -41,7 +41,7 @@ enum class TaskPriority {
enum class TaskShutdownBehavior {
// Tasks posted with this mode which have not started executing before
// shutdown is initiated will never run. Tasks with this mode running at
- // shutdown will be ignored (the worker thread will not be joined).
+ // shutdown will be ignored (the worker will not be joined).
//
// This option provides a nice way to post stuff you don't want blocking
// shutdown. For example, you might be doing a slow DNS lookup and if it's
diff --git a/base/template_util.h b/base/template_util.h
index 74c8e5afdc..1bfc1ac814 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -6,11 +6,23 @@
#define BASE_TEMPLATE_UTIL_H_
#include <stddef.h>
+#include <iosfwd>
#include <type_traits>
#include <utility>
#include "build/build_config.h"
+// This hacks around libstdc++ 4.6 missing stuff in type_traits, while we need
+// to support it.
+#define CR_GLIBCXX_4_7_0 20120322
+#define CR_GLIBCXX_4_5_4 20120702
+#define CR_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) && \
+ (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == CR_GLIBCXX_4_6_4)
+#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
namespace base {
template <class T> struct is_non_const_reference : std::false_type {};
@@ -57,6 +69,15 @@ struct IsAssignableImpl
template <class Lvalue, class Rvalue>
struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+ decltype(void(std::declval<std::ostream&>()
+ << std::declval<T>()))>
+ : std::true_type {};
+
} // namespace internal
// TODO(crbug.com/554293): Remove this when all platforms have this in the std
@@ -82,6 +103,31 @@ struct is_move_assignable
const typename std::add_rvalue_reference<T>::type> {
};
+// underlying_type produces the integer type backing an enum type.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <typename T>
+struct underlying_type {
+ using type = __underlying_type(T);
+};
+#else
+template <typename T>
+using underlying_type = std::underlying_type<T>;
+#endif
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <class T>
+using is_trivially_destructible = std::has_trivial_destructor<T>;
+#else
+template <class T>
+using is_trivially_destructible = std::is_trivially_destructible<T>;
+#endif
+
} // namespace base
+#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+
#endif // BASE_TEMPLATE_UTIL_H_
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index 5686d7c752..921596474b 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -4,11 +4,26 @@
#include "base/template_util.h"
+#include <string>
+
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace {
+enum SimpleEnum { SIMPLE_ENUM };
+enum EnumWithExplicitType : uint64_t { ENUM_WITH_EXPLICIT_TYPE };
+enum class ScopedEnum { SCOPED_ENUM };
+enum class ScopedEnumWithOperator { SCOPED_ENUM_WITH_OPERATOR };
+std::ostream& operator<<(std::ostream& os, ScopedEnumWithOperator v) {
+ return os;
+}
+struct SimpleStruct {};
+struct StructWithOperator {};
+std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
+ return os;
+}
+
// is_non_const_reference<Type>
static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
static_assert(!is_non_const_reference<const int&>::value,
@@ -48,5 +63,67 @@ static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
+// A few standard types that definitely support printing.
+static_assert(internal::SupportsOstreamOperator<int>::value,
+ "ints should be printable");
+static_assert(internal::SupportsOstreamOperator<const char*>::value,
+ "C strings should be printable");
+static_assert(internal::SupportsOstreamOperator<std::string>::value,
+ "std::string should be printable");
+
+// Various kinds of enums operator<< support.
+static_assert(internal::SupportsOstreamOperator<SimpleEnum>::value,
+ "simple enum should be printable by value");
+static_assert(internal::SupportsOstreamOperator<const SimpleEnum&>::value,
+ "simple enum should be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<EnumWithExplicitType>::value,
+ "enum with explicit type should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const EnumWithExplicitType&>::value,
+ "enum with explicit type should be printable by const ref");
+static_assert(!internal::SupportsOstreamOperator<ScopedEnum>::value,
+ "scoped enum should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const ScopedEnum&>::value,
+ "simple enum should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<ScopedEnumWithOperator>::value,
+ "scoped enum with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const ScopedEnumWithOperator&>::value,
+ "scoped enum with operator<< should be printable by const ref");
+
+// operator<< support on structs.
+static_assert(!internal::SupportsOstreamOperator<SimpleStruct>::value,
+ "simple struct should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const SimpleStruct&>::value,
+ "simple struct should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<StructWithOperator>::value,
+ "struct with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const StructWithOperator&>::value,
+ "struct with operator<< should be printable by const ref");
+
+// underlying type of enums
+static_assert(std::is_integral<underlying_type<SimpleEnum>::type>::value,
+ "simple enum must have some integral type");
+static_assert(
+ std::is_same<underlying_type<EnumWithExplicitType>::type, uint64_t>::value,
+ "explicit type must be detected");
+static_assert(std::is_same<underlying_type<ScopedEnum>::type, int>::value,
+ "scoped enum defaults to int");
+
+struct TriviallyDestructible {
+ int field;
+};
+
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
+static_assert(is_trivially_destructible<int>::value, "IsTriviallyDestructible");
+static_assert(is_trivially_destructible<TriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+static_assert(!is_trivially_destructible<NonTriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+
} // namespace
} // namespace base
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index a0801410a5..51863a2a0c 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -9,9 +9,8 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
-source_set("test_config") {
- # TODO http://crbug.com/412064 enable this flag all the time.
- testonly = !is_component_build
+static_library("test_config") {
+ testonly = true
sources = [
"test_switches.cc",
"test_switches.h",
@@ -24,10 +23,10 @@ source_set("test_config") {
}
# GYP: //base/base.gyp:test_support_base
-source_set("test_support") {
- # TODO http://crbug.com/412064 enable this flag all the time.
- testonly = !is_component_build
+static_library("test_support") {
+ testonly = true
sources = [
+ "../trace_event/trace_config_memory_test_util.h",
"gtest_util.cc",
"gtest_util.h",
"gtest_xml_unittest_result_printer.cc",
@@ -130,6 +129,8 @@ source_set("test_support") {
sources += [
"launcher/test_launcher.cc",
"launcher/test_launcher.h",
+ "launcher/test_launcher_tracer.cc",
+ "launcher/test_launcher_tracer.h",
"launcher/test_results_tracker.cc",
"launcher/unit_test_launcher.cc",
"multiprocess_test.cc",
@@ -216,6 +217,9 @@ config("perf_test_config") {
defines = [ "PERF_TEST" ]
}
+# This is a source set instead of a static library because it seems like some
+# linkers get confused when "main" is in a static library, and if you link to
+# this, you always want the object file anyway.
source_set("test_support_perf") {
testonly = true
sources = [
@@ -230,7 +234,7 @@ source_set("test_support_perf") {
public_configs = [ ":perf_test_config" ]
}
-source_set("test_launcher_nacl_nonsfi") {
+static_library("test_launcher_nacl_nonsfi") {
testonly = true
sources = [
"launcher/test_launcher_nacl_nonsfi.cc",
@@ -241,7 +245,7 @@ source_set("test_launcher_nacl_nonsfi") {
]
}
-source_set("run_all_unittests") {
+static_library("run_all_unittests") {
testonly = true
sources = [
"run_all_unittests.cc",
diff --git a/base/test/data/prefs/invalid.json b/base/test/data/prefs/invalid.json
deleted file mode 100644
index 43392a92fb..0000000000
--- a/base/test/data/prefs/invalid.json
+++ /dev/null
@@ -1 +0,0 @@
-!@#$%^& \ No newline at end of file
diff --git a/base/test/data/prefs/read.json b/base/test/data/prefs/read.json
deleted file mode 100644
index ea578a47f4..0000000000
--- a/base/test/data/prefs/read.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "homepage": "http://www.cnn.com",
- "some_directory": "/usr/local/",
- "tabs": {
- "new_windows_in_tabs": true,
- "max_tabs": 20
- }
-}
diff --git a/base/test/data/prefs/write.golden.json b/base/test/data/prefs/write.golden.json
deleted file mode 100644
index fb1fff144a..0000000000
--- a/base/test/data/prefs/write.golden.json
+++ /dev/null
@@ -1 +0,0 @@
-{"homepage":"http://www.cnn.com","long_int":{"pref":"214748364842"},"some_directory":"/usr/sbin/","tabs":{"max_tabs":10,"new_windows_in_tabs":false}} \ No newline at end of file
diff --git a/base/test/sequenced_worker_pool_owner.cc b/base/test/sequenced_worker_pool_owner.cc
index 37bad2b29d..8781495d7d 100644
--- a/base/test/sequenced_worker_pool_owner.cc
+++ b/base/test/sequenced_worker_pool_owner.cc
@@ -54,7 +54,8 @@ void SequencedWorkerPoolOwner::WillWaitForShutdown() {
}
void SequencedWorkerPoolOwner::OnDestruct() {
- constructor_message_loop_->PostTask(FROM_HERE, exit_loop_.QuitClosure());
+ constructor_message_loop_->task_runner()->PostTask(FROM_HERE,
+ exit_loop_.QuitClosure());
}
} // namespace base
diff --git a/base/test/test_io_thread.cc b/base/test/test_io_thread.cc
index 48c1e16531..1fa041251c 100644
--- a/base/test/test_io_thread.cc
+++ b/base/test/test_io_thread.cc
@@ -56,7 +56,8 @@ void TestIOThread::PostTask(const tracked_objects::Location& from_here,
void TestIOThread::PostTaskAndWait(const tracked_objects::Location& from_here,
const base::Closure& task) {
- base::WaitableEvent event(false, false);
+ base::WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(from_here,
base::Bind(&PostTaskAndWaitHelper, &event, task));
event.Wait();
diff --git a/base/test/test_switches.cc b/base/test/test_switches.cc
index 40f20d7b7d..817a38edb1 100644
--- a/base/test/test_switches.cc
+++ b/base/test/test_switches.cc
@@ -62,6 +62,10 @@ const char switches::kTestLauncherTotalShards[] =
// Time (in milliseconds) that the tests should wait before timing out.
const char switches::kTestLauncherTimeout[] = "test-launcher-timeout";
+
+// Path where to save a trace of test launcher's execution.
+const char switches::kTestLauncherTrace[] = "test-launcher-trace";
+
// TODO(phajdan.jr): Clean up the switch names.
const char switches::kTestTinyTimeout[] = "test-tiny-timeout";
const char switches::kUiTestActionTimeout[] = "ui-test-action-timeout";
diff --git a/base/test/test_switches.h b/base/test/test_switches.h
index 419b755541..88ef0ced77 100644
--- a/base/test/test_switches.h
+++ b/base/test/test_switches.h
@@ -24,6 +24,7 @@ extern const char kTestLauncherPrintWritablePath[];
extern const char kTestLauncherShardIndex[];
extern const char kTestLauncherTotalShards[];
extern const char kTestLauncherTimeout[];
+extern const char kTestLauncherTrace[];
extern const char kTestTinyTimeout[];
extern const char kUiTestActionTimeout[];
extern const char kUiTestActionMaxTimeout[];
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index e73dd65b06..086cfc97d3 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -60,7 +60,9 @@ void TraceEventAnalyzerTest::BeginTracing() {
void TraceEventAnalyzerTest::EndTracing() {
base::trace_event::TraceLog::GetInstance()->SetDisabled();
- base::WaitableEvent flush_complete_event(false, false);
+ base::WaitableEvent flush_complete_event(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
base::trace_event::TraceLog::GetInstance()->Flush(
base::Bind(&TraceEventAnalyzerTest::OnTraceDataCollected,
base::Unretained(this),
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index 72da93bf56..9b217a9c65 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -142,8 +142,8 @@ class BASE_EXPORT PlatformThread {
// Sleeps for the specified duration.
static void Sleep(base::TimeDelta duration);
- // Sets the thread name visible to debuggers/tools. This has no effect
- // otherwise.
+ // Sets the thread name visible to debuggers/tools. This will try to
+ // initialize the context for current thread unless it's a WorkerThread.
static void SetName(const std::string& name);
// Gets the thread name, if previously set by SetName.
@@ -180,6 +180,10 @@ class BASE_EXPORT PlatformThread {
// |thread_handle|.
static void Join(PlatformThreadHandle thread_handle);
+ // Detaches and releases the thread handle. The thread is no longer joinable
+ // and |thread_handle| is invalidated after this call.
+ static void Detach(PlatformThreadHandle thread_handle);
+
// Toggles the current thread's priority at runtime. A thread may not be able
// to raise its priority back up after lowering it if the process does not
// have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
diff --git a/base/threading/platform_thread_freebsd.cc b/base/threading/platform_thread_freebsd.cc
deleted file mode 100644
index e69de29bb2..0000000000
--- a/base/threading/platform_thread_freebsd.cc
+++ /dev/null
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index d8bcf923a2..2321b3cd49 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -209,6 +209,11 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), NULL));
}
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+ CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
// Mac has its own Set/GetCurrentThreadPriority() implementations.
#if !defined(OS_MACOSX)
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 82221e1100..2d99ed8750 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -21,48 +21,76 @@
namespace base {
-// Trivial tests that thread runs and doesn't crash on create and join ---------
+// Trivial tests that thread runs and doesn't crash on create, join, or detach -
namespace {
class TrivialThread : public PlatformThread::Delegate {
public:
- TrivialThread() : did_run_(false) {}
+ TrivialThread() : run_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
- void ThreadMain() override { did_run_ = true; }
+ void ThreadMain() override { run_event_.Signal(); }
- bool did_run() const { return did_run_; }
+ WaitableEvent& run_event() { return run_event_; }
private:
- bool did_run_;
+ WaitableEvent run_event_;
DISALLOW_COPY_AND_ASSIGN(TrivialThread);
};
} // namespace
-TEST(PlatformThreadTest, Trivial) {
+TEST(PlatformThreadTest, TrivialJoin) {
TrivialThread thread;
PlatformThreadHandle handle;
- ASSERT_FALSE(thread.did_run());
+ ASSERT_FALSE(thread.run_event().IsSignaled());
ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
PlatformThread::Join(handle);
- ASSERT_TRUE(thread.did_run());
+ ASSERT_TRUE(thread.run_event().IsSignaled());
}
-TEST(PlatformThreadTest, TrivialTimesTen) {
+TEST(PlatformThreadTest, TrivialJoinTimesTen) {
TrivialThread thread[10];
PlatformThreadHandle handle[arraysize(thread)];
for (size_t n = 0; n < arraysize(thread); n++)
- ASSERT_FALSE(thread[n].did_run());
+ ASSERT_FALSE(thread[n].run_event().IsSignaled());
for (size_t n = 0; n < arraysize(thread); n++)
ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
for (size_t n = 0; n < arraysize(thread); n++)
PlatformThread::Join(handle[n]);
for (size_t n = 0; n < arraysize(thread); n++)
- ASSERT_TRUE(thread[n].did_run());
+ ASSERT_TRUE(thread[n].run_event().IsSignaled());
+}
+
+// The following detach tests are by nature racy. The run_event approximates the
+// end and termination of the thread, but threads could persist shortly after
+// the test completes.
+TEST(PlatformThreadTest, TrivialDetach) {
+ TrivialThread thread;
+ PlatformThreadHandle handle;
+
+ ASSERT_FALSE(thread.run_event().IsSignaled());
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+ PlatformThread::Detach(handle);
+ thread.run_event().Wait();
+}
+
+TEST(PlatformThreadTest, TrivialDetachTimesTen) {
+ TrivialThread thread[10];
+ PlatformThreadHandle handle[arraysize(thread)];
+
+ for (size_t n = 0; n < arraysize(thread); n++)
+ ASSERT_FALSE(thread[n].run_event().IsSignaled());
+ for (size_t n = 0; n < arraysize(thread); n++) {
+ ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+ PlatformThread::Detach(handle[n]);
+ }
+ for (size_t n = 0; n < arraysize(thread); n++)
+ thread[n].run_event().Wait();
}
// Tests of basic thread functions ---------------------------------------------
@@ -73,8 +101,10 @@ class FunctionTestThread : public PlatformThread::Delegate {
public:
FunctionTestThread()
: thread_id_(kInvalidThreadId),
- termination_ready_(true, false),
- terminate_thread_(true, false),
+ termination_ready_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ terminate_thread_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
done_(false) {}
~FunctionTestThread() override {
EXPECT_TRUE(terminate_thread_.IsSignaled())
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
index 2c3af3255d..88b36a8d64 100644
--- a/base/threading/sequenced_task_runner_handle.cc
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -8,7 +8,6 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/sequenced_task_runner.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
index e6da18d215..e6dec1e9f8 100644
--- a/base/threading/sequenced_task_runner_handle.h
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -8,11 +8,10 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
namespace base {
-class SequencedTaskRunner;
-
class BASE_EXPORT SequencedTaskRunnerHandle {
public:
// Returns a SequencedTaskRunner which guarantees that posted tasks will only
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
index 7059ceab76..6c64a17d6a 100644
--- a/base/threading/simple_thread.cc
+++ b/base/threading/simple_thread.cc
@@ -12,15 +12,24 @@
namespace base {
SimpleThread::SimpleThread(const std::string& name_prefix)
- : name_prefix_(name_prefix), name_(name_prefix),
- thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
SimpleThread::SimpleThread(const std::string& name_prefix,
const Options& options)
- : name_prefix_(name_prefix), name_(name_prefix), options_(options),
- thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ options_(options),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
SimpleThread::~SimpleThread() {
DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
@@ -93,8 +102,8 @@ DelegateSimpleThreadPool::DelegateSimpleThreadPool(
int num_threads)
: name_prefix_(name_prefix),
num_threads_(num_threads),
- dry_(true, false) {
-}
+ dry_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
DelegateSimpleThreadPool::~DelegateSimpleThreadPool() {
DCHECK(threads_.empty());
diff --git a/base/threading/simple_thread_unittest.cc b/base/threading/simple_thread_unittest.cc
index 7229d362f1..14dd4591f1 100644
--- a/base/threading/simple_thread_unittest.cc
+++ b/base/threading/simple_thread_unittest.cc
@@ -95,7 +95,8 @@ TEST(SimpleThreadTest, CreateAndJoin) {
TEST(SimpleThreadTest, WaitForEvent) {
// Create a thread, and wait for it to signal us.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitEventRunner runner(&event);
DelegateSimpleThread thread(&runner, "event_waiter");
@@ -108,7 +109,8 @@ TEST(SimpleThreadTest, WaitForEvent) {
}
TEST(SimpleThreadTest, NamedWithOptions) {
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitEventRunner runner(&event);
SimpleThread::Options options;
@@ -152,7 +154,8 @@ TEST(SimpleThreadTest, ThreadPool) {
// We can reuse our pool. Verify that all 10 threads can actually run in
// parallel, so this test will only pass if there are actually 10 threads.
AtomicSequenceNumber seq2;
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
// Changing 9 to 10, for example, would cause us JoinAll() to never return.
VerifyPoolRunner verifier(&seq2, 9, &event);
pool.Start();
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index b6fead68fc..9cdc6912ea 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/location.h"
+#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
@@ -65,11 +66,13 @@ Thread::Thread(const std::string& name)
running_(false),
thread_(0),
id_(kInvalidThreadId),
- id_event_(true, false),
+ id_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
message_loop_(nullptr),
message_loop_timer_slack_(TIMER_SLACK_NONE),
name_(name),
- start_event_(true, false) {
+ start_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
}
Thread::~Thread() {
@@ -197,8 +200,8 @@ bool Thread::IsRunning() const {
return running_;
}
-void Thread::Run(MessageLoop* message_loop) {
- message_loop->Run();
+void Thread::Run(MessageLoop*) {
+ RunLoop().Run();
}
void Thread::SetThreadWasQuitProperly(bool flag) {
@@ -227,7 +230,6 @@ void Thread::ThreadMain() {
DCHECK(message_loop_);
std::unique_ptr<MessageLoop> message_loop(message_loop_);
message_loop_->BindToCurrentThread();
- message_loop_->set_thread_name(name_);
message_loop_->SetTimerSlack(message_loop_timer_slack_);
#if defined(OS_WIN)
diff --git a/base/threading/thread_local_unittest.cc b/base/threading/thread_local_unittest.cc
index e94c1db1c8..cdc1ca6f56 100644
--- a/base/threading/thread_local_unittest.cc
+++ b/base/threading/thread_local_unittest.cc
@@ -82,7 +82,8 @@ TEST(ThreadLocalTest, Pointer) {
static char* const kBogusPointer = reinterpret_cast<char*>(0x1234);
char* tls_val;
- base::WaitableEvent done(true, false);
+ base::WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
GetThreadLocal getter(&tlp, &done);
getter.set_ptr(&tls_val);
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index d8e3cb1b55..4212a4b6eb 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -44,7 +44,7 @@ class ScopedAllowWaitForAndroidLayoutTests;
class ScopedAllowWaitForDebugURL;
class SoftwareOutputDeviceMus;
class TextInputClientMac;
-class RasterWorkerPool;
+class CategorizedWorkerPool;
} // namespace content
namespace dbus {
class Bus;
@@ -53,9 +53,6 @@ namespace disk_cache {
class BackendImpl;
class InFlightIO;
}
-namespace gles2 {
-class CommandBufferClientImpl;
-}
namespace gpu {
class GpuChannelHost;
}
@@ -63,8 +60,10 @@ namespace mojo {
namespace common {
class MessagePumpMojo;
}
+class SyncCallRestrictions;
}
-namespace mus {
+namespace ui {
+class CommandBufferClientImpl;
class CommandBufferLocal;
class GpuState;
}
@@ -201,7 +200,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class ::ScopedAllowWaitForLegacyWebViewApi;
friend class cc::CompletionEvent;
friend class cc::SingleThreadTaskGraphRunner;
- friend class content::RasterWorkerPool;
+ friend class content::CategorizedWorkerPool;
friend class remoting::AutoThread;
friend class ui::WindowResizeHelperMac;
friend class MessagePumpDefault;
@@ -211,10 +210,11 @@ class BASE_EXPORT ThreadRestrictions {
friend class ThreadTestHelper;
friend class PlatformThread;
friend class android::JavaHandlerThread;
- friend class gles2::CommandBufferClientImpl;
friend class mojo::common::MessagePumpMojo;
- friend class mus::CommandBufferLocal;
- friend class mus::GpuState;
+ friend class mojo::SyncCallRestrictions;
+ friend class ui::CommandBufferClientImpl;
+ friend class ui::CommandBufferLocal;
+ friend class ui::GpuState;
// END ALLOWED USAGE.
// BEGIN USAGE THAT NEEDS TO BE FIXED.
diff --git a/base/threading/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
index 1b7c13a750..190e18ffc6 100644
--- a/base/threading/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -8,7 +8,6 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/single_thread_task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_local.h"
diff --git a/base/threading/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
index 72ce49e1bd..c8e58935f0 100644
--- a/base/threading/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -8,11 +8,10 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
namespace base {
-class SingleThreadTaskRunner;
-
// ThreadTaskRunnerHandle stores a reference to a thread's TaskRunner
// in thread-local storage. Callers can then retrieve the TaskRunner
// for the current thread by calling ThreadTaskRunnerHandle::Get().
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index bc27088680..b0fd26521a 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -209,7 +209,8 @@ TEST_F(ThreadTest, ThreadId) {
b.Start();
// Post a task that calls GetThreadId() on the created thread.
- base::WaitableEvent event(false, false);
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
base::PlatformThreadId id_from_new_thread;
a.task_runner()->PostTask(
FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 17c3342cb2..6b4c42f601 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -150,8 +150,7 @@ void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
DCHECK(!terminated_)
<< "This thread pool is already terminated. Do not post new tasks.";
- pending_tasks_.push(*pending_task);
- pending_task->task.Reset();
+ pending_tasks_.push(std::move(*pending_task));
// We have enough worker threads.
if (static_cast<size_t>(num_idle_threads_) >= pending_tasks_.size()) {
@@ -186,7 +185,7 @@ PendingTask PosixDynamicThreadPool::WaitForTask() {
}
}
- PendingTask pending_task = pending_tasks_.front();
+ PendingTask pending_task = std::move(pending_tasks_.front());
pending_tasks_.pop();
return pending_task;
}
diff --git a/base/threading/worker_pool_posix_unittest.cc b/base/threading/worker_pool_posix_unittest.cc
index 99a9369607..6cefeed34e 100644
--- a/base/threading/worker_pool_posix_unittest.cc
+++ b/base/threading/worker_pool_posix_unittest.cc
@@ -96,7 +96,8 @@ class PosixDynamicThreadPoolTest : public testing::Test {
counter_(0),
num_waiting_to_start_(0),
num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
- start_(true, false) {}
+ start_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void SetUp() override {
peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
diff --git a/base/threading/worker_pool_unittest.cc b/base/threading/worker_pool_unittest.cc
index 27af50be67..ef4bed136e 100644
--- a/base/threading/worker_pool_unittest.cc
+++ b/base/threading/worker_pool_unittest.cc
@@ -26,7 +26,10 @@ namespace {
class PostTaskAndReplyTester
: public base::RefCountedThreadSafe<PostTaskAndReplyTester> {
public:
- PostTaskAndReplyTester() : finished_(false), test_event_(false, false) {}
+ PostTaskAndReplyTester()
+ : finished_(false),
+ test_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void RunTest() {
ASSERT_TRUE(thread_checker_.CalledOnValidThread());
@@ -69,8 +72,10 @@ class PostTaskAndReplyTester
} // namespace
TEST_F(WorkerPoolTest, PostTask) {
- WaitableEvent test_event(false, false);
- WaitableEvent long_test_event(false, false);
+ WaitableEvent test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent long_test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WorkerPool::PostTask(FROM_HERE,
base::Bind(&WaitableEvent::Signal,
diff --git a/base/time/time.cc b/base/time/time.cc
index 76ffeb7441..3670f55758 100644
--- a/base/time/time.cc
+++ b/base/time/time.cc
@@ -136,11 +136,6 @@ std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
// Time -----------------------------------------------------------------------
// static
-Time Time::Max() {
- return Time(std::numeric_limits<int64_t>::max());
-}
-
-// static
Time Time::FromTimeT(time_t tt) {
if (tt == 0)
return Time(); // Preserve 0 so we can tell it doesn't exist.
@@ -263,6 +258,14 @@ bool Time::FromStringInternal(const char* time_string,
return true;
}
+// static
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+ return lhs.year == rhs.year && lhs.month == rhs.month &&
+ lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+ lhs.minute == rhs.minute && lhs.second == rhs.second &&
+ lhs.millisecond == rhs.millisecond;
+}
+
std::ostream& operator<<(std::ostream& os, Time time) {
Time::Exploded exploded;
time.UTCExplode(&exploded);
diff --git a/base/time/time.h b/base/time/time.h
index 399ec826ce..efece969b0 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -56,6 +56,7 @@
#include <limits>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
@@ -311,6 +312,12 @@ class TimeBase {
// Returns true if this object represents the maximum time.
bool is_max() const { return us_ == std::numeric_limits<int64_t>::max(); }
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static TimeClass Max() {
+ return TimeClass(std::numeric_limits<int64_t>::max());
+ }
+
// For serializing only. Use FromInternalValue() to reconstitute. Please don't
// use this and do arithmetic on it, as it is more error prone than using the
// provided operators.
@@ -438,10 +445,6 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// times are increasing, or that two calls to Now() won't be the same.
static Time Now();
- // Returns the maximum time, which should be greater than any reasonable time
- // with which we might compare it.
- static Time Max();
-
// Returns the current time. Same as Now() except that this function always
// uses system time so that there are no discrepancies between the returned
// time and system time even on virtual environments including our test bot.
@@ -519,11 +522,29 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// Converts an exploded structure representing either the local time or UTC
// into a Time class.
+ // TODO(maksims): Get rid of these in favor of the methods below when
+ // all the callers stop using these ones.
static Time FromUTCExploded(const Exploded& exploded) {
- return FromExploded(false, exploded);
+ base::Time time;
+ ignore_result(FromUTCExploded(exploded, &time));
+ return time;
}
static Time FromLocalExploded(const Exploded& exploded) {
- return FromExploded(true, exploded);
+ base::Time time;
+ ignore_result(FromLocalExploded(exploded, &time));
+ return time;
+ }
+
+ // Converts an exploded structure representing either the local time or UTC
+ // into a Time class. Returns false on a failure when, for example, a day of
+ // month is set to 31 on a 28-30 day month.
+ static bool FromUTCExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(false, exploded, time);
+ }
+ static bool FromLocalExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(true, exploded, time);
}
// Converts a string representation of time to a Time object.
@@ -564,8 +585,12 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
void Explode(bool is_local, Exploded* exploded) const;
// Unexplodes a given time assuming the source is either local time
- // |is_local = true| or UTC |is_local = false|.
- static Time FromExploded(bool is_local, const Exploded& exploded);
+ // |is_local = true| or UTC |is_local = false|. Function returns false on
+ // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+ // to non-exploded time.
+ static bool FromExploded(bool is_local,
+ const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT;
// Converts a string representation of time to a Time object.
// An example of a time string which is converted is as below:-
@@ -577,6 +602,9 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static bool FromStringInternal(const char* time_string,
bool is_local,
Time* parsed_time);
+
+ // Comparison does not consider |day_of_week| when doing the operation.
+ static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
};
// static
@@ -639,7 +667,13 @@ constexpr TimeDelta TimeDelta::FromDouble(double value) {
// static
constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
int64_t positive_value) {
- return (DCHECK(positive_value > 0),
+ return (
+#if !defined(_PREFAST_) || !defined(OS_WIN)
+ // Avoid internal compiler errors in /analyze builds with VS 2015
+ // update 3.
+ // https://connect.microsoft.com/VisualStudio/feedback/details/2870865
+ DCHECK(positive_value > 0),
+#endif
value > std::numeric_limits<int64_t>::max() / positive_value
? Max()
: value < -std::numeric_limits<int64_t>::max() / positive_value
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index c23c4917e7..373ec3a3bc 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -34,7 +34,7 @@ int64_t ComputeCurrentTicks() {
struct timeval boottime;
int mib[2] = {CTL_KERN, KERN_BOOTTIME};
size_t size = sizeof(boottime);
- int kr = sysctl(mib, arraysize(mib), &boottime, &size, NULL, 0);
+ int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
DCHECK_EQ(KERN_SUCCESS, kr);
base::TimeDelta time_difference = base::Time::Now() -
(base::Time::FromTimeT(boottime.tv_sec) +
@@ -168,7 +168,7 @@ Time Time::NowFromSystemTime() {
}
// static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
is_local
? CFTimeZoneCopySystem()
@@ -184,8 +184,28 @@ Time Time::FromExploded(bool is_local, const Exploded& exploded) {
exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
exploded.millisecond);
CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
- return Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
- kWindowsEpochDeltaMicroseconds);
+
+ base::Time converted_time =
+ Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31
+ // on a 28-30 day month, it will return the first day of the next month.
+ // Thus round-trip the time and compare the initial |exploded| with
+ // |utc_to_exploded| time.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
}
void Time::Explode(bool is_local, Exploded* exploded) const {
diff --git a/base/time/time_posix.cc b/base/time/time_posix.cc
index 32614bc086..495e249f00 100644
--- a/base/time/time_posix.cc
+++ b/base/time/time_posix.cc
@@ -211,7 +211,7 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
}
// static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
struct tm timestruct;
timestruct.tm_sec = exploded.second;
timestruct.tm_min = exploded.minute;
@@ -301,8 +301,26 @@ Time Time::FromExploded(bool is_local, const Exploded& exploded) {
}
// Adjust from Unix (1970) to Windows (1601) epoch.
- return Time((milliseconds * kMicrosecondsPerMillisecond) +
- kWindowsEpochDeltaMicroseconds);
+ base::Time converted_time =
+ Time((milliseconds * kMicrosecondsPerMillisecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+ // return the first day of the next month. Thus round-trip the time and
+ // compare the initial |exploded| with |utc_to_exploded| time.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
}
// TimeTicks ------------------------------------------------------------------
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 25c6ca5943..4f47d56522 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -21,6 +21,52 @@ namespace base {
namespace {
+TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
+ // FromUTCExploded must set time to Time(0) and failure, if the day is set to
+ // 31 on a 28-30 day month. Test |exploded| returns Time(0) on 31st of
+ // February and 31st of April. New implementation handles this.
+
+ const struct DateTestData {
+ Time::Exploded explode;
+ bool is_valid;
+ } kDateTestData[] = {
+ // 31st of February
+ {{2016, 2, 0, 31, 12, 30, 0, 0}, true},
+ // 31st of April
+ {{2016, 4, 0, 31, 8, 43, 0, 0}, true},
+ // Negative month
+ {{2016, -5, 0, 2, 4, 10, 0, 0}, false},
+ // Negative date of month
+ {{2016, 6, 0, -15, 2, 50, 0, 0}, false},
+ // Negative hours
+ {{2016, 7, 0, 10, -11, 29, 0, 0}, false},
+ // Negative minutes
+ {{2016, 3, 0, 14, 10, -29, 0, 0}, false},
+ // Negative seconds
+ {{2016, 10, 0, 25, 7, 47, -30, 0}, false},
+ // Negative milliseconds
+ {{2016, 10, 0, 25, 7, 47, 20, -500}, false},
+ // Hours are too large
+ {{2016, 7, 0, 10, 26, 29, 0, 0}, false},
+ // Minutes are too large
+ {{2016, 3, 0, 14, 10, 78, 0, 0}, false},
+ // Seconds are too large
+ {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
+ // Milliseconds are too large
+ {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+ };
+
+ for (const auto& test : kDateTestData) {
+ EXPECT_EQ(test.explode.HasValidValues(), test.is_valid);
+
+ base::Time result;
+ EXPECT_FALSE(base::Time::FromUTCExploded(test.explode, &result));
+ EXPECT_TRUE(result.is_null());
+ EXPECT_FALSE(base::Time::FromLocalExploded(test.explode, &result));
+ EXPECT_TRUE(result.is_null());
+ }
+}
+
// Specialized test fixture allowing time strings without timezones to be
// tested by comparing them to a known time in the local zone.
// See also pr_time_unittests.cc
@@ -80,7 +126,8 @@ TEST_F(TimeTest, TimeT) {
EXPECT_EQ(tms.tm_sec, exploded.second);
// Convert exploded back to the time struct.
- Time our_time_2 = Time::FromLocalExploded(exploded);
+ Time our_time_2;
+ EXPECT_TRUE(Time::FromLocalExploded(exploded, &our_time_2));
EXPECT_TRUE(our_time_1 == our_time_2);
time_t now_t_2 = our_time_2.ToTimeT();
@@ -119,7 +166,8 @@ TEST_F(TimeTest, FromExplodedWithMilliseconds) {
Time::Exploded exploded1 = {0};
now.UTCExplode(&exploded1);
exploded1.millisecond = 500;
- Time time = Time::FromUTCExploded(exploded1);
+ Time time;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded1, &time));
Time::Exploded exploded2 = {0};
time.UTCExplode(&exploded2);
EXPECT_EQ(exploded1.millisecond, exploded2.millisecond);
@@ -137,7 +185,8 @@ TEST_F(TimeTest, LocalExplode) {
Time::Exploded exploded;
a.LocalExplode(&exploded);
- Time b = Time::FromLocalExploded(exploded);
+ Time b;
+ EXPECT_TRUE(Time::FromLocalExploded(exploded, &b));
// The exploded structure doesn't have microseconds, and on Mac & Linux, the
// internal OS conversion uses seconds, which will cause truncation. So we
@@ -150,7 +199,8 @@ TEST_F(TimeTest, UTCExplode) {
Time::Exploded exploded;
a.UTCExplode(&exploded);
- Time b = Time::FromUTCExploded(exploded);
+ Time b;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &b));
EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
}
@@ -565,7 +615,8 @@ TEST_F(TimeTest, FromLocalExplodedCrashOnAndroid) {
static char buffer[] = "TZ=America/Santiago";
putenv(buffer);
tzset();
- Time t = Time::FromLocalExploded(midnight);
+ Time t;
+ EXPECT_TRUE(Time::FromLocalExploded(midnight, &t));
EXPECT_EQ(1381633200, t.ToTimeT());
}
#endif // OS_ANDROID
@@ -787,7 +838,8 @@ TEST(TimeDelta, WindowsEpoch) {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
- Time t = Time::FromUTCExploded(exploded);
+ Time t;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &t));
// Unix 1970 epoch.
EXPECT_EQ(INT64_C(11644473600000000), t.ToInternalValue());
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
index e56efac6e3..6fcd25b93a 100644
--- a/base/timer/timer_unittest.cc
+++ b/base/timer/timer_unittest.cc
@@ -10,7 +10,9 @@
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -115,7 +117,7 @@ void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester f(&did_run);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -127,7 +129,7 @@ void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
// This should run before the timer expires.
- base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
@@ -136,7 +138,7 @@ void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester b(&did_run_b);
b.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(did_run_a);
EXPECT_TRUE(did_run_b);
@@ -150,7 +152,7 @@ void RunTest_OneShotSelfDeletingTimer(
OneShotSelfDeletingTimerTester f(&did_run);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -163,7 +165,7 @@ void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester f(&did_run, delay);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -176,7 +178,7 @@ void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
// This should run before the timer expires.
- base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
@@ -185,7 +187,7 @@ void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester b(&did_run_b, delay);
b.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(did_run_a);
EXPECT_TRUE(did_run_b);
@@ -215,7 +217,7 @@ void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_FALSE(target.signaled());
}
@@ -231,7 +233,7 @@ void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_TRUE(target.signaled());
}
@@ -270,7 +272,7 @@ void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run, 300);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_TRUE(target.signaled());
}
@@ -513,7 +515,7 @@ TEST(TimerTest, ContinuationStopStart) {
timer.Stop();
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
base::Bind(&SetCallbackHappened2));
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(g_callback_happened1);
EXPECT_TRUE(g_callback_happened2);
}
@@ -529,7 +531,7 @@ TEST(TimerTest, ContinuationReset) {
timer.Reset();
// Since Reset happened before task ran, the user_task must not be cleared:
ASSERT_FALSE(timer.user_task().is_null());
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(g_callback_happened1);
}
}
diff --git a/base/trace_event/etw_manifest/BUILD.gn b/base/trace_event/etw_manifest/BUILD.gn
index 1e16672825..19c4ecfdc4 100644
--- a/base/trace_event/etw_manifest/BUILD.gn
+++ b/base/trace_event/etw_manifest/BUILD.gn
@@ -18,8 +18,12 @@ message_compiler("chrome_events_win") {
user_mode_logging = true
- # TOOD(brucedawson) bug 569989: Enable ETW manifest and compile and link it
- # into the proper places. Enabling as-is may add the resources to too many
- # targets. See the bug for more information.
+ # The only code generated from chrome_events_win.man is a header file that
+ # is included by trace_event_etw_export_win.cc, so there is no need to
+ # compile any generated code. The other thing which compile_generated_code
+ # controls in this context is linking in the .res file generated from the
+ # manifest. However this is only needed for ETW provider registration which
+ # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
+ # manifest resource can be skipped in Chrome.
compile_generated_code = false
}
diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc
index 374d5043d1..0f330a817e 100644
--- a/base/trace_event/heap_profiler_allocation_context.cc
+++ b/base/trace_event/heap_profiler_allocation_context.cc
@@ -31,12 +31,23 @@ bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
}
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+ return !(lhs == rhs);
+}
+
AllocationContext::AllocationContext(): type_name(nullptr) {}
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+ const char* type_name)
+ : backtrace(backtrace), type_name(type_name) {}
+
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+ return !(lhs == rhs);
+}
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_context.h b/base/trace_event/heap_profiler_allocation_context.h
index 3566dd08f5..24e2dec73f 100644
--- a/base/trace_event/heap_profiler_allocation_context.h
+++ b/base/trace_event/heap_profiler_allocation_context.h
@@ -71,18 +71,20 @@ struct BASE_EXPORT Backtrace {
// If the stack is higher than what can be stored here, the bottom frames
// (the ones closer to main()) are stored. Depth of 12 is enough for most
// pseudo traces (see above), but not for native traces, where we need more.
- enum { kMaxFrameCount = 24 };
+ enum { kMaxFrameCount = 48 };
StackFrame frames[kMaxFrameCount];
size_t frame_count;
};
bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
// The |AllocationContext| is context metadata that is kept for every allocation
// when heap profiling is enabled. To simplify memory management for book-
// keeping, this struct has a fixed size.
struct BASE_EXPORT AllocationContext {
AllocationContext();
+ AllocationContext(const Backtrace& backtrace, const char* type_name);
Backtrace backtrace;
@@ -95,6 +97,8 @@ struct BASE_EXPORT AllocationContext {
bool BASE_EXPORT operator==(const AllocationContext& lhs,
const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+ const AllocationContext& rhs);
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index fac4a8a7b4..31f311a918 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -168,8 +168,8 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto backtrace = std::begin(ctx.backtrace.frames);
- auto backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx.backtrace.frames);
+ auto* backtrace_end = std::end(ctx.backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 07d5f253dd..3064a6a711 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,8 +34,8 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
- auto actual = std::begin(ctx.backtrace.frames);
- auto actual_bottom = actual + ctx.backtrace.frame_count;
+ auto* actual = std::begin(ctx.backtrace.frames);
+ auto* actual_bottom = actual + ctx.backtrace.frame_count;
auto expected = std::begin(expected_backtrace);
auto expected_bottom = std::end(expected_backtrace);
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index a0fc4be282..2c2cd378bb 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -4,116 +4,20 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
+#include <algorithm>
+
#include "base/trace_event/trace_event_memory_overhead.h"
namespace base {
namespace trace_event {
-AllocationRegister::AllocationRegister()
- : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
-
-AllocationRegister::AllocationRegister(uint32_t num_cells)
- // Reserve enough address space to store |num_cells_| entries if necessary,
- // with a guard page after it to crash the program when attempting to store
- // more entries.
- : num_cells_(num_cells),
- cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
- sizeof(Cell)))),
- buckets_(static_cast<CellIndex*>(
- AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
-
- // The free list is empty. The first unused cell is cell 1, because index
- // 0 is used as list terminator.
- free_list_(0),
- next_unused_cell_(1) {}
-
-AllocationRegister::~AllocationRegister() {
- FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
- FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
-}
-
-void AllocationRegister::Insert(void* address,
- size_t size,
- AllocationContext context) {
- DCHECK(address != nullptr);
- if (size == 0)
- return;
-
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not yet present, so insert it.
- if (*idx_ptr == 0) {
- *idx_ptr = GetFreeCell();
-
- // The address stored in a cell is const as long as it is exposed (via the
- // iterators or |Get|), but because cells are re-used, a const cast is
- // required to set it on insert and remove.
- void* const& allocation_address = cells_[*idx_ptr].allocation.address;
- const_cast<void*&>(allocation_address) = address;
- cells_[*idx_ptr].next = 0;
- }
-
- cells_[*idx_ptr].allocation.size = size;
- cells_[*idx_ptr].allocation.context = context;
-}
-
-void AllocationRegister::Remove(void* address) {
- // Get a pointer to the index of the cell that stores |address|. The index can
- // be an element of |buckets_| or the |next| member of a cell.
- CellIndex* idx_ptr = Lookup(address);
- CellIndex freed_idx = *idx_ptr;
-
- // If the index is 0, the address was not there in the first place.
- if (freed_idx == 0)
- return;
-
- // The cell at the index is now free, remove it from the linked list for
- // |Hash(address)|.
- Cell* freed_cell = &cells_[freed_idx];
- *idx_ptr = freed_cell->next;
-
- // Put the free cell at the front of the free list.
- freed_cell->next = free_list_;
- free_list_ = freed_idx;
-
- // Reset the address, so that on iteration the free cell is ignored.
- const_cast<void*&>(freed_cell->allocation.address) = nullptr;
-}
-
-AllocationRegister::Allocation* AllocationRegister::Get(void* address) {
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not present in the table.
- return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::begin() const {
- // Initialize the iterator's index to 0. Cell 0 never stores an entry.
- ConstIterator iterator(*this, 0);
- // Incrementing will advance the iterator to the first used cell.
- ++iterator;
- return iterator;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::end() const {
- // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
- // so index |next_unused_cell_| is an iterator past the last element, in line
- // with the STL iterator conventions.
- return ConstIterator(*this, next_unused_cell_);
-}
-
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register,
- CellIndex index)
- : register_(alloc_register), index_(index) {}
+ const AllocationRegister& alloc_register, AllocationIndex index)
+ : register_(alloc_register),
+ index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
- // Find the next cell with a non-null address until all cells that could
- // possibly be used have been iterated. A null address indicates a free cell.
- do {
- index_++;
- } while (index_ < register_.next_unused_cell_ &&
- register_.cells_[index_].allocation.address == nullptr);
+ index_ = register_.allocations_.Next(index_ + 1);
}
bool AllocationRegister::ConstIterator::operator!=(
@@ -121,53 +25,38 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
-operator*() const {
- return register_.cells_[index_].allocation;
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+ return register_.GetAllocation(index_);
}
-AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
- // The list head is in |buckets_| at the hash offset.
- CellIndex* idx_ptr = &buckets_[Hash(address)];
+size_t AllocationRegister::BacktraceHasher::operator () (
+ const Backtrace& backtrace) const {
+ const size_t kSampleLength = 10;
- // Chase down the list until the cell that holds |address| is found,
- // or until the list ends.
- while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
- idx_ptr = &cells_[*idx_ptr].next;
+ uintptr_t total_value = 0;
- return idx_ptr;
-}
-
-AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
- // First try to re-use a cell from the freelist.
- if (free_list_) {
- CellIndex idx = free_list_;
- free_list_ = cells_[idx].next;
- return idx;
+ size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+ for (size_t i = 0; i != head_end; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
- // Otherwise pick the next cell that has not been touched before.
- CellIndex idx = next_unused_cell_;
- next_unused_cell_++;
-
- // If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
- // the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
+ size_t tail_start = backtrace.frame_count -
+ std::min(backtrace.frame_count - head_end, kSampleLength);
+ for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+ }
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ total_value += backtrace.frame_count;
- return idx;
+ // These magic constants give best results in terms of average collisions
+ // per backtrace. They were found by replaying real backtraces from Linux
+ // and Android against different hash functions.
+ return (total_value * 131101) >> 14;
}
-// static
-uint32_t AllocationRegister::Hash(void* address) {
+size_t AllocationRegister::AddressHasher::operator () (
+ const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
@@ -178,22 +67,114 @@ uint32_t AllocationRegister::Hash(void* address) {
const uintptr_t a = 131101;
const uintptr_t shift = 14;
const uintptr_t h = (key * a) >> shift;
- return static_cast<uint32_t>(h) & kNumBucketsMask;
+ return h;
+}
+
+AllocationRegister::AllocationRegister()
+ : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+ size_t backtrace_capacity)
+ : allocations_(allocation_capacity),
+ backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+ size_t size,
+ const AllocationContext& context) {
+ DCHECK(address != nullptr);
+ if (size == 0) {
+ return;
+ }
+
+ AllocationInfo info = {
+ size,
+ context.type_name,
+ InsertBacktrace(context.backtrace)
+ };
+
+ // Try to insert the allocation.
+ auto index_and_flag = allocations_.Insert(address, info);
+ if (!index_and_flag.second) {
+ // |address| is already there - overwrite the allocation info.
+ auto& old_info = allocations_.Get(index_and_flag.first).second;
+ RemoveBacktrace(old_info.backtrace_index);
+ old_info = info;
+ }
+}
+
+void AllocationRegister::Remove(const void* address) {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return;
+ }
+
+ const AllocationInfo& info = allocations_.Get(index).second;
+ RemoveBacktrace(info.backtrace_index);
+ allocations_.Remove(index);
+}
+
+bool AllocationRegister::Get(const void* address,
+ Allocation* out_allocation) const {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return false;
+ }
+
+ if (out_allocation) {
+ *out_allocation = GetAllocation(index);
+ }
+ return true;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+ return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+ return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
- // Estimate memory overhead by counting all of the cells that have ever been
- // touched. Don't report mmapped memory as allocated, because it has not been
- // allocated by malloc.
size_t allocated = sizeof(AllocationRegister);
size_t resident = sizeof(AllocationRegister)
- // Include size of touched cells (size of |*cells_|).
- + sizeof(Cell) * next_unused_cell_
- // Size of |*buckets_|.
- + sizeof(CellIndex) * kNumBuckets;
+ + allocations_.EstimateUsedMemory()
+ + backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+ const Backtrace& backtrace) {
+ auto index = backtraces_.Insert(backtrace, 0).first;
+ auto& backtrace_and_count = backtraces_.Get(index);
+ backtrace_and_count.second++;
+ return index;
+}
+
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+ auto& backtrace_and_count = backtraces_.Get(index);
+ if (--backtrace_and_count.second == 0) {
+ // Backtrace is not referenced anymore - remove it.
+ backtraces_.Remove(index);
+ }
+}
+
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+ AllocationMap::KVIndex index) const {
+ const auto& address_and_info = allocations_.Get(index);
+ const auto& backtrace_and_count = backtraces_.Get(
+ address_and_info.second.backtrace_index);
+ return {
+ address_and_info.first,
+ address_and_info.second.size,
+ AllocationContext(
+ backtrace_and_count.first,
+ address_and_info.second.type_name)
+ };
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index 976f2f50a9..86e2721c56 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -8,77 +8,288 @@
#include <stddef.h>
#include <stdint.h>
+#include <utility>
+
+#include "base/bits.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
namespace base {
namespace trace_event {
+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
+// Hash map that mmaps memory only once in the constructor. Its API is
+// similar to std::unordered_map, only index (KVIndex) is used to address
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+ // To keep things simple we don't call destructors.
+ static_assert(is_trivially_destructible<Key>::value &&
+ is_trivially_destructible<Value>::value,
+ "Key and Value shouldn't have destructors");
+ public:
+ using KVPair = std::pair<const Key, Value>;
+
+ // For implementation simplicity API uses integer index instead
+ // of iterators. Most operations (except FindValidIndex) on KVIndex
+ // are O(1).
+ using KVIndex = size_t;
+ static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+ // Capacity controls how many items this hash map can hold, and largely
+ // affects memory footprint.
+ FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
+
+ ~FixedHashMap() {
+ FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+ FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+ }
+
+ std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+ Cell** p_cell = Lookup(key);
+ Cell* cell = *p_cell;
+ if (cell) {
+ return {static_cast<KVIndex>(cell - cells_), false}; // not inserted
+ }
+
+ // Get a free cell and link it.
+ *p_cell = cell = GetFreeCell();
+ cell->p_prev = p_cell;
+ cell->next = nullptr;
+
+ // Initialize key/value pair. Since key is 'const Key' this is the
+ // only way to initialize it.
+ new (&cell->kv) KVPair(key, value);
+
+ return {static_cast<KVIndex>(cell - cells_), true}; // inserted
+ }
+
+ void Remove(KVIndex index) {
+ DCHECK_LT(index, next_unused_cell_);
+
+ Cell* cell = &cells_[index];
+
+ // Unlink the cell.
+ *cell->p_prev = cell->next;
+ if (cell->next) {
+ cell->next->p_prev = cell->p_prev;
+ }
+ cell->p_prev = nullptr; // mark as free
+
+ // Add it to the free list.
+ cell->next = free_list_;
+ free_list_ = cell;
+ }
+
+ KVIndex Find(const Key& key) const {
+ Cell* cell = *Lookup(key);
+ return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+ }
+
+ KVPair& Get(KVIndex index) {
+ return cells_[index].kv;
+ }
+
+ const KVPair& Get(KVIndex index) const {
+ return cells_[index].kv;
+ }
+
+ // Finds next index that has a KVPair associated with it. Search starts
+ // with the specified index. Returns kInvalidKVIndex if nothing was found.
+ // To find the first valid index, call this function with 0. Continue
+ // calling with the last_index + 1 until kInvalidKVIndex is returned.
+ KVIndex Next(KVIndex index) const {
+ for (;index < next_unused_cell_; ++index) {
+ if (cells_[index].p_prev) {
+ return index;
+ }
+ }
+ return kInvalidKVIndex;
+ }
+
+ // Estimates number of bytes used in allocated memory regions.
+ size_t EstimateUsedMemory() const {
+ size_t page_size = base::GetPageSize();
+ // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+ // it's the number of touched cells.
+ return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+ bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+ }
+
+ private:
+ friend base::trace_event::AllocationRegisterTest;
+
+ struct Cell {
+ KVPair kv;
+ Cell* next;
+
+ // Conceptually this is |prev| in a doubly linked list. However, buckets
+ // also participate in the bucket's cell list - they point to the list's
+ // head and also need to be linked / unlinked properly. To treat these two
+ // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
+ // that points to this Cell" kind of thing. So |p_prev| points to a bucket
+ // for the first cell in a list, and points to |next| of the previous cell
+ // for any other cell. With that Lookup() is the only function that handles
+ // buckets / cells differently.
+ // If |p_prev| is nullptr, the cell is in the free list.
+ Cell** p_prev;
+ };
+
+ using Bucket = Cell*;
+
+ // Returns a pointer to the cell that contains or should contain the entry
+ // for |key|. The pointer may point at an element of |buckets_| or at the
+ // |next| member of an element of |cells_|.
+ Cell** Lookup(const Key& key) const {
+ // The list head is in |buckets_| at the hash offset.
+ Cell** p_cell = &buckets_[Hash(key)];
+
+ // Chase down the list until the cell that holds |key| is found,
+ // or until the list ends.
+ while (*p_cell && (*p_cell)->kv.first != key) {
+ p_cell = &(*p_cell)->next;
+ }
+
+ return p_cell;
+ }
+
+ // Returns a cell that is not being used to store an entry (either by
+ // recycling from the free list or by taking a fresh cell).
+ Cell* GetFreeCell() {
+ // First try to re-use a cell from the free list.
+ if (free_list_) {
+ Cell* cell = free_list_;
+ free_list_ = cell->next;
+ return cell;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ size_t idx = next_unused_cell_;
+ next_unused_cell_++;
+
+ // If the hash table has too little capacity (when too little address space
+ // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+ // of the allocated storage. A guard page is allocated there to crash the
+ // program in that case. There are alternative solutions:
+ // - Deal with it, increase capacity by reallocating |cells_|.
+ // - Refuse to insert and let the caller deal with it.
+ // Because free cells are re-used before accessing fresh cells with a higher
+ // index, and because reserving address space without touching it is cheap,
+ // the simplest solution is to just allocate a humongous chunk of address
+ // space.
+
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+ return &cells_[idx];
+ }
+
+ // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+ size_t Hash(const Key& key) const {
+ if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+ // NumBuckets is a power of 2.
+ return KeyHasher()(key) & (NumBuckets - 1);
+ } else {
+ return KeyHasher()(key) % NumBuckets;
+ }
+ }
+
+ // Number of cells.
+ size_t const num_cells_;
+
+ // The array of cells. This array is backed by mmapped memory. Lower indices
+ // are accessed first, higher indices are accessed only when the |free_list_|
+ // is empty. This is to minimize the amount of resident memory used.
+ Cell* const cells_;
+
+ // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+ // contain the pointer to the linked list of cells for |Hash(key)|.
+ // This array is backed by mmapped memory.
+ mutable Bucket* buckets_;
+
+ // The head of the free list.
+ Cell* free_list_;
+
+ // The index of the first element of |cells_| that has not been used before.
+ // If the free list is empty and a new cell is needed, the cell at this index
+ // is used. This is the high water mark for the number of entries stored.
+ size_t next_unused_cell_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+} // namespace internal
+
class TraceEventMemoryOverhead;
// The allocation register keeps track of all allocations that have not been
-// freed. It is a memory map-backed hash table that stores size and context
-// indexed by address. The hash table is tailored specifically for this use
-// case. The common case is that an entry is inserted and removed after a
-// while, lookup without modifying the table is not an intended use case. The
-// hash table is implemented as an array of linked lists. The size of this
-// array is fixed, but it does not limit the amount of entries that can be
-// stored.
-//
-// Replaying a recording of Chrome's allocations and frees against this hash
-// table takes about 15% of the time that it takes to replay them against
-// |std::map|.
+// freed. Internally it has two hashtables: one for Backtraces and one for
+// actual allocations. Sizes of both hashtables are fixed, and this class
+// allocates (mmaps) only in its constructor.
class BASE_EXPORT AllocationRegister {
public:
- // The data stored in the hash table;
- // contains the details about an allocation.
+ // Details about an allocation.
struct Allocation {
- void* const address;
+ const void* address;
size_t size;
AllocationContext context;
};
- // An iterator that iterates entries in the hash table efficiently, but in no
- // particular order. It can do this by iterating the cells and ignoring the
- // linked lists altogether. Instead of checking whether a cell is in the free
- // list to see if it should be skipped, a null address is used to indicate
- // that a cell is free.
+ // An iterator that iterates entries in no particular order.
class BASE_EXPORT ConstIterator {
public:
void operator++();
bool operator!=(const ConstIterator& other) const;
- const Allocation& operator*() const;
+ Allocation operator*() const;
private:
friend class AllocationRegister;
- using CellIndex = uint32_t;
+ using AllocationIndex = size_t;
- ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
+ ConstIterator(const AllocationRegister& alloc_register,
+ AllocationIndex index);
const AllocationRegister& register_;
- CellIndex index_;
+ AllocationIndex index_;
};
AllocationRegister();
- explicit AllocationRegister(uint32_t num_cells);
+ AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);
~AllocationRegister();
// Inserts allocation details into the table. If the address was present
- // already, its details are updated. |address| must not be null. (This is
- // because null is used to mark free cells, to allow efficient iteration of
- // the hash table.)
- void Insert(void* address, size_t size, AllocationContext context);
+ // already, its details are updated. |address| must not be null.
+ void Insert(const void* address,
+ size_t size,
+ const AllocationContext& context);
// Removes the address from the table if it is present. It is ok to call this
// with a null pointer.
- void Remove(void* address);
+ void Remove(const void* address);
- // Returns a pointer to the allocation at the address, or null if there is no
- // allocation at that address. This can be used to change the allocation
- // context after insertion, for example to change the type name.
- Allocation* Get(void* address);
+ // Finds allocation for the address and fills |out_allocation|.
+ bool Get(const void* address, Allocation* out_allocation) const;
ConstIterator begin() const;
ConstIterator end() const;
@@ -87,85 +298,54 @@ class BASE_EXPORT AllocationRegister {
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
private:
- friend class AllocationRegisterTest;
- using CellIndex = uint32_t;
-
- // A cell can store allocation details (size and context) by address. Cells
- // are part of a linked list via the |next| member. This list is either the
- // list for a particular hash, or the free list. All cells are contiguous in
- // memory in one big array. Therefore, on 64-bit systems, space can be saved
- // by storing 32-bit indices instead of pointers as links. Index 0 is used as
- // the list terminator.
- struct Cell {
- CellIndex next;
- Allocation allocation;
+ friend AllocationRegisterTest;
+
+ // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+ // hashing and should be changed together with AddressHasher.
+ static const size_t kAllocationBuckets = 1 << 18;
+ static const size_t kAllocationCapacity = 1500000;
+
+ // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+ // needing to tweak BacktraceHasher implementation.
+ static const size_t kBacktraceBuckets = 1 << 15;
+ static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+ struct BacktraceHasher {
+ size_t operator () (const Backtrace& backtrace) const;
};
- // The number of buckets, 2^17, approximately 130 000, has been tuned for
- // Chrome's typical number of outstanding allocations. (This number varies
- // between processes. Most processes have a sustained load of ~30k unfreed
- // allocations, but some processes have peeks around 100k-400k allocations.)
- // Because of the size of the table, it is likely that every |buckets_|
- // access and every |cells_| access will incur a cache miss. Microbenchmarks
- // suggest that it is worthwile to use more memory for the table to avoid
- // chasing down the linked list, until the size is 2^18. The number of buckets
- // is a power of two so modular indexing can be done with bitwise and.
- static const uint32_t kNumBuckets = 0x20000;
- static const uint32_t kNumBucketsMask = kNumBuckets - 1;
-
- // Reserve address space to store at most this number of entries. High
- // capacity does not imply high memory usage due to the access pattern. The
- // only constraint on the number of cells is that on 32-bit systems address
- // space is scarce (i.e. reserving 2GiB of address space for the entries is
- // not an option). A value of ~3M entries is large enough to handle spikes in
- // the number of allocations, and modest enough to require no more than a few
- // dozens of MiB of address space.
- static const uint32_t kNumCellsPerBucket = 10;
-
- // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
- static uint32_t Hash(void* address);
-
- // Allocates a region of virtual address space of |size| rounded up to the
- // system page size. The memory is zeroed by the system. A guard page is
- // added after the end.
- static void* AllocateVirtualMemory(size_t size);
-
- // Frees a region of virtual address space allocated by a call to
- // |AllocateVirtualMemory|.
- static void FreeVirtualMemory(void* address, size_t allocated_size);
-
- // Returns a pointer to the variable that contains or should contain the
- // index of the cell that stores the entry for |address|. The pointer may
- // point at an element of |buckets_| or at the |next| member of an element of
- // |cells_|. If the value pointed at is 0, |address| is not in the table.
- CellIndex* Lookup(void* address);
-
- // Takes a cell that is not being used to store an entry (either by recycling
- // from the free list or by taking a fresh cell) and returns its index.
- CellIndex GetFreeCell();
-
- // The maximum number of cells which can be allocated.
- uint32_t const num_cells_;
+ using BacktraceMap = internal::FixedHashMap<
+ kBacktraceBuckets,
+ Backtrace,
+ size_t, // Number of references to the backtrace (the key). Incremented
+ // when an allocation that references the backtrace is inserted,
+ // and decremented when the allocation is removed. When the
+ // number drops to zero, the backtrace is removed from the map.
+ BacktraceHasher>;
- // The array of cells. This array is backed by mmapped memory. Lower indices
- // are accessed first, higher indices are only accessed when required. In
- // this way, even if a huge amount of address space has been mmapped, only
- // the cells that are actually used will be backed by physical memory.
- Cell* const cells_;
+ struct AllocationInfo {
+ size_t size;
+ const char* type_name;
+ BacktraceMap::KVIndex backtrace_index;
+ };
- // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
- // the index of the head of the linked list for |Hash(address)|. A value of 0
- // indicates an empty list. This array is backed by mmapped memory.
- CellIndex* const buckets_;
+ struct AddressHasher {
+ size_t operator () (const void* address) const;
+ };
- // The head of the free list. This is the index of the cell. A value of 0
- // means that the free list is empty.
- CellIndex free_list_;
+ using AllocationMap = internal::FixedHashMap<
+ kAllocationBuckets,
+ const void*,
+ AllocationInfo,
+ AddressHasher>;
- // The index of the first element of |cells_| that has not been used before.
- // If the free list is empty and a new cell is needed, the cell at this index
- // is used. This is the high water mark for the number of entries stored.
- CellIndex next_unused_cell_;
+ BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+ void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+ Allocation GetAllocation(AllocationMap::KVIndex) const;
+
+ AllocationMap allocations_;
+ BacktraceMap backtraces_;
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
diff --git a/base/trace_event/heap_profiler_allocation_register_posix.cc b/base/trace_event/heap_profiler_allocation_register_posix.cc
index c38d7e6918..94eeb4df88 100644
--- a/base/trace_event/heap_profiler_allocation_register_posix.cc
+++ b/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -18,6 +18,7 @@
namespace base {
namespace trace_event {
+namespace internal {
namespace {
size_t GetGuardSize() {
@@ -25,8 +26,7 @@ size_t GetGuardSize() {
}
}
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
@@ -48,12 +48,11 @@ void* AllocationRegister::AllocateVirtualMemory(size_t size) {
return addr;
}
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
munmap(address, size);
}
+} // namespace internal
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index c2b6f79a95..c3d3258651 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -230,7 +230,7 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
// This is the case of GetInstanceForCurrentThread() being called for the
// first time, which causes a new() inside the tracker which re-enters the
// heap profiler, in which case we just want to early out.
- auto tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+ auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
AllocationContext context = tracker->GetContextSnapshot();
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index f9b5799c05..7583763889 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -80,6 +80,13 @@ void MemoryAllocatorDump::AddScalarF(const char* name,
void MemoryAllocatorDump::AddString(const char* name,
const char* units,
const std::string& value) {
+ // String attributes are disabled in background mode.
+ if (process_memory_dump_->dump_args().level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND) {
+ NOTREACHED();
+ return;
+ }
+
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeString);
attributes_->SetString("units", units);
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index 359f081154..1bf9715917 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -129,8 +129,8 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
fmadp.OnMemoryDump(dump_args, &pmd);
@@ -176,7 +176,8 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
pmd.CreateAllocatorDump("foo_allocator");
pmd.CreateAllocatorDump("bar_allocator/heap");
ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index b14d265f19..eed070a782 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -23,6 +23,7 @@
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -46,27 +47,8 @@ const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
-uint32_t g_periodic_dumps_count = 0;
-uint32_t g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;
-void RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail;
- if (g_heavy_dumps_rate == 0) {
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- } else {
- level_of_detail = g_periodic_dumps_count == 0
- ? MemoryDumpLevelOfDetail::DETAILED
- : MemoryDumpLevelOfDetail::LIGHT;
-
- if (++g_periodic_dumps_count == g_heavy_dumps_rate)
- g_periodic_dumps_count = 0;
- }
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -116,6 +98,9 @@ const char* const MemoryDumpManager::kTraceCategory =
TRACE_DISABLED_BY_DEFAULT("memory-infra");
// static
+const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
+
+// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
// static
@@ -272,8 +257,10 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
if (dumper_registrations_ignored_for_testing_)
return;
+ bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
- new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options);
+ new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+ whitelisted_for_background_mode);
{
AutoLock lock(lock_);
@@ -351,8 +338,13 @@ void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback) {
- // Bail out immediately if tracing is not enabled at all.
- if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
+ // Bail out immediately if tracing is not enabled at all or if the dump mode
+ // is not allowed.
+ if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
+ !IsDumpModeAllowed(level_of_detail)) {
+ VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
+ << " tracing category is not enabled or the requested dump mode is "
+ "not allowed by trace config.";
if (!callback.is_null())
callback.Run(0u /* guid */, false /* success */);
return;
@@ -396,15 +388,33 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
TRACE_ID_MANGLE(args.dump_guid));
+ // If argument filter is enabled then only background mode dumps should be
+ // allowed. In case the trace config passed for background tracing session
+ // missed the allowed modes argument, it crashes here instead of creating
+ // unexpected dumps.
+ if (TraceLog::GetInstance()
+ ->GetCurrentTraceConfig()
+ .IsArgumentFilterEnabled()) {
+ CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+ }
+
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
+
// |dump_thread_| can be nullptr is tracing was disabled before reaching
// here. SetupNextMemoryDump() is robust enough to tolerate it and will
// NACK the dump.
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, session_state_, callback,
dump_thread_ ? dump_thread_->task_runner() : nullptr));
+
+ // Safety check to prevent reaching here without calling RequestGlobalDump,
+ // with disallowed modes. If |session_state_| is null then tracing is
+ // disabled.
+ CHECK(!session_state_ ||
+ session_state_->memory_dump_config().allowed_dump_modes.count(
+ args.level_of_detail));
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -438,6 +448,14 @@ void MemoryDumpManager::SetupNextMemoryDump(
// Anyway either tracing is stopped or this was the last hop, create a trace
// event, add it to the trace and finalize process dump invoking the callback.
if (!pmd_async_state->dump_thread_task_runner.get()) {
+ if (pmd_async_state->pending_dump_providers.empty()) {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before finalizing the dump";
+ } else {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before dumping "
+ << pmd_async_state->pending_dump_providers.back().get()->name;
+ }
pmd_async_state->dump_successful = false;
pmd_async_state->pending_dump_providers.clear();
}
@@ -449,6 +467,15 @@ void MemoryDumpManager::SetupNextMemoryDump(
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
+ // If we are in background tracing, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo->whitelisted_for_background_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+
// If the dump provider did not specify a task runner affinity, dump on
// |dump_thread_| which is already checked above for presence.
SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
@@ -547,9 +574,10 @@ void MemoryDumpManager::InvokeOnMemoryDump(
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
ProcessId target_pid = mdpinfo->options.target_pid;
- ProcessMemoryDump* pmd =
- pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ ProcessMemoryDump* pmd =
+ pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
+ args);
bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
@@ -602,8 +630,11 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
bool tracing_still_enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
- if (!tracing_still_enabled)
+ if (!tracing_still_enabled) {
pmd_async_state->dump_successful = false;
+ VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
+ << " the dump was completed";
+ }
if (!pmd_async_state->callback.is_null()) {
pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
@@ -632,78 +663,57 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- AutoLock lock(lock_);
-
- DCHECK(delegate_); // At this point we must have a delegate.
- session_state_ = new MemoryDumpSessionState;
-
+ const TraceConfig trace_config =
+ TraceLog::GetInstance()->GetCurrentTraceConfig();
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
// deduplicator will be in use. Add a metadata events to write the frames
// and type IDs.
- session_state_->SetStackFrameDeduplicator(
+ session_state->SetStackFrameDeduplicator(
WrapUnique(new StackFrameDeduplicator));
- session_state_->SetTypeNameDeduplicator(
+ session_state->SetTypeNameDeduplicator(
WrapUnique(new TypeNameDeduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(
- new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state_,
- &MemoryDumpSessionState::stack_frame_deduplicator)));
+ WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state_, &MemoryDumpSessionState::type_name_deduplicator)));
+ session_state, &MemoryDumpSessionState::type_name_deduplicator)));
}
- DCHECK(!dump_thread_);
- dump_thread_ = std::move(dump_thread);
- subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+ {
+ AutoLock lock(lock_);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
- }
+ DCHECK(delegate_); // At this point we must have a delegate.
+ session_state_ = session_state;
- // Enable periodic dumps. At the moment the periodic support is limited to at
- // most one low-detail periodic dump and at most one high-detail periodic
- // dump. If both are specified the high-detail period must be an integer
- // multiple of the low-level one.
- g_periodic_dumps_count = 0;
- const TraceConfig trace_config =
- TraceLog::GetInstance()->GetCurrentTraceConfig();
- session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config());
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list =
- trace_config.memory_dump_config().triggers;
- if (triggers_list.empty())
- return;
+ DCHECK(!dump_thread_);
+ dump_thread_ = std::move(dump_thread);
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 2u);
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK(config.periodic_interval_ms);
- if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
- heavy_dump_period_ms = config.periodic_interval_ms;
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+ // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+ // when running memory benchmarks until telemetry uses TraceConfig to
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ "enable-memory-benchmarking")) {
+ return;
+ }
}
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
- periodic_dump_timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&RequestPeriodicGlobalDump));
+ // Enable periodic dumps if necessary.
+ periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
@@ -725,6 +735,14 @@ void MemoryDumpManager::OnTraceLogDisabled() {
dump_thread->Stop();
}
+bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
+ AutoLock lock(lock_);
+ if (!session_state_)
+ return false;
+ return session_state_->memory_dump_config().allowed_dump_modes.count(
+ dump_mode) != 0;
+}
+
uint64_t MemoryDumpManager::GetTracingProcessId() const {
return delegate_->GetTracingProcessId();
}
@@ -733,13 +751,15 @@ MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options)
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
: dump_provider(dump_provider),
name(name),
task_runner(std::move(task_runner)),
options(options),
consecutive_failures(0),
- disabled(false) {}
+ disabled(false),
+ whitelisted_for_background_mode(whitelisted_for_background_mode) {}
MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
@@ -765,7 +785,7 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
session_state(std::move(session_state)),
callback(callback),
dump_successful(true),
- callback_task_runner(MessageLoop::current()->task_runner()),
+ callback_task_runner(ThreadTaskRunnerHandle::Get()),
dump_thread_task_runner(std::move(dump_thread_task_runner)) {
pending_dump_providers.reserve(dump_providers.size());
pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
@@ -775,15 +795,89 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}
ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
- GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
+ GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
+ const MemoryDumpArgs& dump_args) {
auto iter = process_dumps.find(pid);
if (iter == process_dumps.end()) {
std::unique_ptr<ProcessMemoryDump> new_pmd(
- new ProcessMemoryDump(session_state));
+ new ProcessMemoryDump(session_state, dump_args));
iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
}
return iter->second.get();
}
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+ Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+ const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+ if (triggers_list.empty())
+ return;
+
+ // At the moment the periodic support is limited to at most one periodic
+ // trigger per dump mode. All intervals should be an integer multiple of the
+ // smallest interval specified.
+ periodic_dumps_count_ = 0;
+ uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+ uint32_t light_dump_period_ms = 0;
+ uint32_t heavy_dump_period_ms = 0;
+ DCHECK_LE(triggers_list.size(), 3u);
+ auto* mdm = MemoryDumpManager::GetInstance();
+ for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+ DCHECK_NE(0u, config.periodic_interval_ms);
+ switch (config.level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, light_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+ light_dump_period_ms = config.periodic_interval_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, heavy_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+ heavy_dump_period_ms = config.periodic_interval_ms;
+ break;
+ }
+ min_timer_period_ms =
+ std::min(min_timer_period_ms, config.periodic_interval_ms);
+ }
+
+ DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+ light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+ heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+ base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+ base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+ if (IsRunning()) {
+ timer_.Stop();
+ }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+ return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_dumps_count_;
+
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 817768afed..06b772c6e4 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -40,6 +40,7 @@ class MemoryDumpSessionState;
class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
public:
static const char* const kTraceCategory;
+ static const char* const kLogPrefix;
// This value is returned as the tracing id of the child processes by
// GetTracingProcessId() when tracing is not enabled.
@@ -115,10 +116,14 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
+ // Returns true if the dump mode is allowed for current tracing session.
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+
// Returns the MemoryDumpSessionState object, which is shared by all the
// ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
// session lifetime.
- const scoped_refptr<MemoryDumpSessionState>& session_state() const {
+ const scoped_refptr<MemoryDumpSessionState>& session_state_for_testing()
+ const {
return session_state_;
}
@@ -176,7 +181,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options);
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
MemoryDumpProvider* const dump_provider;
@@ -200,6 +206,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Flagged either by the auto-disable logic or during unregistration.
bool disabled;
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
private:
friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
~MemoryDumpProviderInfo();
@@ -221,7 +230,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~ProcessMemoryDumpAsyncState();
// Gets or creates the memory dump container for the given target process.
- ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(ProcessId pid);
+ ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(
+ ProcessId pid,
+ const MemoryDumpArgs& dump_args);
// A map of ProcessId -> ProcessMemoryDump, one for each target process
// being dumped from the current process. Typically each process dumps only
@@ -262,6 +273,31 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
+ // Sets up periodic memory dump timers to start global dump requests based on
+ // the dump triggers from trace config.
+ class BASE_EXPORT PeriodicGlobalDumpTimer {
+ public:
+ PeriodicGlobalDumpTimer();
+ ~PeriodicGlobalDumpTimer();
+
+ void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+ triggers_list);
+ void Stop();
+
+ bool IsRunning();
+
+ private:
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ RepeatingTimer timer_;
+ uint32_t periodic_dumps_count_;
+ uint32_t light_dump_rate_;
+ uint32_t heavy_dump_rate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+ };
+
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
@@ -325,7 +361,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
subtle::AtomicWord memory_tracing_enabled_;
// For time-triggered periodic dumps.
- RepeatingTimer periodic_dump_timer_;
+ PeriodicGlobalDumpTimer periodic_dump_timer_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index c1295efac6..d14093cbcc 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -23,6 +23,7 @@
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
@@ -48,16 +49,24 @@ MATCHER(IsLightDump, "") {
return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}
+MATCHER(IsBackgroundDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+}
+
namespace {
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options) {
+ const MemoryDumpProvider::Options& options,
+ const char* name = kMDPName) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
- mdm->RegisterDumpProvider(mdp, kMDPName, std::move(task_runner), options);
+ mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
mdm->set_dumper_registrations_ignored_for_testing(true);
}
@@ -71,7 +80,6 @@ void RegisterDumpProviderWithSequencedTaskRunner(
const MemoryDumpProvider::Options& options) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
options);
mdm->set_dumper_registrations_ignored_for_testing(true);
@@ -218,7 +226,7 @@ class MemoryDumpManagerTest : public testing::Test {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
run_loop.Run();
}
@@ -331,7 +339,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
RegisterDumpProvider(&mdp2);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
- const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+ const MemoryDumpSessionState* session_state =
+ mdm_->session_state_for_testing().get();
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(2)
@@ -464,11 +473,11 @@ TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
// invoked a number of times equal to its index.
for (uint32_t i = kNumInitialThreads; i > 0; --i) {
threads.push_back(WrapUnique(new Thread("test thread")));
- auto thread = threads.back().get();
+ auto* thread = threads.back().get();
thread->Start();
scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
- auto mdp = mdps.back().get();
+ auto* mdp = mdps.back().get();
RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
EXPECT_CALL(*mdp, OnMemoryDump(_, _))
.Times(i)
@@ -895,7 +904,9 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
// Register a bound dump provider.
@@ -932,7 +943,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED, callback);
DisableTracing();
@@ -945,7 +956,9 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
// Tests against race conditions that can happen if tracing is disabled before
// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
@@ -1099,5 +1112,60 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp1.get());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
+ kWhitelistedMDPName);
+
+ EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::BACKGROUND);
+ DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+
+ RunLoop run_loop;
+ auto quit_closure = run_loop.QuitClosure();
+
+ testing::InSequence sequence;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .Times(5);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ }));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */));
+
+ // Only background mode dumps should be allowed with the trace config.
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::LIGHT);
+ EXPECT_FALSE(last_callback_success_);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_FALSE(last_callback_success_);
+
+ ASSERT_TRUE(IsPeriodicDumpingEnabled());
+ run_loop.Run();
+ DisableTracing();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index f4db01a363..2c502861d8 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -15,12 +15,6 @@ namespace trace_event {
class ProcessMemoryDump;
-// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
-// in the codebase when extending the MemoryDumpProvider API.
-struct MemoryDumpArgs {
- MemoryDumpLevelOfDetail level_of_detail;
-};
-
// The contract interface that memory dump providers must implement.
class BASE_EXPORT MemoryDumpProvider {
public:
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index 48b5ba6d2c..e6c5b87b22 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -28,6 +28,8 @@ const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
case MemoryDumpLevelOfDetail::DETAILED:
@@ -39,6 +41,8 @@ const char* MemoryDumpLevelOfDetailToString(
MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
const std::string& str) {
+ if (str == "background")
+ return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
if (str == "detailed")
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 00d560ec6a..f3ff9d8e3b 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -28,13 +28,25 @@ enum class MemoryDumpType {
};
// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
-// MemoryDumpProvider instances must guarantee that level of detail does not
-// affect the total size reported in the root node, but only the granularity of
-// the child MemoryAllocatorDump(s).
-enum class MemoryDumpLevelOfDetail {
- LIGHT, // Few entries, typically a fixed number, per dump.
- DETAILED, // Unrestricted amount of entries per dump.
- LAST = DETAILED // For IPC Macros.
+enum class MemoryDumpLevelOfDetail : uint32_t {
+ FIRST,
+
+ // For background tracing mode. The dump time is quick, and typically just the
+ // totals are expected. Suballocations need not be specified. Dump name must
+ // contain only pre-defined strings and string arguments cannot be added.
+ BACKGROUND = FIRST,
+
+ // For the levels below, MemoryDumpProvider instances must guarantee that the
+ // total size reported in the root node is consistent. Only the granularity of
+ // the child MemoryAllocatorDump(s) differs with the levels.
+
+ // Few entries, typically a fixed number, per dump.
+ LIGHT,
+
+ // Unrestricted amount of entries per dump.
+ DETAILED,
+
+ LAST = DETAILED
};
// Initial request arguments for a global memory dump. (see
@@ -49,6 +61,13 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
+// providers. Dump providers are expected to read the args for creating dumps.
+struct MemoryDumpArgs {
+ // Specifies how detailed the dumps should be.
+ MemoryDumpLevelOfDetail level_of_detail;
+};
+
using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 0000000000..aed187fa1d
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if the background mode dump has very
+// less performance and memory overhead.
+const char* const kDumpProviderWhitelist[] = {
+ "BlinkGC",
+ "ChildDiscardableSharedMemoryManager",
+ "DOMStorage",
+ "HostDiscardableSharedMemoryManager",
+ "IndexedDBBackingStore",
+ "JavaHeap",
+ "LeveldbValueStore",
+ "Malloc",
+ "PartitionAlloc",
+ "ProcessMemoryMetrics",
+ "Skia",
+ "Sql",
+ "V8Isolate",
+ "WinHeap",
+ nullptr // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode.
+const char* const kAllocatorDumpNameWhitelist[] = {
+ "blink_gc",
+ "blink_gc/allocated_objects",
+ "discardable",
+ "discardable/child_0x?",
+ "dom_storage/0x?/cache_size",
+ "dom_storage/session_storage_0x?",
+ "java_heap",
+ "java_heap/allocated_objects",
+ "leveldb/index_db/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
+ "leveldb/value_store/Extensions.Database.Open.State/0x?",
+ "leveldb/value_store/Extensions.Database.Open/0x?",
+ "leveldb/value_store/Extensions.Database.Restore/0x?",
+ "leveldb/value_store/Extensions.Database.Value.Restore/0x?",
+ "malloc",
+ "malloc/allocated_objects",
+ "malloc/metadata_fragmentation_caches",
+ "partition_alloc/allocated_objects",
+ "partition_alloc/partitions",
+ "partition_alloc/partitions/buffer",
+ "partition_alloc/partitions/fast_malloc",
+ "partition_alloc/partitions/layout",
+ "skia/sk_glyph_cache",
+ "skia/sk_resource_cache",
+ "sqlite",
+ "v8/isolate_0x?/heap_spaces",
+ "v8/isolate_0x?/heap_spaces/code_space",
+ "v8/isolate_0x?/heap_spaces/large_object_space",
+ "v8/isolate_0x?/heap_spaces/map_space",
+ "v8/isolate_0x?/heap_spaces/new_space",
+ "v8/isolate_0x?/heap_spaces/old_space",
+ "v8/isolate_0x?/heap_spaces/other_spaces",
+ "v8/isolate_0x?/malloc",
+ "v8/isolate_0x?/zapped_for_debug",
+ "winheap",
+ "winheap/allocated_objects",
+ nullptr // End of list marker.
+};
+
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+ kAllocatorDumpNameWhitelist;
+
+} // namespace
+
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+ for (size_t i = 0; g_dump_provider_whitelist[i] != nullptr; ++i) {
+ if (strcmp(mdp_name, g_dump_provider_whitelist[i]) == 0)
+ return true;
+ }
+ return false;
+}
+
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+ // Remove special characters, numbers (including hexadecimal which are marked
+ // by '0x') from the given string.
+ const size_t length = name.size();
+ std::string stripped_str;
+ stripped_str.reserve(length);
+ bool parsing_hex = false;
+ for (size_t i = 0; i < length; ++i) {
+ if (parsing_hex && isxdigit(name[i]))
+ continue;
+ parsing_hex = false;
+ if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+ parsing_hex = true;
+ stripped_str.append("0x?");
+ ++i;
+ } else {
+ stripped_str.push_back(name[i]);
+ }
+ }
+
+ for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+ if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+ g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+ g_allocator_dump_name_whitelist = list;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.h b/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 0000000000..b8d704ae24
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// The whitelist is replaced with the given list for tests. The last element of
+// the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 52eccbe1a0..826989237b 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -12,6 +12,7 @@
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"
@@ -48,6 +49,9 @@ size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
} // namespace
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
@@ -148,10 +152,12 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
ProcessMemoryDump::ProcessMemoryDump(
- scoped_refptr<MemoryDumpSessionState> session_state)
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args)
: has_process_totals_(false),
has_process_mmaps_(false),
- session_state_(std::move(session_state)) {}
+ session_state_(std::move(session_state)),
+ dump_args_(dump_args) {}
ProcessMemoryDump::~ProcessMemoryDump() {}
@@ -170,6 +176,13 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad) {
+ // In background mode return the black hole dump, if invalid dump name is
+ // given.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+ !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+ return GetBlackHoleMad();
+ }
+
auto insertion_result = allocator_dumps_.insert(
std::make_pair(mad->absolute_name(), std::move(mad)));
MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
@@ -181,7 +194,11 @@ MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
const std::string& absolute_name) const {
auto it = allocator_dumps_.find(absolute_name);
- return it == allocator_dumps_.end() ? nullptr : it->second.get();
+ if (it != allocator_dumps_.end())
+ return it->second.get();
+ if (black_hole_mad_)
+ return black_hole_mad_.get();
+ return nullptr;
}
MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
@@ -192,6 +209,10 @@ MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
// A shared allocator dump can be shared within a process and the guid could
// have been created already.
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
@@ -206,6 +227,10 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
if (mad)
return mad;
@@ -219,21 +244,16 @@ MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}
-void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump) {
- DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
- heap_dumps_[absolute_name] = std::move(heap_dump);
-}
-
void ProcessMemoryDump::DumpHeapUsage(
const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>& metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name) {
if (!metrics_by_context.empty()) {
+ DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
metrics_by_context, *session_state());
- AddHeapDump(allocator_name, std::move(heap_dump));
+ heap_dumps_[allocator_name] = std::move(heap_dump);
}
std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
@@ -333,10 +353,21 @@ void ProcessMemoryDump::AddOwnershipEdge(
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name) {
+ // Do not create new dumps for suballocations in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return;
+
std::string child_mad_name = target_node_name + "/__" + source.ToString();
MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
AddOwnershipEdge(source, target_child_mad->guid());
}
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+ DCHECK(is_black_hole_non_fatal_for_testing_);
+ if (!black_hole_mad_)
+ black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
+ return black_hole_mad_.get();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index 51e4b5f515..d020c7d652 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -16,6 +16,7 @@
#include "base/memory/scoped_vector.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_maps.h"
#include "base/trace_event/process_memory_totals.h"
@@ -67,7 +68,8 @@ class BASE_EXPORT ProcessMemoryDump {
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
#endif
- ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state);
+ ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args);
~ProcessMemoryDump();
// Creates a new MemoryAllocatorDump with the given name and returns the
@@ -116,14 +118,6 @@ class BASE_EXPORT ProcessMemoryDump {
// Returns the map of the MemoryAllocatorDumps added to this dump.
const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
- // Adds a heap dump for the allocator with |absolute_name|. The |TracedValue|
- // must have the correct format. |trace_event::HeapDumper| will generate such
- // a value from a |trace_event::AllocationRegister|.
- // TODO(bashi): Remove this when WebMemoryDumpProvider is gone.
- // http://crbug.com/605822
- void AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump);
-
// Dumps heap usage with |allocator_name|.
void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>&
@@ -183,10 +177,16 @@ class BASE_EXPORT ProcessMemoryDump {
const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+ const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
private:
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+
MemoryAllocatorDump* AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad);
+ MemoryAllocatorDump* GetBlackHoleMad();
+
ProcessMemoryTotals process_totals_;
bool has_process_totals_;
@@ -202,6 +202,18 @@ class BASE_EXPORT ProcessMemoryDump {
// Keeps track of relationships between MemoryAllocatorDump(s).
std::vector<MemoryAllocatorDumpEdge> allocator_dumps_edges_;
+ // Level of detail of the current dump.
+ const MemoryDumpArgs dump_args_;
+
+ // This allocator dump is returned when an invalid dump is created in
+ // background mode. The attributes of the dump are ignored and not added to
+ // the trace.
+ std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+ // When set to true, the DCHECK(s) for invalid dump creations on the
+ // background mode are disabled for testing.
+ static bool is_black_hole_non_fatal_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
};
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index 3a93b2c489..571774a10c 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -7,8 +7,10 @@
#include <stddef.h>
#include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,14 +18,22 @@ namespace base {
namespace trace_event {
namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+ "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+ "Whitelisted/0x?/TestName", nullptr};
+
TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
auto it = pmd.heap_dumps().find(name);
return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
}
+
} // namespace
TEST(ProcessMemoryDumpTest, Clear) {
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
pmd1->CreateAllocatorDump("mad1");
pmd1->CreateAllocatorDump("mad2");
ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -58,10 +68,10 @@ TEST(ProcessMemoryDumpTest, Clear) {
pmd1->AsValueInto(traced_value.get());
// Check that the pmd can be reused and behaves as expected.
- auto mad1 = pmd1->CreateAllocatorDump("mad1");
- auto mad3 = pmd1->CreateAllocatorDump("mad3");
- auto shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+ auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+ auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
ASSERT_EQ(4u, pmd1->allocator_dumps().size());
ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
@@ -80,35 +90,36 @@ TEST(ProcessMemoryDumpTest, Clear) {
TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
std::unique_ptr<TracedValue> traced_value(new TracedValue);
- TracedValue* heap_dumps_ptr[4];
- std::unique_ptr<TracedValue> heap_dump;
-
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
- auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
- auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+ hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+ metrics_by_context[AllocationContext()] = { 1, 1 };
+ TraceEventMemoryOverhead overhead;
+
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetStackFrameDeduplicator(
+ WrapUnique(new StackFrameDeduplicator));
+ session_state->SetTypeNameDeduplicator(
+ WrapUnique(new TypeNameDeduplicator));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+ auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[0] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[1] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump2", std::move(heap_dump));
-
- std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
- auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
- auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
+
+ std::unique_ptr<ProcessMemoryDump> pmd2(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+ auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[2] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[3] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump2", std::move(heap_dump));
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
MemoryAllocatorDumpGuid shared_mad_guid1(1);
MemoryAllocatorDumpGuid shared_mad_guid2(2);
- auto shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
pmd1->TakeAllDumpsFrom(pmd2.get());
@@ -141,10 +152,10 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
ASSERT_EQ(4u, pmd1->heap_dumps().size());
- ASSERT_EQ(heap_dumps_ptr[0], GetHeapDump(*pmd1, "pmd1/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[1], GetHeapDump(*pmd1, "pmd1/heap_dump2"));
- ASSERT_EQ(heap_dumps_ptr[2], GetHeapDump(*pmd1, "pmd2/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[3], GetHeapDump(*pmd1, "pmd2/heap_dump2"));
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
// Check that calling AsValueInto() doesn't cause a crash.
traced_value.reset(new TracedValue);
@@ -154,17 +165,18 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
}
TEST(ProcessMemoryDumpTest, Suballocations) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
const std::string allocator_dump_name = "fakealloc/allocated_objects";
pmd->CreateAllocatorDump(allocator_dump_name);
// Create one allocation with an auto-assigned guid and mark it as a
// suballocation of "fakealloc/allocated_objects".
- auto pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+ auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
// Same here, but this time create an allocation with an explicit guid.
- auto pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+ auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
MemoryAllocatorDumpGuid(0x42));
pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
@@ -198,29 +210,75 @@ TEST(ProcessMemoryDumpTest, Suballocations) {
}
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
MemoryAllocatorDumpGuid shared_mad_guid(1);
- auto shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad2);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad3);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad4);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
- auto shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad5);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+ MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, background_args));
+ ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
+ SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+ MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+ // Invalid dump names.
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+ // Global dumps.
+ MemoryAllocatorDumpGuid guid(1);
+ EXPECT_EQ(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+ // Suballocations.
+ pmd->AddSuballocation(guid, "malloc/allocated_objects");
+ EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+ EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+ // Valid dump names.
+ EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+ // GetAllocatorDump is consistent.
+ EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+}
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
TEST(ProcessMemoryDumpTest, CountResidentBytes) {
const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
diff --git a/base/trace_event/process_memory_maps_dump_provider.h b/base/trace_event/process_memory_maps_dump_provider.h
deleted file mode 100644
index e69de29bb2..0000000000
--- a/base/trace_event/process_memory_maps_dump_provider.h
+++ /dev/null
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 25a0cd6d40..b343ea00bc 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -14,6 +14,7 @@
#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
@@ -47,6 +48,7 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters that is used to parse memory dump config in trace config
// string.
const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
const char kPeriodicIntervalParam[] = "periodic_interval_ms";
const char kModeParam[] = "mode";
@@ -67,6 +69,7 @@ class ConvertableTraceConfigToTraceFormat
explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
: trace_config_(trace_config) {}
~ConvertableTraceConfigToTraceFormat() override {}
+
void AppendAsTraceFormat(std::string* out) const override {
out->append(trace_config_.ToString());
}
@@ -75,24 +78,39 @@ class ConvertableTraceConfigToTraceFormat
const TraceConfig trace_config_;
};
-} // namespace
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+ std::set<MemoryDumpLevelOfDetail> all_modes;
+ for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+ mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+ all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+ }
+ return all_modes;
+}
+} // namespace
-TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler() :
- breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {};
+TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler()
+ : breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {}
void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
}
-TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {};
+void TraceConfig::ResetMemoryDumpConfig(
+ const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+ memory_dump_config_.Clear();
+ memory_dump_config_ = memory_dump_config;
+}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {}
TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
const MemoryDumpConfig& other) = default;
-TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {};
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {}
void TraceConfig::MemoryDumpConfig::Clear() {
+ allowed_dump_modes.clear();
triggers.clear();
heap_profiler_options.Clear();
}
@@ -101,12 +119,12 @@ TraceConfig::TraceConfig() {
InitializeDefault();
}
-TraceConfig::TraceConfig(const std::string& category_filter_string,
- const std::string& trace_options_string) {
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+ StringPiece trace_options_string) {
InitializeFromStrings(category_filter_string, trace_options_string);
}
-TraceConfig::TraceConfig(const std::string& category_filter_string,
+TraceConfig::TraceConfig(StringPiece category_filter_string,
TraceRecordMode record_mode) {
std::string trace_options_string;
switch (record_mode) {
@@ -132,7 +150,7 @@ TraceConfig::TraceConfig(const DictionaryValue& config) {
InitializeFromConfigDict(config);
}
-TraceConfig::TraceConfig(const std::string& config_string) {
+TraceConfig::TraceConfig(StringPiece config_string) {
if (!config_string.empty())
InitializeFromConfigString(config_string);
else
@@ -174,12 +192,9 @@ const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
}
std::string TraceConfig::ToString() const {
- base::DictionaryValue dict;
- ToDict(dict);
-
+ std::unique_ptr<DictionaryValue> dict = ToDict();
std::string json;
- base::JSONWriter::Write(dict, &json);
-
+ JSONWriter::Write(*dict, &json);
return json;
}
@@ -204,20 +219,18 @@ bool TraceConfig::IsCategoryGroupEnabled(
bool had_enabled_by_default = false;
DCHECK(category_group_name);
- CStringTokenizer category_group_tokens(
- category_group_name, category_group_name + strlen(category_group_name),
- ",");
+ std::string category_group_name_str = category_group_name;
+ StringTokenizer category_group_tokens(category_group_name_str, ",");
while (category_group_tokens.GetNext()) {
std::string category_group_token = category_group_tokens.token();
// Don't allow empty tokens, nor tokens with leading or trailing space.
DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
category_group_token))
<< "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str())) {
+ if (IsCategoryEnabled(category_group_token.c_str()))
return true;
- }
- if (!base::MatchPattern(category_group_token.c_str(),
- TRACE_DISABLED_BY_DEFAULT("*")))
+
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
had_enabled_by_default = true;
}
// Do a second pass to check for explicitly disabled categories
@@ -226,10 +239,8 @@ bool TraceConfig::IsCategoryGroupEnabled(
bool category_group_disabled = false;
while (category_group_tokens.GetNext()) {
std::string category_group_token = category_group_tokens.token();
- for (StringList::const_iterator ci = excluded_categories_.begin();
- ci != excluded_categories_.end();
- ++ci) {
- if (base::MatchPattern(category_group_token.c_str(), ci->c_str())) {
+ for (const std::string& category : excluded_categories_) {
+ if (MatchPattern(category_group_token, category)) {
// Current token of category_group_name is present in excluded_list.
// Flag the exclusion and proceed further to check if any of the
// remaining categories of category_group_name is not present in the
@@ -241,8 +252,7 @@ bool TraceConfig::IsCategoryGroupEnabled(
// excluded_ list. So, if it's not a disabled-by-default category,
// it has to be included_ list. Enable the category_group_name
// for recording.
- if (!base::MatchPattern(category_group_token.c_str(),
- TRACE_DISABLED_BY_DEFAULT("*"))) {
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
category_group_disabled = false;
}
}
@@ -255,8 +265,8 @@ bool TraceConfig::IsCategoryGroupEnabled(
// If the category group is not excluded, and there are no included patterns
// we consider this category group enabled, as long as it had categories
// other than disabled-by-default.
- return !category_group_disabled &&
- included_categories_.empty() && had_enabled_by_default;
+ return !category_group_disabled && had_enabled_by_default &&
+ included_categories_.empty();
}
void TraceConfig::Merge(const TraceConfig& config) {
@@ -311,8 +321,6 @@ void TraceConfig::InitializeDefault() {
enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
- excluded_categories_.push_back("*Debug");
- excluded_categories_.push_back("*Test");
}
void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
@@ -330,25 +338,13 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
}
}
- bool enable_sampling;
- if (!dict.GetBoolean(kEnableSamplingParam, &enable_sampling))
- enable_sampling_ = false;
- else
- enable_sampling_ = enable_sampling;
-
- bool enable_systrace;
- if (!dict.GetBoolean(kEnableSystraceParam, &enable_systrace))
- enable_systrace_ = false;
- else
- enable_systrace_ = enable_systrace;
+ bool val;
+ enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
+ enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
+ enable_argument_filter_ =
+ dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
- bool enable_argument_filter;
- if (!dict.GetBoolean(kEnableArgumentFilterParam, &enable_argument_filter))
- enable_argument_filter_ = false;
- else
- enable_argument_filter_ = enable_argument_filter;
-
- const base::ListValue* category_list = nullptr;
+ const ListValue* category_list = nullptr;
if (dict.GetList(kIncludedCategoriesParam, &category_list))
SetCategoriesFromIncludedList(*category_list);
if (dict.GetList(kExcludedCategoriesParam, &category_list))
@@ -359,58 +355,47 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
// If dump triggers not set, the client is using the legacy with just
// category enabled. So, use the default periodic dump config.
- const base::DictionaryValue* memory_dump_config = nullptr;
+ const DictionaryValue* memory_dump_config = nullptr;
if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
- SetMemoryDumpConfig(*memory_dump_config);
+ SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
else
SetDefaultMemoryDumpConfig();
}
}
-void TraceConfig::InitializeFromConfigString(const std::string& config_string) {
- std::unique_ptr<Value> value(JSONReader::Read(config_string));
- if (!value)
- return InitializeDefault();
-
- const DictionaryValue* dict = nullptr;
- bool is_dict = value->GetAsDictionary(&dict);
-
- if (!is_dict)
- return InitializeDefault();
-
- DCHECK(dict);
- InitializeFromConfigDict(*dict);
+void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
+ auto dict = DictionaryValue::From(JSONReader::Read(config_string));
+ if (dict)
+ InitializeFromConfigDict(*dict);
+ else
+ InitializeDefault();
}
-void TraceConfig::InitializeFromStrings(
- const std::string& category_filter_string,
- const std::string& trace_options_string) {
+void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
+ StringPiece trace_options_string) {
if (!category_filter_string.empty()) {
- std::vector<std::string> split = base::SplitString(
- category_filter_string, ",", base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL);
- std::vector<std::string>::iterator iter;
- for (iter = split.begin(); iter != split.end(); ++iter) {
- std::string category = *iter;
+ std::vector<std::string> split = SplitString(
+ category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const std::string& category : split) {
// Ignore empty categories.
if (category.empty())
continue;
// Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
- if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
- category.at(category.size() - 1) == ')') {
- category = category.substr(
+ if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+ CompareCase::SENSITIVE) &&
+ category.back() == ')') {
+ std::string synthetic_category = category.substr(
strlen(kSyntheticDelayCategoryFilterPrefix),
category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
- size_t name_length = category.find(';');
+ size_t name_length = synthetic_category.find(';');
if (name_length != std::string::npos && name_length > 0 &&
- name_length != category.size() - 1) {
- synthetic_delays_.push_back(category);
+ name_length != synthetic_category.size() - 1) {
+ synthetic_delays_.push_back(synthetic_category);
}
- } else if (category.at(0) == '-') {
+ } else if (category.front() == '-') {
// Excluded categories start with '-'.
// Remove '-' from category string.
- category = category.substr(1);
- excluded_categories_.push_back(category);
+ excluded_categories_.push_back(category.substr(1));
} else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
TRACE_DISABLED_BY_DEFAULT("")) == 0) {
disabled_categories_.push_back(category);
@@ -424,24 +409,23 @@ void TraceConfig::InitializeFromStrings(
enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
- if(!trace_options_string.empty()) {
- std::vector<std::string> split = base::SplitString(
- trace_options_string, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- std::vector<std::string>::iterator iter;
- for (iter = split.begin(); iter != split.end(); ++iter) {
- if (*iter == kRecordUntilFull) {
+ if (!trace_options_string.empty()) {
+ std::vector<std::string> split =
+ SplitString(trace_options_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const std::string& token : split) {
+ if (token == kRecordUntilFull) {
record_mode_ = RECORD_UNTIL_FULL;
- } else if (*iter == kRecordContinuously) {
+ } else if (token == kRecordContinuously) {
record_mode_ = RECORD_CONTINUOUSLY;
- } else if (*iter == kTraceToConsole) {
+ } else if (token == kTraceToConsole) {
record_mode_ = ECHO_TO_CONSOLE;
- } else if (*iter == kRecordAsMuchAsPossible) {
+ } else if (token == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
- } else if (*iter == kEnableSampling) {
+ } else if (token == kEnableSampling) {
enable_sampling_ = true;
- } else if (*iter == kEnableSystrace) {
+ } else if (token == kEnableSystrace) {
enable_systrace_ = true;
- } else if (*iter == kEnableArgumentFilter) {
+ } else if (token == kEnableArgumentFilter) {
enable_argument_filter_ = true;
}
}
@@ -453,7 +437,7 @@ void TraceConfig::InitializeFromStrings(
}
void TraceConfig::SetCategoriesFromIncludedList(
- const base::ListValue& included_list) {
+ const ListValue& included_list) {
included_categories_.clear();
for (size_t i = 0; i < included_list.GetSize(); ++i) {
std::string category;
@@ -469,7 +453,7 @@ void TraceConfig::SetCategoriesFromIncludedList(
}
void TraceConfig::SetCategoriesFromExcludedList(
- const base::ListValue& excluded_list) {
+ const ListValue& excluded_list) {
excluded_categories_.clear();
for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
std::string category;
@@ -478,7 +462,7 @@ void TraceConfig::SetCategoriesFromExcludedList(
}
}
-void TraceConfig::SetSyntheticDelaysFromList(const base::ListValue& list) {
+void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
synthetic_delays_.clear();
for (size_t i = 0; i < list.GetSize(); ++i) {
std::string delay;
@@ -493,42 +477,51 @@ void TraceConfig::SetSyntheticDelaysFromList(const base::ListValue& list) {
}
}
-void TraceConfig::AddCategoryToDict(base::DictionaryValue& dict,
+void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
const char* param,
const StringList& categories) const {
if (categories.empty())
return;
- std::unique_ptr<base::ListValue> list(new base::ListValue());
- for (StringList::const_iterator ci = categories.begin();
- ci != categories.end();
- ++ci) {
- list->AppendString(*ci);
- }
-
- dict.Set(param, std::move(list));
+ auto list = MakeUnique<ListValue>();
+ for (const std::string& category : categories)
+ list->AppendString(category);
+ dict->Set(param, std::move(list));
}
-void TraceConfig::SetMemoryDumpConfig(
- const base::DictionaryValue& memory_dump_config) {
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
+ const DictionaryValue& memory_dump_config) {
+ // Set allowed dump modes.
+ memory_dump_config_.allowed_dump_modes.clear();
+ const ListValue* allowed_modes_list;
+ if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+ for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+ std::string level_of_detail_str;
+ allowed_modes_list->GetString(i, &level_of_detail_str);
+ memory_dump_config_.allowed_dump_modes.insert(
+ StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+ }
+ } else {
+ // If allowed modes param is not given then allow all modes by default.
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+ }
+
// Set triggers
memory_dump_config_.triggers.clear();
-
- const base::ListValue* trigger_list = nullptr;
+ const ListValue* trigger_list = nullptr;
if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
trigger_list->GetSize() > 0) {
for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
- const base::DictionaryValue* trigger = nullptr;
+ const DictionaryValue* trigger = nullptr;
if (!trigger_list->GetDictionary(i, &trigger))
continue;
- MemoryDumpConfig::Trigger dump_config;
int interval = 0;
-
- if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
+ if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
continue;
- }
+
DCHECK_GT(interval, 0);
+ MemoryDumpConfig::Trigger dump_config;
dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
std::string level_of_detail_str;
trigger->GetString(kModeParam, &level_of_detail_str);
@@ -539,7 +532,7 @@ void TraceConfig::SetMemoryDumpConfig(
}
// Set heap profiler options
- const base::DictionaryValue* heap_profiler_options = nullptr;
+ const DictionaryValue* heap_profiler_options = nullptr;
if (memory_dump_config.GetDictionary(kHeapProfilerOptions,
&heap_profiler_options)) {
int min_size_bytes = 0;
@@ -559,57 +552,51 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.Clear();
memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
-void TraceConfig::ToDict(base::DictionaryValue& dict) const {
+std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
+ auto dict = MakeUnique<DictionaryValue>();
switch (record_mode_) {
case RECORD_UNTIL_FULL:
- dict.SetString(kRecordModeParam, kRecordUntilFull);
+ dict->SetString(kRecordModeParam, kRecordUntilFull);
break;
case RECORD_CONTINUOUSLY:
- dict.SetString(kRecordModeParam, kRecordContinuously);
+ dict->SetString(kRecordModeParam, kRecordContinuously);
break;
case RECORD_AS_MUCH_AS_POSSIBLE:
- dict.SetString(kRecordModeParam, kRecordAsMuchAsPossible);
+ dict->SetString(kRecordModeParam, kRecordAsMuchAsPossible);
break;
case ECHO_TO_CONSOLE:
- dict.SetString(kRecordModeParam, kTraceToConsole);
+ dict->SetString(kRecordModeParam, kTraceToConsole);
break;
default:
NOTREACHED();
}
- if (enable_sampling_)
- dict.SetBoolean(kEnableSamplingParam, true);
- else
- dict.SetBoolean(kEnableSamplingParam, false);
-
- if (enable_systrace_)
- dict.SetBoolean(kEnableSystraceParam, true);
- else
- dict.SetBoolean(kEnableSystraceParam, false);
-
- if (enable_argument_filter_)
- dict.SetBoolean(kEnableArgumentFilterParam, true);
- else
- dict.SetBoolean(kEnableArgumentFilterParam, false);
+ dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
+ dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
+ dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
StringList categories(included_categories_);
categories.insert(categories.end(),
disabled_categories_.begin(),
disabled_categories_.end());
- AddCategoryToDict(dict, kIncludedCategoriesParam, categories);
- AddCategoryToDict(dict, kExcludedCategoriesParam, excluded_categories_);
- AddCategoryToDict(dict, kSyntheticDelaysParam, synthetic_delays_);
+ AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
+ AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
+ AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
- std::unique_ptr<base::DictionaryValue> memory_dump_config(
- new base::DictionaryValue());
- std::unique_ptr<base::ListValue> triggers_list(new base::ListValue());
- for (const MemoryDumpConfig::Trigger& config
- : memory_dump_config_.triggers) {
- std::unique_ptr<base::DictionaryValue> trigger_dict(
- new base::DictionaryValue());
+ auto allowed_modes = MakeUnique<ListValue>();
+ for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
+ allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
+
+ auto memory_dump_config = MakeUnique<DictionaryValue>();
+ memory_dump_config->Set(kAllowedDumpModesParam, std::move(allowed_modes));
+
+ auto triggers_list = MakeUnique<ListValue>();
+ for (const auto& config : memory_dump_config_.triggers) {
+ auto trigger_dict = MakeUnique<DictionaryValue>();
trigger_dict->SetInteger(kPeriodicIntervalParam,
static_cast<int>(config.periodic_interval_ms));
trigger_dict->SetString(
@@ -623,16 +610,15 @@ void TraceConfig::ToDict(base::DictionaryValue& dict) const {
if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
- std::unique_ptr<base::DictionaryValue> heap_profiler_options(
- new base::DictionaryValue());
- heap_profiler_options->SetInteger(
+ auto options = MakeUnique<DictionaryValue>();
+ options->SetInteger(
kBreakdownThresholdBytes,
memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
- memory_dump_config->Set(kHeapProfilerOptions,
- std::move(heap_profiler_options));
+ memory_dump_config->Set(kHeapProfilerOptions, std::move(options));
}
- dict.Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
+ dict->Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
}
+ return dict;
}
std::string TraceConfig::ToTraceOptionsString() const {
@@ -667,11 +653,10 @@ void TraceConfig::WriteCategoryFilterString(const StringList& values,
bool included) const {
bool prepend_comma = !out->empty();
int token_cnt = 0;
- for (StringList::const_iterator ci = values.begin();
- ci != values.end(); ++ci) {
+ for (const std::string& category : values) {
if (token_cnt > 0 || prepend_comma)
StringAppendF(out, ",");
- StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
+ StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
++token_cnt;
}
}
@@ -680,35 +665,28 @@ void TraceConfig::WriteCategoryFilterString(const StringList& delays,
std::string* out) const {
bool prepend_comma = !out->empty();
int token_cnt = 0;
- for (StringList::const_iterator ci = delays.begin();
- ci != delays.end(); ++ci) {
+ for (const std::string& category : delays) {
if (token_cnt > 0 || prepend_comma)
StringAppendF(out, ",");
StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
- ci->c_str());
+ category.c_str());
++token_cnt;
}
}
bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
- StringList::const_iterator ci;
-
// Check the disabled- filters and the disabled-* wildcard first so that a
// "*" filter does not include the disabled.
- for (ci = disabled_categories_.begin();
- ci != disabled_categories_.end();
- ++ci) {
- if (base::MatchPattern(category_name, ci->c_str()))
+ for (const std::string& category : disabled_categories_) {
+ if (MatchPattern(category_name, category))
return true;
}
- if (base::MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+ if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
return false;
- for (ci = included_categories_.begin();
- ci != included_categories_.end();
- ++ci) {
- if (base::MatchPattern(category_name, ci->c_str()))
+ for (const std::string& category : included_categories_) {
+ if (MatchPattern(category_name, category))
return true;
}
@@ -716,10 +694,8 @@ bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
}
bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- const std::string& str) {
- return str.empty() ||
- str.at(0) == ' ' ||
- str.at(str.length() - 1) == ' ';
+ StringPiece str) {
+ return str.empty() || str.front() == ' ' || str.back() == ' ';
}
bool TraceConfig::HasIncludedPatterns() const {
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 5b119eae98..91d6f1f3bd 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -7,11 +7,13 @@
#include <stdint.h>
+#include <set>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
+#include "base/strings/string_piece.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/values.h"
@@ -38,11 +40,11 @@ enum TraceRecordMode {
class BASE_EXPORT TraceConfig {
public:
- typedef std::vector<std::string> StringList;
+ using StringList = std::vector<std::string>;
// Specifies the memory dump config for tracing.
// Used only when "memory-infra" category is enabled.
- struct MemoryDumpConfig {
+ struct BASE_EXPORT MemoryDumpConfig {
MemoryDumpConfig();
MemoryDumpConfig(const MemoryDumpConfig& other);
~MemoryDumpConfig();
@@ -69,6 +71,11 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+ // Set of memory dump modes allowed for the tracing session. The explicitly
+ // triggered dumps will be successful only if the dump mode is allowed in
+ // the config.
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
std::vector<Trigger> triggers;
HeapProfiler heap_profiler_options;
};
@@ -118,11 +125,10 @@ class BASE_EXPORT TraceConfig {
// Example: TraceConfig("DELAY(gpu.PresentingFrame;16;alternating)", "");
// would make swap buffers take at least 16 ms every other time it
// is called; and use default options.
- TraceConfig(const std::string& category_filter_string,
- const std::string& trace_options_string);
+ TraceConfig(StringPiece category_filter_string,
+ StringPiece trace_options_string);
- TraceConfig(const std::string& category_filter_string,
- TraceRecordMode record_mode);
+ TraceConfig(StringPiece category_filter_string, TraceRecordMode record_mode);
// Create TraceConfig object from the trace config string.
//
@@ -139,7 +145,7 @@ class BASE_EXPORT TraceConfig {
// "inc_pattern*",
// "disabled-by-default-memory-infra"],
// "excluded_categories": ["excluded", "exc_pattern*"],
- // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"]
+ // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"],
// "memory_dump_config": {
// "triggers": [
// {
@@ -152,7 +158,7 @@ class BASE_EXPORT TraceConfig {
//
// Note: memory_dump_config can be specified only if
// disabled-by-default-memory-infra category is enabled.
- explicit TraceConfig(const std::string& config_string);
+ explicit TraceConfig(StringPiece config_string);
// Functionally identical to the above, but takes a parsed dictionary as input
// instead of its JSON serialization.
@@ -188,7 +194,8 @@ class BASE_EXPORT TraceConfig {
std::string ToCategoryFilterString() const;
// Returns true if at least one category in the list is enabled by this
- // trace config.
+ // trace config. This is used to determine if the category filters are
+ // enabled in the TRACE_* macros.
bool IsCategoryGroupEnabled(const char* category_group) const;
// Merges config with the current TraceConfig
@@ -196,6 +203,9 @@ class BASE_EXPORT TraceConfig {
void Clear();
+ // Clears and resets the memory dump config.
+ void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -204,7 +214,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, ConstructDefaultTraceConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -212,6 +221,8 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+ EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -222,24 +233,24 @@ class BASE_EXPORT TraceConfig {
void InitializeFromConfigDict(const DictionaryValue& dict);
// Initialize from a config string.
- void InitializeFromConfigString(const std::string& config_string);
+ void InitializeFromConfigString(StringPiece config_string);
// Initialize from category filter and trace options strings
- void InitializeFromStrings(const std::string& category_filter_string,
- const std::string& trace_options_string);
+ void InitializeFromStrings(StringPiece category_filter_string,
+ StringPiece trace_options_string);
- void SetCategoriesFromIncludedList(const base::ListValue& included_list);
- void SetCategoriesFromExcludedList(const base::ListValue& excluded_list);
- void SetSyntheticDelaysFromList(const base::ListValue& list);
- void AddCategoryToDict(base::DictionaryValue& dict,
+ void SetCategoriesFromIncludedList(const ListValue& included_list);
+ void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+ void SetSyntheticDelaysFromList(const ListValue& list);
+ void AddCategoryToDict(DictionaryValue* dict,
const char* param,
const StringList& categories) const;
- void SetMemoryDumpConfig(const base::DictionaryValue& memory_dump_config);
+ void SetMemoryDumpConfigFromConfigDict(
+ const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
- // Convert TraceConfig to the dict representation of the TraceConfig.
- void ToDict(base::DictionaryValue& dict) const;
+ std::unique_ptr<DictionaryValue> ToDict() const;
std::string ToTraceOptionsString() const;
@@ -249,11 +260,13 @@ class BASE_EXPORT TraceConfig {
void WriteCategoryFilterString(const StringList& delays,
std::string* out) const;
- // Returns true if category is enable according to this trace config.
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
bool IsCategoryEnabled(const char* category_name) const;
- static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
- const std::string& str);
+ static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
bool HasIncludedPatterns() const;
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 1acc62b9ce..6b47f8dc55 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -24,6 +24,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":2048"
"},"
@@ -52,6 +53,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"triggers\":["
"]"
"},"
@@ -71,6 +73,28 @@ class TraceConfigMemoryTestUtil {
"\"record_mode\":\"record-until-full\""
"}", MemoryDumpManager::kTraceCategory);
}
+
+ static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"background\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory, period_ms);
+ }
};
} // namespace trace_event
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index a17337619b..4b46b2fefd 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -21,7 +21,6 @@ const char kDefaultTraceConfigString[] =
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":[\"*Debug\",\"*Test\"],"
"\"record_mode\":\"record-until-full\""
"}";
@@ -36,6 +35,7 @@ const char kCustomTraceConfigString[] =
"\"disabled-by-default-cc\","
"\"disabled-by-default-memory-infra\"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":10240"
"},"
@@ -48,6 +48,24 @@ const char kCustomTraceConfigString[] =
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
"}";
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+ // Default trace config enables every category filter except the
+ // disabled-by-default-* ones.
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+ "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
} // namespace
TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -155,9 +173,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config.ToTraceOptionsString().c_str());
// From category filter strings
- config = TraceConfig("-*Debug,-*Test", "");
- EXPECT_STREQ("-*Debug,-*Test", config.ToCategoryFilterString().c_str());
-
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
@@ -257,38 +272,79 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
}
TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
- // Make sure that upon an empty string, we fall back to the default config.
TraceConfig tc;
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
- EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
- EXPECT_FALSE(tc.IsSystraceEnabled());
- EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ // Constructors from category filter string and trace option string.
+ TraceConfig tc_asterisk("*", "");
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ TraceConfig tc_empty_category_filter("", "");
+ EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_category_filter.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+ // Constructor from JSON formated config string.
+ TraceConfig tc_empty_json_string("");
+ EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_json_string.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+ // Constructor from dictionary value.
+ DictionaryValue dict;
+ TraceConfig tc_dict(dict);
+ EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_dict);
+}
- EXPECT_FALSE(tc.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+ TraceConfig tc_empty("", "");
+ TraceConfig tc_asterisk("*", "");
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+ // Both fall back to default config.
+ CheckDefaultTraceConfigBehavior(tc_empty);
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ // They differ only for internal checking.
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+}
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,CategoryDebug"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryDebug,Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryTest,not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug,CategoryTest"));
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+ TraceConfig tc("foo,disabled-by-default-foo", "");
+ EXPECT_STREQ("foo,disabled-by-default-foo",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ tc = TraceConfig("disabled-by-default-foo", "");
+ EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
}
TEST(TraceConfigTest, TraceConfigFromDict) {
- // Passing in empty dictionary will not result in default trace config.
+ // Passing in empty dictionary will result in default trace config.
DictionaryValue dict;
TraceConfig tc(dict);
- EXPECT_STRNE(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
@@ -307,7 +363,7 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", default_tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
std::unique_ptr<Value> custom_value(
JSONReader::Read(kCustomTraceConfigString));
@@ -405,7 +461,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -413,7 +470,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -421,7 +479,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -429,7 +488,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
// If the config string a dictionary formatted as a JSON string, it will
// initialize TraceConfig with best effort.
@@ -439,6 +499,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
@@ -446,6 +507,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
const char invalid_config_string[] =
"{"
@@ -487,9 +549,7 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":["
- "\"*Debug\",\"*Test\",\"excluded\",\"exc_pattern*\""
- "],"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
"}",
tc.ToString().c_str());
@@ -568,22 +628,34 @@ TEST(TraceConfigTest, SetTraceOptionValues) {
}
TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
- std::string tc_str =
+ std::string tc_str1 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
- TraceConfig tc(tc_str);
- EXPECT_EQ(tc_str, tc.ToString());
- EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc.memory_dump_config_.triggers.size());
+ TraceConfig tc1(tc_str1);
+ EXPECT_EQ(tc_str1, tc1.ToString());
+ EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc.memory_dump_config_.triggers[1].level_of_detail);
- EXPECT_EQ(2048u, tc.memory_dump_config_.heap_profiler_options.
- breakdown_threshold_bytes);
+ tc1.memory_dump_config_.triggers[1].level_of_detail);
+ EXPECT_EQ(
+ 2048u,
+ tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+
+ std::string tc_str2 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */);
+ TraceConfig tc2(tc_str2);
+ EXPECT_EQ(tc_str2, tc2.ToString());
+ EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+ tc2.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index 4335ea1b98..f915780de5 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -35,6 +35,8 @@
'trace_event/memory_dump_request_args.h',
'trace_event/memory_dump_session_state.cc',
'trace_event/memory_dump_session_state.h',
+ 'trace_event/memory_infra_background_whitelist.cc',
+ 'trace_event/memory_infra_background_whitelist.h',
'trace_event/process_memory_dump.cc',
'trace_event/process_memory_dump.h',
'trace_event/process_memory_maps.cc',
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index 8babf3b47f..336d964bff 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -288,7 +288,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -342,7 +342,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -361,7 +361,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
switch (*type) {
case kTypeStartDict: {
- auto new_dict = new DictionaryValue();
+ auto* new_dict = new DictionaryValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_dict));
@@ -386,7 +386,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
} break;
case kTypeStartArray: {
- auto new_list = new ListValue();
+ auto* new_list = new ListValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_list));
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index e2e250ed56..f469f2f6bc 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -261,7 +261,7 @@ void TraceEvent::AppendValueAsJSON(unsigned char type,
// So as not to lose bits from a 64-bit pointer, output as a hex string.
StringAppendF(
out, "\"0x%" PRIx64 "\"",
- static_cast<uint64_t>(reinterpret_cast<intptr_t>(value.as_pointer)));
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
break;
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
index ba7207d616..23579cbb22 100644
--- a/base/trace_event/trace_event_memory_overhead.cc
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -104,7 +104,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
- for (const Value* v : *list_value)
+ for (const auto& v : *list_value)
AddValue(*v);
} break;
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index e626a779ed..ff8ec2de78 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -10,6 +10,7 @@
#include <cstdlib>
#include <memory>
+#include <utility>
#include "base/bind.h"
#include "base/command_line.h"
@@ -96,14 +97,18 @@ class TraceEventTestFixture : public testing::Test {
}
void CancelTrace() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
CancelTraceAsync(&flush_complete_event);
flush_complete_event.Wait();
}
void EndTraceAndFlush() {
num_flush_callbacks_ = 0;
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EndTraceAndFlushAsync(&flush_complete_event);
flush_complete_event.Wait();
}
@@ -111,7 +116,9 @@ class TraceEventTestFixture : public testing::Test {
// Used when testing thread-local buffers which requires the thread initiating
// flush to have a message loop.
void EndTraceAndFlushInThreadWithMessageLoop() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Thread flush_thread("flush");
flush_thread.Start();
flush_thread.task_runner()->PostTask(
@@ -199,7 +206,7 @@ void TraceEventTestFixture::OnTraceDataCollected(
while (root_list->GetSize()) {
std::unique_ptr<Value> item;
root_list->Remove(0, &item);
- trace_parsed_.Append(item.release());
+ trace_parsed_.Append(std::move(item));
}
if (!has_more_events)
@@ -997,6 +1004,17 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
}
}
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+ // Default enables all category filters except the disabled-by-default-* ones.
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+ "foo,disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+ "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
} // namespace
// Simple Test for emitting data and validating it was received.
@@ -1609,7 +1627,8 @@ TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -1631,7 +1650,9 @@ TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
WaitableEvent* task_complete_events[num_threads];
for (int i = 0; i < num_threads; i++) {
threads[i] = new Thread(StringPrintf("Thread %d", i));
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
threads[i]->task_runner()->PostTask(
FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
@@ -1678,7 +1699,9 @@ TEST_F(TraceEventTestFixture, ThreadNames) {
// Now run some trace code on these threads.
WaitableEvent* task_complete_events[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
thread_ids[i] = threads[i]->GetThreadId();
threads[i]->task_runner()->PostTask(
@@ -1951,7 +1974,7 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
EXPECT_STREQ(
- "-*Debug,-*Test",
+ "",
trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
trace_log->SetDisabled();
trace_log->SetDisabled();
@@ -1988,6 +2011,48 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
trace_log->SetDisabled();
}
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+}
+
TEST_F(TraceEventTestFixture, TraceSampling) {
TraceLog::GetInstance()->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
@@ -2823,7 +2888,8 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
@@ -2833,8 +2899,10 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -2895,15 +2963,18 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
&task_stop_event));
@@ -2920,7 +2991,8 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -2928,8 +3000,10 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
task_complete_event.Wait();
task_complete_event.Reset();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -3099,5 +3173,12 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+ BeginSpecificTrace("-*");
+ TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 1da42bfa56..12cebc6f65 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -86,7 +86,7 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 105
+#define MAX_CATEGORY_GROUPS 200
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
@@ -334,7 +334,7 @@ void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
}
struct TraceLog::RegisteredAsyncObserver {
- RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
+ explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
: observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
~RegisteredAsyncObserver() {}
@@ -402,7 +402,7 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
return;
HEAP_PROFILER_SCOPED_IGNORE;
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
if (thread_local_event_buffer &&
!CheckGeneration(thread_local_event_buffer->generation())) {
delete thread_local_event_buffer;
@@ -478,6 +478,12 @@ void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
}
#endif
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+
g_category_group_enabled[category_index] = enabled_flag;
}
@@ -890,7 +896,7 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
? ThreadTaskRunnerHandle::Get()
: nullptr;
- DCHECK(!thread_message_loops_.size() || flush_task_runner_);
+ DCHECK(thread_message_loops_.empty() || flush_task_runner_);
flush_output_callback_ = cb;
if (thread_shared_chunk_) {
@@ -1037,7 +1043,7 @@ void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
for (hash_set<MessageLoop*>::const_iterator it =
thread_message_loops_.begin();
it != thread_message_loops_.end(); ++it) {
- LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
}
}
FinishFlush(generation, discard_events);
@@ -1220,7 +1226,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// |thread_local_event_buffer_| can be null if the current thread doesn't have
// a message loop or the message loop is blocked.
InitializeThreadLocalEventBufferIfSupported();
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
@@ -1347,11 +1353,12 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
phase == TRACE_EVENT_PHASE_COMPLETE) {
AllocationContextTracker::GetInstanceForCurrentThread()
->PushPseudoStackFrame(name);
- } else if (phase == TRACE_EVENT_PHASE_END)
+ } else if (phase == TRACE_EVENT_PHASE_END) {
// The pop for |TRACE_EVENT_PHASE_COMPLETE| events
// is in |TraceLog::UpdateTraceEventDuration|.
AllocationContextTracker::GetInstanceForCurrentThread()
->PopPseudoStackFrame(name);
+ }
}
}
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
index a8d32d6ee2..5a0d2f8a02 100644
--- a/base/trace_event/trace_sampling_thread.cc
+++ b/base/trace_event/trace_sampling_thread.cc
@@ -25,7 +25,9 @@ class TraceBucketData {
};
TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false), waitable_event_for_testing_(false, false) {}
+ : thread_running_(false),
+ waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
TraceSamplingThread::~TraceSamplingThread() {}
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 7a88079a3d..487fd19098 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -16,6 +16,7 @@
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
+#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
#include "build/build_config.h"
@@ -355,7 +356,9 @@ ThreadData* ThreadData::next() const { return next_; }
// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
- Initialize();
+ if (base::WorkerPool::RunsTasksOnCurrentThread())
+ return;
+ EnsureTlsInitialization();
ThreadData* current_thread_data =
reinterpret_cast<ThreadData*>(tls_index_.Get());
if (current_thread_data)
@@ -669,7 +672,7 @@ void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
}
}
-void ThreadData::Initialize() {
+void ThreadData::EnsureTlsInitialization() {
if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone else did the initialization.
// Due to racy lazy initialization in tests, we'll need to recheck status_
@@ -709,7 +712,7 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
DCHECK_GE(status, DEACTIVATED);
DCHECK_LE(status, PROFILING_ACTIVE);
- Initialize(); // No-op if already initialized.
+ EnsureTlsInitialization(); // No-op if already initialized.
if (status > DEACTIVATED)
status = PROFILING_ACTIVE;
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 168b17db04..7ef0317c39 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -174,7 +174,7 @@ struct TrackingInfo;
// (worker threads don't have message loops generally, and hence gathering from
// them will continue to be asynchronous). We had an implementation of this in
// the past, but the difficulty is dealing with message loops being terminated.
-// We can *try* to spam the available threads via some message loop proxy to
+// We can *try* to spam the available threads via some task runner to
// achieve this feat, and it *might* be valuable when we are collecting data
// for upload via UMA (where correctness of data may be more significant than
// for a single screen of about:profiler).
@@ -514,7 +514,7 @@ class BASE_EXPORT ThreadData {
// Initializes all statics if needed (this initialization call should be made
// while we are single threaded).
- static void Initialize();
+ static void EnsureTlsInitialization();
// Sets internal status_.
// If |status| is false, then status_ is set to DEACTIVATED.
diff --git a/base/tuple.h b/base/tuple.h
index df69bf0116..e82f2e5f06 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -2,27 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// A Tuple is a generic templatized container, similar in concept to std::pair
-// and std::tuple. The convenient MakeTuple() function takes any number of
-// arguments and will construct and return the appropriate Tuple object. The
-// functions DispatchToMethod and DispatchToFunction take a function pointer or
-// instance and method pointer, and unpack a tuple into arguments to the call.
-//
-// Tuple elements are copied by value, and stored in the tuple. See the unit
-// tests for more details of how/when the values are copied.
+// Use std::tuple as tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
//
// Example usage:
// // These two methods of creating a Tuple are identical.
-// Tuple<int, const char*> tuple_a(1, "wee");
-// Tuple<int, const char*> tuple_b = MakeTuple(1, "wee");
+// std::tuple<int, const char*> tuple_a(1, "wee");
+// std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
//
// void SomeFunc(int a, const char* b) { }
// DispatchToFunction(&SomeFunc, tuple_a); // SomeFunc(1, "wee")
// DispatchToFunction(
-// &SomeFunc, MakeTuple(10, "foo")); // SomeFunc(10, "foo")
+// &SomeFunc, std::make_tuple(10, "foo")); // SomeFunc(10, "foo")
//
// struct { void SomeMeth(int a, int b, int c) { } } foo;
-// DispatchToMethod(&foo, &Foo::SomeMeth, MakeTuple(1, 2, 3));
+// DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
// // foo->SomeMeth(1, 2, 3);
#ifndef BASE_TUPLE_H_
@@ -107,46 +104,23 @@ struct MakeIndexSequenceImpl<N, Ns...>
#endif // defined(OS_WIN) && defined(_PREFAST_)
-template <size_t N>
-using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
-
-// Tuple -----------------------------------------------------------------------
-//
-// This set of classes is useful for bundling 0 or more heterogeneous data types
-// into a single variable. The advantage of this is that it greatly simplifies
-// function objects that need to take an arbitrary number of parameters; see
-// RunnableMethod and IPC::MessageWithTuple.
-//
-// Tuple<> is supplied to act as a 'void' type. It can be used, for example,
-// when dispatching to a function that accepts no arguments (see the
-// Dispatchers below).
-// Tuple<A> is rarely useful. One such use is when A is non-const ref that you
-// want filled by the dispatchee, and the tuple is merely a container for that
-// output (a "tier"). See MakeRefTuple and its usages.
-
-template <typename... Ts>
-using Tuple = std::tuple<Ts...>;
-
-using std::get;
-
-// Tuple creators -------------------------------------------------------------
-//
-// Helper functions for constructing tuples while inferring the template
-// argument types.
-
-template <typename... Ts>
-inline Tuple<Ts...> MakeTuple(const Ts&... arg) {
- return Tuple<Ts...>(arg...);
+// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
+// rvalue-reference of a tuple, where an rvalue-reference is expected.
+template <size_t I, typename... Ts>
+typename std::tuple_element<I, std::tuple<Ts...>>::type&& get(
+ std::tuple<Ts...>&& t) {
+ using ElemType = typename std::tuple_element<I, std::tuple<Ts...>>::type;
+ return std::forward<ElemType>(std::get<I>(t));
}
-// The following set of helpers make what Boost refers to as "Tiers" - a tuple
-// of references.
-
-template <typename... Ts>
-inline Tuple<Ts&...> MakeRefTuple(Ts&... arg) {
- return Tuple<Ts&...>(arg...);
+template <size_t I, typename T>
+auto get(T& t) -> decltype(std::get<I>(t)) {
+ return std::get<I>(t);
}
+template <size_t N>
+using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
+
// Dispatchers ----------------------------------------------------------------
//
// Helper functions that call the given method on an object, with the unpacked
@@ -161,15 +135,15 @@ inline Tuple<Ts&...> MakeRefTuple(Ts&... arg) {
template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const Tuple<Ts...>& arg,
+ const std::tuple<Ts...>& arg,
IndexSequence<Ns...>) {
- (obj->*method)(internal::Unwrap(get<Ns>(arg))...);
+ (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
}
template <typename ObjT, typename Method, typename... Ts>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const Tuple<Ts...>& arg) {
+ const std::tuple<Ts...>& arg) {
DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
}
@@ -177,13 +151,14 @@ inline void DispatchToMethod(const ObjT& obj,
template <typename Function, typename... Ts, size_t... Ns>
inline void DispatchToFunctionImpl(Function function,
- const Tuple<Ts...>& arg,
+ const std::tuple<Ts...>& arg,
IndexSequence<Ns...>) {
- (*function)(internal::Unwrap(get<Ns>(arg))...);
+ (*function)(internal::Unwrap(std::get<Ns>(arg))...);
}
template <typename Function, typename... Ts>
-inline void DispatchToFunction(Function function, const Tuple<Ts...>& arg) {
+inline void DispatchToFunction(Function function,
+ const std::tuple<Ts...>& arg) {
DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
}
@@ -197,18 +172,19 @@ template <typename ObjT,
size_t... OutNs>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const Tuple<InTs...>& in,
- Tuple<OutTs...>* out,
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out,
IndexSequence<InNs...>,
IndexSequence<OutNs...>) {
- (obj->*method)(internal::Unwrap(get<InNs>(in))..., &get<OutNs>(*out)...);
+ (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
+ &std::get<OutNs>(*out)...);
}
template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const Tuple<InTs...>& in,
- Tuple<OutTs...>* out) {
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out) {
DispatchToMethodImpl(obj, method, in, out,
MakeIndexSequence<sizeof...(InTs)>(),
MakeIndexSequence<sizeof...(OutTs)>());
diff --git a/base/tuple_unittest.cc b/base/tuple_unittest.cc
index 55a9139235..6f90c29220 100644
--- a/base/tuple_unittest.cc
+++ b/base/tuple_unittest.cc
@@ -32,51 +32,34 @@ struct Addz {
} // namespace
TEST(TupleTest, Basic) {
- base::Tuple<> t0 = base::MakeTuple();
+ std::tuple<> t0 = std::make_tuple();
ALLOW_UNUSED_LOCAL(t0);
- base::Tuple<int> t1(1);
- base::Tuple<int, const char*> t2 =
- base::MakeTuple(1, static_cast<const char*>("wee"));
- base::Tuple<int, int, int> t3(1, 2, 3);
- base::Tuple<int, int, int, int*> t4(1, 2, 3, &get<0>(t1));
- base::Tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &get<0>(t4));
- base::Tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &get<0>(t4));
-
- EXPECT_EQ(1, get<0>(t1));
- EXPECT_EQ(1, get<0>(t2));
- EXPECT_EQ(1, get<0>(t3));
- EXPECT_EQ(2, get<1>(t3));
- EXPECT_EQ(3, get<2>(t3));
- EXPECT_EQ(1, get<0>(t4));
- EXPECT_EQ(2, get<1>(t4));
- EXPECT_EQ(3, get<2>(t4));
- EXPECT_EQ(1, get<0>(t5));
- EXPECT_EQ(2, get<1>(t5));
- EXPECT_EQ(3, get<2>(t5));
- EXPECT_EQ(4, get<3>(t5));
- EXPECT_EQ(1, get<0>(t6));
- EXPECT_EQ(2, get<1>(t6));
- EXPECT_EQ(3, get<2>(t6));
- EXPECT_EQ(4, get<3>(t6));
- EXPECT_EQ(5, get<4>(t6));
-
- EXPECT_EQ(1, get<0>(t1));
+ std::tuple<int> t1(1);
+ std::tuple<int, const char*> t2 =
+ std::make_tuple(1, static_cast<const char*>("wee"));
+ ALLOW_UNUSED_LOCAL(t2);
+ std::tuple<int, int, int> t3(1, 2, 3);
+ std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
+ std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
+ std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
+
+ EXPECT_EQ(1, std::get<0>(t1));
DispatchToFunction(&DoAdd, t4);
- EXPECT_EQ(6, get<0>(t1));
+ EXPECT_EQ(6, std::get<0>(t1));
int res = 0;
- DispatchToFunction(&DoAdd, base::MakeTuple(9, 8, 7, &res));
+ DispatchToFunction(&DoAdd, std::make_tuple(9, 8, 7, &res));
EXPECT_EQ(24, res);
Addy addy;
- EXPECT_EQ(1, get<0>(t4));
+ EXPECT_EQ(1, std::get<0>(t4));
DispatchToMethod(&addy, &Addy::DoAdd, t5);
- EXPECT_EQ(10, get<0>(t4));
+ EXPECT_EQ(10, std::get<0>(t4));
Addz addz;
- EXPECT_EQ(10, get<0>(t4));
+ EXPECT_EQ(10, std::get<0>(t4));
DispatchToMethod(&addz, &Addz::DoAdd, t6);
- EXPECT_EQ(15, get<0>(t4));
+ EXPECT_EQ(15, std::get<0>(t4));
}
namespace {
@@ -111,8 +94,8 @@ TEST(TupleTest, Copying) {
bool res = false;
// Creating the tuple should copy the class to store internally in the tuple.
- base::Tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
- get<1>(tuple) = &get<0>(tuple);
+ std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
+ std::get<1>(tuple) = &std::get<0>(tuple);
EXPECT_EQ(2, CopyLogger::TimesConstructed);
EXPECT_EQ(1, CopyLogger::TimesCopied);
@@ -131,4 +114,30 @@ TEST(TupleTest, Copying) {
EXPECT_EQ(2, CopyLogger::TimesCopied);
}
+TEST(TupleTest, Get) {
+ int i = 1;
+ int j = 2;
+ std::tuple<int, int&, int&&> t(3, i, std::move(j));
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<0>(t))>::value));
+ EXPECT_EQ(3, base::get<0>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<1>(t))>::value));
+ EXPECT_EQ(1, base::get<1>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<2>(t))>::value));
+ EXPECT_EQ(2, base::get<2>(t));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<0>(std::move(t)))>::value));
+ EXPECT_EQ(3, base::get<0>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&,
+ decltype(base::get<1>(std::move(t)))>::value));
+ EXPECT_EQ(1, base::get<1>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<2>(std::move(t)))>::value));
+ EXPECT_EQ(2, base::get<2>(std::move(t)));
+}
+
} // namespace base
diff --git a/base/values.cc b/base/values.cc
index 5f6eaae01c..d579699079 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -14,7 +14,6 @@
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
-#include "base/move.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
@@ -29,8 +28,8 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
// expects |node| to always be non-NULL.
std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
std::unique_ptr<ListValue> copy;
- for (ListValue::const_iterator it = list.begin(); it != list.end(); ++it) {
- std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(**it);
+ for (const auto& entry : list) {
+ std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(*entry);
if (child_copy) {
if (!copy)
copy.reset(new ListValue);
@@ -68,22 +67,6 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
}
}
-// A small functor for comparing Values for std::find_if and similar.
-class ValueEquals {
- public:
- // Pass the value against which all consecutive calls of the () operator will
- // compare their argument to. This Value object must not be destroyed while
- // the ValueEquals is in use.
- explicit ValueEquals(const Value* first) : first_(first) { }
-
- bool operator ()(const Value* second) const {
- return first_->Equals(second);
- }
-
- private:
- const Value* first_;
-};
-
} // namespace
Value::~Value() {
@@ -321,12 +304,12 @@ BinaryValue::~BinaryValue() {
}
// static
-BinaryValue* BinaryValue::CreateWithCopiedBuffer(const char* buffer,
- size_t size) {
- char* buffer_copy = new char[size];
- memcpy(buffer_copy, buffer, size);
- std::unique_ptr<char[]> scoped_buffer_copy(buffer_copy);
- return new BinaryValue(std::move(scoped_buffer_copy), size);
+std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
+ const char* buffer,
+ size_t size) {
+ std::unique_ptr<char[]> buffer_copy(new char[size]);
+ memcpy(buffer_copy.get(), buffer, size);
+ return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
}
bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
@@ -336,7 +319,7 @@ bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
}
BinaryValue* BinaryValue::DeepCopy() const {
- return CreateWithCopiedBuffer(buffer_.get(), size_);
+ return CreateWithCopiedBuffer(buffer_.get(), size_).release();
}
bool BinaryValue::Equals(const Value* other) const {
@@ -383,18 +366,12 @@ bool DictionaryValue::GetAsDictionary(const DictionaryValue** out_value) const {
bool DictionaryValue::HasKey(const std::string& key) const {
DCHECK(IsStringUTF8(key));
- ValueMap::const_iterator current_entry = dictionary_.find(key);
+ auto current_entry = dictionary_.find(key);
DCHECK((current_entry == dictionary_.end()) || current_entry->second);
return current_entry != dictionary_.end();
}
void DictionaryValue::Clear() {
- ValueMap::iterator dict_iterator = dictionary_.begin();
- while (dict_iterator != dictionary_.end()) {
- delete dict_iterator->second;
- ++dict_iterator;
- }
-
dictionary_.clear();
}
@@ -452,16 +429,7 @@ void DictionaryValue::SetString(const std::string& path,
void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
std::unique_ptr<Value> in_value) {
- Value* bare_ptr = in_value.release();
- // If there's an existing value here, we need to delete it, because
- // we own all our children.
- std::pair<ValueMap::iterator, bool> ins_res =
- dictionary_.insert(std::make_pair(key, bare_ptr));
- if (!ins_res.second) {
- DCHECK_NE(ins_res.first->second, bare_ptr); // This would be bogus
- delete ins_res.first->second;
- ins_res.first->second = bare_ptr;
- }
+ dictionary_[key] = std::move(in_value);
}
void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
@@ -645,13 +613,12 @@ bool DictionaryValue::GetList(const std::string& path, ListValue** out_value) {
bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
const Value** out_value) const {
DCHECK(IsStringUTF8(key));
- ValueMap::const_iterator entry_iterator = dictionary_.find(key);
+ auto entry_iterator = dictionary_.find(key);
if (entry_iterator == dictionary_.end())
return false;
- const Value* entry = entry_iterator->second;
if (out_value)
- *out_value = entry;
+ *out_value = entry_iterator->second.get();
return true;
}
@@ -775,15 +742,12 @@ bool DictionaryValue::RemoveWithoutPathExpansion(
const std::string& key,
std::unique_ptr<Value>* out_value) {
DCHECK(IsStringUTF8(key));
- ValueMap::iterator entry_iterator = dictionary_.find(key);
+ auto entry_iterator = dictionary_.find(key);
if (entry_iterator == dictionary_.end())
return false;
- Value* entry = entry_iterator->second;
if (out_value)
- out_value->reset(entry);
- else
- delete entry;
+ *out_value = std::move(entry_iterator->second);
dictionary_.erase(entry_iterator);
return true;
}
@@ -849,10 +813,9 @@ DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
DictionaryValue* result = new DictionaryValue;
- for (ValueMap::const_iterator current_entry(dictionary_.begin());
- current_entry != dictionary_.end(); ++current_entry) {
- result->SetWithoutPathExpansion(current_entry->first,
- current_entry->second->DeepCopy());
+ for (const auto& current_entry : dictionary_) {
+ result->SetWithoutPathExpansion(current_entry.first,
+ current_entry.second->CreateDeepCopy());
}
return result;
@@ -904,12 +867,14 @@ ListValue::~ListValue() {
}
void ListValue::Clear() {
- for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i)
- delete *i;
list_.clear();
}
bool ListValue::Set(size_t index, Value* in_value) {
+ return Set(index, WrapUnique(in_value));
+}
+
+bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
if (!in_value)
return false;
@@ -917,25 +882,21 @@ bool ListValue::Set(size_t index, Value* in_value) {
// Pad out any intermediate indexes with null settings
while (index > list_.size())
Append(CreateNullValue());
- Append(in_value);
+ Append(std::move(in_value));
} else {
+ // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
DCHECK(list_[index] != in_value);
- delete list_[index];
- list_[index] = in_value;
+ list_[index] = std::move(in_value);
}
return true;
}
-bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
- return Set(index, in_value.release());
-}
-
bool ListValue::Get(size_t index, const Value** out_value) const {
if (index >= list_.size())
return false;
if (out_value)
- *out_value = list_[index];
+ *out_value = list_[index].get();
return true;
}
@@ -1046,20 +1007,17 @@ bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
return false;
if (out_value)
- out_value->reset(list_[index]);
- else
- delete list_[index];
+ *out_value = std::move(list_[index]);
list_.erase(list_.begin() + index);
return true;
}
bool ListValue::Remove(const Value& value, size_t* index) {
- for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i) {
- if ((*i)->Equals(&value)) {
- size_t previous_index = i - list_.begin();
- delete *i;
- list_.erase(i);
+ for (auto it = list_.begin(); it != list_.end(); ++it) {
+ if ((*it)->Equals(&value)) {
+ size_t previous_index = it - list_.begin();
+ list_.erase(it);
if (index)
*index = previous_index;
@@ -1072,20 +1030,18 @@ bool ListValue::Remove(const Value& value, size_t* index) {
ListValue::iterator ListValue::Erase(iterator iter,
std::unique_ptr<Value>* out_value) {
if (out_value)
- out_value->reset(*iter);
- else
- delete *iter;
+ *out_value = std::move(*Storage::iterator(iter));
return list_.erase(iter);
}
void ListValue::Append(std::unique_ptr<Value> in_value) {
- Append(in_value.release());
+ list_.push_back(std::move(in_value));
}
void ListValue::Append(Value* in_value) {
DCHECK(in_value);
- list_.push_back(in_value);
+ Append(WrapUnique(in_value));
}
void ListValue::AppendBoolean(bool in_value) {
@@ -1124,13 +1080,13 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(Value* in_value) {
DCHECK(in_value);
- for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i) {
- if ((*i)->Equals(in_value)) {
+ for (const auto& entry : list_) {
+ if (entry->Equals(in_value)) {
delete in_value;
return false;
}
}
- list_.push_back(in_value);
+ list_.emplace_back(in_value);
return true;
}
@@ -1139,12 +1095,15 @@ bool ListValue::Insert(size_t index, Value* in_value) {
if (index > list_.size())
return false;
- list_.insert(list_.begin() + index, in_value);
+ list_.insert(list_.begin() + index, WrapUnique(in_value));
return true;
}
ListValue::const_iterator ListValue::Find(const Value& value) const {
- return std::find_if(list_.begin(), list_.end(), ValueEquals(&value));
+ return std::find_if(list_.begin(), list_.end(),
+ [&value](const std::unique_ptr<Value>& entry) {
+ return entry->Equals(&value);
+ });
}
void ListValue::Swap(ListValue* other) {
@@ -1166,8 +1125,8 @@ bool ListValue::GetAsList(const ListValue** out_value) const {
ListValue* ListValue::DeepCopy() const {
ListValue* result = new ListValue;
- for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i)
- result->Append((*i)->DeepCopy());
+ for (const auto& entry : list_)
+ result->Append(entry->CreateDeepCopy());
return result;
}
@@ -1182,11 +1141,11 @@ bool ListValue::Equals(const Value* other) const {
const ListValue* other_list =
static_cast<const ListValue*>(other);
- const_iterator lhs_it, rhs_it;
+ Storage::const_iterator lhs_it, rhs_it;
for (lhs_it = begin(), rhs_it = other_list->begin();
lhs_it != end() && rhs_it != other_list->end();
++lhs_it, ++rhs_it) {
- if (!(*lhs_it)->Equals(*rhs_it))
+ if (!(*lhs_it)->Equals(rhs_it->get()))
return false;
}
if (lhs_it != end() || rhs_it != other_list->end())
diff --git a/base/values.h b/base/values.h
index e2506cc14f..e3d60891b3 100644
--- a/base/values.h
+++ b/base/values.h
@@ -42,9 +42,6 @@ class ListValue;
class StringValue;
class Value;
-typedef std::vector<Value*> ValueVector;
-typedef std::map<std::string, Value*> ValueMap;
-
// The Value class is the base class for Values. A Value can be instantiated
// via the Create*Value() factory methods, or by directly creating instances of
// the subclasses.
@@ -185,7 +182,8 @@ class BASE_EXPORT BinaryValue: public Value {
// For situations where you want to keep ownership of your buffer, this
// factory method creates a new BinaryValue by copying the contents of the
// buffer that's passed in.
- static BinaryValue* CreateWithCopiedBuffer(const char* buffer, size_t size);
+ static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+ size_t size);
size_t GetSize() const { return size_; }
@@ -210,6 +208,7 @@ class BASE_EXPORT BinaryValue: public Value {
// are |std::string|s and should be UTF-8 encoded.
class BASE_EXPORT DictionaryValue : public Value {
public:
+ using Storage = std::map<std::string, std::unique_ptr<Value>>;
// Returns |value| if it is a dictionary, nullptr otherwise.
static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
@@ -372,7 +371,7 @@ class BASE_EXPORT DictionaryValue : public Value {
private:
const DictionaryValue& target_;
- ValueMap::const_iterator it_;
+ Storage::const_iterator it_;
};
// Overridden from Value:
@@ -382,7 +381,7 @@ class BASE_EXPORT DictionaryValue : public Value {
bool Equals(const Value* other) const override;
private:
- ValueMap dictionary_;
+ Storage dictionary_;
DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
};
@@ -390,8 +389,9 @@ class BASE_EXPORT DictionaryValue : public Value {
// This type of Value represents a list of other Value values.
class BASE_EXPORT ListValue : public Value {
public:
- typedef ValueVector::iterator iterator;
- typedef ValueVector::const_iterator const_iterator;
+ using Storage = std::vector<std::unique_ptr<Value>>;
+ using const_iterator = Storage::const_iterator;
+ using iterator = Storage::iterator;
// Returns |value| if it is a list, nullptr otherwise.
static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
@@ -508,7 +508,7 @@ class BASE_EXPORT ListValue : public Value {
std::unique_ptr<ListValue> CreateDeepCopy() const;
private:
- ValueVector list_;
+ Storage list_;
DISALLOW_COPY_AND_ASSIGN(ListValue);
};
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index ac7883054f..d68522234d 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -127,7 +127,7 @@ TEST(ValuesTest, BinaryValue) {
char stack_buffer[42];
memset(stack_buffer, '!', 42);
- binary.reset(BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42));
+ binary = BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42);
ASSERT_TRUE(binary.get());
ASSERT_TRUE(binary->GetBuffer());
ASSERT_NE(stack_buffer, binary->GetBuffer());
diff --git a/base/version.cc b/base/version.cc
new file mode 100644
index 0000000000..02213fbf15
--- /dev/null
+++ b/base/version.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Parses the dot-separated numbers inside |version_str| and constructs a
+// vector of valid integers. It stops when it reaches an invalid item
+// (including the wildcard character). |parsed| is the resulting integer
+// vector. Function returns true if all numbers were parsed successfully,
+// false otherwise.
+bool ParseVersionNumbers(const std::string& version_str,
+ std::vector<uint32_t>* parsed) {
+ std::vector<StringPiece> numbers =
+ SplitStringPiece(version_str, ".", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+ if (numbers.empty())
+ return false;
+
+ for (auto it = numbers.begin(); it != numbers.end(); ++it) {
+ if (StartsWith(*it, "+", CompareCase::SENSITIVE))
+ return false;
+
+ unsigned int num;
+ if (!StringToUint(*it, &num))
+ return false;
+
+ // This throws out leading zeros for the first item only.
+ if (it == numbers.begin() && UintToString(num) != *it)
+ return false;
+
+ // StringToUint returns unsigned int but Version fields are uint32_t.
+ static_assert(sizeof (uint32_t) == sizeof (unsigned int),
+ "uint32_t must be same as unsigned int");
+ parsed->push_back(num);
+ }
+ return true;
+}
+
+// Compares version components in |components1| with components in
+// |components2|. Returns -1, 0 or 1 if |components1| is less than, equal to,
+// or greater than |components2|, respectively.
+int CompareVersionComponents(const std::vector<uint32_t>& components1,
+ const std::vector<uint32_t>& components2) {
+ const size_t count = std::min(components1.size(), components2.size());
+ for (size_t i = 0; i < count; ++i) {
+ if (components1[i] > components2[i])
+ return 1;
+ if (components1[i] < components2[i])
+ return -1;
+ }
+ if (components1.size() > components2.size()) {
+ for (size_t i = count; i < components1.size(); ++i) {
+ if (components1[i] > 0)
+ return 1;
+ }
+ } else if (components1.size() < components2.size()) {
+ for (size_t i = count; i < components2.size(); ++i) {
+ if (components2[i] > 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+} // namespace
+
+Version::Version() {
+}
+
+Version::Version(const Version& other) = default;
+
+Version::~Version() {
+}
+
+Version::Version(const std::string& version_str) {
+ std::vector<uint32_t> parsed;
+ if (!ParseVersionNumbers(version_str, &parsed))
+ return;
+
+ components_.swap(parsed);
+}
+
+bool Version::IsValid() const {
+ return (!components_.empty());
+}
+
+// static
+bool Version::IsValidWildcardString(const std::string& wildcard_string) {
+ std::string version_string = wildcard_string;
+ if (EndsWith(version_string, ".*", CompareCase::SENSITIVE))
+ version_string.resize(version_string.size() - 2);
+
+ Version version(version_string);
+ return version.IsValid();
+}
+
+int Version::CompareToWildcardString(const std::string& wildcard_string) const {
+ DCHECK(IsValid());
+ DCHECK(Version::IsValidWildcardString(wildcard_string));
+
+ // Default behavior if the string doesn't end with a wildcard.
+ if (!EndsWith(wildcard_string, ".*", CompareCase::SENSITIVE)) {
+ Version version(wildcard_string);
+ DCHECK(version.IsValid());
+ return CompareTo(version);
+ }
+
+ std::vector<uint32_t> parsed;
+ const bool success = ParseVersionNumbers(
+ wildcard_string.substr(0, wildcard_string.length() - 2), &parsed);
+ DCHECK(success);
+ const int comparison = CompareVersionComponents(components_, parsed);
+ // If the version is smaller than the wildcard version's |parsed| vector,
+ // then the wildcard has no effect (e.g. comparing 1.2.3 and 1.3.*) and the
+ // version is still smaller. Same logic for equality (e.g. comparing 1.2.2 to
+ // 1.2.2.* is 0 regardless of the wildcard). Under this logic,
+ // 1.2.0.0.0.0 compared to 1.2.* is 0.
+ if (comparison == -1 || comparison == 0)
+ return comparison;
+
+ // Catch the case where the digits of |parsed| are found in |components_|,
+ // which means that the two are equal since |parsed| has a trailing "*".
+ // (e.g. 1.2.3 vs. 1.2.* will return 0). All other cases return 1 since
+ // components is greater (e.g. 3.2.3 vs 1.*).
+ DCHECK_GT(parsed.size(), 0UL);
+ const size_t min_num_comp = std::min(components_.size(), parsed.size());
+ for (size_t i = 0; i < min_num_comp; ++i) {
+ if (components_[i] != parsed[i])
+ return 1;
+ }
+ return 0;
+}
+
+int Version::CompareTo(const Version& other) const {
+ DCHECK(IsValid());
+ DCHECK(other.IsValid());
+ return CompareVersionComponents(components_, other.components_);
+}
+
+const std::string Version::GetString() const {
+ DCHECK(IsValid());
+ std::string version_str;
+ size_t count = components_.size();
+ for (size_t i = 0; i < count - 1; ++i) {
+ version_str.append(UintToString(components_[i]));
+ version_str.append(".");
+ }
+ version_str.append(UintToString(components_[count - 1]));
+ return version_str;
+}
+
+bool operator==(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) == 0;
+}
+
+bool operator!=(const Version& v1, const Version& v2) {
+ return !(v1 == v2);
+}
+
+bool operator<(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) < 0;
+}
+
+bool operator<=(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) <= 0;
+}
+
+bool operator>(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) > 0;
+}
+
+bool operator>=(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) >= 0;
+}
+
+std::ostream& operator<<(std::ostream& stream, const Version& v) {
+ return stream << v.GetString();
+}
+
+} // namespace base
diff --git a/base/version.h b/base/version.h
new file mode 100644
index 0000000000..25b570a4e3
--- /dev/null
+++ b/base/version.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VERSION_H_
+#define BASE_VERSION_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Version represents a dotted version number, like "1.2.3.4", supporting
+// parsing and comparison.
+class BASE_EXPORT Version {
+ public:
+ // The only thing you can legally do to a default constructed
+ // Version object is assign to it.
+ Version();
+
+ Version(const Version& other);
+
+ ~Version();
+
+ // Initializes from a decimal dotted version number, like "0.1.1".
+ // Each component is limited to a uint32_t. Call IsValid() to learn
+ // the outcome.
+ explicit Version(const std::string& version_str);
+
+ // Returns true if the object contains a valid version number.
+ bool IsValid() const;
+
+ // Returns true if the version wildcard string is valid. The version wildcard
+ // string may end with ".*" (e.g. 1.2.*, 1.*). Any other arrangement with "*"
+// is invalid (e.g. 1.*.3 or 1.2.3*). This function defaults to standard
+ // Version behavior (IsValid) if no wildcard is present.
+ static bool IsValidWildcardString(const std::string& wildcard_string);
+
+ // Returns -1, 0, 1 for <, ==, >.
+ int CompareTo(const Version& other) const;
+
+ // Given a valid version object, compare if a |wildcard_string| results in a
+ // newer version. This function will default to CompareTo if the string does
+// not end in a wildcard sequence ".*". IsValidWildcardString(wildcard_string)
+// must be true before using this function.
+ int CompareToWildcardString(const std::string& wildcard_string) const;
+
+ // Return the string representation of this version.
+ const std::string GetString() const;
+
+ const std::vector<uint32_t>& components() const { return components_; }
+
+ private:
+ std::vector<uint32_t> components_;
+};
+
+BASE_EXPORT bool operator==(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator!=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>=(const Version& v1, const Version& v2);
+BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
+
+} // namespace base
+
+// TODO(xhwang) remove this when all users are updated to explicitly use the
+// namespace
+using base::Version;
+
+#endif // BASE_VERSION_H_
diff --git a/base/version_unittest.cc b/base/version_unittest.cc
new file mode 100644
index 0000000000..5d9ea9973c
--- /dev/null
+++ b/base/version_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(VersionTest, DefaultConstructor) {
+ Version v;
+ EXPECT_FALSE(v.IsValid());
+}
+
+TEST(VersionTest, ValueSemantics) {
+ Version v1("1.2.3.4");
+ EXPECT_TRUE(v1.IsValid());
+ Version v3;
+ EXPECT_FALSE(v3.IsValid());
+ {
+ Version v2(v1);
+ v3 = v2;
+ EXPECT_TRUE(v2.IsValid());
+ EXPECT_EQ(v1, v2);
+ }
+ EXPECT_EQ(v3, v1);
+}
+
+TEST(VersionTest, GetVersionFromString) {
+ static const struct version_string {
+ const char* input;
+ size_t parts;
+ uint32_t firstpart;
+ bool success;
+ } cases[] = {
+ {"", 0, 0, false},
+ {" ", 0, 0, false},
+ {"\t", 0, 0, false},
+ {"\n", 0, 0, false},
+ {" ", 0, 0, false},
+ {".", 0, 0, false},
+ {" . ", 0, 0, false},
+ {"0", 1, 0, true},
+ {"0.", 0, 0, false},
+ {"0.0", 2, 0, true},
+ {"4294967295.0", 2, 4294967295, true},
+ {"4294967296.0", 0, 0, false},
+ {"-1.0", 0, 0, false},
+ {"1.-1.0", 0, 0, false},
+ {"1,--1.0", 0, 0, false},
+ {"+1.0", 0, 0, false},
+ {"1.+1.0", 0, 0, false},
+ {"1+1.0", 0, 0, false},
+ {"++1.0", 0, 0, false},
+ {"1.0a", 0, 0, false},
+ {"1.2.3.4.5.6.7.8.9.0", 10, 1, true},
+ {"02.1", 0, 0, false},
+ {"0.01", 2, 0, true},
+ {"f.1", 0, 0, false},
+ {"15.007.20011", 3, 15, true},
+ {"15.5.28.130162", 4, 15, true},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ Version version(cases[i].input);
+ EXPECT_EQ(cases[i].success, version.IsValid());
+ if (cases[i].success) {
+ EXPECT_EQ(cases[i].parts, version.components().size());
+ EXPECT_EQ(cases[i].firstpart, version.components()[0]);
+ }
+ }
+}
+
+TEST(VersionTest, Compare) {
+ static const struct version_compare {
+ const char* lhs;
+ const char* rhs;
+ int expected;
+ } cases[] = {
+ {"1.0", "1.0", 0},
+ {"1.0", "0.0", 1},
+ {"1.0", "2.0", -1},
+ {"1.0", "1.1", -1},
+ {"1.1", "1.0", 1},
+ {"1.0", "1.0.1", -1},
+ {"1.1", "1.0.1", 1},
+ {"1.1", "1.0.1", 1},
+ {"1.0.0", "1.0", 0},
+ {"1.0.3", "1.0.20", -1},
+ {"11.0.10", "15.007.20011", -1},
+ {"11.0.10", "15.5.28.130162", -1},
+ };
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ Version lhs(cases[i].lhs);
+ Version rhs(cases[i].rhs);
+ EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
+ cases[i].lhs << " ? " << cases[i].rhs;
+
+ // Test comparison operators
+ switch (cases[i].expected) {
+ case -1:
+ EXPECT_LT(lhs, rhs);
+ EXPECT_LE(lhs, rhs);
+ EXPECT_NE(lhs, rhs);
+ EXPECT_FALSE(lhs == rhs);
+ EXPECT_FALSE(lhs >= rhs);
+ EXPECT_FALSE(lhs > rhs);
+ break;
+ case 0:
+ EXPECT_FALSE(lhs < rhs);
+ EXPECT_LE(lhs, rhs);
+ EXPECT_FALSE(lhs != rhs);
+ EXPECT_EQ(lhs, rhs);
+ EXPECT_GE(lhs, rhs);
+ EXPECT_FALSE(lhs > rhs);
+ break;
+ case 1:
+ EXPECT_FALSE(lhs < rhs);
+ EXPECT_FALSE(lhs <= rhs);
+ EXPECT_NE(lhs, rhs);
+ EXPECT_FALSE(lhs == rhs);
+ EXPECT_GE(lhs, rhs);
+ EXPECT_GT(lhs, rhs);
+ break;
+ }
+ }
+}
+
+TEST(VersionTest, CompareToWildcardString) {
+ static const struct version_compare {
+ const char* lhs;
+ const char* rhs;
+ int expected;
+ } cases[] = {
+ {"1.0", "1.*", 0},
+ {"1.0", "0.*", 1},
+ {"1.0", "2.*", -1},
+ {"1.2.3", "1.2.3.*", 0},
+ {"10.0", "1.0.*", 1},
+ {"1.0", "3.0.*", -1},
+ {"1.4", "1.3.0.*", 1},
+ {"1.3.9", "1.3.*", 0},
+ {"1.4.1", "1.3.*", 1},
+ {"1.3", "1.4.5.*", -1},
+ {"1.5", "1.4.5.*", 1},
+ {"1.3.9", "1.3.*", 0},
+ {"1.2.0.0.0.0", "1.2.*", 0},
+ };
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ const Version version(cases[i].lhs);
+ const int result = version.CompareToWildcardString(cases[i].rhs);
+ EXPECT_EQ(result, cases[i].expected) << cases[i].lhs << "?" << cases[i].rhs;
+ }
+}
+
+TEST(VersionTest, IsValidWildcardString) {
+ static const struct version_compare {
+ const char* version;
+ bool expected;
+ } cases[] = {
+ {"1.0", true},
+ {"", false},
+ {"1.2.3.4.5.6", true},
+ {"1.2.3.*", true},
+ {"1.2.3.5*", false},
+ {"1.2.3.56*", false},
+ {"1.*.3", false},
+ {"20.*", true},
+ {"+2.*", false},
+ {"*", false},
+ {"*.2", false},
+ };
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ EXPECT_EQ(Version::IsValidWildcardString(cases[i].version),
+ cases[i].expected) << cases[i].version << "?" << cases[i].expected;
+ }
+}
+
+} // namespace
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
index 440a4ca765..c72e4592b9 100644
--- a/base/win/scoped_handle_test_dll.cc
+++ b/base/win/scoped_handle_test_dll.cc
@@ -6,6 +6,7 @@
#include <vector>
+#include "base/win/base_features.h"
#include "base/win/current_module.h"
#include "base/win/scoped_handle.h"
@@ -99,7 +100,7 @@ bool InternalRunLocationTest() {
HMODULE main_module = ::GetModuleHandle(NULL);
-#if defined(COMPONENT_BUILD)
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
// In a component build ActiveVerifier will always be created inside base.dll
// as the code always lives there.
if (verifier_module == my_module || verifier_module == main_module)
diff --git a/components/timers/BUILD.gn b/components/timers/BUILD.gn
index d6a7efb064..c6f4a12f94 100644
--- a/components/timers/BUILD.gn
+++ b/components/timers/BUILD.gn
@@ -12,3 +12,17 @@ static_library("timers") {
"//base",
]
}
+
+source_set("unit_tests") {
+ testonly = true
+
+ sources = [
+ "alarm_timer_unittest.cc",
+ ]
+
+ deps = [
+ ":timers",
+ "//base",
+ "//testing/gtest",
+ ]
+}
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index 088a5c131b..a912d934c5 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -175,36 +175,32 @@ test("crypto_unittests") {
]
}
+# This has no sources in some cases so can't be a static library.
source_set("test_support") {
- sources = [
- "scoped_test_nss_chromeos_user.cc",
- "scoped_test_nss_chromeos_user.h",
- "scoped_test_nss_db.cc",
- "scoped_test_nss_db.h",
- "scoped_test_system_nss_key_slot.cc",
- "scoped_test_system_nss_key_slot.h",
- ]
- deps = [
- ":crypto",
- ":platform",
- "//base",
- ]
+ testonly = true
+ sources = []
- if (!use_nss_certs) {
- sources -= [
+ if (use_nss_certs) {
+ sources += [
"scoped_test_nss_db.cc",
"scoped_test_nss_db.h",
]
}
- if (!is_chromeos) {
- sources -= [
+ if (is_chromeos) {
+ sources += [
"scoped_test_nss_chromeos_user.cc",
"scoped_test_nss_chromeos_user.h",
"scoped_test_system_nss_key_slot.cc",
"scoped_test_system_nss_key_slot.h",
]
}
+
+ deps = [
+ ":crypto",
+ ":platform",
+ "//base",
+ ]
}
config("platform_config") {
diff --git a/crypto/ec_private_key.h b/crypto/ec_private_key.h
index 1ee4aca8b0..a24219bef5 100644
--- a/crypto/ec_private_key.h
+++ b/crypto/ec_private_key.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <memory>
#include <string>
#include <vector>
@@ -37,16 +38,25 @@ class CRYPTO_EXPORT ECPrivateKey {
public:
~ECPrivateKey();
- // Creates a new random instance. Can return NULL if initialization fails.
+ // Creates a new random instance. Can return nullptr if initialization fails.
// The created key will use the NIST P-256 curve.
// TODO(mattm): Add a curve parameter.
- static ECPrivateKey* Create();
+ static std::unique_ptr<ECPrivateKey> Create();
+
+ // Create a new instance by importing an existing private key. The format is
+ // an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return
+ // nullptr if initialization fails.
+ static std::unique_ptr<ECPrivateKey> CreateFromPrivateKeyInfo(
+ const std::vector<uint8_t>& input);
// Creates a new instance by importing an existing key pair.
// The key pair is given as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
// block and an X.509 SubjectPublicKeyInfo block.
- // Returns NULL if initialization fails.
- static ECPrivateKey* CreateFromEncryptedPrivateKeyInfo(
+ // Returns nullptr if initialization fails.
+ //
+ // This function is deprecated. Use CreateFromPrivateKeyInfo for new code.
+ // See https://crbug.com/603319.
+ static std::unique_ptr<ECPrivateKey> CreateFromEncryptedPrivateKeyInfo(
const std::string& password,
const std::vector<uint8_t>& encrypted_private_key_info,
const std::vector<uint8_t>& subject_public_key_info);
@@ -69,7 +79,7 @@ class CRYPTO_EXPORT ECPrivateKey {
#endif
// Returns a copy of the object.
- ECPrivateKey* Copy() const;
+ std::unique_ptr<ECPrivateKey> Copy() const;
#if defined(USE_OPENSSL)
EVP_PKEY* key() { return key_; }
@@ -78,24 +88,26 @@ class CRYPTO_EXPORT ECPrivateKey {
SECKEYPublicKey* public_key() { return public_key_; }
#endif
+ // Exports the private key to a PKCS #8 PrivateKeyInfo block.
+ bool ExportPrivateKey(std::vector<uint8_t>* output) const;
+
// Exports the private key as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
// block and the public key as an X.509 SubjectPublicKeyInfo block.
// The |password| and |iterations| are used as inputs to the key derivation
// function for generating the encryption key. PKCS #5 recommends a minimum
// of 1000 iterations, on modern systems a larger value may be preferrable.
+ //
+ // This function is deprecated. Use ExportPrivateKey for new code. See
+ // https://crbug.com/603319.
bool ExportEncryptedPrivateKey(const std::string& password,
int iterations,
- std::vector<uint8_t>* output);
+ std::vector<uint8_t>* output) const;
// Exports the public key to an X.509 SubjectPublicKeyInfo block.
- bool ExportPublicKey(std::vector<uint8_t>* output);
+ bool ExportPublicKey(std::vector<uint8_t>* output) const;
// Exports the public key as an EC point in the uncompressed point format.
- bool ExportRawPublicKey(std::string* output);
-
- // Exports private key data for testing. The format of data stored into output
- // doesn't matter other than that it is consistent for the same key.
- bool ExportValueForTesting(std::vector<uint8_t>* output);
+ bool ExportRawPublicKey(std::string* output) const;
private:
// Constructor is private. Use one of the Create*() methods above instead.
diff --git a/crypto/hmac_unittest.cc b/crypto/hmac_unittest.cc
index f8dbd5a6ff..9c42dad2b4 100644
--- a/crypto/hmac_unittest.cc
+++ b/crypto/hmac_unittest.cc
@@ -287,7 +287,7 @@ TEST(HMACTest, EmptyKey) {
base::StringPiece data("");
crypto::HMAC hmac(crypto::HMAC::SHA1);
- ASSERT_TRUE(hmac.Init(NULL, 0));
+ ASSERT_TRUE(hmac.Init(nullptr, 0));
unsigned char digest[kSHA1DigestSize];
EXPECT_TRUE(hmac.Sign(data, digest, kSHA1DigestSize));
diff --git a/crypto/nss_key_util.cc b/crypto/nss_key_util.cc
index 1f726674ec..da8d9c39b1 100644
--- a/crypto/nss_key_util.cc
+++ b/crypto/nss_key_util.cc
@@ -7,24 +7,19 @@
#include <cryptohi.h>
#include <keyhi.h>
#include <pk11pub.h>
+#include <secmod.h>
#include <stdint.h>
#include <memory>
#include "base/logging.h"
#include "crypto/nss_util.h"
-
-#if defined(USE_NSS_CERTS)
-#include <secmod.h>
#include "crypto/nss_util_internal.h"
-#endif
namespace crypto {
namespace {
-#if defined(USE_NSS_CERTS)
-
struct PublicKeyInfoDeleter {
inline void operator()(CERTSubjectPublicKeyInfo* spki) {
SECKEY_DestroySubjectPublicKeyInfo(spki);
@@ -59,8 +54,6 @@ ScopedSECItem MakeIDFromSPKI(const std::vector<uint8_t>& input) {
return ScopedSECItem(PK11_MakeIDFromPubKey(&result->u.rsa.modulus));
}
-#endif // defined(USE_NSS_CERTS)
-
} // namespace
bool GenerateRSAKeyPairNSS(PK11SlotInfo* slot,
@@ -118,8 +111,6 @@ ScopedSECKEYPrivateKey ImportNSSKeyFromPrivateKeyInfo(
return ScopedSECKEYPrivateKey(key_raw);
}
-#if defined(USE_NSS_CERTS)
-
ScopedSECKEYPrivateKey FindNSSKeyFromPublicKeyInfo(
const std::vector<uint8_t>& input) {
EnsureNSSInit();
@@ -160,6 +151,4 @@ ScopedSECKEYPrivateKey FindNSSKeyFromPublicKeyInfoInSlot(
PK11_FindKeyByKeyID(slot, cka_id.get(), nullptr));
}
-#endif // defined(USE_NSS_CERTS)
-
} // namespace crypto
diff --git a/crypto/nss_key_util.h b/crypto/nss_key_util.h
index 12b948d25b..86934dd003 100644
--- a/crypto/nss_key_util.h
+++ b/crypto/nss_key_util.h
@@ -36,8 +36,6 @@ ImportNSSKeyFromPrivateKeyInfo(PK11SlotInfo* slot,
const std::vector<uint8_t>& input,
bool permanent);
-#if defined(USE_NSS_CERTS)
-
// Decodes |input| as a DER-encoded X.509 SubjectPublicKeyInfo and searches for
// the private key half in the key database. Returns the private key on success
// or nullptr on error.
@@ -51,8 +49,6 @@ CRYPTO_EXPORT ScopedSECKEYPrivateKey
FindNSSKeyFromPublicKeyInfoInSlot(const std::vector<uint8_t>& input,
PK11SlotInfo* slot);
-#endif // defined(USE_NSS_CERTS)
-
} // namespace crypto
#endif // CRYPTO_NSS_KEY_UTIL_H_
diff --git a/crypto/nss_key_util_unittest.cc b/crypto/nss_key_util_unittest.cc
index 99b52a9891..ced9850aa3 100644
--- a/crypto/nss_key_util_unittest.cc
+++ b/crypto/nss_key_util_unittest.cc
@@ -46,7 +46,6 @@ TEST_F(NSSKeyUtilTest, GenerateRSAKeyPairNSS) {
PK11_GetPrivateModulusLen(private_key.get()));
}
-#if defined(USE_NSS_CERTS)
TEST_F(NSSKeyUtilTest, FindNSSKeyFromPublicKeyInfo) {
// Create an NSS keypair, which will put the keys in the user's NSSDB.
ScopedSECKEYPublicKey public_key;
@@ -83,6 +82,5 @@ TEST_F(NSSKeyUtilTest, FailedFindNSSKeyFromPublicKeyInfo) {
EXPECT_FALSE(FindNSSKeyFromPublicKeyInfo(public_key_der));
}
-#endif // defined(USE_NSS_CERTS)
} // namespace crypto
diff --git a/crypto/nss_util.h b/crypto/nss_util.h
index 71e5a67b3a..a8b57ff9f0 100644
--- a/crypto/nss_util.h
+++ b/crypto/nss_util.h
@@ -24,12 +24,10 @@ class Time;
// initialization functions.
namespace crypto {
-#if defined(USE_NSS_CERTS)
// EarlySetupForNSSInit performs lightweight setup which must occur before the
// process goes multithreaded. This does not initialise NSS. For test, see
// EnsureNSSInit.
CRYPTO_EXPORT void EarlySetupForNSSInit();
-#endif
// Initialize NRPR if it isn't already initialized. This function is
// thread-safe, and NSPR will only ever be initialized once.
@@ -81,7 +79,6 @@ CRYPTO_EXPORT base::Time PRTimeToBaseTime(int64_t prtime);
// We use a int64_t instead of PRTime here to avoid depending on NSPR headers.
CRYPTO_EXPORT int64_t BaseTimeToPRTime(base::Time time);
-#if defined(USE_NSS_CERTS)
// NSS has a bug which can cause a deadlock or stall in some cases when writing
// to the certDB and keyDB. It also has a bug which causes concurrent key pair
// generations to scribble over each other. To work around this, we synchronize
@@ -102,7 +99,6 @@ class CRYPTO_EXPORT AutoNSSWriteLock {
base::Lock *lock_;
DISALLOW_COPY_AND_ASSIGN(AutoNSSWriteLock);
};
-#endif // defined(USE_NSS_CERTS)
} // namespace crypto
diff --git a/crypto/nss_util_internal.h b/crypto/nss_util_internal.h
index 0982a6e8c7..697e376e5a 100644
--- a/crypto/nss_util_internal.h
+++ b/crypto/nss_util_internal.h
@@ -24,7 +24,7 @@ namespace crypto {
// Opens an NSS software database in folder |path|, with the (potentially)
// user-visible description |description|. Returns the slot for the opened
-// database, or NULL if the database could not be opened.
+// database, or nullptr if the database could not be opened.
CRYPTO_EXPORT ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
const std::string& description);
@@ -57,8 +57,8 @@ CRYPTO_EXPORT ScopedPK11Slot GetSystemNSSKeySlot(
// through |GetSystemNSSKeySlot| and |IsTPMTokenReady| will return true.
// |InitializeTPMTokenAndSystemSlot|, which triggers the TPM initialization,
// does not have to be called if the test system slot is set.
-// This must must not be called consecutively with a |slot| != NULL. If |slot|
-// is NULL, the test system slot is unset.
+// This must must not be called consecutively with a |slot| != nullptr. If
+// |slot| is nullptr, the test system slot is unset.
CRYPTO_EXPORT void SetSystemKeySlotForTesting(ScopedPK11Slot slot);
// Prepare per-user NSS slot mapping. It is safe to call this function multiple
diff --git a/crypto/nss_util_unittest.cc b/crypto/nss_util_unittest.cc
index 28591916d3..729d5bf1b3 100644
--- a/crypto/nss_util_unittest.cc
+++ b/crypto/nss_util_unittest.cc
@@ -34,7 +34,8 @@ TEST(NSSUtilTest, PRTimeConversion) {
prxtime.tm_usec = 342000;
PRTime pr_time = PR_ImplodeTime(&prxtime);
- base::Time base_time = base::Time::FromUTCExploded(exploded);
+ base::Time base_time;
+ EXPECT_TRUE(base::Time::FromUTCExploded(exploded, &base_time));
EXPECT_EQ(base_time, PRTimeToBaseTime(pr_time));
EXPECT_EQ(pr_time, BaseTimeToPRTime(base_time));
diff --git a/crypto/secure_hash.cc b/crypto/secure_hash.cc
index 2bdf3d81de..9003b9cb69 100644
--- a/crypto/secure_hash.cc
+++ b/crypto/secure_hash.cc
@@ -13,6 +13,7 @@
#include <stddef.h>
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/pickle.h"
#include "crypto/openssl_util.h"
@@ -44,8 +45,8 @@ class SecureHashSHA256 : public SecureHash {
SHA256_Final(result.safe_buffer(), &ctx_);
}
- SecureHash* Clone() const override {
- return new SecureHashSHA256(*this);
+ std::unique_ptr<SecureHash> Clone() const override {
+ return base::MakeUnique<SecureHashSHA256>(*this);
}
size_t GetHashLength() const override { return SHA256_DIGEST_LENGTH; }
@@ -56,13 +57,13 @@ class SecureHashSHA256 : public SecureHash {
} // namespace
-SecureHash* SecureHash::Create(Algorithm algorithm) {
+std::unique_ptr<SecureHash> SecureHash::Create(Algorithm algorithm) {
switch (algorithm) {
case SHA256:
- return new SecureHashSHA256();
+ return base::MakeUnique<SecureHashSHA256>();
default:
NOTIMPLEMENTED();
- return NULL;
+ return nullptr;
}
}
diff --git a/crypto/secure_hash.h b/crypto/secure_hash.h
index a5590e5d0b..30b9fdc5f2 100644
--- a/crypto/secure_hash.h
+++ b/crypto/secure_hash.h
@@ -7,6 +7,8 @@
#include <stddef.h>
+#include <memory>
+
#include "base/macros.h"
#include "crypto/crypto_export.h"
@@ -21,7 +23,7 @@ class CRYPTO_EXPORT SecureHash {
};
virtual ~SecureHash() {}
- static SecureHash* Create(Algorithm type);
+ static std::unique_ptr<SecureHash> Create(Algorithm type);
virtual void Update(const void* input, size_t len) = 0;
virtual void Finish(void* output, size_t len) = 0;
@@ -30,7 +32,7 @@ class CRYPTO_EXPORT SecureHash {
// Create a clone of this SecureHash. The returned clone and this both
// represent the same hash state. But from this point on, calling
// Update()/Finish() on either doesn't affect the state of the other.
- virtual SecureHash* Clone() const = 0;
+ virtual std::unique_ptr<SecureHash> Clone() const = 0;
protected:
SecureHash() {}
diff --git a/crypto/signature_creator.h b/crypto/signature_creator.h
index abd1546164..1e8e856a02 100644
--- a/crypto/signature_creator.h
+++ b/crypto/signature_creator.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include <memory>
#include <vector>
#include "base/macros.h"
@@ -40,8 +41,8 @@ class CRYPTO_EXPORT SignatureCreator {
// Create an instance. The caller must ensure that the provided PrivateKey
// instance outlives the created SignatureCreator. Uses the HashAlgorithm
// specified.
- static SignatureCreator* Create(RSAPrivateKey* key, HashAlgorithm hash_alg);
-
+ static std::unique_ptr<SignatureCreator> Create(RSAPrivateKey* key,
+ HashAlgorithm hash_alg);
// Signs the precomputed |hash_alg| digest |data| using private |key| as
// specified in PKCS #1 v1.5.
diff --git a/crypto/symmetric_key.cc b/crypto/symmetric_key.cc
index 4da8bd8662..e3ecf624bc 100644
--- a/crypto/symmetric_key.cc
+++ b/crypto/symmetric_key.cc
@@ -10,7 +10,7 @@
#include <stdint.h>
#include <algorithm>
-#include <memory>
+#include <utility>
#include "base/logging.h"
#include "base/strings/string_util.h"
@@ -23,21 +23,22 @@ SymmetricKey::~SymmetricKey() {
}
// static
-SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
- size_t key_size_in_bits) {
+std::unique_ptr<SymmetricKey> SymmetricKey::GenerateRandomKey(
+ Algorithm algorithm,
+ size_t key_size_in_bits) {
DCHECK_EQ(AES, algorithm);
// Whitelist supported key sizes to avoid accidentaly relying on
// algorithms available in NSS but not BoringSSL and vice
// versa. Note that BoringSSL does not support AES-192.
if (key_size_in_bits != 128 && key_size_in_bits != 256)
- return NULL;
+ return nullptr;
size_t key_size_in_bytes = key_size_in_bits / 8;
DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
if (key_size_in_bytes == 0)
- return NULL;
+ return nullptr;
OpenSSLErrStackTracer err_tracer(FROM_HERE);
std::unique_ptr<SymmetricKey> key(new SymmetricKey);
@@ -45,15 +46,16 @@ SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
base::WriteInto(&key->key_, key_size_in_bytes + 1));
int rv = RAND_bytes(key_data, static_cast<int>(key_size_in_bytes));
- return rv == 1 ? key.release() : NULL;
+ return rv == 1 ? std::move(key) : nullptr;
}
// static
-SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
- const std::string& password,
- const std::string& salt,
- size_t iterations,
- size_t key_size_in_bits) {
+std::unique_ptr<SymmetricKey> SymmetricKey::DeriveKeyFromPassword(
+ Algorithm algorithm,
+ const std::string& password,
+ const std::string& salt,
+ size_t iterations,
+ size_t key_size_in_bits) {
DCHECK(algorithm == AES || algorithm == HMAC_SHA1);
if (algorithm == AES) {
@@ -61,14 +63,14 @@ SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
// algorithms available in NSS but not BoringSSL and vice
// versa. Note that BoringSSL does not support AES-192.
if (key_size_in_bits != 128 && key_size_in_bits != 256)
- return NULL;
+ return nullptr;
}
size_t key_size_in_bytes = key_size_in_bits / 8;
DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
if (key_size_in_bytes == 0)
- return NULL;
+ return nullptr;
OpenSSLErrStackTracer err_tracer(FROM_HERE);
std::unique_ptr<SymmetricKey> key(new SymmetricKey);
@@ -79,23 +81,23 @@ SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
reinterpret_cast<const uint8_t*>(salt.data()), salt.length(),
static_cast<unsigned>(iterations),
key_size_in_bytes, key_data);
- return rv == 1 ? key.release() : NULL;
+ return rv == 1 ? std::move(key) : nullptr;
}
// static
-SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
- const std::string& raw_key) {
+std::unique_ptr<SymmetricKey> SymmetricKey::Import(Algorithm algorithm,
+ const std::string& raw_key) {
if (algorithm == AES) {
// Whitelist supported key sizes to avoid accidentaly relying on
// algorithms available in NSS but not BoringSSL and vice
// versa. Note that BoringSSL does not support AES-192.
if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
- return NULL;
+ return nullptr;
}
std::unique_ptr<SymmetricKey> key(new SymmetricKey);
key->key_ = raw_key;
- return key.release();
+ return key;
}
bool SymmetricKey::GetRawKey(std::string* raw_key) {
@@ -103,4 +105,6 @@ bool SymmetricKey::GetRawKey(std::string* raw_key) {
return true;
}
+SymmetricKey::SymmetricKey() = default;
+
} // namespace crypto
diff --git a/crypto/symmetric_key.h b/crypto/symmetric_key.h
index 14f74aeb74..88627084c6 100644
--- a/crypto/symmetric_key.h
+++ b/crypto/symmetric_key.h
@@ -7,6 +7,7 @@
#include <stddef.h>
+#include <memory>
#include <string>
#include "base/macros.h"
@@ -40,26 +41,28 @@ class CRYPTO_EXPORT SymmetricKey {
// Generates a random key suitable to be used with |algorithm| and of
// |key_size_in_bits| bits. |key_size_in_bits| must be a multiple of 8.
// The caller is responsible for deleting the returned SymmetricKey.
- static SymmetricKey* GenerateRandomKey(Algorithm algorithm,
- size_t key_size_in_bits);
+ static std::unique_ptr<SymmetricKey> GenerateRandomKey(
+ Algorithm algorithm,
+ size_t key_size_in_bits);
// Derives a key from the supplied password and salt using PBKDF2, suitable
// for use with specified |algorithm|. Note |algorithm| is not the algorithm
// used to derive the key from the password. |key_size_in_bits| must be a
// multiple of 8. The caller is responsible for deleting the returned
// SymmetricKey.
- static SymmetricKey* DeriveKeyFromPassword(Algorithm algorithm,
- const std::string& password,
- const std::string& salt,
- size_t iterations,
- size_t key_size_in_bits);
+ static std::unique_ptr<SymmetricKey> DeriveKeyFromPassword(
+ Algorithm algorithm,
+ const std::string& password,
+ const std::string& salt,
+ size_t iterations,
+ size_t key_size_in_bits);
// Imports an array of key bytes in |raw_key|. This key may have been
// generated by GenerateRandomKey or DeriveKeyFromPassword and exported with
// GetRawKey, or via another compatible method. The key must be of suitable
// size for use with |algorithm|. The caller owns the returned SymmetricKey.
- static SymmetricKey* Import(Algorithm algorithm, const std::string& raw_key);
-
+ static std::unique_ptr<SymmetricKey> Import(Algorithm algorithm,
+ const std::string& raw_key);
#if defined(NACL_WIN64)
HCRYPTKEY key() const { return key_.get(); }
#elif defined(USE_OPENSSL)
diff --git a/crypto/symmetric_key_unittest.cc b/crypto/symmetric_key_unittest.cc
index 7cd47cd73c..d954761d75 100644
--- a/crypto/symmetric_key_unittest.cc
+++ b/crypto/symmetric_key_unittest.cc
@@ -14,7 +14,7 @@
TEST(SymmetricKeyTest, GenerateRandomKey) {
std::unique_ptr<crypto::SymmetricKey> key(
crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
- ASSERT_TRUE(NULL != key.get());
+ ASSERT_TRUE(key);
std::string raw_key;
EXPECT_TRUE(key->GetRawKey(&raw_key));
EXPECT_EQ(32U, raw_key.size());
@@ -23,7 +23,7 @@ TEST(SymmetricKeyTest, GenerateRandomKey) {
// (Note: this has a one-in-10^77 chance of failure!)
std::unique_ptr<crypto::SymmetricKey> key2(
crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
- ASSERT_TRUE(NULL != key2.get());
+ ASSERT_TRUE(key2);
std::string raw_key2;
EXPECT_TRUE(key2->GetRawKey(&raw_key2));
EXPECT_EQ(32U, raw_key2.size());
@@ -33,13 +33,13 @@ TEST(SymmetricKeyTest, GenerateRandomKey) {
TEST(SymmetricKeyTest, ImportGeneratedKey) {
std::unique_ptr<crypto::SymmetricKey> key1(
crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
- ASSERT_TRUE(NULL != key1.get());
+ ASSERT_TRUE(key1);
std::string raw_key1;
EXPECT_TRUE(key1->GetRawKey(&raw_key1));
std::unique_ptr<crypto::SymmetricKey> key2(
crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, raw_key1));
- ASSERT_TRUE(NULL != key2.get());
+ ASSERT_TRUE(key2);
std::string raw_key2;
EXPECT_TRUE(key2->GetRawKey(&raw_key2));
@@ -51,13 +51,13 @@ TEST(SymmetricKeyTest, ImportDerivedKey) {
std::unique_ptr<crypto::SymmetricKey> key1(
crypto::SymmetricKey::DeriveKeyFromPassword(
crypto::SymmetricKey::HMAC_SHA1, "password", "somesalt", 1024, 160));
- ASSERT_TRUE(NULL != key1.get());
+ ASSERT_TRUE(key1);
std::string raw_key1;
EXPECT_TRUE(key1->GetRawKey(&raw_key1));
std::unique_ptr<crypto::SymmetricKey> key2(
crypto::SymmetricKey::Import(crypto::SymmetricKey::HMAC_SHA1, raw_key1));
- ASSERT_TRUE(NULL != key2.get());
+ ASSERT_TRUE(key2);
std::string raw_key2;
EXPECT_TRUE(key2->GetRawKey(&raw_key2));
@@ -80,20 +80,11 @@ class SymmetricKeyDeriveKeyFromPasswordTest
TEST_P(SymmetricKeyDeriveKeyFromPasswordTest, DeriveKeyFromPassword) {
PBKDF2TestVector test_data(GetParam());
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The OS X crypto libraries have minimum salt and iteration requirements
- // so some of the tests below will cause them to barf. Skip these.
- if (strlen(test_data.salt) < 8 || test_data.rounds < 1000) {
- VLOG(1) << "Skipped test vector for " << test_data.expected;
- return;
- }
-#endif // OS_MACOSX
-
std::unique_ptr<crypto::SymmetricKey> key(
crypto::SymmetricKey::DeriveKeyFromPassword(
test_data.algorithm, test_data.password, test_data.salt,
test_data.rounds, test_data.key_size_in_bits));
- ASSERT_TRUE(NULL != key.get());
+ ASSERT_TRUE(key);
std::string raw_key;
key->GetRawKey(&raw_key);
diff --git a/crypto/wincrypt_shim.h b/crypto/wincrypt_shim.h
index 799ac49fee..48d4b5c5fa 100644
--- a/crypto/wincrypt_shim.h
+++ b/crypto/wincrypt_shim.h
@@ -22,4 +22,4 @@
#define WINCRYPT_X509_EXTENSIONS ((LPCSTR) 5)
#define WINCRYPT_X509_NAME ((LPCSTR) 7)
-#endif // NET_CRYPTO_WINCRYPT_SHIM_H_ \ No newline at end of file
+#endif // NET_CRYPTO_WINCRYPT_SHIM_H_
diff --git a/dbus/BUILD.gn b/dbus/BUILD.gn
index 1502ca7536..28efb93fe4 100644
--- a/dbus/BUILD.gn
+++ b/dbus/BUILD.gn
@@ -2,9 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/features.gni")
import("//testing/test.gni")
import("//third_party/protobuf/proto_library.gni")
+assert(use_dbus)
+
component("dbus") {
sources = [
"bus.cc",
@@ -56,7 +59,7 @@ proto_library("test_proto") {
# This target contains mocks that can be used to write unit tests without
# issuing actual D-Bus calls.
-source_set("test_support") {
+static_library("test_support") {
testonly = true
sources = [
"mock_bus.cc",
diff --git a/dbus/OWNERS b/dbus/OWNERS
index fc425e6db4..04931c39c3 100644
--- a/dbus/OWNERS
+++ b/dbus/OWNERS
@@ -1,4 +1,3 @@
hashimoto@chromium.org
-keybuk@chromium.org
satorux@chromium.org
stevenjb@chromium.org
diff --git a/dbus/bus.cc b/dbus/bus.cc
index bb5afb16da..57834d348a 100644
--- a/dbus/bus.cc
+++ b/dbus/bus.cc
@@ -13,6 +13,7 @@
#include "base/strings/stringprintf.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "dbus/exported_object.h"
#include "dbus/message.h"
@@ -184,7 +185,8 @@ Bus::Bus(const Options& options)
: bus_type_(options.bus_type),
connection_type_(options.connection_type),
dbus_task_runner_(options.dbus_task_runner),
- on_shutdown_(false /* manual_reset */, false /* initially_signaled */),
+ on_shutdown_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
connection_(NULL),
origin_thread_id_(base::PlatformThread::CurrentId()),
async_operations_set_up_(false),
@@ -196,8 +198,8 @@ Bus::Bus(const Options& options)
dbus_threads_init_default();
// The origin message loop is unnecessary if the client uses synchronous
// functions only.
- if (base::MessageLoop::current())
- origin_task_runner_ = base::MessageLoop::current()->task_runner();
+ if (base::ThreadTaskRunnerHandle::IsSet())
+ origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
Bus::~Bus() {
diff --git a/dbus/file_descriptor.h b/dbus/file_descriptor.h
index 8fcab2f440..f8e86777ea 100644
--- a/dbus/file_descriptor.h
+++ b/dbus/file_descriptor.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "base/move.h"
+#include "base/macros.h"
#include "dbus/dbus_export.h"
namespace dbus {
@@ -34,8 +34,6 @@ namespace dbus {
// also allows the caller to do this work on the File thread to conform
// with i/o restrictions.
class CHROME_DBUS_EXPORT FileDescriptor {
- MOVE_ONLY_TYPE_FOR_CPP_03(FileDescriptor);
-
public:
// This provides a simple way to pass around file descriptors since they must
// be closed on a thread that is allowed to perform I/O.
@@ -82,6 +80,8 @@ class CHROME_DBUS_EXPORT FileDescriptor {
int value_;
bool owner_;
bool valid_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
};
using ScopedFileDescriptor =
diff --git a/dbus/values_util.cc b/dbus/values_util.cc
index ed435a1936..bea7bea746 100644
--- a/dbus/values_util.cc
+++ b/dbus/values_util.cc
@@ -4,10 +4,11 @@
#include "dbus/values_util.h"
-#include <memory>
+#include <utility>
#include "base/json/json_writer.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/values.h"
#include "dbus/message.h"
@@ -24,10 +25,10 @@ bool IsExactlyRepresentableByDouble(T value) {
// Pops values from |reader| and appends them to |list_value|.
bool PopListElements(MessageReader* reader, base::ListValue* list_value) {
while (reader->HasMoreData()) {
- base::Value* element_value = PopDataAsValue(reader);
+ std::unique_ptr<base::Value> element_value = PopDataAsValue(reader);
if (!element_value)
return false;
- list_value->Append(element_value);
+ list_value->Append(std::move(element_value));
}
return true;
}
@@ -55,10 +56,10 @@ bool PopDictionaryEntries(MessageReader* reader,
base::JSONWriter::Write(*key, &key_string);
}
// Get the value and set the key-value pair.
- base::Value* value = PopDataAsValue(&entry_reader);
+ std::unique_ptr<base::Value> value = PopDataAsValue(&entry_reader);
if (!value)
return false;
- dictionary_value->SetWithoutPathExpansion(key_string, value);
+ dictionary_value->SetWithoutPathExpansion(key_string, std::move(value));
}
return true;
}
@@ -88,8 +89,8 @@ std::string GetTypeSignature(const base::Value& value) {
} // namespace
-base::Value* PopDataAsValue(MessageReader* reader) {
- base::Value* result = NULL;
+std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
+ std::unique_ptr<base::Value> result;
switch (reader->GetDataType()) {
case Message::INVALID_DATA:
// Do nothing.
@@ -97,37 +98,39 @@ base::Value* PopDataAsValue(MessageReader* reader) {
case Message::BYTE: {
uint8_t value = 0;
if (reader->PopByte(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::BOOL: {
bool value = false;
if (reader->PopBool(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::INT16: {
int16_t value = 0;
if (reader->PopInt16(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::UINT16: {
uint16_t value = 0;
if (reader->PopUint16(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::INT32: {
int32_t value = 0;
if (reader->PopInt32(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::UINT32: {
uint32_t value = 0;
- if (reader->PopUint32(&value))
- result = new base::FundamentalValue(static_cast<double>(value));
+ if (reader->PopUint32(&value)) {
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
+ }
break;
}
case Message::INT64: {
@@ -135,7 +138,8 @@ base::Value* PopDataAsValue(MessageReader* reader) {
if (reader->PopInt64(&value)) {
DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
value << " is not exactly representable by double";
- result = new base::FundamentalValue(static_cast<double>(value));
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
}
break;
}
@@ -144,26 +148,27 @@ base::Value* PopDataAsValue(MessageReader* reader) {
if (reader->PopUint64(&value)) {
DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
value << " is not exactly representable by double";
- result = new base::FundamentalValue(static_cast<double>(value));
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
}
break;
}
case Message::DOUBLE: {
double value = 0;
if (reader->PopDouble(&value))
- result = new base::FundamentalValue(value);
+ result = base::MakeUnique<base::FundamentalValue>(value);
break;
}
case Message::STRING: {
std::string value;
if (reader->PopString(&value))
- result = new base::StringValue(value);
+ result = base::MakeUnique<base::StringValue>(value);
break;
}
case Message::OBJECT_PATH: {
ObjectPath value;
if (reader->PopObjectPath(&value))
- result = new base::StringValue(value.value());
+ result = base::MakeUnique<base::StringValue>(value.value());
break;
}
case Message::UNIX_FD: {
@@ -180,11 +185,11 @@ base::Value* PopDataAsValue(MessageReader* reader) {
std::unique_ptr<base::DictionaryValue> dictionary_value(
new base::DictionaryValue);
if (PopDictionaryEntries(&sub_reader, dictionary_value.get()))
- result = dictionary_value.release();
+ result = std::move(dictionary_value);
} else {
std::unique_ptr<base::ListValue> list_value(new base::ListValue);
if (PopListElements(&sub_reader, list_value.get()))
- result = list_value.release();
+ result = std::move(list_value);
}
}
break;
@@ -194,7 +199,7 @@ base::Value* PopDataAsValue(MessageReader* reader) {
if (reader->PopStruct(&sub_reader)) {
std::unique_ptr<base::ListValue> list_value(new base::ListValue);
if (PopListElements(&sub_reader, list_value.get()))
- result = list_value.release();
+ result = std::move(list_value);
}
break;
}
@@ -279,9 +284,7 @@ void AppendValueData(MessageWriter* writer, const base::Value& value) {
value.GetAsList(&list);
dbus::MessageWriter array_writer(NULL);
writer->OpenArray("v", &array_writer);
- for (base::ListValue::const_iterator iter = list->begin();
- iter != list->end(); ++iter) {
- const base::Value* value = *iter;
+ for (const auto& value : *list) {
AppendValueDataAsVariant(&array_writer, *value);
}
writer->CloseContainer(&array_writer);
diff --git a/dbus/values_util.h b/dbus/values_util.h
index b6f4ff3b10..81b839b188 100644
--- a/dbus/values_util.h
+++ b/dbus/values_util.h
@@ -7,6 +7,8 @@
#include <stdint.h>
+#include <memory>
+
#include "dbus/dbus_export.h"
namespace base {
@@ -22,7 +24,8 @@ class MessageWriter;
// Returns NULL if an error occurs.
// Note: Integer values larger than int32_t (including uint32_t) are converted
// to double. Non-string dictionary keys are converted to strings.
-CHROME_DBUS_EXPORT base::Value* PopDataAsValue(MessageReader* reader);
+CHROME_DBUS_EXPORT std::unique_ptr<base::Value> PopDataAsValue(
+ MessageReader* reader);
// Appends a basic type value to |writer|. Basic types are BOOLEAN, INTEGER,
// DOUBLE, and STRING. Use this function for values that are known to be basic
diff --git a/sandbox/linux/BUILD.gn b/sandbox/linux/BUILD.gn
index e95303ec13..a5c041fad0 100644
--- a/sandbox/linux/BUILD.gn
+++ b/sandbox/linux/BUILD.gn
@@ -305,9 +305,24 @@ if (is_linux) {
"-Wno-sign-compare",
]
- deps = [
- "//build/config/sanitizers:deps",
- ]
+ import("//build/config/compiler/compiler.gni")
+ import("//build/config/sanitizers/sanitizers.gni")
+ if (is_component_build || using_sanitizer) {
+ # WARNING! We remove this config so that we don't accidentally
+ # pick up the //build/config:rpath_for_built_shared_libraries
+ # sub-config. However, this means that we need to duplicate any
+ # other flags that executable_config might have.
+ configs -= [ "//build/config:executable_config" ]
+ if (!use_gold) {
+ ldflags = [ "-Wl,--disable-new-dtags" ]
+ }
+ }
+
+ # We also do not want to pick up any of the other sanitizer
+ # flags (i.e. we do not want to build w/ the sanitizers at all).
+ # This is safe to delete unconditionally, because it is part of the
+ # default configs and empty when not using the sanitizers.
+ configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
}
}
diff --git a/sandbox/linux/bpf_dsl/codegen.cc b/sandbox/linux/bpf_dsl/codegen.cc
index 647f55aa8a..d88bd531a2 100644
--- a/sandbox/linux/bpf_dsl/codegen.cc
+++ b/sandbox/linux/bpf_dsl/codegen.cc
@@ -144,18 +144,4 @@ size_t CodeGen::Offset(Node target) const {
return (program_.size() - 1) - target;
}
-// TODO(mdempsky): Move into a general base::Tuple helper library.
-bool CodeGen::MemoKeyLess::operator()(const MemoKey& lhs,
- const MemoKey& rhs) const {
- if (base::get<0>(lhs) != base::get<0>(rhs))
- return base::get<0>(lhs) < base::get<0>(rhs);
- if (base::get<1>(lhs) != base::get<1>(rhs))
- return base::get<1>(lhs) < base::get<1>(rhs);
- if (base::get<2>(lhs) != base::get<2>(rhs))
- return base::get<2>(lhs) < base::get<2>(rhs);
- if (base::get<3>(lhs) != base::get<3>(rhs))
- return base::get<3>(lhs) < base::get<3>(rhs);
- return false;
-}
-
} // namespace sandbox
diff --git a/sandbox/linux/bpf_dsl/codegen.h b/sandbox/linux/bpf_dsl/codegen.h
index 03c3b236ef..3fc3f35a0d 100644
--- a/sandbox/linux/bpf_dsl/codegen.h
+++ b/sandbox/linux/bpf_dsl/codegen.h
@@ -9,10 +9,10 @@
#include <stdint.h>
#include <map>
+#include <tuple>
#include <vector>
#include "base/macros.h"
-#include "base/tuple.h"
#include "sandbox/sandbox_export.h"
struct sock_filter;
@@ -80,10 +80,7 @@ class SANDBOX_EXPORT CodeGen {
Program Compile(Node head);
private:
- using MemoKey = base::Tuple<uint16_t, uint32_t, Node, Node>;
- struct MemoKeyLess {
- bool operator()(const MemoKey& lhs, const MemoKey& rhs) const;
- };
+ using MemoKey = std::tuple<uint16_t, uint32_t, Node, Node>;
// AppendInstruction adds a new instruction, ensuring that |jt| and
// |jf| are within range as necessary for |code|.
@@ -112,7 +109,7 @@ class SANDBOX_EXPORT CodeGen {
// if it's an unconditional jump to a node semantically-equivalent to N.
std::vector<Node> equivalent_;
- std::map<MemoKey, Node, MemoKeyLess> memos_;
+ std::map<MemoKey, Node> memos_;
DISALLOW_COPY_AND_ASSIGN(CodeGen);
};
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index fbbd6349b6..56c4cb387d 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -47,9 +47,8 @@
#define PR_SET_VMA 0x53564d41
#endif
-// https://android.googlesource.com/platform/system/core/+/lollipop-release/libcutils/sched_policy.c
-#if !defined(PR_SET_TIMERSLACK_PID)
-#define PR_SET_TIMERSLACK_PID 41
+#ifndef PR_SET_PTRACER
+#define PR_SET_PTRACER 0x59616d61
#endif
#endif // defined(OS_ANDROID)
@@ -154,9 +153,35 @@ ResultExpr RestrictPrctl() {
return Switch(option)
.CASES((PR_GET_NAME, PR_SET_NAME, PR_GET_DUMPABLE, PR_SET_DUMPABLE
#if defined(OS_ANDROID)
- ,
- PR_SET_VMA, PR_SET_TIMERSLACK_PID
-#endif
+ , PR_SET_VMA, PR_SET_PTRACER
+
+// Enable PR_SET_TIMERSLACK_PID, an Android custom prctl which is used in:
+// https://android.googlesource.com/platform/system/core/+/lollipop-release/libcutils/sched_policy.c.
+// Depending on the Android kernel version, this prctl may have different
+// values. Since we don't know the correct value for the running kernel, we must
+// allow them all.
+//
+// The effect is:
+// On 3.14 kernels, this allows PR_SET_TIMERSLACK_PID and 43 and 127 (invalid
+// prctls which will return EINVAL)
+// On 3.18 kernels, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE, and
+// 127 (invalid).
+// On 4.1 kernels and up, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE,
+// and PR_MPX_ENABLE_MANAGEMENT.
+
+// https://android.googlesource.com/kernel/common/+/android-3.14/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_1 41
+
+// https://android.googlesource.com/kernel/common/+/android-3.18/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_2 43
+
+// https://android.googlesource.com/kernel/common/+/android-4.1/include/uapi/linux/prctl.h and up
+#define PR_SET_TIMERSLACK_PID_3 127
+
+ , PR_SET_TIMERSLACK_PID_1
+ , PR_SET_TIMERSLACK_PID_2
+ , PR_SET_TIMERSLACK_PID_3
+#endif // defined(OS_ANDROID)
),
Allow())
.Default(CrashSIGSYSPrctl());
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
index 9daeedc901..804a8fea1e 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -157,7 +157,9 @@ void SchedGetParamThread(base::WaitableEvent* thread_run) {
BPF_TEST_C(ParameterRestrictions,
sched_getparam_allowed,
RestrictSchedPolicy) {
- base::WaitableEvent thread_run(true, false);
+ base::WaitableEvent thread_run(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
// Run the actual test in a new thread so that the current pid and tid are
// different.
base::Thread getparam_thread("sched_getparam_thread");
diff --git a/sandbox/win/BUILD.gn b/sandbox/win/BUILD.gn
index 89eaaed703..60bb499af3 100644
--- a/sandbox/win/BUILD.gn
+++ b/sandbox/win/BUILD.gn
@@ -205,11 +205,14 @@ test("sbox_integration_tests") {
"tests/common/test_utils.cc",
"tests/common/test_utils.h",
"tests/integration_tests/integration_tests.cc",
+ "tests/integration_tests/integration_tests_common.h",
"tests/integration_tests/integration_tests_test.cc",
]
deps = [
":sandbox",
+ ":sbox_integration_test_hook_dll",
+ ":sbox_integration_test_win_proc",
"//base/test:test_support",
"//testing/gtest",
]
@@ -217,6 +220,23 @@ test("sbox_integration_tests") {
libs = [ "dxva2.lib" ]
}
+loadable_module("sbox_integration_test_hook_dll") {
+ sources = [
+ "tests/integration_tests/hooking_dll.cc",
+ "tests/integration_tests/integration_tests_common.h",
+ ]
+}
+
+executable("sbox_integration_test_win_proc") {
+ sources = [
+ "tests/integration_tests/hooking_win_proc.cc",
+ "tests/integration_tests/integration_tests_common.h",
+ ]
+
+ configs -= [ "//build/config/win:console" ]
+ configs += [ "//build/config/win:windowed" ]
+}
+
test("sbox_validation_tests") {
sources = [
"tests/common/controller.cc",
diff --git a/sandbox/win/sandbox_win.gypi b/sandbox/win/sandbox_win.gypi
index 8ac9e5909a..e9673aa9a1 100644
--- a/sandbox/win/sandbox_win.gypi
+++ b/sandbox/win/sandbox_win.gypi
@@ -197,6 +197,8 @@
'type': 'executable',
'dependencies': [
'sandbox',
+ 'sbox_integration_test_hook_dll',
+ 'sbox_integration_test_win_proc',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
@@ -224,6 +226,7 @@
'tests/common/test_utils.cc',
'tests/common/test_utils.h',
'tests/integration_tests/integration_tests.cc',
+ 'tests/integration_tests/integration_tests_common.h',
],
'link_settings': {
'libraries': [
@@ -232,6 +235,31 @@
},
},
{
+ 'target_name': 'sbox_integration_test_hook_dll',
+ 'type': 'shared_library',
+ 'dependencies': [
+ ],
+ 'sources': [
+ 'tests/integration_tests/hooking_dll.cc',
+ 'tests/integration_tests/integration_tests_common.h',
+ ],
+ },
+ {
+ 'target_name': 'sbox_integration_test_win_proc',
+ 'type': 'executable',
+ 'dependencies': [
+ ],
+ 'sources': [
+ 'tests/integration_tests/hooking_win_proc.cc',
+ 'tests/integration_tests/integration_tests_common.h',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ },
+ },
+ },
+ {
'target_name': 'sbox_validation_tests',
'type': 'executable',
'dependencies': [
diff --git a/sandbox/win/src/nt_internals.h b/sandbox/win/src/nt_internals.h
index a206e94d15..6469c2bf34 100644
--- a/sandbox/win/src/nt_internals.h
+++ b/sandbox/win/src/nt_internals.h
@@ -333,18 +333,18 @@ typedef struct _PROCESS_BASIC_INFORMATION {
};
} PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION;
-typedef NTSTATUS (WINAPI *NtQueryInformationProcessFunction)(
- IN HANDLE ProcessHandle,
- IN PROCESSINFOCLASS ProcessInformationClass,
- OUT PVOID ProcessInformation,
- IN ULONG ProcessInformationLength,
- OUT PULONG ReturnLength OPTIONAL);
-
-typedef NTSTATUS (WINAPI *NtSetInformationProcessFunction)(
- HANDLE ProcessHandle,
- IN PROCESSINFOCLASS ProcessInformationClass,
- IN PVOID ProcessInformation,
- IN ULONG ProcessInformationLength);
+typedef NTSTATUS(WINAPI* NtQueryInformationProcessFunction)(
+ IN HANDLE ProcessHandle,
+ IN PROCESSINFOCLASS ProcessInformationClass,
+ OUT PVOID ProcessInformation,
+ IN ULONG ProcessInformationLength,
+ OUT PULONG ReturnLength OPTIONAL);
+
+typedef NTSTATUS(WINAPI* NtSetInformationProcessFunction)(
+ HANDLE ProcessHandle,
+ IN PROCESSINFOCLASS ProcessInformationClass,
+ IN PVOID ProcessInformation,
+ IN ULONG ProcessInformationLength);
typedef NTSTATUS (WINAPI *NtOpenThreadTokenFunction) (
IN HANDLE ThreadHandle,
@@ -370,21 +370,50 @@ typedef NTSTATUS (WINAPI *NtOpenProcessTokenExFunction) (
IN ULONG HandleAttributes,
OUT PHANDLE TokenHandle);
-typedef NTSTATUS (WINAPI * RtlCreateUserThreadFunction)(
- IN HANDLE Process,
- IN PSECURITY_DESCRIPTOR ThreadSecurityDescriptor,
- IN BOOLEAN CreateSuspended,
- IN ULONG ZeroBits,
- IN SIZE_T MaximumStackSize,
- IN SIZE_T CommittedStackSize,
- IN LPTHREAD_START_ROUTINE StartAddress,
- IN PVOID Parameter,
- OUT PHANDLE Thread,
- OUT PCLIENT_ID ClientId);
+typedef NTSTATUS(WINAPI* NtQueryInformationTokenFunction)(
+ IN HANDLE TokenHandle,
+ IN TOKEN_INFORMATION_CLASS TokenInformationClass,
+ OUT PVOID TokenInformation,
+ IN ULONG TokenInformationLength,
+ OUT PULONG ReturnLength);
+
+typedef NTSTATUS(WINAPI* RtlCreateUserThreadFunction)(
+ IN HANDLE Process,
+ IN PSECURITY_DESCRIPTOR ThreadSecurityDescriptor,
+ IN BOOLEAN CreateSuspended,
+ IN ULONG ZeroBits,
+ IN SIZE_T MaximumStackSize,
+ IN SIZE_T CommittedStackSize,
+ IN LPTHREAD_START_ROUTINE StartAddress,
+ IN PVOID Parameter,
+ OUT PHANDLE Thread,
+ OUT PCLIENT_ID ClientId);
+
+typedef NTSTATUS(WINAPI* RtlConvertSidToUnicodeStringFunction)(
+ OUT PUNICODE_STRING UnicodeString,
+ IN PSID Sid,
+ IN BOOLEAN AllocateDestinationString);
+
+typedef VOID(WINAPI* RtlFreeUnicodeStringFunction)(
+ IN OUT PUNICODE_STRING UnicodeString);
// -----------------------------------------------------------------------
// Registry
+typedef enum _KEY_VALUE_INFORMATION_CLASS {
+ KeyValueFullInformation = 1
+} KEY_VALUE_INFORMATION_CLASS,
+ *PKEY_VALUE_INFORMATION_CLASS;
+
+typedef struct _KEY_VALUE_FULL_INFORMATION {
+ ULONG TitleIndex;
+ ULONG Type;
+ ULONG DataOffset;
+ ULONG DataLength;
+ ULONG NameLength;
+ WCHAR Name[1];
+} KEY_VALUE_FULL_INFORMATION, *PKEY_VALUE_FULL_INFORMATION;
+
typedef NTSTATUS (WINAPI *NtCreateKeyFunction)(
OUT PHANDLE KeyHandle,
IN ACCESS_MASK DesiredAccess,
@@ -408,6 +437,24 @@ typedef NTSTATUS (WINAPI *NtOpenKeyExFunction)(
typedef NTSTATUS (WINAPI *NtDeleteKeyFunction)(
IN HANDLE KeyHandle);
+typedef NTSTATUS(WINAPI* RtlFormatCurrentUserKeyPathFunction)(
+ OUT PUNICODE_STRING RegistryPath);
+
+typedef NTSTATUS(WINAPI* NtQueryValueKeyFunction)(IN HANDLE KeyHandle,
+ IN PUNICODE_STRING ValueName,
+ IN KEY_VALUE_INFORMATION_CLASS
+ KeyValueInformationClass,
+ OUT PVOID KeyValueInformation,
+ IN ULONG Length,
+ OUT PULONG ResultLength);
+
+typedef NTSTATUS(WINAPI* NtSetValueKeyFunction)(IN HANDLE KeyHandle,
+ IN PUNICODE_STRING ValueName,
+ IN ULONG TitleIndex OPTIONAL,
+ IN ULONG Type,
+ IN PVOID Data,
+ IN ULONG DataSize);
+
// -----------------------------------------------------------------------
// Memory
diff --git a/sandbox/win/src/security_level.h b/sandbox/win/src/security_level.h
index 87abdebad5..d8524c1fac 100644
--- a/sandbox/win/src/security_level.h
+++ b/sandbox/win/src/security_level.h
@@ -187,10 +187,14 @@ const MitigationFlags MITIGATION_STRICT_HANDLE_CHECKS = 0x00000100;
// PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
const MitigationFlags MITIGATION_WIN32K_DISABLE = 0x00000200;
-// Disables common DLL injection methods (e.g. window hooks and
-// App_InitDLLs). Corresponds to
+// Prevents certain built-in third party extension points from being used.
+// - App_Init DLLs
+// - Winsock Layered Service Providers (LSPs)
+// - Global Windows Hooks (NOT thread-targeted hooks)
+// - Legacy Input Method Editors (IMEs).
+// I.e.: Disable legacy hooking mechanisms. Corresponds to
// PROCESS_CREATION_MITIGATION_POLICY_EXTENSION_POINT_DISABLE_ALWAYS_ON.
-const MitigationFlags MITIGATION_EXTENSION_DLL_DISABLE = 0x00000400;
+const MitigationFlags MITIGATION_EXTENSION_POINT_DISABLE = 0x00000400;
// Prevents the process from loading non-system fonts into GDI.
// Corresponds to
diff --git a/testing/multiprocess_func_list.cc b/testing/multiprocess_func_list.cc
index 49ae07dd3e..f96c2b5079 100644
--- a/testing/multiprocess_func_list.cc
+++ b/testing/multiprocess_func_list.cc
@@ -40,7 +40,7 @@ AppendMultiProcessTest::AppendMultiProcessTest(
ProcessFunctions(main_func_ptr, setup_func_ptr);
}
-int InvokeChildProcessTest(std::string test_name) {
+int InvokeChildProcessTest(const std::string& test_name) {
MultiProcessTestMap& func_lookup_table = GetMultiprocessFuncMap();
MultiProcessTestMap::iterator it = func_lookup_table.find(test_name);
if (it != func_lookup_table.end()) {
diff --git a/testing/multiprocess_func_list.h b/testing/multiprocess_func_list.h
index f806d53c93..c3d2f1f733 100644
--- a/testing/multiprocess_func_list.h
+++ b/testing/multiprocess_func_list.h
@@ -47,7 +47,7 @@ class AppendMultiProcessTest {
// Invoke the main function of a test previously registered with
// MULTIPROCESS_TEST_MAIN()
-int InvokeChildProcessTest(std::string test_name);
+int InvokeChildProcessTest(const std::string& test_name);
// This macro creates a global MultiProcessTest::AppendMultiProcessTest object
// whose constructor does the work of adding the global mapping.