author    Jakub Pawlowski <jpawlowski@google.com>  2017-03-14 10:55:53 -0700
committer Jakub Pawlowski <jpawlowski@google.com>  2017-12-19 07:47:37 -0800
commit    bf8c17f71511c1e90cd8cccfe71f0852c566bd3b (patch)
tree      b5603968c3907c8b41594f2a2b0f15475406f94d
parent    9b3f4a254c327005c91c1eb094062cdefe5f0553 (diff)
download  libchrome-bf8c17f71511c1e90cd8cccfe71f0852c566bd3b.tar.gz
Uprev the library to r462023 from Chromium
This merge was done against r462023, which corresponds to git commit
32eb7c31af9cab6231f0d3d05206072079177605 from Apr 05, 2017.

Change-Id: I70bf77fcb3215da3e17f997752bfdad7d4e6e1c9
-rw-r--r--  Android.bp | 2
-rw-r--r--  base/BUILD.gn | 33
-rw-r--r--  base/allocator/BUILD.gn | 11
-rw-r--r--  base/allocator/allocator_shim.cc | 6
-rw-r--r--  base/allocator/allocator_shim_internals.h | 21
-rw-r--r--  base/at_exit.cc | 4
-rw-r--r--  base/base.isolate | 60
-rw-r--r--  base/bind_unittest.cc | 38
-rw-r--r--  base/callback.h | 90
-rw-r--r--  base/callback_helpers.h | 4
-rw-r--r--  base/callback_internal.cc | 21
-rw-r--r--  base/callback_internal.h | 39
-rw-r--r--  base/callback_unittest.cc | 42
-rw-r--r--  base/command_line.cc | 15
-rw-r--r--  base/command_line_unittest.cc | 31
-rw-r--r--  base/containers/container_test_utils.h | 39
-rw-r--r--  base/containers/mru_cache.h | 2
-rw-r--r--  base/critical_closure.h | 15
-rw-r--r--  base/debug/activity_tracker.cc | 781
-rw-r--r--  base/debug/activity_tracker.h | 318
-rw-r--r--  base/debug/activity_tracker_unittest.cc | 188
-rw-r--r--  base/debug/stack_trace.cc | 19
-rw-r--r--  base/debug/stack_trace.h | 22
-rw-r--r--  base/environment.cc | 2
-rw-r--r--  base/feature_list.cc | 10
-rw-r--r--  base/feature_list.h | 7
-rw-r--r--  base/feature_list_unittest.cc | 3
-rw-r--r--  base/files/file_path.cc | 2
-rw-r--r--  base/files/file_path.h | 2
-rw-r--r--  base/files/file_util_mac.mm | 12
-rw-r--r--  base/mac/mach_port_broker_unittest.cc | 17
-rw-r--r--  base/memory/ref_counted.cc | 31
-rw-r--r--  base/memory/ref_counted.h | 201
-rw-r--r--  base/memory/ref_counted_unittest.cc | 38
-rw-r--r--  base/memory/ref_counted_unittest.nc | 25
-rw-r--r--  base/memory/shared_memory_mac_unittest.cc | 8
-rw-r--r--  base/memory/shared_memory_unittest.cc | 8
-rw-r--r--  base/memory/singleton_objc.h | 60
-rw-r--r--  base/message_loop/incoming_task_queue.cc | 8
-rw-r--r--  base/message_loop/incoming_task_queue.h | 3
-rw-r--r--  base/message_loop/message_loop.h | 2
-rw-r--r--  base/message_loop/message_loop_task_runner.cc | 12
-rw-r--r--  base/message_loop/message_loop_task_runner.h | 5
-rw-r--r--  base/message_loop/message_loop_task_runner_unittest.cc | 11
-rw-r--r--  base/message_loop/message_loop_unittest.cc | 31
-rw-r--r--  base/metrics/histogram_macros.h | 14
-rw-r--r--  base/metrics/histogram_macros_internal.h | 61
-rw-r--r--  base/metrics/histogram_macros_unittest.cc | 31
-rw-r--r--  base/metrics/persistent_histogram_allocator.cc | 20
-rw-r--r--  base/metrics/persistent_histogram_allocator.h | 9
-rw-r--r--  base/metrics/persistent_memory_allocator.cc | 96
-rw-r--r--  base/metrics/persistent_memory_allocator.h | 47
-rw-r--r--  base/metrics/persistent_memory_allocator_unittest.cc | 16
-rw-r--r--  base/native_library.h | 10
-rw-r--r--  base/post_task_and_reply_with_result_internal.h | 9
-rw-r--r--  base/process/launch.h | 5
-rw-r--r--  base/process/launch_posix.cc | 8
-rw-r--r--  base/process/process_info_unittest.cc | 20
-rw-r--r--  base/process/process_metrics.cc | 5
-rw-r--r--  base/process/process_metrics.h | 93
-rw-r--r--  base/process/process_metrics_linux.cc | 64
-rw-r--r--  base/process/process_metrics_mac.cc | 166
-rw-r--r--  base/process/process_metrics_unittest.cc | 80
-rw-r--r--  base/sequence_checker_impl.cc | 4
-rw-r--r--  base/sequenced_task_runner.cc | 7
-rw-r--r--  base/sequenced_task_runner.h | 5
-rw-r--r--  base/strings/string_piece.h | 3
-rw-r--r--  base/strings/string_piece_unittest.cc | 14
-rw-r--r--  base/synchronization/waitable_event.h | 3
-rw-r--r--  base/synchronization/waitable_event_posix.cc | 63
-rw-r--r--  base/synchronization/waitable_event_unittest.cc | 38
-rw-r--r--  base/sys_info.h | 21
-rw-r--r--  base/sys_info_linux.cc | 23
-rw-r--r--  base/sys_info_mac.mm | 19
-rw-r--r--  base/sys_info_posix.cc | 24
-rw-r--r--  base/sys_info_unittest.cc | 122
-rw-r--r--  base/task_runner.cc | 14
-rw-r--r--  base/task_runner.h | 9
-rw-r--r--  base/task_scheduler/sequence.cc | 2
-rw-r--r--  base/task_scheduler/sequence_unittest.cc | 11
-rw-r--r--  base/task_scheduler/task.cc | 22
-rw-r--r--  base/task_scheduler/task.h | 4
-rw-r--r--  base/template_util.h | 61
-rw-r--r--  base/template_util_unittest.cc | 33
-rw-r--r--  base/test/BUILD.gn | 7
-rw-r--r--  base/test/multiprocess_test.cc | 10
-rw-r--r--  base/test/multiprocess_test.h | 27
-rw-r--r--  base/test/multiprocess_test_android.cc | 12
-rw-r--r--  base/test/test_mock_time_task_runner.cc | 22
-rw-r--r--  base/test/test_mock_time_task_runner.h | 5
-rw-r--r--  base/test/test_pending_task.cc | 18
-rw-r--r--  base/test/test_pending_task.h | 2
-rw-r--r--  base/test/test_simple_task_runner.cc | 18
-rw-r--r--  base/test/test_simple_task_runner.h | 5
-rw-r--r--  base/threading/post_task_and_reply_impl.cc | 26
-rw-r--r--  base/threading/post_task_and_reply_impl.h | 6
-rw-r--r--  base/threading/sequenced_worker_pool.cc | 134
-rw-r--r--  base/threading/sequenced_worker_pool.h | 18
-rw-r--r--  base/threading/worker_pool.cc | 20
-rw-r--r--  base/threading/worker_pool.h | 7
-rw-r--r--  base/threading/worker_pool_posix.cc | 19
-rw-r--r--  base/threading/worker_pool_posix.h | 5
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 5
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.cc | 80
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.h | 79
-rw-r--r--  base/trace_event/malloc_dump_provider.cc | 10
-rw-r--r--  base/trace_event/memory_allocator_dump.cc | 5
-rw-r--r--  base/trace_event/memory_allocator_dump.h | 11
-rw-r--r--  base/trace_event/memory_allocator_dump_unittest.cc | 10
-rw-r--r--  base/trace_event/memory_dump_manager.cc | 210
-rw-r--r--  base/trace_event/memory_dump_manager.h | 109
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc | 58
-rw-r--r--  base/trace_event/memory_dump_provider_info.cc | 43
-rw-r--r--  base/trace_event/memory_dump_provider_info.h | 108
-rw-r--r--  base/trace_event/memory_dump_request_args.cc | 4
-rw-r--r--  base/trace_event/memory_dump_request_args.h | 29
-rw-r--r--  base/trace_event/memory_dump_scheduler.cc | 198
-rw-r--r--  base/trace_event/memory_dump_scheduler.h | 64
-rw-r--r--  base/trace_event/memory_dump_scheduler_unittest.cc | 101
-rw-r--r--  base/trace_event/memory_infra_background_whitelist.cc | 68
-rw-r--r--  base/trace_event/memory_peak_detector.cc | 164
-rw-r--r--  base/trace_event/memory_peak_detector.h | 139
-rw-r--r--  base/trace_event/memory_peak_detector_unittest.cc | 381
-rw-r--r--  base/trace_event/trace_config.cc | 373
-rw-r--r--  base/trace_event/trace_config.h | 66
-rw-r--r--  base/trace_event/trace_config_category_filter.cc | 297
-rw-r--r--  base/trace_event/trace_config_category_filter.h | 86
-rw-r--r--  base/trace_event/trace_config_unittest.cc | 131
-rw-r--r--  base/trace_event/trace_event_unittest.cc | 37
-rw-r--r--  base/trace_event/trace_log.cc | 16
-rw-r--r--  base/values.cc | 282
-rw-r--r--  base/values.h | 30
-rw-r--r--  base/values_unittest.cc | 238
-rw-r--r--  base/win/scoped_comptr.h | 178
134 files changed, 5263 insertions, 2263 deletions
diff --git a/Android.bp b/Android.bp
index fdbcd93923..3aba3c1311 100644
--- a/Android.bp
+++ b/Android.bp
@@ -239,6 +239,7 @@ libchromeCommonSrc = [
"base/trace_event/memory_allocator_dump.cc",
"base/trace_event/memory_allocator_dump_guid.cc",
"base/trace_event/memory_dump_manager.cc",
+ "base/trace_event/memory_dump_provider_info.cc",
"base/trace_event/memory_dump_request_args.cc",
"base/trace_event/memory_dump_scheduler.cc",
"base/trace_event/memory_dump_session_state.cc",
@@ -249,6 +250,7 @@ libchromeCommonSrc = [
"base/trace_event/process_memory_totals.cc",
"base/trace_event/trace_buffer.cc",
"base/trace_event/trace_config.cc",
+ "base/trace_event/trace_config_category_filter.cc",
"base/trace_event/trace_event_argument.cc",
"base/trace_event/trace_event_filter.cc",
"base/trace_event/trace_event_impl.cc",
diff --git a/base/BUILD.gn b/base/BUILD.gn
index f84856de5c..b4a5c47a3c 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -276,7 +276,9 @@ component("base") {
"command_line.h",
"compiler_specific.h",
"containers/adapters.h",
+ "containers/flat_map.h",
"containers/flat_set.h",
+ "containers/flat_tree.h",
"containers/hash_tables.h",
"containers/linked_list.h",
"containers/mru_cache.h",
@@ -472,6 +474,7 @@ component("base") {
"mac/scoped_aedesc.h",
"mac/scoped_authorizationref.h",
"mac/scoped_block.h",
+ "mac/scoped_cffiledescriptorref.h",
"mac/scoped_cftyperef.h",
"mac/scoped_dispatch_object.h",
"mac/scoped_ionotificationportref.h",
@@ -854,6 +857,7 @@ component("base") {
"task_scheduler/scheduler_single_thread_task_runner_manager.h",
"task_scheduler/scheduler_worker.cc",
"task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_params.h",
"task_scheduler/scheduler_worker_pool.h",
"task_scheduler/scheduler_worker_pool_impl.cc",
"task_scheduler/scheduler_worker_pool_impl.h",
@@ -993,6 +997,8 @@ component("base") {
"trace_event/memory_dump_manager.cc",
"trace_event/memory_dump_manager.h",
"trace_event/memory_dump_provider.h",
+ "trace_event/memory_dump_provider_info.cc",
+ "trace_event/memory_dump_provider_info.h",
"trace_event/memory_dump_request_args.cc",
"trace_event/memory_dump_request_args.h",
"trace_event/memory_dump_scheduler.cc",
@@ -1001,6 +1007,8 @@ component("base") {
"trace_event/memory_dump_session_state.h",
"trace_event/memory_infra_background_whitelist.cc",
"trace_event/memory_infra_background_whitelist.h",
+ "trace_event/memory_peak_detector.cc",
+ "trace_event/memory_peak_detector.h",
"trace_event/memory_usage_estimator.cc",
"trace_event/memory_usage_estimator.h",
"trace_event/process_memory_dump.cc",
@@ -1014,6 +1022,8 @@ component("base") {
"trace_event/trace_category.h",
"trace_event/trace_config.cc",
"trace_event/trace_config.h",
+ "trace_event/trace_config_category_filter.cc",
+ "trace_event/trace_config_category_filter.h",
"trace_event/trace_event.h",
"trace_event/trace_event_android.cc",
"trace_event/trace_event_argument.cc",
@@ -1050,6 +1060,7 @@ component("base") {
"version.h",
"vlog.cc",
"vlog.h",
+ "win/current_module.h",
"win/enum_variant.cc",
"win/enum_variant.h",
"win/event_trace_consumer.h",
@@ -1194,6 +1205,7 @@ component("base") {
"process/internal_linux.cc",
"process/memory_linux.cc",
"process/process_handle_linux.cc",
+ "process/process_info_linux.cc",
"process/process_iterator_linux.cc",
"process/process_metrics_linux.cc",
"sys_info_linux.cc",
@@ -1700,6 +1712,7 @@ component("i18n") {
"i18n/time_formatting.h",
"i18n/timezone.cc",
"i18n/timezone.h",
+ "i18n/unicodestring.h",
"i18n/utf8_validator_tables.cc",
"i18n/utf8_validator_tables.h",
]
@@ -1893,6 +1906,7 @@ if (is_ios || is_mac) {
test("base_unittests") {
sources = [
+ "allocator/allocator_interception_mac_unittest.mm",
"allocator/malloc_zone_functions_mac_unittest.cc",
"allocator/tcmalloc_unittest.cc",
"android/application_status_listener_unittest.cc",
@@ -1922,7 +1936,10 @@ test("base_unittests") {
"cancelable_callback_unittest.cc",
"command_line_unittest.cc",
"containers/adapters_unittest.cc",
+ "containers/container_test_utils.h",
+ "containers/flat_map_unittest.cc",
"containers/flat_set_unittest.cc",
+ "containers/flat_tree_unittest.cc",
"containers/hash_tables_unittest.cc",
"containers/linked_list_unittest.cc",
"containers/mru_cache_unittest.cc",
@@ -1972,6 +1989,7 @@ test("base_unittests") {
"i18n/time_formatting_unittest.cc",
"i18n/timezone_unittest.cc",
"id_map_unittest.cc",
+ "ios/crb_protocol_observers_unittest.mm",
"ios/device_util_unittest.mm",
"ios/weak_nsobject_unittest.mm",
"json/json_parser_unittest.cc",
@@ -2049,6 +2067,7 @@ test("base_unittests") {
"process/memory_unittest.cc",
"process/memory_unittest_mac.h",
"process/memory_unittest_mac.mm",
+ "process/process_info_unittest.cc",
"process/process_metrics_unittest.cc",
"process/process_metrics_unittest_ios.cc",
"process/process_unittest.cc",
@@ -2154,6 +2173,8 @@ test("base_unittests") {
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_dump_scheduler_unittest.cc",
+ "trace_event/memory_peak_detector_unittest.cc",
"trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
"trace_event/trace_category_unittest.cc",
@@ -2381,6 +2402,7 @@ if (enable_nocompile_tests) {
"bind_unittest.nc",
"callback_list_unittest.nc",
"callback_unittest.nc",
+ "memory/ref_counted_unittest.nc",
"memory/weak_ptr_unittest.nc",
"metrics/histogram_unittest.nc",
]
@@ -2446,6 +2468,7 @@ if (is_android) {
srcjar_deps = [
":base_android_java_enums_srcjar",
":base_build_config_gen",
+ ":base_java_aidl",
":base_native_libraries_gen",
]
@@ -2524,6 +2547,8 @@ if (is_android) {
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
"android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
"android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildProcessCreationParams.java",
+ "android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
]
# New versions of BuildConfig.java and NativeLibraries.java
@@ -2535,6 +2560,13 @@ if (is_android) {
]
}
+ android_aidl("base_java_aidl") {
+ import_include = [ "android/java/src" ]
+ sources = [
+ "android/java/src/org/chromium/base/process_launcher/IChildProcessService.aidl",
+ ]
+ }
+
android_library("base_javatests") {
testonly = true
deps = [
@@ -2661,6 +2693,7 @@ if (is_android) {
]
java_files = [
"test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "test/android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
"test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
]
}
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 8cdb06161f..ac534817fc 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -65,12 +65,10 @@ config("tcmalloc_flags") {
# tcmalloc contains some unused local template specializations.
"-Wno-unused-function",
- # tcmalloc uses COMPILE_ASSERT without static_assert but with
- # typedefs.
+ # tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
"-Wno-unused-local-typedefs",
- # for magic2_ in debugallocation.cc (only built in Debug builds)
- # typedefs.
+ # for magic2_ in debugallocation.cc (only built in Debug builds) typedefs.
"-Wno-unused-private-field",
]
} else {
@@ -166,12 +164,15 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/heap-profile-table.cc",
"$tcmalloc_dir/src/heap-profile-table.h",
"$tcmalloc_dir/src/heap-profiler.cc",
+ "$tcmalloc_dir/src/heap-profiler.h",
"$tcmalloc_dir/src/internal_logging.cc",
"$tcmalloc_dir/src/internal_logging.h",
"$tcmalloc_dir/src/linked_list.h",
"$tcmalloc_dir/src/malloc_extension.cc",
+ "$tcmalloc_dir/src/malloc_extension.h",
"$tcmalloc_dir/src/malloc_hook-inl.h",
"$tcmalloc_dir/src/malloc_hook.cc",
+ "$tcmalloc_dir/src/malloc_hook.h",
"$tcmalloc_dir/src/maybe_threads.cc",
"$tcmalloc_dir/src/maybe_threads.h",
"$tcmalloc_dir/src/memory_region_map.cc",
@@ -187,6 +188,7 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/stack_trace_table.cc",
"$tcmalloc_dir/src/stack_trace_table.h",
"$tcmalloc_dir/src/stacktrace.cc",
+ "$tcmalloc_dir/src/stacktrace.h",
"$tcmalloc_dir/src/static_vars.cc",
"$tcmalloc_dir/src/static_vars.h",
"$tcmalloc_dir/src/symbolize.cc",
@@ -196,6 +198,7 @@ if (use_allocator == "tcmalloc") {
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/tcmalloc.cc",
+ #"$tcmalloc_dir/src/tcmalloc.h",
"$tcmalloc_dir/src/thread_cache.cc",
"$tcmalloc_dir/src/thread_cache.h",
"$tcmalloc_dir/src/windows/port.cc",
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index fbdbdfc8c2..4887142d25 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -23,6 +23,8 @@
#if defined(OS_MACOSX)
#include <malloc/malloc.h>
+
+#include "base/allocator/allocator_interception_mac.h"
#endif
// No calls to malloc / new in this file. They would would cause re-entrancy of
@@ -336,9 +338,11 @@ void InitializeAllocatorShim() {
// traversed the shim this will route them to the default malloc zone.
InitializeDefaultDispatchToMacAllocator();
+ MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
+
// This replaces the default malloc zone, causing calls to malloc & friends
// from the codebase to be routed to ShimMalloc() above.
- OverrideMacSymbols();
+ base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
} // namespace allocator
} // namespace base
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
index 82624ee45b..0196f899ae 100644
--- a/base/allocator/allocator_shim_internals.h
+++ b/base/allocator/allocator_shim_internals.h
@@ -18,7 +18,26 @@
#endif
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
-#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+//
+// If an exported symbol is linked into a DSO, it may be preempted by a
+// definition in the main executable. If this happens to an allocator symbol, it
+// will mean that the DSO will use the main executable's allocator. This is
+// normally relatively harmless -- regular allocations should all use the same
+// allocator, but if the DSO tries to hook the allocator it will not see any
+// allocations.
+//
+// However, if LLVM LTO is enabled, the compiler may inline the shim layer
+// symbols into callers. The end result is that allocator calls in DSOs may use
+// either the main executable's allocator or the DSO's allocator, depending on
+// whether the call was inlined. This is arguably a bug in LLVM caused by its
+// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
+// To work around the bug we use noinline to prevent the symbols from being
+// inlined.
+//
+// In the long run we probably want to avoid linking the allocator bits into
+// DSOs altogether. This will save a little space and stop giving DSOs the false
+// impression that they can hook the allocator.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
#endif // __GNUC__
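For context on the workaround above, this is roughly the shape a shim entry point takes once SHIM_ALWAYS_EXPORT expands; a minimal sketch with a hypothetical function name and placeholder body, not code from the shim itself:

#include <cstddef>

// "default" visibility keeps the symbol exported from the DSO, while
// noinline stops LTO from inlining it into callers and silently bypassing
// symbol interposition, as the comment in allocator_shim_internals.h notes.
__attribute__((visibility("default"), noinline))
void* ShimMallocExample(std::size_t size) {
  // Placeholder body; the real shim routes the call onward through the
  // allocator dispatch chain.
  return nullptr;
}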
diff --git a/base/at_exit.cc b/base/at_exit.cc
index 5dcc83cb2f..e0025ea0d3 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -81,6 +81,10 @@ void AtExitManager::ProcessCallbacksNow() {
g_top_manager->processing_callbacks_ = true;
}
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // It's safe since all other threads should be terminated at this point.
+ ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
+
while (!tasks.empty()) {
base::Closure task = tasks.top();
task.Run();
diff --git a/base/base.isolate b/base/base.isolate
deleted file mode 100644
index 079d07d810..0000000000
--- a/base/base.isolate
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'includes': [
- # While the target 'base' doesn't depend on ../third_party/icu/icu.gyp
- # itself, virtually all targets using it has to include icu. The only
- # exception is the Windows sandbox (?).
- '../third_party/icu/icu.isolate',
- # Sanitizer-instrumented third-party libraries (if enabled).
- '../third_party/instrumented_libraries/instrumented_libraries.isolate',
- # MSVS runtime libraries.
- '../build/config/win/msvs_dependencies.isolate',
- ],
- 'conditions': [
- ['use_custom_libcxx==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/libc++.so',
- ],
- },
- }],
- ['OS=="mac" and asan==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
- ],
- },
- }],
- ['OS=="win" and asan==1 and component=="shared_library"', {
- 'variables': {
- 'files': [
- # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
- # but since the version (x.y.z) changes, just grab the whole dir.
- '../third_party/llvm-build/Release+Asserts/lib/clang/',
- ],
- },
- }],
- ['OS=="linux" and (asan==1 or lsan==1 or msan==1 or tsan==1)', {
- 'variables': {
- 'files': [
- # For llvm-symbolizer.
- '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
- ],
- },
- }],
- ['asan==1 or lsan==1 or msan==1 or tsan==1', {
- 'variables': {
- 'files': [
- '../tools/valgrind/asan/',
- '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
- ],
- },
- }],
- # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
- ['asan==0 or lsan==0 or msan==0 or tsan==0', {
- 'variables': {},
- }],
- ],
-}
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index a9ca9d2538..0de9294894 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -1309,65 +1309,59 @@ TEST_F(BindTest, OnceCallback) {
static_assert(std::is_constructible<
RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copyable.");
- static_assert(is_assignable<
- RepeatingClosure, const RepeatingClosure&>::value,
+ static_assert(
+ std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copy-assignable.");
// Move constructor and assignment of RepeatingCallback.
static_assert(std::is_constructible<
RepeatingClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be movable.");
- static_assert(is_assignable<
- RepeatingClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be move-assignable");
+ static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be move-assignable");
// Conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, const OnceClosure&>::value,
+ static_assert(
+ !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
// Destructive conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, OnceClosure&&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, OnceClosure&&>::value,
- "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
// Copy constructor and assignment of OnceCallback.
static_assert(!std::is_constructible<
OnceClosure, const OnceClosure&>::value,
"OnceClosure should not be copyable.");
- static_assert(!is_assignable<
- OnceClosure, const OnceClosure&>::value,
- "OnceClosure should not be copy-assignable");
+ static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+ "OnceClosure should not be copy-assignable");
// Move constructor and assignment of OnceCallback.
static_assert(std::is_constructible<
OnceClosure, OnceClosure&&>::value,
"OnceClosure should be movable.");
- static_assert(is_assignable<
- OnceClosure, OnceClosure&&>::value,
- "OnceClosure should be move-assignable.");
+ static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+ "OnceClosure should be move-assignable.");
// Conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, const RepeatingClosure&>::value,
- "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
// Destructive conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be covretible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be covretible to OnceClosure.");
OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
std::move(cb).Run();
diff --git a/base/callback.h b/base/callback.h
index 40bd5208a8..c91e1a88d3 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -21,71 +21,6 @@ namespace base {
namespace internal {
-template <typename CallbackType>
-struct IsOnceCallback : std::false_type {};
-
-template <typename Signature>
-struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
-
-// RunMixin provides different variants of `Run()` function to `Callback<>`
-// based on the type of callback.
-template <typename CallbackType>
-class RunMixin;
-
-// Specialization for OnceCallback.
-template <typename R, typename... Args>
-class RunMixin<OnceCallback<R(Args...)>> {
- private:
- using CallbackType = OnceCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... /* args */) const & {
- // Note: even though this static_assert will trivially always fail, it
- // cannot be simply replaced with static_assert(false, ...) because:
- // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
- // argument does not evaluate to true.
- // - Per [temp.res]/p8, if no valid specialization can be generated for a
- // template definition, and that template is not instantiated, the
- // template definition is ill-formed, no diagnostic required.
- // These two clauses, taken together, would allow a conforming C++ compiler
- // to immediately reject static_assert(false, ...), even inside an
- // uninstantiated template.
- static_assert(!IsOnceCallback<CallbackType>::value,
- "OnceCallback::Run() may only be invoked on a non-const "
- "rvalue, i.e. std::move(callback).Run().");
- }
-
- R Run(Args... args) && {
- // Move the callback instance into a local variable before the invocation,
- // that ensures the internal state is cleared after the invocation.
- // It's not safe to touch |this| after the invocation, since running the
- // bound function may destroy |this|.
- CallbackType cb = static_cast<CallbackType&&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
-// Specialization for RepeatingCallback.
-template <typename R, typename... Args>
-class RunMixin<RepeatingCallback<R(Args...)>> {
- private:
- using CallbackType = RepeatingCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... args) const {
- const CallbackType& cb = static_cast<const CallbackType&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
template <typename From, typename To>
struct IsCallbackConvertible : std::false_type {};
@@ -100,14 +35,14 @@ template <typename R,
internal::CopyMode copy_mode,
internal::RepeatMode repeat_mode>
class Callback<R(Args...), copy_mode, repeat_mode>
- : public internal::CallbackBase<copy_mode>,
- public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
+ : public internal::CallbackBase<copy_mode> {
public:
static_assert(repeat_mode != internal::RepeatMode::Once ||
copy_mode == internal::CopyMode::MoveOnly,
"OnceCallback must be MoveOnly.");
using RunType = R(Args...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
@@ -135,7 +70,26 @@ class Callback<R(Args...), copy_mode, repeat_mode>
return this->EqualsInternal(other);
}
- friend class internal::RunMixin<Callback>;
+ R Run(Args... args) const & {
+ static_assert(repeat_mode == internal::RepeatMode::Repeating,
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
+ return f(this->bind_state_.get(), std::forward<Args>(args)...);
+ }
+
+ R Run(Args... args) && {
+ // Move the callback instance into a local variable before the invocation,
+ // that ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ Callback cb = std::move(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
};
} // namespace base
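The practical effect of folding RunMixin into the two Run() overloads above is easiest to see at a call site; a small usage sketch mirroring the NullAfterMoveRun test added later in this patch:

base::Closure repeating = base::Bind([] {});
repeating.Run();        // const& overload: the callback stays valid.
repeating.Run();        // Can be run any number of times.

base::OnceClosure once = base::BindOnce([] {});
std::move(once).Run();  // && overload: the bound state is moved out and cleared.
// |once| is now null; running it again would be a use-after-move bug.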
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index ec3d6cbf16..6e0aee8882 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -8,8 +8,8 @@
// generated). Instead, consider adding methods here.
//
// ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
-// copy) after the original callback is Reset(). This can be handy if Run()
-// reads/writes the variable holding the Callback.
+// move or copy) after the original callback is Reset(). This can be handy if
+// Run() reads/writes the variable holding the Callback.
#ifndef BASE_CALLBACK_HELPERS_H_
#define BASE_CALLBACK_HELPERS_H_
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 4330e9cce5..a760f0664c 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -17,6 +17,10 @@ bool ReturnFalse(const BindStateBase*) {
} // namespace
+void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
+ bind_state->destructor_(bind_state);
+}
+
BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*))
: BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
@@ -26,19 +30,9 @@ BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*))
: polymorphic_invoke_(polymorphic_invoke),
- ref_count_(0),
destructor_(destructor),
is_cancelled_(is_cancelled) {}
-void BindStateBase::AddRef() const {
- AtomicRefCountInc(&ref_count_);
-}
-
-void BindStateBase::Release() const {
- if (!AtomicRefCountDec(&ref_count_))
- destructor_(this);
-}
-
CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
CallbackBase<CopyMode::MoveOnly>&
@@ -80,10 +74,9 @@ bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
return bind_state_ == other.bind_state_;
}
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(
- BindStateBase* bind_state)
- : bind_state_(bind_state) {
- DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(BindStateBase* bind_state)
+ : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
+ DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
}
CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
diff --git a/base/callback_internal.h b/base/callback_internal.h
index d6dcfeb3c0..29b07c23bd 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -8,17 +8,29 @@
#ifndef BASE_CALLBACK_INTERNAL_H_
#define BASE_CALLBACK_INTERNAL_H_
-#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
namespace base {
+
+struct FakeBindState;
+
namespace internal {
+
template <CopyMode copy_mode>
class CallbackBase;
+class BindStateBase;
+
+template <typename Functor, typename... BoundArgs>
+struct BindState;
+
+struct BindStateBaseRefCountTraits {
+ static void Destruct(const BindStateBase*);
+};
+
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
// behaves as an existential type that is used by a corresponding
@@ -30,38 +42,43 @@ class CallbackBase;
// Creating a vtable for every BindState template instantiation results in a lot
// of bloat. Its only task is to call the destructor which can be done with a
// function pointer.
-class BASE_EXPORT BindStateBase {
+class BASE_EXPORT BindStateBase
+ : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
using InvokeFuncStorage = void(*)();
- protected:
+ private:
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*));
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*));
+
~BindStateBase() = default;
- private:
- friend class scoped_refptr<BindStateBase>;
+ friend struct BindStateBaseRefCountTraits;
+ friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
+
template <CopyMode copy_mode>
friend class CallbackBase;
+ // Whitelist subclasses that access the destructor of BindStateBase.
+ template <typename Functor, typename... BoundArgs>
+ friend struct BindState;
+ friend struct ::base::FakeBindState;
+
bool IsCancelled() const {
return is_cancelled_(this);
}
- void AddRef() const;
- void Release() const;
-
// In C++, it is safe to cast function pointers to function pointers of
// another type. It is not okay to use void*. We create a InvokeFuncStorage
// that that can store our function pointer, and then cast it back to
// the original type on usage.
InvokeFuncStorage polymorphic_invoke_;
- mutable AtomicRefCount ref_count_;
-
// Pointer to a function that will properly destroy |this|.
void (*destructor_)(const BindStateBase*);
bool (*is_cancelled_)(const BindStateBase*);
@@ -86,7 +103,7 @@ class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
// Returns true if Callback is null (doesn't refer to anything).
- bool is_null() const { return bind_state_.get() == NULL; }
+ bool is_null() const { return !bind_state_; }
explicit operator bool() const { return !is_null(); }
// Returns true if the callback invocation will be nop due to an cancellation.
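BindStateBase now reuses RefCountedThreadSafe with a custom traits class instead of hand-rolled AddRef()/Release(). The general shape of that pattern is sketched below with illustrative names (MyState and MyStateTraits are not from this patch):

#include "base/memory/ref_counted.h"

class MyState;

struct MyStateTraits {
  // Called by RefCountedThreadSafe when the last reference is dropped,
  // in place of the default "delete this" behavior.
  static void Destruct(const MyState* state);
};

class MyState : public base::RefCountedThreadSafe<MyState, MyStateTraits> {
 public:
  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();  // First ref must come via AdoptRef().

 private:
  friend struct MyStateTraits;
  friend class base::RefCountedThreadSafe<MyState, MyStateTraits>;
  ~MyState() = default;
};

void MyStateTraits::Destruct(const MyState* state) {
  delete state;
}

// Creation mirrors the CallbackBase constructor above: the initial reference
// is adopted rather than added, so HasOneRef() holds right after construction.
scoped_refptr<MyState> MakeState() {
  return base::AdoptRef(new MyState());
}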
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index a41736946a..f76adbcdd2 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -21,24 +21,13 @@ void NopInvokeFunc() {}
// based on a type we declared in the anonymous namespace above to remove any
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
-struct FakeBindState1 : internal::BindStateBase {
- FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
- private:
- ~FakeBindState1() {}
- static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState1*>(self);
- }
- static bool IsCancelled(const internal::BindStateBase*) {
- return false;
- }
-};
+struct FakeBindState : internal::BindStateBase {
+ FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
-struct FakeBindState2 : internal::BindStateBase {
- FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
- ~FakeBindState2() {}
+ ~FakeBindState() {}
static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState2*>(self);
+ delete static_cast<const FakeBindState*>(self);
}
static bool IsCancelled(const internal::BindStateBase*) {
return false;
@@ -50,9 +39,7 @@ namespace {
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1()),
- callback_b_(new FakeBindState2()) {
- }
+ : callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
~CallbackTest() override {}
@@ -94,7 +81,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1());
+ Callback<void()> callback_c(new FakeBindState());
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -148,6 +135,23 @@ TEST_F(CallbackTest, ResetAndReturn) {
ASSERT_TRUE(tfr.cb_already_run);
}
+TEST_F(CallbackTest, NullAfterMoveRun) {
+ Closure cb = Bind([] {});
+ ASSERT_TRUE(cb);
+ std::move(cb).Run();
+ ASSERT_FALSE(cb);
+
+ const Closure cb2 = Bind([] {});
+ ASSERT_TRUE(cb2);
+ std::move(cb2).Run();
+ ASSERT_TRUE(cb2);
+
+ OnceClosure cb3 = BindOnce([] {});
+ ASSERT_TRUE(cb3);
+ std::move(cb3).Run();
+ ASSERT_FALSE(cb3);
+}
+
class CallbackOwner : public base::RefCounted<CallbackOwner> {
public:
explicit CallbackOwner(bool* deleted) {
diff --git a/base/command_line.cc b/base/command_line.cc
index 3033fcfc6e..137f966674 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
@@ -411,11 +412,15 @@ void CommandLine::AppendArguments(const CommandLine& other,
void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
if (wrapper.empty())
return;
- // The wrapper may have embedded arguments (like "gdb --args"). In this case,
- // we don't pretend to do anything fancy, we just split on spaces.
- StringVector wrapper_argv = SplitString(
- wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL);
+ // Split the wrapper command based on whitespace (with quoting).
+ using CommandLineTokenizer =
+ StringTokenizerT<StringType, StringType::const_iterator>;
+ CommandLineTokenizer tokenizer(wrapper, FILE_PATH_LITERAL(" "));
+ tokenizer.set_quote_chars(FILE_PATH_LITERAL("'\""));
+ std::vector<StringType> wrapper_argv;
+ while (tokenizer.GetNext())
+ wrapper_argv.emplace_back(tokenizer.token());
+
// Prepend the wrapper and update the switches/arguments |begin_args_|.
argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
begin_args_ += wrapper_argv.size();
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index bcfc6c59c9..79c9aecc2a 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -406,4 +406,35 @@ TEST(CommandLineTest, Copy) {
EXPECT_TRUE(assigned.HasSwitch(pair.first));
}
+TEST(CommandLineTest, PrependSimpleWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(FILE_PATH_LITERAL("wrapper --foo --bar"));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar"), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
+TEST(CommandLineTest, PrependComplexWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(
+ FILE_PATH_LITERAL("wrapper --foo='hello world' --bar=\"let's go\""));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo='hello world'"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar=\"let's go\""), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
} // namespace base
diff --git a/base/containers/container_test_utils.h b/base/containers/container_test_utils.h
new file mode 100644
index 0000000000..e36b9f7312
--- /dev/null
+++ b/base/containers/container_test_utils.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
+#define BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
+
+// This file contains some helper classes for testing conainer behavior.
+
+#include "base/macros.h"
+
+namespace base {
+
+// A move-only class that holds an integer.
+class MoveOnlyInt {
+ public:
+ explicit MoveOnlyInt(int data = 1) : data_(data) {}
+ MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; }
+ MoveOnlyInt& operator=(MoveOnlyInt&& other) {
+ data_ = other.data_;
+ other.data_ = 0;
+ return *this;
+ }
+
+ friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+ return lhs.data_ < rhs.data_;
+ }
+
+ int data() const { return data_; }
+
+ private:
+ int data_;
+
+ DISALLOW_COPY_AND_ASSIGN(MoveOnlyInt);
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 4005489d4b..7c684a9690 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -105,8 +105,6 @@ class MRUCacheBase {
// Retrieves the contents of the given key, or end() if not found. This method
// has the side effect of moving the requested item to the front of the
// recency list.
- //
- // TODO(brettw) We may want a const version of this function in the future.
iterator Get(const KeyType& key) {
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter == index_.end())
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 1b10cde7ce..94c618dfbb 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -5,6 +5,8 @@
#ifndef BASE_CRITICAL_CLOSURE_H_
#define BASE_CRITICAL_CLOSURE_H_
+#include <utility>
+
#include "base/callback.h"
#include "base/macros.h"
#include "build/build_config.h"
@@ -27,13 +29,13 @@ bool IsMultiTaskingSupported();
// |ios::ScopedCriticalAction|.
class CriticalClosure {
public:
- explicit CriticalClosure(const Closure& closure);
+ explicit CriticalClosure(OnceClosure closure);
~CriticalClosure();
void Run();
private:
ios::ScopedCriticalAction critical_action_;
- Closure closure_;
+ OnceClosure closure_;
DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
};
@@ -55,13 +57,14 @@ class CriticalClosure {
// background running time, |MakeCriticalClosure| should be applied on them
// before posting.
#if defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
DCHECK(internal::IsMultiTaskingSupported());
- return base::Bind(&internal::CriticalClosure::Run,
- Owned(new internal::CriticalClosure(closure)));
+ return base::BindOnce(
+ &internal::CriticalClosure::Run,
+ Owned(new internal::CriticalClosure(std::move(closure))));
}
#else // defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
// No-op for platforms where the application does not need to acquire
// background time for closures to finish when it goes into the background.
return closure;
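With the signature change above, callers hand ownership of a one-shot closure to MakeCriticalClosure instead of copying it; a hedged usage sketch (SaveImportantState is a placeholder, not part of this patch):

// Wrap a task that must finish even if the iOS app is backgrounded.
base::OnceClosure critical =
    base::MakeCriticalClosure(base::BindOnce(&SaveImportantState));
// ...
std::move(critical).Run();  // Or hand it off to a task runner for posting.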
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index 40e9b9537c..5081c1c9d2 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -23,6 +23,7 @@
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
namespace base {
@@ -30,18 +31,13 @@ namespace debug {
namespace {
-// A number that identifies the memory as having been initialized. It's
-// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
-// A version number is added on so that major structure changes won't try to
-// read an older version (since the cookie won't match).
-const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
-
// The minimum depth a stack should support.
const int kMinStackDepth = 2;
// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1 << 10; // 1 KiB
+const size_t kProcessDataSize = 4 << 10; // 4 KiB
const size_t kGlobalDataSize = 16 << 10; // 16 KiB
const size_t kMaxUserDataNameLength =
static_cast<size_t>(std::numeric_limits<uint8_t>::max());
@@ -49,6 +45,13 @@ const size_t kMaxUserDataNameLength =
// A constant used to indicate that module information is changing.
const uint32_t kModuleInformationChanging = 0x80000000;
+// The key used to record process information.
+const char kProcessPhaseDataKey[] = "process-phase";
+
+// An atomically incrementing number, used to check for recreations of objects
+// in the same memory space.
+StaticAtomicSequenceNumber g_next_id;
+
union ThreadRef {
int64_t as_id;
#if defined(OS_WIN)
@@ -64,6 +67,43 @@ union ThreadRef {
#endif
};
+// Gets the next non-zero identifier. It is only unique within a process.
+uint32_t GetNextDataId() {
+ uint32_t id;
+ while ((id = g_next_id.GetNext()) == 0)
+ ;
+ return id;
+}
+
+// Gets the current process-id, either from the GlobalActivityTracker if it
+// exists (where the PID can be defined for testing) or from the system if
+// there isn't such.
+int64_t GetProcessId() {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ if (global)
+ return global->process_id();
+ return GetCurrentProcId();
+}
+
+// Finds and reuses a specific allocation or creates a new one.
+PersistentMemoryAllocator::Reference AllocateFrom(
+ PersistentMemoryAllocator* allocator,
+ uint32_t from_type,
+ size_t size,
+ uint32_t to_type) {
+ PersistentMemoryAllocator::Iterator iter(allocator);
+ PersistentMemoryAllocator::Reference ref;
+ while ((ref = iter.GetNextOfType(from_type)) != 0) {
+ DCHECK_LE(size, allocator->GetAllocSize(ref));
+ // This can fail if a another thread has just taken it. It is assumed that
+ // the memory is cleared during the "free" operation.
+ if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
+ return ref;
+ }
+
+ return allocator->Allocate(size, to_type);
+}
+
// Determines the previous aligned index.
size_t RoundDownToAlignment(size_t index, size_t alignment) {
return index & (0 - alignment);
@@ -74,8 +114,43 @@ size_t RoundUpToAlignment(size_t index, size_t alignment) {
return (index + (alignment - 1)) & (0 - alignment);
}
+// Converts "tick" timing into wall time.
+Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
+ return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
+}
+
} // namespace
+OwningProcess::OwningProcess() {}
+OwningProcess::~OwningProcess() {}
+
+void OwningProcess::Release_Initialize(int64_t pid) {
+ uint32_t old_id = data_id.load(std::memory_order_acquire);
+ DCHECK_EQ(0U, old_id);
+ process_id = pid != 0 ? pid : GetProcessId();
+ create_stamp = Time::Now().ToInternalValue();
+ data_id.store(GetNextDataId(), std::memory_order_release);
+}
+
+void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
+ DCHECK_NE(0U, data_id);
+ process_id = pid;
+ create_stamp = stamp;
+}
+
+// static
+bool OwningProcess::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
+ uint32_t id = info->data_id.load(std::memory_order_acquire);
+ if (id == 0)
+ return false;
+
+ *out_id = info->process_id;
+ *out_stamp = info->create_stamp;
+ return id == info->data_id.load(std::memory_order_seq_cst);
+}
// It doesn't matter what is contained in this (though it will be all zeros)
// as only the address of it is important.
@@ -246,32 +321,42 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const {
return ref_value_;
}
+// These are required because std::atomic is (currently) not a POD type and
+// thus clang requires explicit out-of-line constructors and destructors even
+// when they do nothing.
ActivityUserData::ValueInfo::ValueInfo() {}
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
ActivityUserData::ValueInfo::~ValueInfo() {}
+ActivityUserData::MemoryHeader::MemoryHeader() {}
+ActivityUserData::MemoryHeader::~MemoryHeader() {}
+ActivityUserData::FieldHeader::FieldHeader() {}
+ActivityUserData::FieldHeader::~FieldHeader() {}
-StaticAtomicSequenceNumber ActivityUserData::next_id_;
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
-ActivityUserData::ActivityUserData(void* memory, size_t size)
+ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
: memory_(reinterpret_cast<char*>(memory)),
available_(RoundDownToAlignment(size, kMemoryAlignment)),
- id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+ header_(reinterpret_cast<MemoryHeader*>(memory)),
+ orig_data_id(0),
+ orig_process_id(0),
+ orig_create_stamp(0) {
// It's possible that no user data is being stored.
if (!memory_)
return;
- DCHECK_LT(kMemoryAlignment, available_);
- if (id_->load(std::memory_order_relaxed) == 0) {
- // Generate a new ID and store it in the first 32-bit word of memory_.
- // |id_| must be non-zero for non-sink instances.
- uint32_t id;
- while ((id = next_id_.GetNext()) == 0)
- ;
- id_->store(id, std::memory_order_relaxed);
- DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
- }
- memory_ += kMemoryAlignment;
- available_ -= kMemoryAlignment;
+ static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
+ DCHECK_LT(sizeof(MemoryHeader), available_);
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
+ header_->owner.Release_Initialize(pid);
+ memory_ += sizeof(MemoryHeader);
+ available_ -= sizeof(MemoryHeader);
+
+ // Make a copy of identifying information for later comparison.
+ *const_cast<uint32_t*>(&orig_data_id) =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
+ *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
// If there is already data present, load that. This allows the same class
// to be used for analysis through snapshots.
@@ -280,6 +365,85 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
ActivityUserData::~ActivityUserData() {}
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+ DCHECK(output_snapshot->empty());
+
+ // Find any new data that may have been added by an active instance of this
+ // class that is adding records.
+ ImportExistingData();
+
+ // Add all the values to the snapshot.
+ for (const auto& entry : values_) {
+ TypedValue value;
+ const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
+ value.type_ = entry.second.type;
+ DCHECK_GE(entry.second.extent, size);
+
+ switch (entry.second.type) {
+ case RAW_VALUE:
+ case STRING_VALUE:
+ value.long_value_ =
+ std::string(reinterpret_cast<char*>(entry.second.memory), size);
+ break;
+ case RAW_VALUE_REFERENCE:
+ case STRING_VALUE_REFERENCE: {
+ ReferenceRecord* ref =
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+ value.ref_value_ = StringPiece(
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+ static_cast<size_t>(ref->size));
+ } break;
+ case BOOL_VALUE:
+ case CHAR_VALUE:
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+ break;
+ case SIGNED_VALUE:
+ case UNSIGNED_VALUE:
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+ break;
+ case END_OF_VALUES: // Included for completeness purposes.
+ NOTREACHED();
+ }
+ auto inserted = output_snapshot->insert(
+ std::make_pair(entry.second.name.as_string(), std::move(value)));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ }
+
+ // Another import attempt will validate that the underlying memory has not
+ // been reused for another purpose. Entries added since the first import
+ // will be ignored here but will be returned if another snapshot is created.
+ ImportExistingData();
+ if (!memory_) {
+ output_snapshot->clear();
+ return false;
+ }
+
+ // Successful snapshot.
+ return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() const {
+ // The |memory_| pointer advances as elements are written but the |header_|
+ // value is always at the start of the block so just return that.
+ return header_;
+}
+
+void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
+ int64_t stamp) {
+ if (!header_)
+ return;
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ActivityUserData::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
void ActivityUserData::Set(StringPiece name,
ValueType type,
const void* memory,
@@ -308,13 +472,13 @@ void ActivityUserData::Set(StringPiece name,
// following field will be aligned properly.
size_t name_size = name.length();
size_t name_extent =
- RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
- sizeof(Header);
+ RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
+ sizeof(FieldHeader);
size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
// The "base size" is the size of the header and (padded) string key. Stop
// now if there's not room enough for even this.
- size_t base_size = sizeof(Header) + name_extent;
+ size_t base_size = sizeof(FieldHeader) + name_extent;
if (base_size > available_)
return;
@@ -338,7 +502,7 @@ void ActivityUserData::Set(StringPiece name,
}
// Allocate a chunk of memory.
- Header* header = reinterpret_cast<Header*>(memory_);
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
memory_ += full_size;
available_ -= full_size;
@@ -348,9 +512,9 @@ void ActivityUserData::Set(StringPiece name,
DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
header->name_size = static_cast<uint8_t>(name_size);
header->record_size = full_size;
- char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
void* value_memory =
- reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+ reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
memcpy(name_memory, name.data(), name_size);
header->type.store(type, std::memory_order_release);
@@ -364,7 +528,7 @@ void ActivityUserData::Set(StringPiece name,
info->name = persistent_name;
info->memory = value_memory;
info->size_ptr = &header->value_size;
- info->extent = full_size - sizeof(Header) - name_extent;
+ info->extent = full_size - sizeof(FieldHeader) - name_extent;
info->type = type;
}
@@ -389,8 +553,12 @@ void ActivityUserData::SetReference(StringPiece name,
}
void ActivityUserData::ImportExistingData() const {
- while (available_ > sizeof(Header)) {
- Header* header = reinterpret_cast<Header*>(memory_);
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ while (available_ > sizeof(FieldHeader)) {
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
ValueType type =
static_cast<ValueType>(header->type.load(std::memory_order_acquire));
if (type == END_OF_VALUES)
@@ -398,8 +566,8 @@ void ActivityUserData::ImportExistingData() const {
if (header->record_size > available_)
return;
- size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
- kMemoryAlignment);
+ size_t value_offset = RoundUpToAlignment(
+ sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
if (header->record_size == value_offset &&
header->value_size.load(std::memory_order_relaxed) == 1) {
value_offset -= 1;
@@ -408,7 +576,7 @@ void ActivityUserData::ImportExistingData() const {
return;
ValueInfo info;
- info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+ info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
info.type = type;
info.memory = memory_ + value_offset;
info.size_ptr = &header->value_size;
@@ -420,60 +588,14 @@ void ActivityUserData::ImportExistingData() const {
memory_ += header->record_size;
available_ -= header->record_size;
}
-}
-
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
- DCHECK(output_snapshot);
- DCHECK(output_snapshot->empty());
-
- // Find any new data that may have been added by an active instance of this
- // class that is adding records.
- ImportExistingData();
- for (const auto& entry : values_) {
- TypedValue value;
- value.type_ = entry.second.type;
- DCHECK_GE(entry.second.extent,
- entry.second.size_ptr->load(std::memory_order_relaxed));
-
- switch (entry.second.type) {
- case RAW_VALUE:
- case STRING_VALUE:
- value.long_value_ =
- std::string(reinterpret_cast<char*>(entry.second.memory),
- entry.second.size_ptr->load(std::memory_order_relaxed));
- break;
- case RAW_VALUE_REFERENCE:
- case STRING_VALUE_REFERENCE: {
- ReferenceRecord* ref =
- reinterpret_cast<ReferenceRecord*>(entry.second.memory);
- value.ref_value_ = StringPiece(
- reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
- static_cast<size_t>(ref->size));
- } break;
- case BOOL_VALUE:
- case CHAR_VALUE:
- value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
- break;
- case SIGNED_VALUE:
- case UNSIGNED_VALUE:
- value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
- break;
- case END_OF_VALUES: // Included for completeness purposes.
- NOTREACHED();
- }
- auto inserted = output_snapshot->insert(
- std::make_pair(entry.second.name.as_string(), std::move(value)));
- DCHECK(inserted.second); // True if inserted, false if existed.
+ // Check if memory has been completely reused.
+ if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
+ header_->owner.process_id != orig_process_id ||
+ header_->owner.create_stamp != orig_create_stamp) {
+ memory_ = nullptr;
+ values_.clear();
}
-
- return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() {
- // The |memory_| pointer advances as elements are written but the |id_|
- // value is always at the start of the block so just return that.
- return id_;
}
// This information is kept for every thread that is tracked. It is filled
@@ -485,27 +607,16 @@ struct ThreadActivityTracker::Header {
GlobalActivityTracker::kTypeIdActivityTracker;
// Expected size for 32/64-bit check.
- static constexpr size_t kExpectedInstanceSize = 80;
-
- // This unique number indicates a valid initialization of the memory.
- std::atomic<uint32_t> cookie;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
+ 72;
- // The number of Activity slots (spaces that can hold an Activity) that
- // immediately follow this structure in memory.
- uint32_t stack_slots;
+ // This information uniquely identifies a process.
+ OwningProcess owner;
- // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
- // These identifiers are not guaranteed to mean anything but are unique, in
- // combination, among all active trackers. It would be nice to always have
- // the process_id be a 64-bit value but the necessity of having it atomic
- // (for the memory barriers it provides) limits it to the natural word size
- // of the machine.
-#ifdef ARCH_CPU_64_BITS
- std::atomic<int64_t> process_id;
-#else
- std::atomic<int32_t> process_id;
- int32_t process_id_padding;
-#endif
+ // The thread-id (thread_ref.as_id) to which this data belongs. This number
+ // is not guaranteed to mean anything but combined with the process-id from
+ // OwningProcess is unique among all active trackers.
ThreadRef thread_ref;
// The start-time and start-ticks when the data was created. Each activity
@@ -514,12 +625,19 @@ struct ThreadActivityTracker::Header {
int64_t start_time;
int64_t start_ticks;
+ // The number of Activity slots (spaces that can hold an Activity) that
+ // immediately follow this structure in memory.
+ uint32_t stack_slots;
+
+ // Some padding to keep everything 64-bit aligned.
+ uint32_t padding;
+
// The current depth of the stack. This may be greater than the number of
// slots. If the depth exceeds the number of slots, the newest entries
// won't be recorded.
std::atomic<uint32_t> current_depth;
- // A memory location used to indicate if changes have been made to the stack
+ // A memory location used to indicate if changes have been made to the data
// that would invalidate an in-progress read of its contents. The active
// tracker will zero the value whenever something gets popped from the
// stack. A monitoring tracker can write a non-zero value here, copy the
@@ -527,7 +645,11 @@ struct ThreadActivityTracker::Header {
// the contents didn't change while being copied. This can handle concurrent
// snapshot operations only if each snapshot writes a different bit (which
// is not the current implementation so no parallel snapshots allowed).
- std::atomic<uint32_t> stack_unchanged;
+ std::atomic<uint32_t> data_unchanged;
+
+ // The last "exception" activity. This can't be stored on the stack because
+ // that could get popped as things unwind.
+ Activity last_exception;
// The name of the thread (up to a maximum length). Dynamic-length names
// are not practical since the memory has to come from the same persistent
@@ -596,15 +718,16 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
"ActivityData.data is not 64-bit aligned");
// Provided memory should either be completely initialized or all zeros.
- if (header_->cookie.load(std::memory_order_relaxed) == 0) {
+ if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
// This is a new file. Double-check other fields and then initialize.
- DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header_->owner.process_id);
+ DCHECK_EQ(0, header_->owner.create_stamp);
DCHECK_EQ(0, header_->thread_ref.as_id);
DCHECK_EQ(0, header_->start_time);
DCHECK_EQ(0, header_->start_ticks);
DCHECK_EQ(0U, header_->stack_slots);
DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
- DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
DCHECK_EQ(0, stack_[0].time_internal);
DCHECK_EQ(0U, stack_[0].origin_address);
DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -616,7 +739,6 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
header_->thread_ref.as_handle =
PlatformThread::CurrentHandle().platform_handle();
#endif
- header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
header_->start_time = base::Time::Now().ToInternalValue();
header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
@@ -626,7 +748,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
// This is done last so as to guarantee that everything above is "released"
// by the time this value gets written.
- header_->cookie.store(kHeaderCookie, std::memory_order_release);
+ header_->owner.Release_Initialize();
valid_ = true;
DCHECK(IsValid());
@@ -719,40 +841,28 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
- // have written a non-zero value into |stack_unchanged|; clearing it here
+ // have written a non-zero value into |data_unchanged|; clearing it here
// will let that thread detect that something did change. This needs to
// happen after the atomic |depth| operation above so a "release" store
// is required.
- header_->stack_unchanged.store(0, std::memory_order_release);
+ header_->data_unchanged.store(0, std::memory_order_release);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
ActivityId id,
ActivityTrackerMemoryAllocator* allocator) {
- // User-data is only stored for activities actually held in the stack.
- if (id < stack_slots_) {
- // Don't allow user data for lock acquisition as recursion may occur.
- if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
- NOTREACHED();
- return MakeUnique<ActivityUserData>(nullptr, 0);
- }
-
- // Get (or reuse) a block of memory and create a real UserData object
- // on it.
- PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
- void* memory =
- allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
- if (memory) {
- std::unique_ptr<ActivityUserData> user_data =
- MakeUnique<ActivityUserData>(memory, kUserDataSize);
- stack_[id].user_data_ref = ref;
- stack_[id].user_data_id = user_data->id();
- return user_data;
- }
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>();
}
- // Return a dummy object that will still accept (but ignore) Set() calls.
- return MakeUnique<ActivityUserData>(nullptr, 0);
+ // User-data is only stored for activities actually held in the stack.
+ if (id >= stack_slots_)
+ return MakeUnique<ActivityUserData>();
+
+ // Create and return a real UserData object.
+ return CreateUserDataForActivity(&stack_[id], allocator);
}
bool ThreadActivityTracker::HasUserData(ActivityId id) {
@@ -770,12 +880,27 @@ void ThreadActivityTracker::ReleaseUserData(
}
}
+void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Fill the reusable exception activity.
+ Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
+ data);
+
+ // The data has changed meaning that some other thread trying to copy the
+ // contents for reporting purposes could get bad data.
+ header_->data_unchanged.store(0, std::memory_order_relaxed);
+}
+
bool ThreadActivityTracker::IsValid() const {
- if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
- header_->process_id.load(std::memory_order_relaxed) == 0 ||
- header_->thread_ref.as_id == 0 ||
- header_->start_time == 0 ||
- header_->start_ticks == 0 ||
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
+ header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
+ header_->start_time == 0 || header_->start_ticks == 0 ||
header_->stack_slots != stack_slots_ ||
header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
return false;
@@ -806,20 +931,21 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->activity_stack.reserve(stack_slots_);
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
- // Remember the process and thread IDs to ensure they aren't replaced
- // during the snapshot operation. Use "acquire" to ensure that all the
- // non-atomic fields of the structure are valid (at least at the current
- // moment in time).
- const int64_t starting_process_id =
- header_->process_id.load(std::memory_order_acquire);
+ // Remember the data IDs to ensure nothing is replaced during the snapshot
+ // operation. Use "acquire" so that all the non-atomic fields of the
+ // structure are valid (at least at the current moment in time).
+ const uint32_t starting_id =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ const int64_t starting_create_stamp = header_->owner.create_stamp;
+ const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
- // Write a non-zero value to |stack_unchanged| so it's possible to detect
+ // Write a non-zero value to |data_unchanged| so it's possible to detect
// at the end that nothing has changed since copying the data began. A
// "cst" operation is required to ensure it occurs before everything else.
// Using "cst" memory ordering is relatively expensive but this is only
// done during analysis so doesn't directly affect the worker threads.
- header_->stack_unchanged.store(1, std::memory_order_seq_cst);
+ header_->data_unchanged.store(1, std::memory_order_seq_cst);
// Fetching the current depth also "acquires" the contents of the stack.
depth = header_->current_depth.load(std::memory_order_acquire);
@@ -831,29 +957,26 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
count * sizeof(Activity));
}
+ // Capture the last exception.
+ memcpy(&output_snapshot->last_exception, &header_->last_exception,
+ sizeof(Activity));
+
+ // TODO(bcwhite): Snapshot other things here.
+
// Retry if something changed during the copy. A "cst" operation ensures
// it must happen after all the above operations.
- if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
+ if (!header_->data_unchanged.load(std::memory_order_seq_cst))
continue;
// Stack copied. Record its full depth.
output_snapshot->activity_stack_depth = depth;
- // TODO(bcwhite): Snapshot other things here.
-
- // Get the general thread information. Loading of "process_id" is guaranteed
- // to be last so that it's possible to detect below if any content has
- // changed while reading it. It's technically possible for a thread to end,
- // have its data cleared, a new thread get created with the same IDs, and
- // it perform an action which starts tracking all in the time since the
- // ID reads above but the chance is so unlikely that it's not worth the
- // effort and complexity of protecting against it (perhaps with an
- // "unchanged" field like is done for the stack).
+ // Get the general thread information.
output_snapshot->thread_name =
std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
+ output_snapshot->create_stamp = header_->owner.create_stamp;
output_snapshot->thread_id = header_->thread_ref.as_id;
- output_snapshot->process_id =
- header_->process_id.load(std::memory_order_seq_cst);
+ output_snapshot->process_id = header_->owner.process_id;
// All characters of the thread-name buffer were copied so as to not break
// if the trailing NUL were missing. Now limit the length if the actual
@@ -861,9 +984,11 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->thread_name.resize(
strlen(output_snapshot->thread_name.c_str()));
- // If the process or thread ID has changed then the tracker has exited and
- // the memory reused by a new one. Try again.
- if (output_snapshot->process_id != starting_process_id ||
+ // If the data ID has changed then the tracker has exited and the memory
+ // reused by a new one. Try again.
+ if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
+ output_snapshot->create_stamp != starting_create_stamp ||
+ output_snapshot->process_id != starting_process_id ||
output_snapshot->thread_id != starting_thread_id) {
continue;
}
@@ -879,10 +1004,14 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
const int64_t start_ticks = header_->start_ticks;
for (Activity& activity : output_snapshot->activity_stack) {
activity.time_internal =
- (start_time +
- TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
+ WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
.ToInternalValue();
}
+ output_snapshot->last_exception.time_internal =
+ WallTimeFromTickTime(start_ticks,
+ output_snapshot->last_exception.time_internal,
+ start_time)
+ .ToInternalValue();
// Success!
return true;
@@ -892,11 +1021,48 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
return false;
}
+const void* ThreadActivityTracker::GetBaseAddress() {
+ return header_;
+}
+
+void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
+ int64_t stamp) {
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const Header* header = reinterpret_cast<const Header*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}
+std::unique_ptr<ActivityUserData>
+ThreadActivityTracker::CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator) {
+ DCHECK_EQ(0U, activity->user_data_ref);
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+ void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ activity->user_data_ref = ref;
+ activity->user_data_id = user_data->id();
+ return user_data;
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>();
+}
+
// The instantiation of the GlobalActivityTracker object.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
@@ -979,6 +1145,9 @@ bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
pickle_size = pickler.size();
changes.store(0, std::memory_order_relaxed);
+ // Initialize the owner info.
+ owner.Release_Initialize();
+
// Now set those fields that can change.
return UpdateFrom(info);
}
@@ -1047,21 +1216,23 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
user_data_ =
tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
} else {
- user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ user_data_ = MakeUnique<ActivityUserData>();
}
}
return *user_data_;
}
-GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
- : ActivityUserData(memory, size) {}
+GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
+ size_t size,
+ int64_t pid)
+ : ActivityUserData(memory, size, pid) {}
-GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
-void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
- ValueType type,
- const void* memory,
- size_t size) {
+void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
AutoLock lock(data_lock_);
ActivityUserData::Set(name, type, memory, size);
}
@@ -1084,10 +1255,11 @@ GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
void GlobalActivityTracker::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
// There's no need to do anything with the result. It is self-managing.
GlobalActivityTracker* global_tracker =
- new GlobalActivityTracker(std::move(allocator), stack_depth);
+ new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
// Create a tracker for this thread since it is known.
global_tracker->CreateTrackerForCurrentThread();
}
@@ -1113,7 +1285,7 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
DCHECK(success);
CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
std::move(mapped_file), size, id, name, false),
- stack_depth);
+ stack_depth, 0);
}
#endif // !defined(OS_NACL)
@@ -1121,11 +1293,37 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
CreateWithAllocator(
- MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
+ MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth,
+ process_id);
}
+// static
+void GlobalActivityTracker::SetForTesting(
+ std::unique_ptr<GlobalActivityTracker> tracker) {
+ CHECK(!subtle::NoBarrier_Load(&g_tracker_));
+ subtle::Release_Store(&g_tracker_,
+ reinterpret_cast<uintptr_t>(tracker.release()));
+}
+
+// static
+std::unique_ptr<GlobalActivityTracker>
+GlobalActivityTracker::ReleaseForTesting() {
+ GlobalActivityTracker* tracker = Get();
+ if (!tracker)
+ return nullptr;
+
+ // Thread trackers assume that the global tracker is present for some
+ // operations so ensure that there aren't any.
+ tracker->ReleaseTrackerForCurrentThreadForTesting();
+ DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
+
+ subtle::Release_Store(&g_tracker_, 0);
+ return WrapUnique(tracker);
+}
+
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
DCHECK(!this_thread_tracker_.Get());
@@ -1182,8 +1380,181 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
ThreadActivityTracker* tracker =
reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
- if (tracker)
+ if (tracker) {
+ this_thread_tracker_.Set(nullptr);
delete tracker;
+ }
+}
+
+void GlobalActivityTracker::SetBackgroundTaskRunner(
+ const scoped_refptr<TaskRunner>& runner) {
+ AutoLock lock(global_tracker_lock_);
+ background_task_runner_ = runner;
+}
+
+void GlobalActivityTracker::SetProcessExitCallback(
+ ProcessExitCallback callback) {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback_ = callback;
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
+
+ base::AutoLock lock(global_tracker_lock_);
+ if (base::ContainsKey(known_processes_, pid)) {
+ // TODO(bcwhite): Measure this in UMA.
+ NOTREACHED() << "Process #" << process_id
+ << " was previously recorded as \"launched\""
+ << " with no corresponding exit.";
+ known_processes_.erase(pid);
+ }
+
+#if defined(OS_WIN)
+ known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
+#else
+ known_processes_.insert(std::make_pair(pid, cmd));
+#endif
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ const int64_t pid = process_id;
+ if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
+ RecordProcessLaunch(pid, FilePath::StringType(FILE_PATH_LITERAL("\"")) +
+ exe + FILE_PATH_LITERAL("\" ") + args);
+ } else {
+ RecordProcessLaunch(pid, exe + FILE_PATH_LITERAL(' ') + args);
+ }
+}
+
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
+ int exit_code) {
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
+
+ scoped_refptr<TaskRunner> task_runner;
+ std::string command_line;
+ {
+ base::AutoLock lock(global_tracker_lock_);
+ task_runner = background_task_runner_;
+ auto found = known_processes_.find(pid);
+ if (found != known_processes_.end()) {
+ command_line = std::move(found->second);
+ known_processes_.erase(found);
+ } else {
+ DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
+ }
+ }
+
+ // Use the current time to differentiate the process that just exited
+ // from any that might be created in the future with the same ID.
+ int64_t now_stamp = Time::Now().ToInternalValue();
+
+ // The persistent allocator is thread-safe so run the iteration and
+ // adjustments on a worker thread if one was provided.
+ if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
+ task_runner->PostTask(
+ FROM_HERE,
+ Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this), pid,
+ now_stamp, exit_code, Passed(&command_line)));
+ return;
+ }
+
+ CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
+}
+
+void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
+ process_data().SetInt(kProcessPhaseDataKey, phase);
+}
+
+void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line) {
+ // The process may not have exited cleanly so it's necessary to go through
+ // all the data structures it may have allocated in the persistent memory
+ // segment and mark them as "released". This will allow them to be reused
+ // later on.
+
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ PersistentMemoryAllocator::Reference ref;
+
+ ProcessExitCallback process_exit_callback;
+ {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback = process_exit_callback_;
+ }
+ if (process_exit_callback) {
+ // Find the processes user-data record so the process phase can be passed
+ // to the callback.
+ ActivityUserData::Snapshot process_data_snapshot;
+ while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
+ int64_t found_id;
+ int64_t create_stamp;
+ if (ActivityUserData::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ if (found_id == process_id && create_stamp < exit_stamp) {
+ const ActivityUserData process_data(const_cast<void*>(memory),
+ allocator_->GetAllocSize(ref));
+ process_data.CreateSnapshot(&process_data_snapshot);
+ break; // No need to look for any others.
+ }
+ }
+ }
+ iter.Reset(); // So it starts anew when used below.
+
+ // Record the process's phase at exit so the callback doesn't need to go
+ // searching based on a private key value.
+ ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
+ auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
+ if (phase != process_data_snapshot.end())
+ exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
+
+ // Perform the callback.
+ process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
+ std::move(command_line),
+ std::move(process_data_snapshot));
+ }
+
+ // Find all allocations associated with the exited process and free them.
+ uint32_t type;
+ while ((ref = iter.GetNext(&type)) != 0) {
+ switch (type) {
+ case kTypeIdActivityTracker:
+ case kTypeIdUserDataRecord:
+ case kTypeIdProcessDataRecord:
+ case ModuleInfoRecord::kPersistentTypeId: {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, type, PersistentMemoryAllocator::kSizeAny);
+ int64_t found_id;
+ int64_t create_stamp;
+
+ // By convention, the OwningProcess structure is always the first
+ // field of the structure so there's no need to handle all the
+ // cases separately.
+ if (OwningProcess::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ // Only change the type to be "free" if the process ID matches and
+ // the creation time is before the exit time (so PID re-use doesn't
+ // cause the erasure of something that is in-use). Memory is cleared
+ // here, rather than when it's needed, so as to limit the impact at
+ // that critical time.
+ if (found_id == process_id && create_stamp < exit_stamp)
+ allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
+ }
+ } break;
+ }
+ }
}
void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
@@ -1233,9 +1604,11 @@ void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
GlobalActivityTracker::GlobalActivityTracker(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth)
+ int stack_depth,
+ int64_t process_id)
: allocator_(std::move(allocator)),
stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
+ process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
this_thread_tracker_(&OnTLSDestroy),
thread_tracker_count_(0),
thread_tracker_allocator_(allocator_.get(),
@@ -1249,25 +1622,38 @@ GlobalActivityTracker::GlobalActivityTracker(
kTypeIdUserDataRecordFree,
kUserDataSize,
kCachedUserDataMemories,
- /*make_iterable=*/false),
+ /*make_iterable=*/true),
+ process_data_(allocator_->GetAsArray<char>(
+ AllocateFrom(allocator_.get(),
+ kTypeIdProcessDataRecordFree,
+ kProcessDataSize,
+ kTypeIdProcessDataRecord),
+ kTypeIdProcessDataRecord,
+ kProcessDataSize),
+ kProcessDataSize,
+ process_id_),
global_data_(
allocator_->GetAsArray<char>(
allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
kTypeIdGlobalDataRecord,
- PersistentMemoryAllocator::kSizeAny),
- kGlobalDataSize) {
- // Ensure the passed memory is valid and empty (iterator finds nothing).
- uint32_t type;
- DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
+ kGlobalDataSize),
+ kGlobalDataSize,
+ process_id_) {
+ DCHECK_NE(0, process_id_);
// Ensure that there is no other global object and then make this one such.
DCHECK(!g_tracker_);
subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
- // The global records must be iterable in order to be found by an analyzer.
+ // The data records must be iterable in order to be found by an analyzer.
+ allocator_->MakeIterable(allocator_->GetAsReference(
+ process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
allocator_->MakeIterable(allocator_->GetAsReference(
global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
+ // Note that this process has launched.
+ SetProcessPhase(PROCESS_LAUNCHED);
+
// Fetch and record all activated field trials.
FieldTrial::ActiveGroups active_groups;
FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -1276,7 +1662,7 @@ GlobalActivityTracker::GlobalActivityTracker(
}
GlobalActivityTracker::~GlobalActivityTracker() {
- DCHECK_EQ(Get(), this);
+ DCHECK(Get() == nullptr || Get() == this);
DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
subtle::Release_Store(&g_tracker_, 0);
}
@@ -1297,6 +1683,23 @@ void GlobalActivityTracker::ReturnTrackerMemory(
thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}
+void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
+ const void* origin,
+ uint32_t code) {
+ // Get an existing tracker for this thread. It's not possible to create
+ // one at this point because such would involve memory allocations and
+ // other potentially complex operations that can cause failures if done
+ // within an exception handler. In most cases various operations will
+ // have already created the tracker so this shouldn't generally be a
+ // problem.
+ ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+ if (!tracker)
+ return;
+
+ tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
+ ActivityData::ForException(code));
+}
+
// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
delete reinterpret_cast<ManagedActivityTracker*>(value);
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
index 719a31865c..c8cf1e972e 100644
--- a/base/debug/activity_tracker.h
+++ b/base/debug/activity_tracker.h
@@ -23,12 +23,15 @@
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local_storage.h"
@@ -41,7 +44,6 @@ class FilePath;
class Lock;
class PlatformThreadHandle;
class Process;
-class StaticAtomicSequenceNumber;
class WaitableEvent;
namespace debug {
@@ -56,11 +58,48 @@ enum : int {
kActivityCallStackSize = 10,
};
+// A class for keeping all information needed to verify that a structure is
+// associated with a given process.
+struct OwningProcess {
+ OwningProcess();
+ ~OwningProcess();
+
+ // Initializes structure with the current process id and the current time.
+ // These can uniquely identify a process. A unique non-zero data_id will be
+ // set making it possible to tell using atomic reads if the data has changed.
+ void Release_Initialize(int64_t pid = 0);
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from memory without loading the entire structure for analysis. This will
+ // return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
+
+ // SHA1(base::debug::OwningProcess): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
+
+ // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+ static constexpr size_t kExpectedInstanceSize = 24;
+
+ std::atomic<uint32_t> data_id;
+ uint32_t padding;
+ int64_t process_id;
+ int64_t create_stamp;
+};
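+
+// A minimal usage sketch (illustrative only, not part of this change): the
+// creating process stamps the structure once and an analyzer later probes it
+// without mapping anything else:
+//
+//   OwningProcess owner;
+//   owner.Release_Initialize();  // records data_id, process id, create time
+//   int64_t pid, stamp;
+//   if (OwningProcess::GetOwningProcessId(&owner, &pid, &stamp)) {
+//     // |pid| and |stamp| identify the process that owns this memory.
+//   }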
+
// The data associated with an activity is dependent upon the activity type.
// This union defines all of the various fields. All fields must be explicitly
// sized types to ensure no interoperability problems between 32-bit and
// 64-bit systems.
union ActivityData {
+ // Expected size for 32/64-bit check.
+ // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
+ // static constexpr size_t kExpectedInstanceSize = 8;
+
// Generic activities don't have any defined structure.
struct {
uint32_t id; // An arbitrary identifier used for association.
@@ -81,6 +120,9 @@ union ActivityData {
struct {
int64_t process_id; // A unique identifier for a process.
} process;
+ struct {
+ uint32_t code; // An "exception code" number.
+ } exception;
// These methods create an ActivityData object from the appropriate
// parameters. Objects of this type should always be created this way to
@@ -126,6 +168,12 @@ union ActivityData {
data.process.process_id = id;
return data;
}
+
+ static ActivityData ForException(const uint32_t code) {
+ ActivityData data;
+ data.exception.code = code;
+ return data;
+ }
};
// A "null" activity-data that can be passed to indicate "do not change".
@@ -237,6 +285,9 @@ struct Activity {
ACT_PROCESS_START = ACT_PROCESS,
ACT_PROCESS_WAIT,
+ // Exception activities indicate the occurrence of something unexpected.
+ ACT_EXCEPTION = 14 << 4,
+
// Generic activities are user defined and can be anything.
ACT_GENERIC = 15 << 4,
@@ -293,7 +344,9 @@ struct Activity {
// This class manages arbitrary user data that can be associated with activities
// done by a thread by supporting key/value pairs of any type. This can provide
// additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread.
+// global data. All updates must be done from the same thread though other
+// threads can read it concurrently if they create new objects using the same
+// memory.
class BASE_EXPORT ActivityUserData {
public:
// List of known value type. REFERENCE types must immediately follow the non-
@@ -340,7 +393,7 @@ class BASE_EXPORT ActivityUserData {
private:
friend class ActivityUserData;
- ValueType type_;
+ ValueType type_ = END_OF_VALUES;
uint64_t short_value_; // Used to hold copy of numbers, etc.
std::string long_value_; // Used to hold copy of raw/string data.
StringPiece ref_value_; // Used to hold reference to external data.
@@ -348,14 +401,17 @@ class BASE_EXPORT ActivityUserData {
using Snapshot = std::map<std::string, TypedValue>;
- ActivityUserData(void* memory, size_t size);
+ // Initialize the object either as a "sink" that just accepts and discards
+ // data or an active one that writes to a given (zeroed) memory block.
+ ActivityUserData();
+ ActivityUserData(void* memory, size_t size, int64_t pid = 0);
virtual ~ActivityUserData();
// Gets the unique ID number for this user data. If this changes then the
// contents have been overwritten by another thread. The return value is
// always non-zero unless it's actually just a data "sink".
uint32_t id() const {
- return memory_ ? id_->load(std::memory_order_relaxed) : 0;
+ return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
}
// Writes a |value| (as part of a key/value pair) that will be included with
@@ -403,13 +459,23 @@ class BASE_EXPORT ActivityUserData {
// Creates a snapshot of the key/value pairs contained within. The returned
// data will be fixed, independent of whatever changes afterward. There is
- // protection against concurrent modification of the values but no protection
- // against a complete overwrite of the contents; the caller must ensure that
- // the memory segment is not going to be re-initialized while this runs.
+ // some protection against concurrent modification. This will return false
+ // if the data is invalid or if a complete overwrite of the contents is
+ // detected.
bool CreateSnapshot(Snapshot* output_snapshot) const;
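+
+  // A snapshot read might look like this (sketch only; |user_data| and the
+  // key name are illustrative, not part of this change):
+  //
+  //   ActivityUserData::Snapshot snapshot;
+  //   if (user_data.CreateSnapshot(&snapshot)) {
+  //     int64_t value = snapshot["some-key"].GetInt();
+  //   }
+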
// Gets the base memory address used for storing data.
- const void* GetBaseAddress();
+ const void* GetBaseAddress() const;
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from tracker memory without loading the entire structure for analysis. This
+ // will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
protected:
virtual void Set(StringPiece name,
@@ -422,20 +488,31 @@ class BASE_EXPORT ActivityUserData {
enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
- // A structure used to reference data held outside of persistent memory.
- struct ReferenceRecord {
- uint64_t address;
- uint64_t size;
+ // A structure that defines the structure header in memory.
+ struct MemoryHeader {
+ MemoryHeader();
+ ~MemoryHeader();
+
+ OwningProcess owner; // Information about the creating process.
};
// Header to a key/value record held in persistent memory.
- struct Header {
+ struct FieldHeader {
+ FieldHeader();
+ ~FieldHeader();
+
std::atomic<uint8_t> type; // Encoded ValueType
uint8_t name_size; // Length of "name" key.
std::atomic<uint16_t> value_size; // Actual size of the stored value.
uint16_t record_size; // Total storage of name, value, header.
};
+ // A structure used to reference data held outside of persistent memory.
+ struct ReferenceRecord {
+ uint64_t address;
+ uint64_t size;
+ };
+
// This record is used to hold known values in a map so that they can be
// found and overwritten later.
struct ValueInfo {
@@ -456,7 +533,10 @@ class BASE_EXPORT ActivityUserData {
size_t size);
// Loads any data already in the memory segment. This allows for accessing
- // records created previously.
+ // records created previously. If this detects that the underlying data has
+ // gone away (cleared by another thread/process), it will invalidate all the
+ // data in this object and turn it into simple "sink" with no values to
+ // return.
void ImportExistingData() const;
// A map of all the values within the memory block, keyed by name for quick
@@ -470,12 +550,14 @@ class BASE_EXPORT ActivityUserData {
mutable char* memory_;
mutable size_t available_;
- // A pointer to the unique ID for this instance.
- std::atomic<uint32_t>* const id_;
+ // A pointer to the memory header for this instance.
+ MemoryHeader* const header_;
- // This ID is used to create unique indentifiers for user data so that it's
- // possible to tell if the information has been overwritten.
- static StaticAtomicSequenceNumber next_id_;
+ // These hold values used when initially creating the object. They are
+ // compared against current header values to check for outside changes.
+ const uint32_t orig_data_id;
+ const int64_t orig_process_id;
+ const int64_t orig_create_stamp;
DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
};
@@ -511,6 +593,9 @@ class BASE_EXPORT ThreadActivityTracker {
// truncated due to internal length limitations.
std::string thread_name;
+ // The timestamp at which this process was created.
+ int64_t create_stamp;
+
// The process and thread IDs. These values have no meaning other than
// they uniquely identify a running process and a running thread within
// that process. Thread-IDs can be re-used across different processes
@@ -525,6 +610,9 @@ class BASE_EXPORT ThreadActivityTracker {
// The current total depth of the activity stack, including those later
// entries not recorded in the |activity_stack| vector.
uint32_t activity_stack_depth = 0;
+
+ // The last recorded "exception" activity.
+ Activity last_exception;
};
// This is the base class for having the compiler manage an activity on the
@@ -608,6 +696,12 @@ class BASE_EXPORT ThreadActivityTracker {
void ReleaseUserData(ActivityId id,
ActivityTrackerMemoryAllocator* allocator);
+ // Save an exception. |origin| is the location of the exception.
+ void RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+
// Returns whether the current data is valid or not. It is not valid if
// corruption has been detected in the header or other data structures.
bool IsValid() const;
@@ -618,6 +712,19 @@ class BASE_EXPORT ThreadActivityTracker {
// implementation does not support concurrent snapshot operations.
bool CreateSnapshot(Snapshot* output_snapshot) const;
+ // Gets the base memory address used for storing data.
+ const void* GetBaseAddress();
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from tracker memory without loading the entire structure for analysis. This
+ // will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
+
// Calculates the memory size required for a given stack depth, including
// the internal header structure for the stack.
static size_t SizeForStackDepth(int stack_depth);
@@ -625,6 +732,10 @@ class BASE_EXPORT ThreadActivityTracker {
private:
friend class ActivityTrackerTest;
+ std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator);
+
Header* const header_; // Pointer to the Header structure.
Activity* const stack_; // The stack of activities.
const uint32_t stack_slots_; // The total number of stack slots.
@@ -649,15 +760,45 @@ class BASE_EXPORT GlobalActivityTracker {
// will be safely ignored. These are public so that an external process
// can recognize records of this type within an allocator.
enum : uint32_t {
- kTypeIdActivityTracker = 0x5D7381AF + 3, // SHA1(ActivityTracker) v3
- kTypeIdUserDataRecord = 0x615EDDD7 + 2, // SHA1(UserDataRecord) v2
+ kTypeIdActivityTracker = 0x5D7381AF + 4, // SHA1(ActivityTracker) v4
+ kTypeIdUserDataRecord = 0x615EDDD7 + 3, // SHA1(UserDataRecord) v3
kTypeIdGlobalLogMessage = 0x4CF434F9 + 1, // SHA1(GlobalLogMessage) v1
- kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
+ kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
+ kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 0x200,
kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
+ kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
+ };
+
+ // An enumeration of common process life stages. All entries are given an
+ // explicit number so they are known and remain constant; this allows for
+ // cross-version analysis either locally or on a server.
+ enum ProcessPhase : int {
+ // The phases are generic and may have meaning to the tracker.
+ PROCESS_PHASE_UNKNOWN = 0,
+ PROCESS_LAUNCHED = 1,
+ PROCESS_LAUNCH_FAILED = 2,
+ PROCESS_EXITED_CLEANLY = 10,
+ PROCESS_EXITED_WITH_CODE = 11,
+
+ // Add here whatever is useful for analysis.
+ PROCESS_SHUTDOWN_STARTED = 100,
+ PROCESS_MAIN_LOOP_STARTED = 101,
};
+ // A callback made when a process exits to allow immediate analysis of its
+ // data. Note that the system may reuse the |process_id| so when fetching
+ // records it's important to ensure that what is returned was created before
+ // the |exit_stamp|. Movement of |process_data| information is allowed.
+ using ProcessExitCallback =
+ Callback<void(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ ProcessPhase exit_phase,
+ std::string&& command_line,
+ ActivityUserData::Snapshot&& process_data)>;
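+
+  // A possible wiring of this callback (sketch only; OnChildProcessExit is a
+  // hypothetical handler matching the signature above, not part of this
+  // change):
+  //
+  //   GlobalActivityTracker::Get()->SetProcessExitCallback(
+  //       Bind(&OnChildProcessExit));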
+
// This structure contains information about a loaded module, as shown to
// users of the tracker.
struct BASE_EXPORT ModuleInfo {
@@ -728,9 +869,12 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking will begin immediately upon return.
+ // The |process_id| can be zero to take it from the OS; a non-zero value is
+ // accepted for testing purposes.
static void CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
#if !defined(OS_NACL)
// Like above but internally creates an allocator around a disk file with
@@ -745,11 +889,13 @@ class BASE_EXPORT GlobalActivityTracker {
#endif // !defined(OS_NACL)
// Like above but internally creates an allocator using local heap memory of
- // the specified size. This is used primarily for unit tests.
+ // the specified size. This is used primarily for unit tests. The |process_id|
+ // can be zero to take it from the OS; a non-zero value is accepted for testing.
static void CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Gets the global activity-tracker or null if none exists.
static GlobalActivityTracker* Get() {
@@ -757,6 +903,15 @@ class BASE_EXPORT GlobalActivityTracker {
subtle::Acquire_Load(&g_tracker_));
}
+ // Sets the global activity-tracker for testing purposes.
+ static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
+
+ // This access to the persistent allocator is only for testing; it extracts
+ // the global tracker completely. All tracked threads must exit before
+ // calling this. Tracking for the current thread will be automatically
+ // stopped.
+ static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
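+
+  // Sketch of the intended test flow (illustrative only): detach the live
+  // tracker so a test can run without it, then restore it afterwards.
+  //
+  //   std::unique_ptr<GlobalActivityTracker> saved =
+  //       GlobalActivityTracker::ReleaseForTesting();
+  //   ...  // exercise code without the global tracker
+  //   GlobalActivityTracker::SetForTesting(std::move(saved));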
+
// Convenience method for determining if a global tracker is active.
static bool IsEnabled() { return Get() != nullptr; }
@@ -789,6 +944,50 @@ class BASE_EXPORT GlobalActivityTracker {
// Releases the activity-tracker for the current thread (for testing only).
void ReleaseTrackerForCurrentThreadForTesting();
+ // Sets a task-runner that can be used for background work.
+ void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
+
+ // Sets an optional callback to be called when a process exits.
+ void SetProcessExitCallback(ProcessExitCallback callback);
+
+ // Manages process lifetimes. These are called by the process that launched
+ // and reaped the subprocess, not the subprocess itself. If it is expensive
+ // to generate the parameters, Get() the global tracker and call these
+ // conditionally rather than using the static versions.
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& cmd);
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args);
+ void RecordProcessExit(ProcessId process_id, int exit_code);
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, cmd);
+ }
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, exe, args);
+ }
+ static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessExit(process_id, exit_code);
+ }
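+
+  // For example, a launcher process might record both ends of a child's
+  // lifetime (hypothetical call sites, not part of this change):
+  //
+  //   GlobalActivityTracker::RecordProcessLaunchIfEnabled(pid, command_line);
+  //   ...  // child runs and is reaped
+  //   GlobalActivityTracker::RecordProcessExitIfEnabled(pid, exit_code);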
+
+ // Sets the "phase" of the current process, useful for knowing what it was
+ // doing when it last reported.
+ void SetProcessPhase(ProcessPhase phase);
+ static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->SetProcessPhase(phase);
+ }
+
// Records a log message. The current implementation does NOT recycle these so
// only store critical messages such as FATAL ones.
void RecordLogMessage(StringPiece message);
@@ -818,7 +1017,23 @@ class BASE_EXPORT GlobalActivityTracker {
tracker->RecordFieldTrial(trial_name, group_name);
}
+ // Record exception information for the current thread.
+ ALWAYS_INLINE
+ void RecordException(const void* origin, uint32_t code) {
+ return RecordExceptionImpl(::tracked_objects::GetProgramCounter(), origin,
+ code);
+ }
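+
+  // A typical call site would be an exception/crash handler (sketch only;
+  // |exception_address| and |exception_code| are values a handler would
+  // supply, not part of this change):
+  //
+  //   if (GlobalActivityTracker* tracker = GlobalActivityTracker::Get())
+  //     tracker->RecordException(exception_address, exception_code);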
+
+ // Gets the process ID used for tracking. This is typically the same as what
+ // the OS thinks is the current process but can be overridden for testing.
+ int64_t process_id() { return process_id_; }
+
+ // Accesses the process data record for storing arbitrary key/value pairs.
+ // Updates to this are thread-safe.
+ ActivityUserData& process_data() { return process_data_; }
+
// Accesses the global data record for storing arbitrary key/value pairs.
+ // Updates to this are thread-safe.
ActivityUserData& global_data() { return global_data_; }
private:
@@ -837,10 +1052,10 @@ class BASE_EXPORT GlobalActivityTracker {
// A wrapper around ActivityUserData that is thread-safe and thus can be used
// in the global scope without the requirement of being called from only one
// thread.
- class GlobalUserData : public ActivityUserData {
+ class ThreadSafeUserData : public ActivityUserData {
public:
- GlobalUserData(void* memory, size_t size);
- ~GlobalUserData() override;
+ ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
+ ~ThreadSafeUserData() override;
private:
void Set(StringPiece name,
@@ -850,7 +1065,7 @@ class BASE_EXPORT GlobalActivityTracker {
Lock data_lock_;
- DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
+ DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
};
// State of a module as stored in persistent memory. This supports a single
@@ -862,7 +1077,8 @@ class BASE_EXPORT GlobalActivityTracker {
static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
// Expected size for 32/64-bit check by PersistentMemoryAllocator.
- static constexpr size_t kExpectedInstanceSize = 56;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + 56;
// The atomic unfortunately makes this a "complex" class on some compilers
// and thus requires an out-of-line constructor & destructor even though
@@ -870,6 +1086,7 @@ class BASE_EXPORT GlobalActivityTracker {
ModuleInfoRecord();
~ModuleInfoRecord();
+ OwningProcess owner; // The process that created this record.
uint64_t address; // The base address of the module.
uint64_t load_time; // Time of last load/unload.
uint64_t size; // The size of the module in bytes.
@@ -921,18 +1138,30 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking has already started upon return.
+ // The |process_id| can be zero to take it from the OS; a non-zero value is
+ // accepted for testing purposes.
GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Returns the memory used by an activity-tracker managed by this class.
// It is called during the destruction of a ManagedActivityTracker object.
void ReturnTrackerMemory(ManagedActivityTracker* tracker);
+ // Records exception information.
+ void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
+
// Releases the activity-tracker associated with the thread. It is called
// automatically when a thread is joined and thus there is nothing more to
// be tracked. |value| is a pointer to a ManagedActivityTracker.
static void OnTLSDestroy(void* value);
+ // Does process-exit work. This can be run on any thread.
+ void CleanupAfterProcess(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line);
+
// The persistent-memory allocator from which the memory for all trackers
// is taken.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -941,6 +1170,10 @@ class BASE_EXPORT GlobalActivityTracker {
// provide the stack-depth requested during construction.
const size_t stack_memory_size_;
+ // The process-id of the current process. This is kept as a member variable,
+ // defined during initialization, for testing purposes.
+ const int64_t process_id_;
+
// The activity tracker for the currently executing thread.
base::ThreadLocalStorage::Slot this_thread_tracker_;
@@ -955,9 +1188,9 @@ class BASE_EXPORT GlobalActivityTracker {
ActivityTrackerMemoryAllocator user_data_allocator_;
base::Lock user_data_allocator_lock_;
- // An object for holding global arbitrary key value pairs. Values must always
- // be written from the main UI thread.
- GlobalUserData global_data_;
+ // An object for holding arbitrary key value pairs with thread-safe access.
+ ThreadSafeUserData process_data_;
+ ThreadSafeUserData global_data_;
// A map of global module information, keyed by module path.
std::map<const std::string, ModuleInfoRecord*> modules_;
@@ -966,6 +1199,21 @@ class BASE_EXPORT GlobalActivityTracker {
// The active global activity tracker.
static subtle::AtomicWord g_tracker_;
+ // A lock that is used to protect access to the following fields.
+ base::Lock global_tracker_lock_;
+
+ // The collection of processes being tracked and their command-lines.
+ std::map<int64_t, std::string> known_processes_;
+
+ // A task-runner that can be used for doing background processing.
+ scoped_refptr<TaskRunner> background_task_runner_;
+
+ // A callback performed when a subprocess exits, including its exit-code
+ // and the phase it was in when that occurred. This will be called via
+ // the |background_task_runner_| if one is set, or on whatever thread reaped
+ // the process otherwise.
+ ProcessExitCallback process_exit_callback_;
+
DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
};
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
index aced4fb36a..c7efa580e8 100644
--- a/base/debug/activity_tracker_unittest.cc
+++ b/base/debug/activity_tracker_unittest.cc
@@ -84,45 +84,73 @@ class ActivityTrackerTest : public testing::Test {
return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
}
+ void HandleProcessExit(int64_t id,
+ int64_t stamp,
+ int code,
+ GlobalActivityTracker::ProcessPhase phase,
+ std::string&& command,
+ ActivityUserData::Snapshot&& data) {
+ exit_id = id;
+ exit_stamp = stamp;
+ exit_code = code;
+ exit_phase = phase;
+ exit_command = std::move(command);
+ exit_data = std::move(data);
+ }
+
static void DoNothing() {}
+
+ int64_t exit_id = 0;
+ int64_t exit_stamp;
+ int exit_code;
+ GlobalActivityTracker::ProcessPhase exit_phase;
+ std::string exit_command;
+ ActivityUserData::Snapshot exit_data;
};
TEST_F(ActivityTrackerTest, UserDataTest) {
char buffer[256];
memset(buffer, 0, sizeof(buffer));
ActivityUserData data(buffer, sizeof(buffer));
- const size_t space = sizeof(buffer) - 8;
+ size_t space = sizeof(buffer) - sizeof(ActivityUserData::MemoryHeader);
ASSERT_EQ(space, data.available_);
data.SetInt("foo", 1);
- ASSERT_EQ(space - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetUint("b", 1U); // Small names fit beside header in a word.
- ASSERT_EQ(space - 24 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 10);
- ASSERT_EQ(space - 24 - 16 - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "it's been fun");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ space -= 32;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 20);
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "but we're done together");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "bye");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetChar("d", 'x');
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8, data.available_);
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
data.SetBool("ee", true);
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
data.SetString("f", "");
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16 - 8, data.available_);
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
}
TEST_F(ActivityTrackerTest, PushPopTest) {
@@ -176,7 +204,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
}
TEST_F(ActivityTrackerTest, ScopedTaskTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
ThreadActivityTracker* tracker =
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
@@ -222,6 +250,28 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
}
+TEST_F(ActivityTrackerTest, ExceptionTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ ThreadActivityTracker::Snapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.last_exception.activity_type);
+
+ char origin;
+ global->RecordException(&origin, 42);
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ EXPECT_EQ(Activity::ACT_EXCEPTION, snapshot.last_exception.activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin),
+ snapshot.last_exception.origin_address);
+ EXPECT_EQ(42U, snapshot.last_exception.data.exception.code);
+}
+
TEST_F(ActivityTrackerTest, CreateWithFileTest) {
const char temp_name[] = "CreateWithFileTest";
ScopedTempDir temp_dir;
@@ -250,6 +300,16 @@ TEST_F(ActivityTrackerTest, CreateWithFileTest) {
// GlobalActivityTracker tests below.
+TEST_F(ActivityTrackerTest, BasicTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ // Ensure the data repositories have backing store, indicated by non-zero ID.
+ EXPECT_NE(0U, global->process_data().id());
+ EXPECT_NE(0U, global->global_data().id());
+ EXPECT_NE(global->process_data().id(), global->global_data().id());
+}
+
class SimpleActivityThread : public SimpleThread {
public:
SimpleActivityThread(const std::string& name,
@@ -304,7 +364,7 @@ class SimpleActivityThread : public SimpleThread {
};
TEST_F(ActivityTrackerTest, ThreadDeathTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
const size_t starting_active = GetGlobalActiveTrackerCount();
const size_t starting_inactive = GetGlobalInactiveTrackerCount();
@@ -336,5 +396,107 @@ TEST_F(ActivityTrackerTest, ThreadDeathTest) {
EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
}
+TEST_F(ActivityTrackerTest, ProcessDeathTest) {
+ // This doesn't actually create and destroy a process. Instead, it uses for-
+ // testing interfaces to simulate data created by other processes.
+ const ProcessId other_process_id = GetCurrentProcId() + 1;
+
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
+
+ // Get callbacks for process exit.
+ global->SetProcessExitCallback(
+ Bind(&ActivityTrackerTest::HandleProcessExit, Unretained(this)));
+
+ // Pretend that another process has started.
+ global->RecordProcessLaunch(other_process_id, FILE_PATH_LITERAL("foo --bar"));
+
+ // Do some activities.
+ PendingTask task(FROM_HERE, base::Bind(&DoNothing));
+ ScopedTaskRunActivity activity(task);
+ ActivityUserData& user_data = activity.user_data();
+ ASSERT_NE(0U, user_data.id());
+
+ // Get the memory-allocator references to that data.
+ PersistentMemoryAllocator::Reference proc_data_ref =
+ global->allocator()->GetAsReference(
+ global->process_data().GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdProcessDataRecord);
+ ASSERT_TRUE(proc_data_ref);
+ PersistentMemoryAllocator::Reference tracker_ref =
+ global->allocator()->GetAsReference(
+ thread->GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdActivityTracker);
+ ASSERT_TRUE(tracker_ref);
+ PersistentMemoryAllocator::Reference user_data_ref =
+ global->allocator()->GetAsReference(
+ user_data.GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdUserDataRecord);
+ ASSERT_TRUE(user_data_ref);
+
+ // Make a copy of the thread-tracker state so it can be restored later.
+ const size_t tracker_size = global->allocator()->GetAllocSize(tracker_ref);
+ std::unique_ptr<char[]> tracker_copy(new char[tracker_size]);
+ memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
+
+ // Change the objects to appear to be owned by another process.
+ int64_t owning_id;
+ int64_t stamp;
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ global->process_data().SetOwningProcessIdForTesting(other_process_id, stamp);
+ thread->SetOwningProcessIdForTesting(other_process_id, stamp);
+ user_data.SetOwningProcessIdForTesting(other_process_id, stamp);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+
+ // Check that process exit will perform callback and free the allocations.
+ ASSERT_EQ(0, exit_id);
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecord,
+ global->allocator()->GetType(proc_data_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdActivityTracker,
+ global->allocator()->GetType(tracker_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdUserDataRecord,
+ global->allocator()->GetType(user_data_ref));
+ global->RecordProcessExit(other_process_id, 0);
+ EXPECT_EQ(other_process_id, exit_id);
+ EXPECT_EQ("foo --bar", exit_command);
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecordFree,
+ global->allocator()->GetType(proc_data_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdActivityTrackerFree,
+ global->allocator()->GetType(tracker_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdUserDataRecordFree,
+ global->allocator()->GetType(user_data_ref));
+
+ // Restore memory contents and types so things don't crash when doing real
+ // process clean-up.
+ memcpy(const_cast<void*>(thread->GetBaseAddress()), tracker_copy.get(),
+ tracker_size);
+ global->allocator()->ChangeType(
+ proc_data_ref, GlobalActivityTracker::kTypeIdProcessDataRecord,
+ GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
+ global->allocator()->ChangeType(
+ tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+ GlobalActivityTracker::kTypeIdActivityTrackerFree, false);
+ global->allocator()->ChangeType(
+ user_data_ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+ GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
+}
+
} // namespace debug
} // namespace base
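As a reading aid for the new process-exit plumbing exercised by ProcessDeathTest above, here is a minimal usage sketch. It relies only on names visible in this change (SetProcessExitCallback, RecordProcessLaunch, RecordProcessExit, and a handler signature mirrored from HandleProcessExit in the test fixture); OnChildExit, TrackChild, and the command line are illustrative assumptions, not part of the patch.

    // Sketch only; assumes the base::debug activity-tracker API shown above.
    void OnChildExit(int64_t process_id,
                     int64_t exit_stamp,
                     int exit_code,
                     base::debug::GlobalActivityTracker::ProcessPhase phase,
                     std::string&& command_line,
                     base::debug::ActivityUserData::Snapshot&& process_data) {
      // Inspect |exit_code| and |process_data| for the exited |process_id|.
    }

    void TrackChild(base::ProcessId child_pid) {
      base::debug::GlobalActivityTracker* global =
          base::debug::GlobalActivityTracker::Get();
      global->SetProcessExitCallback(base::Bind(&OnChildExit));
      global->RecordProcessLaunch(child_pid, FILE_PATH_LITERAL("child --flag"));
      // ... later, once the child has actually been reaped:
      global->RecordProcessExit(child_pid, /*exit_code=*/0);
    }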
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 1996dfca18..08dcacfa30 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -35,7 +35,7 @@ namespace debug {
namespace {
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
// GCC and LLVM generate slightly different frames on ARM, see
@@ -144,7 +144,7 @@ void* LinkStackFrames(void* fpp, void* parent_fp) {
return prev_parent_fp;
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
} // namespace
@@ -227,6 +227,18 @@ std::string StackTrace::ToString() const {
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial) {
+// TODO(699863): Merge the frame-pointer based stack unwinder into the
+// base::debug::StackTrace platform-specific implementation files.
+#if defined(OS_WIN)
+ StackTrace stack(max_depth);
+ size_t count = 0;
+ const void* const* frames = stack.Addresses(&count);
+ if (count < skip_initial)
+ return 0u;
+ count -= skip_initial;
+ memcpy(out_trace, frames + skip_initial, count * sizeof(void*));
+ return count;
+#elif defined(OS_POSIX)
// Usage of __builtin_frame_address() enables frame pointers in this
// function even if they are not enabled globally. So 'fp' will always
// be valid.
@@ -260,8 +272,10 @@ size_t TraceStackFramePointers(const void** out_trace,
}
return depth;
+#endif
}
+#if !defined(OS_WIN)
ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
: fp_(fp),
parent_fp_(parent_fp),
@@ -272,6 +286,7 @@ ScopedStackFrameLinker::~ScopedStackFrameLinker() {
CHECK_EQ(parent_fp_, previous_parent_fp)
<< "Stack frame's parent pointer has changed!";
}
+#endif // !defined(OS_WIN)
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
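A short, hedged sketch of calling the now cross-platform TraceStackFramePointers(); the signature and skip semantics come from this change, while kMaxFrames and the symbolization step are illustrative.

    #include "base/debug/stack_trace.h"

    void CaptureCallers() {
    #if HAVE_TRACE_STACK_FRAME_POINTERS
      const size_t kMaxFrames = 32;  // illustrative limit
      const void* frames[kMaxFrames];
      // Skip this function's own frame. On Windows this now delegates to
      // StackTrace rather than walking frame pointers directly.
      size_t depth = base::debug::TraceStackFramePointers(
          frames, kMaxFrames, /*skip_initial=*/1);
      for (size_t i = 0; i < depth; ++i) {
        // |frames[i]| is a raw return address; symbolize as needed.
      }
    #endif
    }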
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 4c9b73e87d..ab1d2ebe6a 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -23,13 +23,23 @@ struct _EXCEPTION_POINTERS;
struct _CONTEXT;
#endif
-#if defined(OS_POSIX) && ( \
- defined(__i386__) || defined(__x86_64__) || \
- (defined(__arm__) && !defined(__thumb__)))
+// TODO(699863): Clean up HAVE_TRACE_STACK_FRAME_POINTERS.
+#if defined(OS_POSIX)
+
+#if defined(__i386__) || defined(__x86_64__)
#define HAVE_TRACE_STACK_FRAME_POINTERS 1
-#else
+#elif defined(__arm__) && !defined(__thumb__)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+#else // defined(__arm__) && !defined(__thumb__)
#define HAVE_TRACE_STACK_FRAME_POINTERS 0
-#endif
+#endif // defined(__arm__) && !defined(__thumb__)
+
+#elif defined(OS_WIN)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+
+#else // defined(OS_WIN)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 0
+#endif // defined(OS_WIN)
namespace base {
namespace debug {
@@ -122,6 +132,7 @@ BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial);
+#if !defined(OS_WIN)
// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
// Both frame pointers must come from __builtin_frame_address().
@@ -171,6 +182,7 @@ class BASE_EXPORT ScopedStackFrameLinker {
DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
};
+#endif // !defined(OS_WIN)
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
diff --git a/base/environment.cc b/base/environment.cc
index 534a7a8812..8b1d8fc312 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -42,7 +42,7 @@ class EnvironmentImpl : public Environment {
alternate_case_var = ToLowerASCII(variable_name);
else
return false;
- return GetVarImpl(alternate_case_var.c_str(), result);
+ return GetVarImpl(alternate_case_var, result);
}
bool SetVar(StringPiece variable_name,
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 353136c12b..61043ceb73 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -228,9 +228,9 @@ FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
}
// static
-std::vector<std::string> FeatureList::SplitFeatureListString(
- const std::string& input) {
- return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
+ base::StringPiece input) {
+ return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
}
// static
@@ -340,7 +340,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
const std::string& feature_list,
OverrideState overridden_state) {
for (const auto& value : SplitFeatureListString(feature_list)) {
- StringPiece feature_name(value);
+ StringPiece feature_name = value;
base::FieldTrial* trial = nullptr;
// The entry may be of the form FeatureName<FieldTrialName - in which case,
@@ -348,7 +348,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
std::string::size_type pos = feature_name.find('<');
if (pos != std::string::npos) {
feature_name.set(value.data(), pos);
- trial = base::FieldTrialList::Find(value.substr(pos + 1));
+ trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
}
RegisterOverride(feature_name, overridden_state, trial);
diff --git a/base/feature_list.h b/base/feature_list.h
index 09e8408aa8..c9f4a7b0c4 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -156,9 +156,10 @@ class BASE_EXPORT FeatureList {
// called after the singleton instance has been registered via SetInstance().
static FieldTrial* GetFieldTrial(const Feature& feature);
- // Splits a comma-separated string containing feature names into a vector.
- static std::vector<std::string> SplitFeatureListString(
- const std::string& input);
+ // Splits a comma-separated string containing feature names into a vector.
+ // The resulting pieces point to parts of |input|.
+ static std::vector<base::StringPiece> SplitFeatureListString(
+ base::StringPiece input);
// Initializes and sets an instance of FeatureList with feature overrides via
// command-line flags |enable_features| and |disable_features| if one has not
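Because the pieces returned by the new SplitFeatureListString() are views into |input|, the backing string must stay alive while they are used. A hedged sketch of the intended calling pattern (EnumerateFeatures is illustrative):

    #include <string>
    #include <vector>

    #include "base/feature_list.h"
    #include "base/strings/string_piece.h"

    void EnumerateFeatures(const std::string& enable_features) {
      // |pieces| point into |enable_features|, which must outlive them.
      std::vector<base::StringPiece> pieces =
          base::FeatureList::SplitFeatureListString(enable_features);
      for (base::StringPiece piece : pieces) {
        std::string owned = piece.as_string();  // copy only if storage is needed
      }
    }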
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index fb3b320ae9..5fbd294dcf 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -14,6 +14,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,7 +34,7 @@ struct Feature kFeatureOffByDefault {
};
std::string SortFeatureListString(const std::string& feature_list) {
- std::vector<std::string> features =
+ std::vector<base::StringPiece> features =
FeatureList::SplitFeatureListString(feature_list);
std::sort(features.begin(), features.end());
return JoinString(features, ",");
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 9f67f9bc49..21df995094 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -176,7 +176,7 @@ FilePath::FilePath() {
FilePath::FilePath(const FilePath& that) : path_(that.path_) {
}
-FilePath::FilePath(FilePath&& that) = default;
+FilePath::FilePath(FilePath&& that) noexcept = default;
FilePath::FilePath(StringPieceType path) {
path.CopyToString(&path_);
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 02846f6892..0be0ad0b10 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -184,7 +184,7 @@ class BASE_EXPORT FilePath {
// Constructs FilePath with the contents of |that|, which is left in valid but
// unspecified state.
- FilePath(FilePath&& that);
+ FilePath(FilePath&& that) noexcept;
// Replaces the contents with those of |that|, which is left in valid but
// unspecified state.
FilePath& operator=(FilePath&& that);
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
index 5a99aa0e81..d3e14a3787 100644
--- a/base/files/file_util_mac.mm
+++ b/base/files/file_util_mac.mm
@@ -7,8 +7,10 @@
#import <Foundation/Foundation.h>
#include <copyfile.h>
#include <stdlib.h>
+#include <string.h>
#include "base/files/file_path.h"
+#include "base/logging.h"
#include "base/mac/foundation_util.h"
#include "base/strings/string_util.h"
#include "base/threading/thread_restrictions.h"
@@ -24,10 +26,14 @@ bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
}
bool GetTempDir(base::FilePath* path) {
- // In order to facilitate hermetic runs on macOS, first check $TMPDIR.
- // NOTE: $TMPDIR is ALMOST ALWAYS set on macOS (unless the user un-set it).
- const char* env_tmpdir = getenv("TMPDIR");
+ // In order to facilitate hermetic runs on macOS, first check
+ // $MAC_CHROMIUM_TMPDIR. We check this instead of $TMPDIR because external
+ // programs currently set $TMPDIR with no effect, but when we respect it
+ // directly it can cause crashes (like crbug.com/698759).
+ const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
if (env_tmpdir) {
+ DCHECK_LT(strlen(env_tmpdir), 50u)
+ << "too-long TMPDIR causes socket name length issues.";
*path = base::FilePath(env_tmpdir);
return true;
}
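A hedged sketch of how a macOS test harness could opt into the hermetic temp-dir lookup added here; the variable name and the 50-character limit come from this change, the example path is an assumption.

    #include <stdlib.h>

    #include "base/files/file_path.h"
    #include "base/files/file_util.h"

    void UseHermeticTempDir() {
      // Keep the value well under 50 characters, per the DCHECK in GetTempDir().
      setenv("MAC_CHROMIUM_TMPDIR", "/tmp/chromium_test", /*overwrite=*/1);
      base::FilePath temp_dir;
      if (base::GetTempDir(&temp_dir)) {
        // |temp_dir| now points at the hermetic location.
      }
    }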
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
index bff8eb6a9b..cb4b82ca47 100644
--- a/base/mac/mach_port_broker_unittest.cc
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -95,21 +95,21 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
- broker_.AddPlaceholderForPid(test_child_process.Handle());
+ broker_.AddPlaceholderForPid(spawn_result.process.Handle());
broker_.GetLock().Release();
WaitForTaskPort();
- EXPECT_EQ(test_child_process.Handle(), received_process_);
+ EXPECT_EQ(spawn_result.process.Handle(), received_process_);
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
@@ -117,17 +117,18 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
+
broker_.GetLock().Release();
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
} // namespace base
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index 46bbd7ad85..039f255b15 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -3,9 +3,17 @@
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+
#include "base/threading/thread_collision_warner.h"
namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+AtomicRefCount g_cross_thread_ref_count_access_allow_count = 0;
+#endif
+
+} // namespace
namespace subtle {
@@ -13,8 +21,6 @@ bool RefCountedThreadSafeBase::HasOneRef() const {
return AtomicRefCountIsOne(&ref_count_);
}
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
-
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
@@ -25,6 +31,10 @@ RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
void RefCountedThreadSafeBase::AddRef() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such an object has to be made by AdoptRef or"
+ << " MakeShared.";
#endif
AtomicRefCountInc(&ref_count_);
}
@@ -43,6 +53,23 @@ bool RefCountedThreadSafeBase::Release() const {
return false;
}
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+ return sequence_checker_.CalledOnValidSequence() ||
+ !AtomicRefCountIsZero(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace subtle
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountInc(&g_cross_thread_ref_count_access_allow_count);
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountDec(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace base
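The counter above backs base::ScopedAllowCrossThreadRefCountAccess, declared in the header change below. A minimal sketch of the intended (legacy-only) usage, assuming an ordinary RefCounted type Foo:

    class Foo : public base::RefCounted<Foo> {
     private:
      friend class base::RefCounted<Foo>;
      ~Foo() = default;
    };

    void ReleaseOnOtherThread(scoped_refptr<Foo> foo) {
      // Legacy code that guarantees ordering by other means can temporarily
      // allow ref-count updates from a different sequence than the last one.
      base::ScopedAllowCrossThreadRefCountAccess allow_cross_thread_access;
      foo = nullptr;  // Release() without tripping the sequence DCHECK.
    }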
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 9dd09ad346..46088f2240 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -16,24 +16,40 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
+template <class T>
+class scoped_refptr;
+
namespace base {
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
namespace subtle {
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
class BASE_EXPORT RefCountedBase {
public:
bool HasOneRef() const { return ref_count_ == 1; }
protected:
- RefCountedBase()
- : ref_count_(0)
+ explicit RefCountedBase(StartRefCountFromZeroTag) {
#if DCHECK_IS_ON()
- , in_dtor_(false)
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+ sequence_checker_.DetachFromSequence();
#endif
- {
}
~RefCountedBase() {
@@ -42,7 +58,6 @@ class BASE_EXPORT RefCountedBase {
#endif
}
-
void AddRef() const {
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
@@ -50,32 +65,62 @@ class BASE_EXPORT RefCountedBase {
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such an object has to be made by AdoptRef or"
+ << " MakeShared.";
+ if (ref_count_ >= 1) {
+ DCHECK(CalledOnValidSequence());
+ }
#endif
+
++ref_count_;
}
// Returns true if the object should self-delete.
bool Release() const {
+ --ref_count_;
+
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
-#endif
- if (--ref_count_ == 0) {
-#if DCHECK_IS_ON()
+ if (ref_count_ == 0)
in_dtor_ = true;
+
+ if (ref_count_ >= 1)
+ DCHECK(CalledOnValidSequence());
+ if (ref_count_ == 1)
+ sequence_checker_.DetachFromSequence();
#endif
- return true;
- }
- return false;
+
+ return ref_count_ == 0;
}
private:
- mutable size_t ref_count_;
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
+#if DCHECK_IS_ON()
+ bool CalledOnValidSequence() const;
+#endif
+
+ mutable size_t ref_count_ = 0;
+
#if DCHECK_IS_ON()
- mutable bool in_dtor_;
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+ mutable SequenceChecker sequence_checker_;
#endif
DFAKE_MUTEX(add_release_);
@@ -88,7 +133,13 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool HasOneRef() const;
protected:
- RefCountedThreadSafeBase();
+ explicit RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit RefCountedThreadSafeBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+#endif
+ }
+
~RefCountedThreadSafeBase();
void AddRef() const;
@@ -97,8 +148,19 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool Release() const;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
mutable AtomicRefCount ref_count_ = 0;
#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
@@ -107,6 +169,27 @@ class BASE_EXPORT RefCountedThreadSafeBase {
} // namespace subtle
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this (callsites handling thread-safety through
+// locks should use RefCountedThreadSafe, since the overhead of its atomics is
+// negligible compared to locks anyway, and callsites doing explicit sequencing
+// should properly std::move() the ref to avoid hitting this check).
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+ ScopedAllowCrossThreadRefCountAccess();
+ ~ScopedAllowCrossThreadRefCountAccess();
+#else
+ ScopedAllowCrossThreadRefCountAccess() {}
+ ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
+
//
// A base class for reference counted classes. Otherwise, known as a cheap
// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
@@ -121,10 +204,45 @@ class BASE_EXPORT RefCountedThreadSafeBase {
//
// You should always make your destructor non-public, to avoid any code deleting
// the object accidently while there are references to it.
+//
+//
+// The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() to
+// the ref counted class to opt in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeShared(). base::MakeShared()
+// can be used to create both types of ref counted object.
+//
+// The motivations to use start-from-one ref count are:
+// - Start-from-one ref count doesn't need the ref count increment for the
+// first reference.
+// - It can detect an invalid object acquisition for a being-deleted object
+// that has zero ref count. That tends to happen with a custom deleter that
+// delays the deletion.
+// TODO(tzik): Implement invalid acquisition detection.
+// - Behavior parity to Blink's WTF::RefCounted, whose count starts from one.
+// And start-from-one ref count is a step to merge WTF::RefCounted into
+// base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ static constexpr ::base::subtle::StartRefCountFromOneTag \
+ kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
- RefCounted() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -140,7 +258,7 @@ class RefCounted : public subtle::RefCountedBase {
~RefCounted() = default;
private:
- DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+ DISALLOW_COPY_AND_ASSIGN(RefCounted);
};
// Forward declaration.
@@ -171,10 +289,17 @@ struct DefaultRefCountedThreadSafeTraits {
// private:
// friend class base::RefCountedThreadSafe<MyFoo>;
// ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- RefCountedThreadSafe() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ explicit RefCountedThreadSafe()
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedThreadSafeBase::AddRef();
@@ -214,6 +339,43 @@ class RefCountedData
~RefCountedData() = default;
};
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = typename std::decay<decltype(T::kRefCountPreference)>::type;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only if the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeShared(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
} // namespace base
//
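A hedged sketch of the start-from-one opt-in described in the comment block above; Buffer is an illustrative class, everything else comes from this change.

    class Buffer : public base::RefCounted<Buffer> {
     public:
      REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();  // ref count starts at one

      Buffer() = default;

     private:
      friend class base::RefCounted<Buffer>;
      ~Buffer() = default;
    };

    void CreateBuffers() {
      // Preferred: works for both start-from-zero and start-from-one types.
      scoped_refptr<Buffer> a = base::MakeShared<Buffer>();

      // Also valid for start-from-one types: adopt the initial reference.
      scoped_refptr<Buffer> b = base::AdoptRef(new Buffer);

      // make_scoped_refptr(new Buffer) would DCHECK here, because the first
      // reference to a start-from-one type must come from AdoptRef/MakeShared.
    }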
@@ -385,6 +547,11 @@ class scoped_refptr {
T* ptr_ = nullptr;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
// Friend required for move constructors that set r.ptr_ to null.
template <typename U>
friend class scoped_refptr;
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 65c15d26ab..515f4227ea 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/test/gtest_util.h"
#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -122,6 +123,16 @@ scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
return self_assign;
}
+class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ InitialRefCountIsOne() {}
+
+ private:
+ friend class base::RefCounted<InitialRefCountIsOne>;
+ ~InitialRefCountIsOne() {}
+};
} // end namespace
@@ -528,3 +539,30 @@ TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
scoped_refptr<Other> other2(other);
EXPECT_EQ(other2, Overloaded(std::move(other)));
}
+
+TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_TRUE(obj->HasOneRef());
+ obj = nullptr;
+
+ scoped_refptr<InitialRefCountIsOne> obj2 =
+ base::AdoptRef(new InitialRefCountIsOne);
+ EXPECT_TRUE(obj2->HasOneRef());
+ obj2 = nullptr;
+
+ scoped_refptr<Other> obj3 = base::MakeShared<Other>();
+ EXPECT_TRUE(obj3->HasOneRef());
+ obj3 = nullptr;
+}
+
+TEST(RefCountedDeathTest, TestAdoptRef) {
+ EXPECT_DCHECK_DEATH(make_scoped_refptr(new InitialRefCountIsOne));
+
+ InitialRefCountIsOne* ptr = nullptr;
+ EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
+
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
+}
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
new file mode 100644
index 0000000000..5022779214
--- /dev/null
+++ b/base/memory/ref_counted_unittest.nc
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class InitialRefCountIsZero : public base::RefCounted<InitialRefCountIsZero> {
+ public:
+ InitialRefCountIsZero() {}
+ private:
+ friend class base::RefCounted<InitialRefCountIsZero>;
+ ~InitialRefCountIsZero() {}
+};
+
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static_assert failed \"Use AdoptRef only if the reference count starts from one\.\""]
+
+void WontCompile() {
+ AdoptRef(new InitialRefCountIsZero());
+}
+
+#endif
+
+} // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
index c7d20ec049..4ccee89deb 100644
--- a/base/memory/shared_memory_mac_unittest.cc
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -204,7 +204,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// similar tests.
service_name_ = CreateRandomServiceName();
server_port_.reset(BecomeMachServer(service_name_.c_str()));
- child_process_ = SpawnChild(name);
+ spawn_child_ = SpawnChild(name);
client_port_.reset(ReceiveMachPort(server_port_.get()));
}
@@ -221,7 +221,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// process.
mac::ScopedMachSendRight client_port_;
- base::Process child_process_;
+ base::SpawnChildResult spawn_child_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
};
@@ -237,7 +237,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
@@ -277,7 +277,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
SendMachPort(
client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index 19dedccb47..d87fad01d3 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -682,16 +682,16 @@ TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
// Start |kNumTasks| processes, each of which atomically increments the first
// word by 1.
- Process processes[kNumTasks];
+ SpawnChildResult children[kNumTasks];
for (int index = 0; index < kNumTasks; ++index) {
- processes[index] = SpawnChild("SharedMemoryTestMain");
- ASSERT_TRUE(processes[index].IsValid());
+ children[index] = SpawnChild("SharedMemoryTestMain");
+ ASSERT_TRUE(children[index].process.IsValid());
}
// Check that each process exited correctly.
int exit_code = 0;
for (int index = 0; index < kNumTasks; ++index) {
- EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+ EXPECT_TRUE(children[index].process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
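For reference, the SpawnChildResult migration applied throughout these multiprocess tests, sketched in isolation; SomeMultiProcessTest and the child name are illustrative, while the .process accessor comes from this uprev.

    TEST_F(SomeMultiProcessTest, ChildExitsCleanly) {
      // SpawnChild() now returns a SpawnChildResult wrapping the Process.
      base::SpawnChildResult spawn_result = SpawnChild("ChildMain");
      ASSERT_TRUE(spawn_result.process.IsValid());

      int exit_code = -1;
      ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
          TestTimeouts::action_timeout(), &exit_code));
      EXPECT_EQ(0, exit_code);
    }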
diff --git a/base/memory/singleton_objc.h b/base/memory/singleton_objc.h
deleted file mode 100644
index 6df3f7757e..0000000000
--- a/base/memory/singleton_objc.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Support for using the Singleton<T> pattern with Objective-C objects. A
-// SingletonObjC is the same as a Singleton, except the default traits are
-// appropriate for Objective-C objects. A typical Objective-C object of type
-// NSExampleType can be maintained as a singleton and accessed with:
-//
-// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
-//
-// The first time this is used, it will create exampleSingleton as the result
-// of [[NSExampleType alloc] init]. Subsequent calls will return the same
-// NSExampleType* object. The object will be released by calling
-// -[NSExampleType release] when Singleton's atexit routines run
-// (see singleton.h).
-//
-// For Objective-C objects initialized through means other than the
-// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
-// as needed:
-//
-// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
-// static Foo* New() {
-// return [[Foo alloc] initWithName:@"selecty"];
-// }
-// };
-// ...
-// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
-
-#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
-#define BASE_MEMORY_SINGLETON_OBJC_H_
-
-#import <Foundation/Foundation.h>
-#include "base/memory/singleton.h"
-
-// Singleton traits usable to manage traditional Objective-C objects, which
-// are instantiated by sending |alloc| and |init| messages, and are deallocated
-// in a memory-managed environment when their retain counts drop to 0 by
-// sending |release| messages.
-template<typename Type>
-struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
- static Type* New() {
- return [[Type alloc] init];
- }
-
- static void Delete(Type* object) {
- [object release];
- }
-};
-
-// Exactly like Singleton, but without the DefaultSingletonObjCTraits as the
-// default trait class. This makes it straightforward for Objective-C++ code
-// to hold Objective-C objects as singletons.
-template<typename Type,
- typename Traits = DefaultSingletonObjCTraits<Type>,
- typename DifferentiatingType = Type>
-class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
-};
-
-#endif // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index fed1494c04..1eba532615 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -5,6 +5,7 @@
#include "base/message_loop/incoming_task_queue.h"
#include <limits>
+#include <utility>
#include "base/location.h"
#include "base/message_loop/message_loop.h"
@@ -60,16 +61,17 @@ IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
bool IncomingTaskQueue::AddToIncomingQueue(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool nestable) {
+ DCHECK(task);
DLOG_IF(WARNING,
delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
- PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
- nestable);
+ PendingTask pending_task(from_here, std::move(task),
+ CalculateDelayedRuntime(delay), nestable);
#if defined(OS_WIN)
// We consider the task needs a high resolution timer if the delay is
// more than 0 and less than 32ms. This caps the relative error to
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index 157e47fa14..17bea07674 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -35,7 +36,7 @@ class BASE_EXPORT IncomingTaskQueue
// returns false. In all cases, the ownership of |task| is transferred to the
// called method.
bool AddToIncomingQueue(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool nestable);
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index bfef261c38..6b4765bd1b 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -320,6 +320,8 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Runs the specified PendingTask.
void RunTask(PendingTask* pending_task);
+ bool nesting_allowed() const { return allow_nesting_; }
+
// Disallow nesting. After this is called, running a nested RunLoop or calling
// Add/RemoveNestingObserver() on this MessageLoop will crash.
void DisallowNesting() { allow_nesting_ = false; }
diff --git a/base/message_loop/message_loop_task_runner.cc b/base/message_loop/message_loop_task_runner.cc
index c9b5ffe3f7..aece087b76 100644
--- a/base/message_loop/message_loop_task_runner.cc
+++ b/base/message_loop/message_loop_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_loop_task_runner.h"
+#include <utility>
+
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/incoming_task_queue.h"
@@ -24,18 +26,20 @@ void MessageLoopTaskRunner::BindToCurrentThread() {
bool MessageLoopTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, true);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ true);
}
bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, false);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ false);
}
bool MessageLoopTaskRunner::RunsTasksOnCurrentThread() const {
diff --git a/base/message_loop/message_loop_task_runner.h b/base/message_loop/message_loop_task_runner.h
index 5e70b128b2..99a96a711e 100644
--- a/base/message_loop/message_loop_task_runner.h
+++ b/base/message_loop/message_loop_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -31,10 +32,10 @@ class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
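Since these overrides now take OnceClosure by value, a posted task is moved into the incoming queue instead of being copied. A small hedged sketch of a caller; LogHello is illustrative, and it relies on base::Bind's result converting to OnceClosure.

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/single_thread_task_runner.h"
    #include "base/threading/thread_task_runner_handle.h"
    #include "base/time/time.h"

    void LogHello() {}

    void PostHello() {
      scoped_refptr<base::SingleThreadTaskRunner> runner =
          base::ThreadTaskRunnerHandle::Get();
      // Ownership of the closure transfers to the task queue.
      runner->PostDelayedTask(FROM_HERE, base::Bind(&LogHello),
                              base::TimeDelta::FromMilliseconds(10));
    }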
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 54551daadd..d403c70700 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -127,7 +127,7 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
RunLoop().Run();
EXPECT_EQ(task_thread_.message_loop(), task_run_on);
- EXPECT_EQ(current_loop_.get(), task_deleted_on);
+ EXPECT_EQ(task_thread_.message_loop(), task_deleted_on);
EXPECT_EQ(current_loop_.get(), reply_run_on);
EXPECT_EQ(current_loop_.get(), reply_deleted_on);
EXPECT_LT(task_delete_order, reply_delete_order);
@@ -200,7 +200,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
EXPECT_LT(task_delete_order, reply_delete_order);
}
-TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+TEST_F(MessageLoopTaskRunnerTest,
+ PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
// Annotate the scope as having memory leaks to suppress heapchecker reports.
ANNOTATE_SCOPED_MEMORY_LEAK;
MessageLoop* task_run_on = NULL;
@@ -237,11 +238,13 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
MessageLoop* task_loop = task_thread_.message_loop();
task_thread_.Stop();
+ // Even though the reply task runner is already gone, the original task
+ // should already have been deleted on the task thread. However, the reply,
+ // which hasn't executed yet, should leak to avoid thread-safety issues.
EXPECT_EQ(task_loop, task_run_on);
- ASSERT_FALSE(task_deleted_on);
+ EXPECT_EQ(task_loop, task_deleted_on);
EXPECT_FALSE(reply_run_on);
ASSERT_FALSE(reply_deleted_on);
- EXPECT_EQ(task_delete_order, reply_delete_order);
// The PostTaskAndReplyRelay is leaked here. Even if we had a reference to
// it, we cannot just delete it because PostTaskAndReplyRelay's destructor
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 14fe1ee391..9d771d5ecb 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -12,6 +12,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_test.h"
@@ -93,16 +94,19 @@ void AbortMessagePump() {
static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
}
-void RunTest_AbortDontRunMoreTasks(bool delayed) {
- MessageLoop loop(MessageLoop::TYPE_JAVA);
-
+void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
- java_thread.reset(new android::JavaHandlerThreadForTesting(
- "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
- &test_done_event));
+ std::unique_ptr<android::JavaHandlerThread> java_thread;
+ if (init_java_first) {
+ java_thread =
+ android::JavaHandlerThreadForTesting::CreateJavaFirst(&test_done_event);
+ } else {
+ java_thread = android::JavaHandlerThreadForTesting::Create(
+ "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
+ &test_done_event);
+ }
java_thread->Start();
if (delayed) {
@@ -121,10 +125,19 @@ void RunTest_AbortDontRunMoreTasks(bool delayed) {
}
TEST(MessageLoopTest, JavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(false);
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(true);
+ constexpr bool delayed = true;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+TEST(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = true;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
#endif // defined(OS_ANDROID)
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 78473761dd..d39972a8a1 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -41,10 +41,9 @@
// delete and reused. The value in |sample| must be strictly less than
// |enum_max|.
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
- INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
- name, sample, enum_max, \
- base::HistogramBase::kUmaTargetedHistogramFlag)
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Histogram for boolean values.
@@ -68,14 +67,15 @@
// Sample usage:
// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
- UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Used for capturing basic percentages. This will be 100 buckets of size 1.
// Sample usage:
// UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
- UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+ UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
//------------------------------------------------------------------------------
// Count histograms. These are used for collecting numeric data. Note that we
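A hedged sketch of the intended split after this change: UMA_HISTOGRAM_EXACT_LINEAR for plain integral counts and UMA_HISTOGRAM_ENUMERATION for enum values (scoped enums included); the histogram names and DownloadState are illustrative.

    enum class DownloadState {
      kStarted,
      kCompleted,
      kFailed,
      kMaxValue,  // illustrative sentinel used as the boundary
    };

    void RecordMetrics(int percent_done, DownloadState state) {
      // Integral data only; passing an enum now fails a static_assert.
      UMA_HISTOGRAM_EXACT_LINEAR("Test.Download.Progress", percent_done, 101);

      // Enum data; |sample| and |boundary| must be the same enum type and are
      // cast to HistogramBase::Sample internally.
      UMA_HISTOGRAM_ENUMERATION("Test.Download.State", state,
                                DownloadState::kMaxValue);
    }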
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index 53e4f11b75..c107a4729d 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -5,6 +5,11 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
@@ -96,17 +101,42 @@
base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
// This is a helper macro used by other macros and shouldn't be used directly.
-// For an enumeration with N items, recording values in the range [0, N - 1],
-// this macro creates a linear histogram with N + 1 buckets:
-// [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
+// The bucketing scheme is linear with a bucket size of 1. For N items,
+// recording values in the range [0, N - 1] creates a linear histogram with N +
+// 1 buckets:
+// [0, 1), [1, 2), ..., [N - 1, N)
+// and an overflow bucket [N, infinity).
+//
// Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely append new entries to the
-// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
-// overflow bucket, and so the maximal value (N - 1) would be emitted to this
-// overflow bucket. But, if an additional enumerated value were later added, the
-// bucket label for the value (N - 1) would change to [N - 1, N), which would
-// result in different versions of Chrome using different bucket labels for
-// identical data.
+// This allows future versions of Chrome to safely increase the boundary size.
+// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
+// and so the maximal value (N - 1) would be emitted to this overflow bucket.
+// But, if an additional value were later added, the bucket label for
+// the value (N - 1) would change to [N - 1, N), which would result in different
+// versions of Chrome using different bucket labels for identical data.
+#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary, \
+ flag) \
+ do { \
+ static_assert(!std::is_enum<decltype(sample)>::value, \
+ "|sample| should not be an enum type!"); \
+ static_assert(!std::is_enum<decltype(boundary)>::value, \
+ "|boundary| should not be an enum type!"); \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), \
+ base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+ flag)); \
+ } while (0)
+
+// Similar to the previous macro but intended for enumerations. This delegates
+// the work to the previous macro, but supports scoped enumerations as well by
+// forcing an explicit cast to the HistogramBase::Sample integral type.
+//
+// Note the range checks verify two separate issues:
+// - that the declared enum max isn't out of range of HistogramBase::Sample
+// - that the declared enum max is > 0
+//
+// TODO(dcheng): This should assert that the passed in types are actually enum
+// types.
#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
do { \
static_assert( \
@@ -115,9 +145,14 @@
std::is_same<std::remove_const<decltype(sample)>::type, \
std::remove_const<decltype(boundary)>::type>::value, \
"|sample| and |boundary| shouldn't be of different enums"); \
- STATIC_HISTOGRAM_POINTER_BLOCK( \
- name, Add(sample), base::LinearHistogram::FactoryGet( \
- name, 1, boundary, boundary + 1, flag)); \
+ static_assert( \
+ static_cast<uintmax_t>(boundary) < \
+ static_cast<uintmax_t>( \
+ std::numeric_limits<base::HistogramBase::Sample>::max()), \
+ "|boundary| is out of range of HistogramBase::Sample"); \
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, static_cast<base::HistogramBase::Sample>(sample), \
+ static_cast<base::HistogramBase::Sample>(boundary), flag); \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
diff --git a/base/metrics/histogram_macros_unittest.cc b/base/metrics/histogram_macros_unittest.cc
index c5991619a0..33a9c6e5b2 100644
--- a/base/metrics/histogram_macros_unittest.cc
+++ b/base/metrics/histogram_macros_unittest.cc
@@ -15,4 +15,35 @@ TEST(ScopedHistogramTimer, TwoTimersOneScope) {
SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
}
+// Compile tests for UMA_HISTOGRAM_ENUMERATION with the three different types it
+// accepts:
+// - integral types
+// - unscoped enums
+// - scoped enums
+TEST(HistogramMacro, IntegralPseudoEnumeration) {
+ UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
+}
+
+TEST(HistogramMacro, UnscopedEnumeration) {
+ enum TestEnum : char {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.UnscopedEnumeration", SECOND_VALUE,
+ MAX_ENTRIES);
+}
+
+TEST(HistogramMacro, ScopedEnumeration) {
+ enum class TestEnum {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::SECOND_VALUE,
+ TestEnum::MAX_ENTRIES);
+}
+
} // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 29910036c7..5f44b67311 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -785,24 +785,6 @@ void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
#endif // !defined(OS_NACL)
// static
-void GlobalHistogramAllocator::CreateWithSharedMemory(
- std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t /*id*/,
- StringPiece /*name*/) {
- if ((!memory->memory() && !memory->Map(size)) ||
- !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
- NOTREACHED();
- return;
- }
-
- DCHECK_LE(memory->mapped_size(), size);
- Set(WrapUnique(
- new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
- std::move(memory), 0, StringPiece(), /*readonly=*/false))));
-}
-
-// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
const SharedMemoryHandle& handle,
size_t size) {
@@ -905,6 +887,8 @@ bool GlobalHistogramAllocator::WriteToPersistentLocation() {
}
void GlobalHistogramAllocator::DeletePersistentLocation() {
+ memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+
#if defined(OS_NACL)
NOTREACHED();
#else
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 2eb28dfaf5..851d7ef5a4 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -431,15 +431,6 @@ class BASE_EXPORT GlobalHistogramAllocator
FilePath* out_active_path);
#endif
- // Create a global allocator using a block of shared |memory| of the
- // specified |size|. The allocator takes ownership of the shared memory
- // and releases it upon destruction, though the memory will continue to
- // live if other processes have access to it.
- static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t id,
- StringPiece name);
-
// Create a global allocator using a block of shared memory accessed
// through the given |handle| and |size|. The allocator takes ownership
// of the handle and closes it upon destruction, though the memory will
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index f70b396917..abcc532242 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -18,6 +18,7 @@
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/threading/thread_restrictions.h"
namespace {
@@ -32,7 +33,7 @@ const uint32_t kGlobalCookie = 0x408305DC;
// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initalized.
-const uint32_t kGlobalVersion = 1;
+const uint32_t kGlobalVersion = 2;
// Constant values placed in the block headers to indicate its state.
const uint32_t kBlockCookieFree = 0;
@@ -43,7 +44,7 @@ const uint32_t kBlockCookieAllocated = 0xC8799269;
// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
// types rather than combined bitfield.
-// Flags stored in the flags_ field of the SharedMetaData structure below.
+// Flags stored in the flags_ field of the SharedMetadata structure below.
enum : int {
kFlagCorrupt = 1 << 0,
kFlagFull = 1 << 1
@@ -100,7 +101,9 @@ struct PersistentMemoryAllocator::BlockHeader {
};
// The shared metadata exists once at the top of the memory segment to
-// describe the state of the allocator to all processes.
+// describe the state of the allocator to all processes. The size of this
+// structure must be a multiple of 64 bits to ensure compatibility between
+// architectures.
struct PersistentMemoryAllocator::SharedMetadata {
uint32_t cookie; // Some value that indicates complete initialization.
uint32_t size; // Total size of memory segment.
@@ -108,10 +111,15 @@ struct PersistentMemoryAllocator::SharedMetadata {
uint32_t version; // Version code so upgrades don't break.
uint64_t id; // Arbitrary ID number given by creator.
uint32_t name; // Reference to stored name string.
+ uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
// Above is read-only after first construction. Below may be changed and
// so must be marked "volatile" to provide correct inter-process behavior.
+ // State of the memory, plus some padding to keep alignment.
+ volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
+ uint8_t padding2[3];
+
// Bitfield of information flags. Access to this should be done through
// the CheckFlag() and SetFlag() methods defined above.
volatile std::atomic<uint32_t> flags;
@@ -121,6 +129,7 @@ struct PersistentMemoryAllocator::SharedMetadata {
// The "iterable" queue is an M&S Queue as described here, append-only:
// https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+ // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
volatile BlockHeader queue; // Empty block for linked-list head/tail.
};
@@ -312,7 +321,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
// definitions and so cannot be moved to the global scope.
static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
"struct is not portable across different natural word widths");
- static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+ static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
"struct is not portable across different natural word widths");
static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
@@ -384,12 +393,13 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
if (name_cstr)
memcpy(name_cstr, name.data(), name.length());
}
+
+ shared_meta()->memory_state.store(MEMORY_INITIALIZED,
+ std::memory_order_release);
} else {
- if (shared_meta()->size == 0 ||
- shared_meta()->version == 0 ||
+ if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
- shared_meta()->tailptr == 0 ||
- shared_meta()->queue.cookie == 0 ||
+ shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
SetCorrupt();
}
@@ -470,6 +480,19 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
HistogramBase::kUmaTargetedHistogramFlag);
}
+void PersistentMemoryAllocator::Flush(bool sync) {
+ FlushPartial(used(), sync);
+}
+
+void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
+ shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
+ FlushPartial(sizeof(SharedMetadata), false);
+}
+
+uint8_t PersistentMemoryAllocator::GetMemoryState() const {
+ return shared_meta()->memory_state.load(std::memory_order_relaxed);
+}
+
size_t PersistentMemoryAllocator::used() const {
return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
mem_size_);
@@ -816,8 +839,12 @@ const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
+ // Handle special cases.
+ if (ref == kReferenceQueue && queue_ok)
+ return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+
// Validation of parameters.
- if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+ if (ref < sizeof(SharedMetadata))
return nullptr;
if (ref % kAllocAlignment != 0)
return nullptr;
@@ -827,17 +854,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
// Validation of referenced block-header.
if (!free_ok) {
- uint32_t freeptr = std::min(
- shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
- if (ref + size > freeptr)
- return nullptr;
const volatile BlockHeader* const block =
reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
- if (block->size < size)
+ if (block->cookie != kBlockCookieAllocated)
return nullptr;
- if (ref + block->size > freeptr)
+ if (block->size < size)
return nullptr;
- if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+ if (ref + block->size > mem_size_)
return nullptr;
if (type_id != 0 &&
block->type_id.load(std::memory_order_relaxed) != type_id) {
@@ -849,6 +872,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}
+void PersistentMemoryAllocator::FlushPartial(size_t /*length*/, bool /*sync*/) {
+ // Generally there is nothing to do as every write is done through volatile
+ // memory with atomic instructions to guarantee consistency. This (virtual)
+ // method exists so that derivced classes can do special things, such as
+ // tell the OS to write changes to disk now rather than when convenient.
+}
+
void PersistentMemoryAllocator::RecordError(int error) const {
if (errors_histogram_)
errors_histogram_->Add(error);
@@ -989,7 +1019,12 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
id,
name,
read_only),
- mapped_file_(std::move(file)) {}
+ mapped_file_(std::move(file)) {
+ // Ensure the disk-copy of the data reflects the fully-initialized memory as
+ // there is no guarantee as to what order the pages might be auto-flushed by
+ // the OS in the future.
+ Flush(true);
+}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
@@ -999,6 +1034,33 @@ bool FilePersistentMemoryAllocator::IsFileAcceptable(
bool read_only) {
return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}
+
+void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+ if (sync)
+ ThreadRestrictions::AssertIOAllowed();
+ if (IsReadonly())
+ return;
+
+#if defined(OS_WIN)
+ // Windows doesn't support a synchronous flush.
+ BOOL success = ::FlushViewOfFile(data(), length);
+ DPCHECK(success);
+#elif defined(OS_MACOSX)
+ // On OSX, "invalidate" removes all cached pages, forcing a re-read from
+ // disk. That's not applicable to "flush" so omit it.
+ int result =
+ ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
+ DCHECK_NE(EINVAL, result);
+#elif defined(OS_POSIX)
+ // On POSIX, "invalidate" forces _other_ processes to recognize what has
+ // been written to disk and so is applicable to "flush".
+ int result = ::msync(const_cast<void*>(data()), length,
+ MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
+ DCHECK_NE(EINVAL, result);
+#else
+#error Unsupported OS.
+#endif
+}
#endif // !defined(OS_NACL)
} // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index b38f284ff4..94a7744bfb 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -96,6 +96,29 @@ class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
+ // These states are used to indicate the overall condition of the memory
+ // segment irrespective of what is stored within it. Because the data is
+ // often persistent and thus needs to be readable by different versions of
+ // a program, these values are fixed and can never change.
+ enum MemoryState : uint8_t {
+ // Persistent memory starts all zeros and so shows "uninitialized".
+ MEMORY_UNINITIALIZED = 0,
+
+ // The header has been written and the memory is ready for use.
+ MEMORY_INITIALIZED = 1,
+
+ // The data should be considered deleted. This would be set when the
+    // allocator is being cleaned up. If file-backed, the file is likely to
+    // be deleted, but since deletion can fail for a variety of reasons, this
+    // extra status lets a future reader realize what should have happened.
+ MEMORY_DELETED = 2,
+
+ // Outside code can create states starting with this number; these too
+    // must never change between code versions.
+ MEMORY_USER_DEFINED = 100,
+ };
+
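As a rough editorial sketch (not part of this patch) of how a reader of a persistent segment might act on the new state; the helper name is hypothetical:

    // Skip segments whose previous owner marked the data as deleted.
    void MaybeSkipSegment(base::PersistentMemoryAllocator* allocator) {
      if (allocator->GetMemoryState() ==
          base::PersistentMemoryAllocator::MEMORY_DELETED) {
        // The backing file may still exist (deletion can fail), but the
        // contents should be treated as gone.
        return;
      }
      // Otherwise iterate records as usual.
    }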
// Iterator for going through all iterable memory records in an allocator.
// Like the allocator itself, iterators are lock-free and thread-secure.
// That means that multiple threads can share an iterator and the same
@@ -280,7 +303,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
const char* Name() const;
// Is this segment open only for read?
- bool IsReadonly() { return readonly_; }
+ bool IsReadonly() const { return readonly_; }
+
+ // Manage the saved state of the memory.
+ void SetMemoryState(uint8_t memory_state);
+ uint8_t GetMemoryState() const;
// Create internal histograms for tracking memory use and allocation sizes
// for allocator of |name| (which can simply be the result of Name()). This
@@ -293,6 +320,17 @@ class BASE_EXPORT PersistentMemoryAllocator {
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
+ // Flushes the persistent memory to any backing store. This typically does
+ // nothing but is used by the FilePersistentMemoryAllocator to inform the
+ // OS that all the data should be sent to the disk immediately. This is
+  // useful in the rare case where something has just been stored that needs
+  // to survive a hard shutdown of the machine, such as a power failure.
+  // The |sync| parameter indicates whether this call should block until the
+  // flush is complete, but it is only advisory and may or may not have an
+  // effect depending on the capabilities of the OS. Synchronous flushes are
+  // allowed only from threads that are allowed to do I/O.
+ void Flush(bool sync);
+
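A minimal usage sketch for the new Flush() method, assuming |allocator| is file-backed; MyRecord and kMyRecordType are placeholders for a caller-defined record:

    PersistentMemoryAllocator::Reference ref =
        allocator->Allocate(sizeof(MyRecord), kMyRecordType);
    if (ref) {
      allocator->MakeIterable(ref);
      // Block until the record reaches the backing store; synchronous flushes
      // are permitted only on threads that may perform I/O.
      allocator->Flush(/*sync=*/true);
    }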
// Direct access to underlying memory segment. If the segment is shared
// across threads or processes, reading data through these values does
// not guarantee consistency. Use with care. Do not write.
@@ -580,6 +618,9 @@ class BASE_EXPORT PersistentMemoryAllocator {
uint64_t id, base::StringPiece name,
bool readonly);
+ // Implementation of Flush that accepts how much to flush.
+ virtual void FlushPartial(size_t length, bool sync);
+
volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
const MemoryType mem_type_; // Type of memory allocation.
const uint32_t mem_size_; // Size of entire memory segment.
@@ -715,6 +756,10 @@ class BASE_EXPORT FilePersistentMemoryAllocator
// the rest.
static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+ protected:
+ // PersistentMemoryAllocator:
+ void FlushPartial(size_t length, bool sync) override;
+
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index d12e00f6d6..c3027ecc12 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -100,6 +100,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_TRUE(allocator_->used_histogram_);
EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
allocator_->used_histogram_->histogram_name());
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
+ allocator_->GetMemoryState());
// Get base memory info for later comparison.
PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -254,6 +256,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
allocator_->Delete(obj2);
PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
+
+ // Ensure that the memory state can be set.
+ allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
+ allocator_->GetMemoryState());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -691,8 +698,8 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
const size_t mmlength = mmfile->length();
EXPECT_GE(meminfo1.total, mmlength);
- FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
- EXPECT_TRUE(file.IsReadonly());
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
+ EXPECT_FALSE(file.IsReadonly());
EXPECT_EQ(TEST_ID, file.Id());
EXPECT_FALSE(file.IsFull());
EXPECT_FALSE(file.IsCorrupt());
@@ -713,6 +720,11 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
EXPECT_GE(meminfo1.free, meminfo2.free);
EXPECT_EQ(mmlength, meminfo2.total);
EXPECT_EQ(0U, meminfo2.free);
+
+ // There's no way of knowing if Flush actually does anything but at least
+ // verify that it runs without CHECK violations.
+ file.Flush(false);
+ file.Flush(true);
}
TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
diff --git a/base/native_library.h b/base/native_library.h
index 02eae1d508..e2b9ca7e6d 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -91,16 +91,6 @@ BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
const NativeLibraryOptions& options,
NativeLibraryLoadError* error);
-#if defined(OS_WIN)
-// Loads a native library from disk. Release it with UnloadNativeLibrary when
-// you're done.
-// This function retrieves the LoadLibrary function exported from kernel32.dll
-// and calls it instead of directly calling the LoadLibrary function via the
-// import table.
-BASE_EXPORT NativeLibrary LoadNativeLibraryDynamically(
- const FilePath& library_path);
-#endif // OS_WIN
-
// Unloads a native library.
BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
index 1456129324..6f50de8b86 100644
--- a/base/post_task_and_reply_with_result_internal.h
+++ b/base/post_task_and_reply_with_result_internal.h
@@ -16,16 +16,15 @@ namespace internal {
// Adapts a function that produces a result via a return value to
// one that returns via an output parameter.
template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
- ReturnType* result) {
- *result = func.Run();
+void ReturnAsParamAdapter(OnceCallback<ReturnType()> func, ReturnType* result) {
+ *result = std::move(func).Run();
}
// Adapts a T* result to a callback that expects a T.
template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+void ReplyAdapter(OnceCallback<void(ReplyArgType)> callback,
TaskReturnType* result) {
- callback.Run(std::move(*result));
+ std::move(callback).Run(std::move(*result));
}
} // namespace internal
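These adapters back the public PostTaskAndReplyWithResult() helper declared in base/task_runner_util.h; a hedged usage sketch, with ComputeValue/UseValue as placeholder functions and |task_runner| an existing scoped_refptr<TaskRunner>:

    int ComputeValue();        // Runs on |task_runner|.
    void UseValue(int value);  // Runs back on the calling sequence.

    base::PostTaskAndReplyWithResult(task_runner.get(), FROM_HERE,
                                     base::Bind(&ComputeValue),
                                     base::Bind(&UseValue));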
diff --git a/base/process/launch.h b/base/process/launch.h
index be8f6e73b9..99a7280cb3 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -262,6 +262,11 @@ BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
std::string* output);
+// Like the above POSIX-specific version of GetAppOutput, but also includes
+// stderr.
+BASE_EXPORT bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output);
+
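An illustrative call to the new argv-based overload (the command is only an example):

    std::vector<std::string> argv = {"ls", "-l", "/tmp"};
    std::string output;
    // Captures stdout and stderr together; true only on a clean exit.
    bool ok = base::GetAppOutputAndError(argv, &output);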
// A version of |GetAppOutput()| which also returns the exit code of the
// executed command. Returns true if the application runs and exits cleanly. If
// this is the case the exit code of the application is available in
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 19effa2ce5..1c4df40665 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -668,6 +668,14 @@ bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
return result && exit_code == EXIT_SUCCESS;
}
+bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output) {
+ int exit_code;
+ bool result =
+ GetAppOutputInternal(argv, nullptr, true, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
+}
+
bool GetAppOutputWithExitCode(const CommandLine& cl,
std::string* output,
int* exit_code) {
diff --git a/base/process/process_info_unittest.cc b/base/process/process_info_unittest.cc
new file mode 100644
index 0000000000..a757774fda
--- /dev/null
+++ b/base/process/process_info_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if !defined(OS_IOS)
+TEST(ProcessInfoTest, CreationTime) {
+ Time creation_time = CurrentProcessInfo::CreationTime();
+ ASSERT_FALSE(creation_time.is_null());
+}
+#endif // !defined(OS_IOS)
+
+} // namespace base
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a38930a208..ad555aedff 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -12,6 +12,11 @@
namespace base {
+SystemMemoryInfoKB::SystemMemoryInfoKB() = default;
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+ default;
+
SystemMetrics::SystemMetrics() {
committed_memory_ = 0;
}
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 71d6042e00..1562e7b156 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -27,6 +27,10 @@
#include "base/process/port_provider_mac.h"
#endif
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
namespace base {
#if defined(OS_WIN)
@@ -63,8 +67,12 @@ struct IoCounters {
// shareable: 0
// swapped Pages swapped out to zram.
//
-// On OS X: TODO(thakis): Revise.
-// priv: Memory.
+// On macOS:
+// priv: Resident size (RSS) including shared memory. Warning: This
+// does not include compressed size and does not always
+// accurately account for shared memory due to things like
+// copy-on-write. TODO(erikchen): Revamp this with something
+// more accurate.
// shared: 0
// shareable: 0
//
@@ -154,10 +162,13 @@ class BASE_EXPORT ProcessMetrics {
// system call.
bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
WorkingSetKBytes* ws_usage) const;
- // Returns private, shared, and total resident bytes.
+ // Returns private, shared, and total resident bytes. |locked_bytes| refers to
+ // bytes that must stay resident. |locked_bytes| only counts bytes locked by
+ // this task, not bytes locked by the kernel.
bool GetMemoryBytes(size_t* private_bytes,
size_t* shared_bytes,
- size_t* resident_bytes) const;
+ size_t* resident_bytes,
+ size_t* locked_bytes) const;
#endif
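Unwanted outputs may be passed as null, which is how the patched GetWorkingSetSize() and the new LockedBytes test use this call. A sketch, with |metrics| assumed to come from ProcessMetrics::CreateProcessMetrics():

    size_t locked_bytes = 0;
    if (metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &locked_bytes)) {
      // |locked_bytes| covers memory wired by this task, not by the kernel.
    }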
// Returns the CPU usage in percent since the last time this method or
@@ -188,6 +199,10 @@ class BASE_EXPORT ProcessMetrics {
// Returns the number of file descriptors currently open by the process, or
// -1 on error.
int GetOpenFdCount() const;
+
+ // Returns the soft limit of file descriptors that can be opened by the
+ // process, or -1 on error.
+ int GetOpenFdSoftLimit() const;
#endif // defined(OS_LINUX)
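A small illustrative pairing of the two Linux fd accessors (the margin of 100 is arbitrary):

    int open_fds = metrics->GetOpenFdCount();
    int fd_limit = metrics->GetOpenFdSoftLimit();
    if (open_fds >= 0 && fd_limit > 0 && open_fds > fd_limit - 100) {
      // Close to the soft limit; either call returns -1 on error.
    }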
private:
@@ -209,7 +224,11 @@ class BASE_EXPORT ProcessMetrics {
int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
#endif
+#if defined(OS_WIN)
+ win::ScopedHandle process_;
+#else
ProcessHandle process_;
+#endif
int processor_count_;
@@ -264,11 +283,13 @@ BASE_EXPORT void SetFdLimit(unsigned int max_descriptors);
// Data about system-wide memory consumption. Values are in KB. Available on
// Windows, Mac, Linux, Android and Chrome OS.
//
-// Total/free memory are available on all platforms that implement
+// Total memory is available on all platforms that implement
// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
-// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
+// inactive_file/dirty/reclaimable/pswpin/pswpout/pgmajfault are available on
// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
+// Speculative/file_backed/purgeable are Mac and iOS only.
+// Free is absent on Windows (see "avail_phys" below).
struct BASE_EXPORT SystemMemoryInfoKB {
SystemMemoryInfoKB();
SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
@@ -276,44 +297,64 @@ struct BASE_EXPORT SystemMemoryInfoKB {
// Serializes the platform specific fields to value.
std::unique_ptr<Value> ToValue() const;
- int total;
- int free;
+ int total = 0;
-#if defined(OS_LINUX)
+#if !defined(OS_WIN)
+ int free = 0;
+#endif
+
+#if defined(OS_WIN)
+ // "This is the amount of physical memory that can be immediately reused
+ // without having to write its contents to disk first. It is the sum of the
+ // size of the standby, free, and zero lists." (MSDN).
+  // Standby: unmodified pages of physical RAM (file-backed memory) that are
+  // not actively being used.
+ int avail_phys = 0;
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
// This provides an estimate of available memory as described here:
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
// NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
// be 0 in earlier kernel versions.
- int available;
+ // Note: it includes _all_ file-backed memory (active + inactive).
+ int available = 0;
#endif
#if !defined(OS_MACOSX)
- int swap_total;
- int swap_free;
+ int swap_total = 0;
+ int swap_free = 0;
#endif
#if defined(OS_ANDROID) || defined(OS_LINUX)
- int buffers;
- int cached;
- int active_anon;
- int inactive_anon;
- int active_file;
- int inactive_file;
- int dirty;
+ int buffers = 0;
+ int cached = 0;
+ int active_anon = 0;
+ int inactive_anon = 0;
+ int active_file = 0;
+ int inactive_file = 0;
+ int dirty = 0;
+ int reclaimable = 0;
// vmstats data.
- unsigned long pswpin;
- unsigned long pswpout;
- unsigned long pgmajfault;
+ unsigned long pswpin = 0;
+ unsigned long pswpout = 0;
+ unsigned long pgmajfault = 0;
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_CHROMEOS)
- int shmem;
- int slab;
+ int shmem = 0;
+ int slab = 0;
// Gem data will be -1 if not supported.
- int gem_objects;
- long long gem_size;
+ int gem_objects = -1;
+ long long gem_size = -1;
#endif // defined(OS_CHROMEOS)
+
+#if defined(OS_MACOSX)
+ int speculative = 0;
+ int file_backed = 0;
+ int purgeable = 0;
+#endif // defined(OS_MACOSX)
};
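A sketch of reading the struct with guards that mirror the per-platform field availability documented above (the 10% threshold is arbitrary):

    SystemMemoryInfoKB info;
    if (GetSystemMemoryInfo(&info)) {
    #if defined(OS_WIN)
      // "free" is absent on Windows; avail_phys is the closest equivalent.
      bool low_memory = info.avail_phys < info.total / 10;
    #else
      bool low_memory = info.free < info.total / 10;
    #endif
      if (low_memory) {
        // e.g. trim caches.
      }
    }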
// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index 5d542cc675..ba0dfa76b9 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -311,6 +311,32 @@ int ProcessMetrics::GetOpenFdCount() const {
return total_count;
}
+
+int ProcessMetrics::GetOpenFdSoftLimit() const {
+ // Use /proc/<pid>/limits to read the open fd limit.
+ FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
+
+ std::string limits_contents;
+ if (!ReadFileToString(fd_path, &limits_contents))
+ return -1;
+
+ for (const auto& line :
+ base::SplitStringPiece(limits_contents, "\n", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY)) {
+ if (line.starts_with("Max open files")) {
+ auto tokens = base::SplitStringPiece(line, " ", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ if (tokens.size() > 3) {
+ int limit = -1;
+ if (StringToInt(tokens[3], &limit))
+ return limit;
+ return -1;
+ }
+ }
+ }
+ return -1;
+}
+
#endif // defined(OS_LINUX)
ProcessMetrics::ProcessMetrics(ProcessHandle process)
@@ -532,45 +558,12 @@ const size_t kDiskWeightedIOTime = 13;
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() {
- total = 0;
- free = 0;
-#if defined(OS_LINUX)
- available = 0;
-#endif
- buffers = 0;
- cached = 0;
- active_anon = 0;
- inactive_anon = 0;
- active_file = 0;
- inactive_file = 0;
- swap_total = 0;
- swap_free = 0;
- dirty = 0;
-
- pswpin = 0;
- pswpout = 0;
- pgmajfault = 0;
-
-#ifdef OS_CHROMEOS
- shmem = 0;
- slab = 0;
- gem_objects = -1;
- gem_size = -1;
-#endif
-}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
std::unique_ptr<DictionaryValue> res(new DictionaryValue());
res->SetInteger("total", total);
res->SetInteger("free", free);
-#if defined(OS_LINUX)
res->SetInteger("available", available);
-#endif
res->SetInteger("buffers", buffers);
res->SetInteger("cached", cached);
res->SetInteger("active_anon", active_anon);
@@ -581,6 +574,7 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("swap_free", swap_free);
res->SetInteger("swap_used", swap_total - swap_free);
res->SetInteger("dirty", dirty);
+ res->SetInteger("reclaimable", reclaimable);
res->SetInteger("pswpin", pswpin);
res->SetInteger("pswpout", pswpout);
res->SetInteger("pgmajfault", pgmajfault);
@@ -628,10 +622,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->total;
else if (tokens[0] == "MemFree:")
target = &meminfo->free;
-#if defined(OS_LINUX)
else if (tokens[0] == "MemAvailable:")
target = &meminfo->available;
-#endif
else if (tokens[0] == "Buffers:")
target = &meminfo->buffers;
else if (tokens[0] == "Cached:")
@@ -650,6 +642,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->swap_free;
else if (tokens[0] == "Dirty:")
target = &meminfo->dirty;
+ else if (tokens[0] == "SReclaimable:")
+ target = &meminfo->reclaimable;
#if defined(OS_CHROMEOS)
// Chrome OS has a tweaked kernel that allows us to query Shmem, which is
// usually video memory otherwise invisible to the OS.
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index 51f5fd4e16..d6c0f3c928 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -16,25 +16,9 @@
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_port.h"
#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
#include "base/sys_info.h"
-#if !defined(TASK_POWER_INFO)
-// Doesn't exist in the 10.6 or 10.7 SDKs.
-#define TASK_POWER_INFO 21
-struct task_power_info {
- uint64_t total_user;
- uint64_t total_system;
- uint64_t task_interrupt_wakeups;
- uint64_t task_platform_idle_wakeups;
- uint64_t task_timer_wakeups_bin_1;
- uint64_t task_timer_wakeups_bin_2;
-};
-typedef struct task_power_info task_power_info_data_t;
-typedef struct task_power_info *task_power_info_t;
-#define TASK_POWER_INFO_COUNT ((mach_msg_type_number_t) \
- (sizeof (task_power_info_data_t) / sizeof (natural_t)))
-#endif
-
namespace base {
namespace {
@@ -78,12 +62,57 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
}
}
-} // namespace
+enum MachVMRegionResult { Finished, Error, Success };
+
+// Both |size| and |address| are in-out parameters.
+// |info| is an output parameter, only valid on Success.
+MachVMRegionResult GetTopInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_top_info_data_t* info) {
+ mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
+ mach_port_t object_name;
+ kern_return_t kr = mach_vm_region(task, address, size, VM_REGION_TOP_INFO,
+ reinterpret_cast<vm_region_info_t>(info),
+ &info_count, &object_name);
+ // We're at the end of the address space.
+ if (kr == KERN_INVALID_ADDRESS)
+ return Finished;
+
+ if (kr != KERN_SUCCESS)
+ return Error;
+
+ // The kernel always returns a null object for VM_REGION_TOP_INFO, but
+ // balance it with a deallocate in case this ever changes. See 10.9.2
+ // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+ mach_port_deallocate(task, object_name);
+ return Success;
+}
-SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
+MachVMRegionResult GetBasicInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_basic_info_64* info) {
+ mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
+ mach_port_t object_name;
+ kern_return_t kr = mach_vm_region(
+ task, address, size, VM_REGION_BASIC_INFO_64,
+ reinterpret_cast<vm_region_info_t>(info), &info_count, &object_name);
+ if (kr == KERN_INVALID_ADDRESS) {
+ // We're at the end of the address space.
+ return Finished;
+ } else if (kr != KERN_SUCCESS) {
+ return Error;
+ }
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
+ // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
+ // balance it with a deallocate in case this ever changes. See 10.9.2
+ // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+ mach_port_deallocate(task, object_name);
+ return Success;
+}
+
+} // namespace
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these (and spinning
@@ -110,10 +139,8 @@ size_t ProcessMetrics::GetPeakPagefileUsage() const {
}
size_t ProcessMetrics::GetWorkingSetSize() const {
- size_t private_bytes = 0;
- size_t shared_bytes = 0;
size_t resident_bytes = 0;
- if (!GetMemoryBytes(&private_bytes, &shared_bytes, &resident_bytes))
+ if (!GetMemoryBytes(nullptr, nullptr, &resident_bytes, nullptr))
return 0;
return resident_bytes;
}
@@ -122,16 +149,21 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
return 0;
}
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+ size_t* shared_bytes) const {
+ return GetMemoryBytes(private_bytes, shared_bytes, nullptr, nullptr);
+}
+
// This is a rough approximation of the algorithm that libtop uses.
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
+ size_t* shared_bytes,
+ size_t* resident_bytes,
+ size_t* locked_bytes) const {
size_t private_pages_count = 0;
size_t shared_pages_count = 0;
-
- if (!private_bytes && !shared_bytes)
- return true;
+ size_t wired_pages_count = 0;
mach_port_t task = TaskForPid(process_);
if (task == MACH_PORT_NULL) {
@@ -160,28 +192,26 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
// http://www.opensource.apple.com/source/top/top-67/libtop.c
mach_vm_size_t size = 0;
for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
+ mach_vm_size_t size_copy = size;
+ mach_vm_address_t address_copy = address;
+
vm_region_top_info_data_t info;
- mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
- mach_port_t object_name;
- kern_return_t kr = mach_vm_region(task,
- &address,
- &size,
- VM_REGION_TOP_INFO,
- reinterpret_cast<vm_region_info_t>(&info),
- &info_count,
- &object_name);
- if (kr == KERN_INVALID_ADDRESS) {
- // We're at the end of the address space.
- break;
- } else if (kr != KERN_SUCCESS) {
- MACH_DLOG(ERROR, kr) << "mach_vm_region";
+ MachVMRegionResult result = GetTopInfo(task, &size, &address, &info);
+ if (result == Error)
return false;
- }
+ if (result == Finished)
+ break;
- // The kernel always returns a null object for VM_REGION_TOP_INFO, but
- // balance it with a deallocate in case this ever changes. See 10.9.2
- // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
- mach_port_deallocate(mach_task_self(), object_name);
+ vm_region_basic_info_64 basic_info;
+ result = GetBasicInfo(task, &size_copy, &address_copy, &basic_info);
+ switch (result) {
+ case Finished:
+ case Error:
+ return false;
+ case Success:
+ break;
+ }
+ bool is_wired = basic_info.user_wired_count > 0;
if (IsAddressInSharedRegion(address, cpu_type) &&
info.share_mode != SM_PRIVATE)
@@ -212,12 +242,20 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
default:
break;
}
+ if (is_wired) {
+ wired_pages_count +=
+ info.private_pages_resident + info.shared_pages_resident;
+ }
}
if (private_bytes)
*private_bytes = private_pages_count * PAGE_SIZE;
if (shared_bytes)
*shared_bytes = shared_pages_count * PAGE_SIZE;
+ if (resident_bytes)
+ *resident_bytes = (private_pages_count + shared_pages_count) * PAGE_SIZE;
+ if (locked_bytes)
+ *locked_bytes = wired_pages_count * PAGE_SIZE;
return true;
}
@@ -252,15 +290,6 @@ bool ProcessMetrics::GetCommittedAndWorkingSetKBytes(
return true;
}
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes,
- size_t* resident_bytes) const {
- if (!GetMemoryBytes(private_bytes, shared_bytes))
- return false;
- *resident_bytes = *private_bytes + *shared_bytes;
- return true;
-}
-
#define TIME_VALUE_TO_TIMEVAL(a, r) do { \
(r)->tv_sec = (a)->seconds; \
(r)->tv_usec = (a)->microseconds; \
@@ -392,7 +421,6 @@ size_t GetSystemCommitCharge() {
return (data.active_count * PAGE_SIZE) / 1024;
}
-// On Mac, We only get total memory and free memory from the system.
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
struct host_basic_info hostinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
@@ -405,17 +433,25 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
- vm_statistics_data_t vm_info;
- count = HOST_VM_INFO_COUNT;
+ vm_statistics64_data_t vm_info;
+ count = HOST_VM_INFO64_COUNT;
- if (host_statistics(host.get(), HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
+ if (host_statistics64(host.get(), HOST_VM_INFO64,
+ reinterpret_cast<host_info64_t>(&vm_info),
+ &count) != KERN_SUCCESS) {
return false;
}
-
- meminfo->free = static_cast<int>(
- (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
+ DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+ static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
+ meminfo->free = saturated_cast<int>(
+ PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+ meminfo->speculative =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+ meminfo->file_backed =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+ meminfo->purgeable =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
return true;
}
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index b0bd7ea80b..288cde9fc6 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -17,12 +17,17 @@
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
#include "base/test/multiprocess_test.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
namespace base {
namespace debug {
@@ -52,6 +57,42 @@ class SystemMetricsTest : public testing::Test {
/////////////////////////////////////////////////////////////////////////////
+#if defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+TEST_F(SystemMetricsTest, LockedBytes) {
+ ProcessHandle handle = GetCurrentProcessHandle();
+ std::unique_ptr<ProcessMetrics> metrics(
+ ProcessMetrics::CreateProcessMetrics(handle, nullptr));
+
+ size_t initial_locked_bytes;
+ bool result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &initial_locked_bytes);
+ ASSERT_TRUE(result);
+
+ size_t size = 8 * 1024 * 1024;
+ std::unique_ptr<char[]> memory(new char[size]);
+ int r = mlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ size_t new_locked_bytes;
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+
+ // There should be around |size| more locked bytes, but multi-threading might
+ // cause noise.
+ EXPECT_LT(initial_locked_bytes + size / 2, new_locked_bytes);
+ EXPECT_GT(initial_locked_bytes + size * 1.5, new_locked_bytes);
+
+ r = munlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+ EXPECT_EQ(initial_locked_bytes, new_locked_bytes);
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST_F(SystemMetricsTest, IsValidDiskName) {
std::string invalid_input1 = "";
@@ -106,6 +147,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
std::string valid_input1 =
"MemTotal: 3981504 kB\n"
"MemFree: 140764 kB\n"
+ "MemAvailable: 535413 kB\n"
"Buffers: 116480 kB\n"
"Cached: 406160 kB\n"
"SwapCached: 21304 kB\n"
@@ -171,6 +213,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
EXPECT_EQ(meminfo.total, 3981504);
EXPECT_EQ(meminfo.free, 140764);
+ EXPECT_EQ(meminfo.available, 535413);
EXPECT_EQ(meminfo.buffers, 116480);
EXPECT_EQ(meminfo.cached, 406160);
EXPECT_EQ(meminfo.active_anon, 2972352);
@@ -180,18 +223,29 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_EQ(meminfo.swap_total, 5832280);
EXPECT_EQ(meminfo.swap_free, 3672368);
EXPECT_EQ(meminfo.dirty, 184);
+ EXPECT_EQ(meminfo.reclaimable, 30936);
#if defined(OS_CHROMEOS)
EXPECT_EQ(meminfo.shmem, 140204);
EXPECT_EQ(meminfo.slab, 54212);
#endif
+ EXPECT_EQ(355725,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ // Simulate as if there is no MemAvailable.
+ meminfo.available = 0;
+ EXPECT_EQ(374448,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ meminfo = {};
EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
EXPECT_EQ(meminfo.total, 255908);
EXPECT_EQ(meminfo.free, 69936);
+ EXPECT_EQ(meminfo.available, 0);
EXPECT_EQ(meminfo.buffers, 15812);
EXPECT_EQ(meminfo.cached, 115124);
EXPECT_EQ(meminfo.swap_total, 524280);
EXPECT_EQ(meminfo.swap_free, 524200);
EXPECT_EQ(meminfo.dirty, 4);
+ EXPECT_EQ(69936,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
}
TEST_F(SystemMetricsTest, ParseVmstat) {
@@ -341,15 +395,19 @@ TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
-#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
- defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
SystemMemoryInfoKB info;
EXPECT_TRUE(GetSystemMemoryInfo(&info));
// Ensure each field received a value.
EXPECT_GT(info.total, 0);
+#if defined(OS_WIN)
+ EXPECT_GT(info.avail_phys, 0);
+#else
EXPECT_GT(info.free, 0);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_GT(info.buffers, 0);
EXPECT_GT(info.cached, 0);
@@ -360,7 +418,9 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
// All the values should be less than the total amount of memory.
+#if !defined(OS_WIN)
EXPECT_LT(info.free, info.total);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_LT(info.buffers, info.total);
EXPECT_LT(info.cached, info.total);
@@ -370,6 +430,10 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
EXPECT_LT(info.inactive_file, info.total);
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_MACOSX) || defined(OS_IOS)
+ EXPECT_GT(info.file_backed, 0);
+#endif
+
#if defined(OS_CHROMEOS)
// Chrome OS exposes shmem.
EXPECT_GT(info.shmem, 0);
@@ -378,8 +442,8 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
// and gem_size cannot be tested here.
#endif
}
-#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
- // defined(OS_LINUX) || defined(OS_ANDROID)
+#endif // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+ // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST(ProcessMetricsTest, ParseProcStatCPU) {
@@ -494,13 +558,13 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
const FilePath temp_path = temp_dir.GetPath();
CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
- Process child = SpawnMultiProcessTestChild(
+ SpawnChildResult spawn_child = SpawnMultiProcessTestChild(
ChildMainString, child_command_line, LaunchOptions());
- ASSERT_TRUE(child.IsValid());
+ ASSERT_TRUE(spawn_child.process.IsValid());
WaitForEvent(temp_path, kSignalClosed);
std::unique_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(child.Handle()));
+ ProcessMetrics::CreateProcessMetrics(spawn_child.process.Handle()));
// Try a couple times to observe the child with 0 fds open.
// Sometimes we've seen that the child can have 1 remaining
// fd shortly after receiving the signal. Potentially this
@@ -514,7 +578,7 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
}
EXPECT_EQ(0, open_fds);
- ASSERT_TRUE(child.Terminate(0, true));
+ ASSERT_TRUE(spawn_child.process.Terminate(0, true));
}
#endif // !defined(__ANDROID__)
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
index df2a8cb24f..6a9b5b2d0f 100644
--- a/base/sequence_checker_impl.cc
+++ b/base/sequence_checker_impl.cc
@@ -26,7 +26,7 @@ class SequenceCheckerImpl::Core {
~Core() = default;
- bool CalledOnValidThread() const {
+ bool CalledOnValidSequence() const {
if (sequence_token_.IsValid())
return sequence_token_ == SequenceToken::GetForCurrentThread();
@@ -58,7 +58,7 @@ bool SequenceCheckerImpl::CalledOnValidSequence() const {
AutoLock auto_lock(lock_);
if (!core_)
core_ = MakeUnique<Core>();
- return core_->CalledOnValidThread();
+ return core_->CalledOnValidSequence();
}
void SequenceCheckerImpl::DetachFromSequence() {
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index dc11ebc3f1..4c367cb927 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -4,14 +4,17 @@
#include "base/sequenced_task_runner.h"
+#include <utility>
+
#include "base/bind.h"
namespace base {
bool SequencedTaskRunner::PostNonNestableTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
+ OnceClosure task) {
+ return PostNonNestableDelayedTask(from_here, std::move(task),
+ base::TimeDelta());
}
bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index 6b2726ed4f..b29153927e 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_SEQUENCED_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/sequenced_task_runner_helpers.h"
#include "base/task_runner.h"
@@ -109,11 +110,11 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
// below.
bool PostNonNestableTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
virtual bool PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
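With the OnceClosure migration, a caller would typically hand off a one-shot closure, e.g. (DoCleanup and |runner| are placeholders):

    runner->PostNonNestableTask(FROM_HERE, base::BindOnce(&DoCleanup));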
// Submits a non-nestable task to delete the given object. Returns
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index eaec14de5d..5333640fee 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -245,6 +245,9 @@ template <typename STRING_TYPE> class BasicStringPiece {
return r;
}
+ // This is the style of conversion preferred by std::string_view in C++17.
+ explicit operator STRING_TYPE() const { return as_string(); }
+
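An editorial illustration of what the explicit conversion allows and forbids:

    base::StringPiece piece("hello");
    std::string copied(piece);        // OK: direct-initialization.
    // std::string assigned = piece;  // Does not compile: conversion is explicit.
    std::string converted = static_cast<std::string>(piece);  // Also OK.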
STRING_TYPE as_string() const {
// std::string doesn't like to take a NULL pointer even with a 0 size.
return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
diff --git a/base/strings/string_piece_unittest.cc b/base/strings/string_piece_unittest.cc
index f05aa152b5..7dfd71116b 100644
--- a/base/strings/string_piece_unittest.cc
+++ b/base/strings/string_piece_unittest.cc
@@ -295,6 +295,8 @@ TYPED_TEST(CommonStringPieceTest, CheckFind) {
ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
+ ASSERT_EQ(a.rfind(d), static_cast<size_t>(TypeParam(a).rfind(TypeParam())));
+ ASSERT_EQ(a.rfind(e), TypeParam(a).rfind(TypeParam()));
ASSERT_EQ(a.rfind(d, 12), 12U);
ASSERT_EQ(a.rfind(e, 17), 17U);
ASSERT_EQ(a.rfind(g), Piece::npos);
@@ -518,6 +520,12 @@ TYPED_TEST(CommonStringPieceTest, CheckCustom) {
ASSERT_TRUE(c == s3);
TypeParam s4(e.as_string());
ASSERT_TRUE(s4.empty());
+
+ // operator STRING_TYPE()
+ TypeParam s5(TypeParam(a).c_str(), 7); // Note, has an embedded NULL
+ ASSERT_TRUE(c == s5);
+ TypeParam s6(e);
+ ASSERT_TRUE(s6.empty());
}
TEST(StringPieceTest, CheckCustom) {
@@ -591,7 +599,11 @@ TYPED_TEST(CommonStringPieceTest, CheckNULL) {
ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
ASSERT_EQ(s.size(), 0U);
- TypeParam str = s.as_string();
+ TypeParam str(s);
+ ASSERT_EQ(str.length(), 0U);
+ ASSERT_EQ(str, TypeParam());
+
+ str = s.as_string();
ASSERT_EQ(str.length(), 0U);
ASSERT_EQ(str, TypeParam());
}
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 761965f03a..e8caffeec3 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -112,6 +112,9 @@ class BASE_EXPORT WaitableEvent {
// You MUST NOT delete any of the WaitableEvent objects while this wait is
// happening, however WaitMany's return "happens after" the |Signal| call
// that caused it has completed, like |Wait|.
+ //
+ // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+ // index among them is returned.
static size_t WaitMany(WaitableEvent** waitables, size_t count);
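A sketch of the documented tie-break, assuming |event_a| and |event_b| were constructed with AUTOMATIC reset and NOT_SIGNALED:

    WaitableEvent* both[] = {&event_a, &event_b};
    event_b.Signal();
    event_a.Signal();
    // Both are signaled, so the lowest index wins regardless of signal order.
    size_t index = WaitableEvent::WaitMany(both, 2);  // index == 0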
// For asynchronous waiting, see WaitableEventWatcher
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 5dfff468ad..846fa06700 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <algorithm>
+#include <limits>
#include <vector>
#include "base/debug/activity_tracker.h"
@@ -266,12 +267,10 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
SyncWaiter sw;
const size_t r = EnqueueMany(&waitables[0], count, &sw);
- if (r) {
+ if (r < count) {
// One of the events is already signaled. The SyncWaiter has not been
- // enqueued anywhere. EnqueueMany returns the count of remaining waitables
- // when the signaled one was seen, so the index of the signaled event is
- // @count - @r.
- return waitables[count - r].second;
+ // enqueued anywhere.
+ return waitables[r].second;
}
// At this point, we hold the locks on all the WaitableEvents and we have
@@ -319,38 +318,50 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
}
// -----------------------------------------------------------------------------
-// If return value == 0:
+// If return value == count:
// The locks of the WaitableEvents have been taken in order and the Waiter has
// been enqueued in the wait-list of each. None of the WaitableEvents are
// currently signaled
// else:
// None of the WaitableEvent locks are held. The Waiter has not been enqueued
-// in any of them and the return value is the index of the first WaitableEvent
-// which was signaled, from the end of the array.
+// in any of them and the return value is the index of the WaitableEvent which
+// was signaled with the lowest input index from the original WaitMany call.
// -----------------------------------------------------------------------------
// static
-size_t WaitableEvent::EnqueueMany
- (std::pair<WaitableEvent*, size_t>* waitables,
- size_t count, Waiter* waiter) {
- if (!count)
- return 0;
-
- waitables[0].first->kernel_->lock_.Acquire();
- if (waitables[0].first->kernel_->signaled_) {
- if (!waitables[0].first->kernel_->manual_reset_)
- waitables[0].first->kernel_->signaled_ = false;
- waitables[0].first->kernel_->lock_.Release();
- return count;
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+ size_t count,
+ Waiter* waiter) {
+ size_t winner = count;
+ size_t winner_index = count;
+ for (size_t i = 0; i < count; ++i) {
+ auto& kernel = waitables[i].first->kernel_;
+ kernel->lock_.Acquire();
+ if (kernel->signaled_ && waitables[i].second < winner) {
+ winner = waitables[i].second;
+ winner_index = i;
}
+ }
- const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
- if (r) {
- waitables[0].first->kernel_->lock_.Release();
- } else {
- waitables[0].first->Enqueue(waiter);
+ // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+ // and return.
+ if (winner == count) {
+ for (size_t i = 0; i < count; ++i)
+ waitables[i].first->Enqueue(waiter);
+ return count;
+ }
+
+ // Unlock in reverse order and possibly clear the chosen winner's signal
+ // before returning its index.
+ for (auto* w = waitables + count - 1; w >= waitables; --w) {
+ auto& kernel = w->first->kernel_;
+ if (w->second == winner) {
+ if (!kernel->manual_reset_)
+ kernel->signaled_ = false;
}
+ kernel->lock_.Release();
+ }
- return r;
+ return winner_index;
}
// -----------------------------------------------------------------------------
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index c0e280aa97..3aa1af1619 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <algorithm>
+
#include "base/compiler_specific.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -78,6 +80,42 @@ TEST(WaitableEventTest, WaitManyShortcut) {
delete ev[i];
}
+TEST(WaitableEventTest, WaitManyLeftToRight) {
+ WaitableEvent* ev[5];
+ for (size_t i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
+
+ // Test for consistent left-to-right return behavior across all permutations
+ // of the input array. This is to verify that only the indices -- and not
+ // the WaitableEvents' addresses -- are relevant in determining who wins when
+ // multiple events are signaled.
+
+ std::sort(ev, ev + 5);
+ do {
+ ev[0]->Signal();
+ ev[1]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[2]->Signal();
+ EXPECT_EQ(1u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[3]->Signal();
+ ev[4]->Signal();
+ ev[0]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(3u, WaitableEvent::WaitMany(ev, 5));
+ ev[2]->Signal();
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(4u, WaitableEvent::WaitMany(ev, 5));
+ } while (std::next_permutation(ev, ev + 5));
+
+ for (size_t i = 0; i < 5; ++i)
+ delete ev[i];
+}
+
class WaitableEventSignaler : public PlatformThread::Delegate {
public:
WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
diff --git a/base/sys_info.h b/base/sys_info.h
index e35feff735..18bdaf0096 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -13,11 +13,18 @@
#include "base/base_export.h"
#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
+namespace debug {
+FORWARD_DECLARE_TEST(SystemMetricsTest, ParseMeminfo);
+}
+
+struct SystemMemoryInfoKB;
+
class BASE_EXPORT SysInfo {
public:
// Return the number of logical processors/cores on the current machine.
@@ -28,6 +35,9 @@ class BASE_EXPORT SysInfo {
// Return the number of bytes of current available physical memory on the
// machine.
+  // (That is, the amount of memory that can be allocated without any
+  // significant impact on the system; allocating it may cause the OS to free
+  // inactive and/or speculative file-backed memory.)
static int64_t AmountOfAvailablePhysicalMemory();
// Return the number of bytes of virtual memory of this process. A return
@@ -70,8 +80,6 @@ class BASE_EXPORT SysInfo {
static std::string OperatingSystemVersion();
// Retrieves detailed numeric values for the OS version.
- // TODO(port): Implement a Linux version of this method and enable the
- // corresponding unit test.
// DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
// for OS version-specific feature checks and workarounds. If you must use
// an OS version check instead of a feature check, use the base::mac::IsOS*
@@ -147,6 +155,15 @@ class BASE_EXPORT SysInfo {
// Low-end device refers to devices having less than 512M memory in the
// current implementation.
static bool IsLowEndDevice();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
+ FRIEND_TEST_ALL_PREFIXES(debug::SystemMetricsTest, ParseMeminfo);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ static int64_t AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& meminfo);
+#endif
};
} // namespace base
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 298d245ecf..0cd05b363a 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -13,6 +13,7 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
+#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_info_internal.h"
#include "build/build_config.h"
@@ -42,13 +43,29 @@ base::LazyInstance<
namespace base {
// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+ return g_lazy_physical_memory.Get().value();
+}
+
+// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- return AmountOfMemory(_SC_AVPHYS_PAGES);
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
+ return 0;
+ return AmountOfAvailablePhysicalMemory(info);
}
// static
-int64_t SysInfo::AmountOfPhysicalMemory() {
- return g_lazy_physical_memory.Get().value();
+int64_t SysInfo::AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& info) {
+ // See details here:
+ // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ // The fallback logic (when there is no MemAvailable) would be more precise
+ // if we had info about zones watermarks (/proc/zoneinfo).
+ int64_t res_kb = info.available != 0
+ ? info.available - info.active_file
+ : info.free + info.reclaimable + info.inactive_file;
+ return res_kb * 1024;
}
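A worked example of the two branches with made-up /proc/meminfo values, all in KB:

    MemAvailable present: available = 500000, active_file = 120000
      -> (500000 - 120000) KB = 380000 KB, returned as 380000 * 1024 bytes.
    MemAvailable absent:  free = 140000, reclaimable = 30000, inactive_file = 200000
      -> (140000 + 30000 + 200000) KB = 370000 KB, returned as 370000 * 1024 bytes.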
// static
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
index aab1103d4c..1141bd5577 100644
--- a/base/sys_info_mac.mm
+++ b/base/sys_info_mac.mm
@@ -19,6 +19,7 @@
#include "base/mac/scoped_mach_port.h"
#import "base/mac/sdk_forward_declarations.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
namespace base {
@@ -83,20 +84,12 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- base::mac::ScopedMachSendRight host(mach_host_self());
- vm_statistics_data_t vm_info;
- mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
-
- if (host_statistics(host.get(),
- HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
- NOTREACHED();
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
return 0;
- }
-
- return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
- PAGE_SIZE;
+  // We should also add inactive file-backed memory, but macOS does not
+  // expose that information.
+ return static_cast<int64_t>(info.free + info.speculative) * 1024;
}
// static
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index cbdfa3f7a9..7d3714663b 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -183,6 +183,30 @@ std::string SysInfo::OperatingSystemVersion() {
}
#endif
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+ int32_t* minor_version,
+ int32_t* bugfix_version) {
+ struct utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ *major_version = 0;
+ *minor_version = 0;
+ *bugfix_version = 0;
+ return;
+ }
+ int num_read = sscanf(info.release, "%d.%d.%d", major_version, minor_version,
+ bugfix_version);
+ if (num_read < 1)
+ *major_version = 0;
+ if (num_read < 2)
+ *minor_version = 0;
+ if (num_read < 3)
+ *bugfix_version = 0;
+}
+#endif
+
// static
std::string SysInfo::OperatingSystemArchitecture() {
struct utsname info;
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index c3b8507707..94b5a84971 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -6,6 +6,7 @@
#include "base/environment.h"
#include "base/files/file_util.h"
+#include "base/process/process_metrics.h"
#include "base/sys_info.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -13,46 +14,71 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
-typedef PlatformTest SysInfoTest;
-using base::FilePath;
+namespace base {
+
+using SysInfoTest = PlatformTest;
TEST_F(SysInfoTest, NumProcs) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
+ EXPECT_GE(SysInfo::NumberOfProcessors(), 1);
}
TEST_F(SysInfoTest, AmountOfMem) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemory(), 0);
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemoryMB(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemory(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemoryMB(), 0);
// The maximal amount of virtual memory can be zero, which means unlimited.
- EXPECT_GE(base::SysInfo::AmountOfVirtualMemory(), 0);
+ EXPECT_GE(SysInfo::AmountOfVirtualMemory(), 0);
}
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST_F(SysInfoTest, AmountOfAvailablePhysicalMemory) {
+ // Note: info is in _K_bytes.
+ SystemMemoryInfoKB info;
+ ASSERT_TRUE(GetSystemMemoryInfo(&info));
+ EXPECT_GT(info.free, 0);
+
+ if (info.available != 0) {
+ // If there is MemAvailable from kernel.
+ EXPECT_LT(info.available, info.total);
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.available);
+ // Simulate as if there is no MemAvailable.
+ info.available = 0;
+ }
+
+ // There is no MemAvailable. Check the fallback logic.
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.total);
+}
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
}
TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
}
-#if defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
int32_t os_major_version = -1;
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_GT(os_major_version, -1);
EXPECT_GT(os_minor_version, -1);
EXPECT_GT(os_bugfix_version, -1);
@@ -60,18 +86,18 @@ TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
#endif
TEST_F(SysInfoTest, Uptime) {
- base::TimeDelta up_time_1 = base::SysInfo::Uptime();
+ TimeDelta up_time_1 = SysInfo::Uptime();
// Uptime() is implemented internally using TimeTicks::Now(), which documents
// system resolution as being 1-15ms. Sleep a little longer than that.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
- base::TimeDelta up_time_2 = base::SysInfo::Uptime();
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+ TimeDelta up_time_2 = SysInfo::Uptime();
EXPECT_GT(up_time_1.InMicroseconds(), 0);
EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
}
#if defined(OS_MACOSX) && !defined(OS_IOS)
TEST_F(SysInfoTest, HardwareModelName) {
- std::string hardware_model = base::SysInfo::HardwareModelName();
+ std::string hardware_model = SysInfo::HardwareModelName();
EXPECT_FALSE(hardware_model.empty());
}
#endif
@@ -85,10 +111,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbers) {
const char kLsbRelease[] =
"FOO=1234123.34.5\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -101,10 +127,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbersFirst) {
const char kLsbRelease[] =
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
"FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -115,10 +141,10 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
const char kLsbRelease[] = "FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(0, os_major_version);
EXPECT_EQ(0, os_minor_version);
EXPECT_EQ(0, os_bugfix_version);
@@ -127,43 +153,45 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
// Use a fake time that can be safely displayed as a string.
- const base::Time lsb_release_time(base::Time::FromDoubleT(12345.6));
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
- base::Time parsed_lsb_release_time = base::SysInfo::GetLsbReleaseTime();
+ const Time lsb_release_time(Time::FromDoubleT(12345.6));
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+ Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
parsed_lsb_release_time.ToDoubleT());
}
TEST_F(SysInfoTest, IsRunningOnChromeOS) {
- base::SysInfo::SetChromeOSVersionInfoForTest("", base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest("", Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease1[] =
"CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease2[] =
"CHROMEOS_RELEASE_NAME=Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease3[] =
"CHROMEOS_RELEASE_NAME=Chromium OS\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
}
TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
}
#endif // OS_CHROMEOS
+
+} // namespace base
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 35c0a23274..c3e0574a1b 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -23,7 +23,7 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override;
+ OnceClosure task) override;
// Non-owning.
TaskRunner* destination_;
@@ -36,20 +36,20 @@ PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
bool PostTaskAndReplyTaskRunner::PostTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return destination_->PostTask(from_here, task);
+ OnceClosure task) {
+ return destination_->PostTask(from_here, std::move(task));
}
} // namespace
bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostDelayedTask(from_here, task, base::TimeDelta());
+ OnceClosure task) {
+ return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
}
bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
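The recurring change in this file and the ones below is the move from const Closure& to OnceClosure parameters. A OnceClosure is move-only and is consumed by Run(), which is why every forwarding site now takes the task by value and passes it along with std::move(). The toy class here only illustrates that pattern; it is a sketch, not the real base::OnceClosure.

#include <functional>
#include <iostream>
#include <utility>

// A move-only, run-once closure, illustrating why the TaskRunner overloads now
// take the parameter by value and forward it with std::move().
class MiniOnceClosure {
 public:
  explicit MiniOnceClosure(std::function<void()> f) : f_(std::move(f)) {}
  MiniOnceClosure(MiniOnceClosure&&) = default;
  MiniOnceClosure& operator=(MiniOnceClosure&&) = default;
  MiniOnceClosure(const MiniOnceClosure&) = delete;
  MiniOnceClosure& operator=(const MiniOnceClosure&) = delete;

  // Run() is only callable on an rvalue, so callers must write
  // std::move(task).Run(), mirroring the call sites in this patch.
  void Run() && {
    auto f = std::move(f_);
    f_ = nullptr;
    f();
  }

 private:
  std::function<void()> f_;
};

// Analogous to TaskRunner::PostTask() forwarding to PostDelayedTask(): the
// task has to be handed on with std::move().
void Forward(MiniOnceClosure task) { std::move(task).Run(); }

int main() {
  MiniOnceClosure task([] { std::cout << "ran once\n"; });
  Forward(std::move(task));
}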
diff --git a/base/task_runner.h b/base/task_runner.h
index be3039d372..0421d564e6 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -61,8 +61,7 @@ class BASE_EXPORT TaskRunner
// will not be run.
//
// Equivalent to PostDelayedTask(from_here, task, 0).
- bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ bool PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Like PostTask, but tries to run the posted task only after
// |delay_ms| has passed.
@@ -70,7 +69,7 @@ class BASE_EXPORT TaskRunner
// It is valid for an implementation to ignore |delay_ms|; that is,
// to have PostDelayedTask behave the same as PostTask.
virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
// Returns true if the current thread is a thread on which a task
@@ -123,8 +122,8 @@ class BASE_EXPORT TaskRunner
// and the reply will cancel itself safely because it is bound to a
// WeakPtr<>.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
protected:
friend struct TaskRunnerTraits;
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index 601b5402d0..9867c1dfd2 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -15,6 +15,8 @@ namespace internal {
Sequence::Sequence() = default;
bool Sequence::PushTask(std::unique_ptr<Task> task) {
+ DCHECK(task);
+ DCHECK(task->task);
DCHECK(task->sequenced_time.is_null());
task->sequenced_time = base::TimeTicks::Now();
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index c45d8a87d0..7093b1e94d 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/test/gtest_util.h"
@@ -24,27 +25,27 @@ class TaskSchedulerSequenceTest : public testing::Test {
TaskSchedulerSequenceTest()
: task_a_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_b_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
TimeDelta())),
task_c_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_d_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_e_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_a_(task_a_owned_.get()),
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index 3780c16dcb..fc513e3e9f 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -4,22 +4,28 @@
#include "base/task_scheduler/task.h"
+#include <utility>
+
+#include "base/critical_closure.h"
+
namespace base {
namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay)
- : PendingTask(posted_from,
- task,
- delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
- false), // Not nestable.
+ : PendingTask(
+ posted_from,
+ traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN
+ ? MakeCriticalClosure(std::move(task))
+ : std::move(task),
+ delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+ false), // Not nestable.
// Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
// being scheduled by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
- traits(!delay.is_zero() &&
- traits.shutdown_behavior() ==
- TaskShutdownBehavior::BLOCK_SHUTDOWN
+ traits(!delay.is_zero() && traits.shutdown_behavior() ==
+ TaskShutdownBehavior::BLOCK_SHUTDOWN
? TaskTraits(traits).WithShutdownBehavior(
TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
: traits),
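The Task constructor above does two things: it wraps a BLOCK_SHUTDOWN task in MakeCriticalClosure(), and it demotes a delayed BLOCK_SHUTDOWN task to SKIP_ON_SHUTDOWN so that a long delay cannot hold shutdown hostage. A minimal sketch of the demotion decision in isolation, using hypothetical stand-in types rather than the real TaskTraits:

#include <iostream>

// Hypothetical stand-in for base::TaskShutdownBehavior.
enum class ShutdownBehavior { CONTINUE_ON_SHUTDOWN, SKIP_ON_SHUTDOWN, BLOCK_SHUTDOWN };

// A delayed BLOCK_SHUTDOWN task is downgraded to SKIP_ON_SHUTDOWN; everything
// else keeps the behavior it was posted with.
ShutdownBehavior EffectiveBehavior(ShutdownBehavior requested, bool is_delayed) {
  return is_delayed && requested == ShutdownBehavior::BLOCK_SHUTDOWN
             ? ShutdownBehavior::SKIP_ON_SHUTDOWN
             : requested;
}

int main() {
  const bool demoted = EffectiveBehavior(ShutdownBehavior::BLOCK_SHUTDOWN,
                                         /*is_delayed=*/true) ==
                       ShutdownBehavior::SKIP_ON_SHUTDOWN;
  std::cout << demoted << "\n";  // Prints 1.
}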
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index c5b9bdb53b..43095f2ae7 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -6,7 +6,7 @@
#define BASE_TASK_SCHEDULER_TASK_H_
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -28,7 +28,7 @@ struct BASE_EXPORT Task : public PendingTask {
// behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
// automatically adjusted to SKIP_ON_SHUTDOWN.
Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay);
~Task();
diff --git a/base/template_util.h b/base/template_util.h
index 42552107cf..10154dbbeb 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -51,46 +51,8 @@ template <class T> struct is_non_const_reference : std::false_type {};
template <class T> struct is_non_const_reference<T&> : std::true_type {};
template <class T> struct is_non_const_reference<const T&> : std::false_type {};
-// is_assignable
-
namespace internal {
-template <typename First, typename Second>
-struct SelectSecond {
- using type = Second;
-};
-
-struct Any {
- Any(...);
-};
-
-// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
-// value is a true_type.
-template <class Lvalue, class Rvalue>
-typename internal::SelectSecond<
- decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
- std::true_type>::type
-IsAssignableTest(Lvalue&&, Rvalue&&);
-
-// False case: Otherwise the return value is a false_type.
-template <class Rvalue>
-std::false_type IsAssignableTest(internal::Any, Rvalue&&);
-
-// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
-// determine the type of IsAssignableImpl.
-template <class Lvalue,
- class Rvalue,
- bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
-struct IsAssignableImpl
- : public std::common_type<decltype(
- internal::IsAssignableTest(std::declval<Lvalue>(),
- std::declval<Rvalue>()))>::type {};
-
-// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
-// is false_type.
-template <class Lvalue, class Rvalue>
-struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
-
// Uses expression SFINAE to detect whether using operator<< would work.
template <typename T, typename = void>
struct SupportsOstreamOperator : std::false_type {};
@@ -102,29 +64,6 @@ struct SupportsOstreamOperator<T,
} // namespace internal
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class Lvalue, class Rvalue>
-struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
-
-// is_copy_assignable is true if a T const& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_copy_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- typename std::add_lvalue_reference<
- typename std::add_const<T>::type>::type> {};
-
-// is_move_assignable is true if a T&& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_move_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- const typename std::add_rvalue_reference<T>::type> {
-};
-
// underlying_type produces the integer type backing an enum type.
// TODO(crbug.com/554293): Remove this when all platforms have this in the std
// namespace.
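Removing base::is_assignable, is_copy_assignable and is_move_assignable assumes the std equivalents are usable on every supported toolchain. Callers migrate by switching to the std traits; the static_asserts below mirror a few of the checks deleted from the unit test further down and are only a sketch of that migration.

#include <type_traits>

// std traits that replace the removed base:: versions.
static_assert(!std::is_assignable<int, int>::value, "1 = 1 is ill-formed");
static_assert(std::is_assignable<int&, double>::value, "int& = double is fine");
static_assert(std::is_copy_assignable<int>::value, "int is copy-assignable");
static_assert(std::is_move_assignable<int>::value, "int is move-assignable");

int main() {}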
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index 921596474b..e34a25b042 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -30,39 +30,6 @@ static_assert(!is_non_const_reference<const int&>::value,
"IsNonConstReference");
static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
-class AssignParent {};
-class AssignChild : AssignParent {};
-
-// is_assignable<Type1, Type2>
-static_assert(!is_assignable<int, int>::value, "IsAssignable"); // 1 = 1;
-static_assert(!is_assignable<int, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int>::value, "IsAssignable");
-static_assert(is_assignable<int&, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int&>::value, "IsAssignable");
-static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
-static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
-static_assert(!is_assignable<AssignParent&, AssignChild>::value,
- "IsAssignable");
-static_assert(!is_assignable<AssignChild&, AssignParent>::value,
- "IsAssignable");
-
-struct AssignCopy {};
-struct AssignNoCopy {
- AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
- AssignNoCopy& operator=(const AssignNoCopy&) = delete;
-};
-struct AssignNoMove {
- AssignNoMove& operator=(AssignNoMove&&) = delete;
- AssignNoMove& operator=(const AssignNoMove&) = delete;
-};
-
-static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
-static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
-
-static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
-static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
-static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
-
// A few standard types that definitely support printing.
static_assert(internal::SupportsOstreamOperator<int>::value,
"ints should be printable");
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 844707ebd1..fc0350f78e 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -329,6 +329,7 @@ if (is_android) {
generate_jni("base_unittests_jni_headers") {
sources = [
"android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
"android/java/src/org/chromium/base/TestSystemMessageHandler.java",
"android/java/src/org/chromium/base/TestUiThread.java",
]
@@ -353,7 +354,6 @@ if (is_android) {
]
srcjar_deps = [ ":test_support_java_aidl" ]
java_files = [
- "android/java/src/org/chromium/base/FileDescriptorInfo.java",
"android/java/src/org/chromium/base/MainReturnCodeResult.java",
"android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
"android/java/src/org/chromium/base/MultiprocessTestClientService.java",
@@ -367,7 +367,10 @@ if (is_android) {
android_aidl("test_support_java_aidl") {
testonly = true
- import_include = [ "android/java/src" ]
+ import_include = [
+ "android/java/src",
+ "//base/android/java/src",
+ ]
sources = [
"android/java/src/org/chromium/base/ITestClient.aidl",
]
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index fcc4d123ed..c8fd3eddad 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -13,7 +13,7 @@
namespace base {
#if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
-Process SpawnMultiProcessTestChild(
+SpawnChildResult SpawnMultiProcessTestChild(
const std::string& procname,
const CommandLine& base_command_line,
const LaunchOptions& options) {
@@ -24,7 +24,9 @@ Process SpawnMultiProcessTestChild(
if (!command_line.HasSwitch(switches::kTestChildProcess))
command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
- return LaunchProcess(command_line, options);
+ SpawnChildResult result;
+ result.process = LaunchProcess(command_line, options);
+ return result;
}
bool WaitForMultiprocessTestChildExit(const Process& process,
@@ -54,7 +56,7 @@ MultiProcessTest::MultiProcessTest() {
// Don't compile on Arc++.
#if 0
-Process MultiProcessTest::SpawnChild(const std::string& procname) {
+SpawnChildResult MultiProcessTest::SpawnChild(const std::string& procname) {
LaunchOptions options;
#if defined(OS_WIN)
options.start_hidden = true;
@@ -62,7 +64,7 @@ Process MultiProcessTest::SpawnChild(const std::string& procname) {
return SpawnChildWithOptions(procname, options);
}
-Process MultiProcessTest::SpawnChildWithOptions(
+SpawnChildResult MultiProcessTest::SpawnChildWithOptions(
const std::string& procname,
const LaunchOptions& options) {
return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index bf9663759e..f0027d9458 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -17,6 +17,17 @@ namespace base {
class CommandLine;
+struct SpawnChildResult {
+ SpawnChildResult() {}
+ SpawnChildResult(SpawnChildResult&& other) = default;
+
+ SpawnChildResult& operator=(SpawnChildResult&& other) = default;
+
+ Process process;
+
+ DISALLOW_COPY_AND_ASSIGN(SpawnChildResult);
+};
+
// Helpers to spawn a child for a multiprocess test and execute a designated
// function. Use these when you already have another base class for your test
// fixture, but you want (some) of your tests to be multiprocess (otherwise you
@@ -33,9 +44,10 @@ class CommandLine;
// // Maybe set some options (e.g., |start_hidden| on Windows)....
//
// // Start a child process and run |a_test_func|.
-// base::Process test_child_process =
+// SpawnChildResult result =
// base::SpawnMultiProcessTestChild("a_test_func", command_line,
// options);
+// base::Process test_child_process = std::move(result.process);
//
// // Do stuff involving |test_child_process| and the child process....
//
@@ -61,10 +73,9 @@ class CommandLine;
// |command_line| should be as provided by
// |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments
// added. Note: On Windows, you probably want to set |options.start_hidden|.
-Process SpawnMultiProcessTestChild(
- const std::string& procname,
- const CommandLine& command_line,
- const LaunchOptions& options);
+SpawnChildResult SpawnMultiProcessTestChild(const std::string& procname,
+ const CommandLine& command_line,
+ const LaunchOptions& options);
// Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you
// may add any flags needed for your child process.
@@ -121,13 +132,13 @@ class MultiProcessTest : public PlatformTest {
// }
//
// Returns the child process.
- Process SpawnChild(const std::string& procname);
+ SpawnChildResult SpawnChild(const std::string& procname);
// Run a child process using the given launch options.
//
// Note: On Windows, you probably want to set |options.start_hidden|.
- Process SpawnChildWithOptions(const std::string& procname,
- const LaunchOptions& options);
+ SpawnChildResult SpawnChildWithOptions(const std::string& procname,
+ const LaunchOptions& options);
// Set up the command line used to spawn the child process.
// Override this to add things to the command line (calling this first in the
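With SpawnMultiProcessTestChild() and MultiProcessTest::SpawnChild() now returning SpawnChildResult, callers pull the Process out of the result struct. A minimal usage sketch, assuming the usual multiprocess test scaffolding (gtest, TestTimeouts) and a hypothetical child entry point named CheckSpawnResultChild:

#include "base/test/multiprocess_test.h"
#include "base/test/test_timeouts.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace base {

// Hypothetical child process entry point: exits immediately with success.
MULTIPROCESS_TEST_MAIN(CheckSpawnResultChild) {
  return 0;
}

class SpawnResultTest : public MultiProcessTest {};

TEST_F(SpawnResultTest, ChildExitsCleanly) {
  SpawnChildResult result = SpawnChild("CheckSpawnResultChild");
  ASSERT_TRUE(result.process.IsValid());

  int exit_code = -1;
  EXPECT_TRUE(WaitForMultiprocessTestChildExit(
      result.process, TestTimeouts::action_timeout(), &exit_code));
  EXPECT_EQ(0, exit_code);
}

}  // namespace base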
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index c74f013da1..a1b8fcbfc0 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -25,9 +25,10 @@ namespace base {
// - All options except |fds_to_remap| are ignored.
//
// NOTE: This MUST NOT run on the main thread of the NativeTest application.
-Process SpawnMultiProcessTestChild(const std::string& procname,
- const CommandLine& base_command_line,
- const LaunchOptions& options) {
+SpawnChildResult SpawnMultiProcessTestChild(
+ const std::string& procname,
+ const CommandLine& base_command_line,
+ const LaunchOptions& options) {
JNIEnv* env = android::AttachCurrentThread();
DCHECK(env);
@@ -54,7 +55,10 @@ Process SpawnMultiProcessTestChild(const std::string& procname,
android::ToJavaArrayOfStrings(env, command_line.argv());
jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
env, android::GetApplicationContext(), j_argv, fds);
- return Process(pid);
+
+ SpawnChildResult result;
+ result.process = Process(pid);
+ return result;
}
bool WaitForMultiprocessTestChildExit(const Process& process,
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
index f4bd7244b4..a236acffa1 100644
--- a/base/test/test_mock_time_task_runner.cc
+++ b/base/test/test_mock_time_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/test/test_mock_time_task_runner.h"
+#include <utility>
+
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
@@ -79,7 +81,7 @@ struct TestMockTimeTaskRunner::TestOrderedPendingTask
: public base::TestPendingTask {
TestOrderedPendingTask();
TestOrderedPendingTask(const tracked_objects::Location& location,
- const Closure& task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
@@ -104,12 +106,16 @@ TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
const tracked_objects::Location& location,
- const Closure& task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
TestNestability nestability)
- : base::TestPendingTask(location, task, post_time, delay, nestability),
+ : base::TestPendingTask(location,
+ std::move(task),
+ post_time,
+ delay,
+ nestability),
ordinal(ordinal) {}
TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() {
@@ -234,20 +240,20 @@ bool TestMockTimeTaskRunner::RunsTasksOnCurrentThread() const {
bool TestMockTimeTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock scoped_lock(tasks_lock_);
- tasks_.push(TestOrderedPendingTask(from_here, task, now_ticks_, delay,
- next_task_ordinal_++,
+ tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_,
+ delay, next_task_ordinal_++,
TestPendingTask::NESTABLE));
return true;
}
bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- return PostDelayedTask(from_here, task, delay);
+ return PostDelayedTask(from_here, std::move(task), delay);
}
bool TestMockTimeTaskRunner::IsElapsingStopped() {
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
index 54ebbdb7a8..2f892f52cc 100644
--- a/base/test/test_mock_time_task_runner.h
+++ b/base/test/test_mock_time_task_runner.h
@@ -12,6 +12,7 @@
#include <queue>
#include <vector>
+#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
@@ -140,10 +141,10 @@ class TestMockTimeTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner:
bool RunsTasksOnCurrentThread() const override;
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
protected:
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 98bc0179b8..3f71a9988f 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <string>
-
#include "base/test/test_pending_task.h"
+#include <string>
+#include <utility>
+
namespace base {
TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
-TestPendingTask::TestPendingTask(
- const tracked_objects::Location& location,
- const Closure& task,
- TimeTicks post_time,
- TimeDelta delay,
- TestNestability nestability)
+TestPendingTask::TestPendingTask(const tracked_objects::Location& location,
+ OnceClosure task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ TestNestability nestability)
: location(location),
- task(task),
+ task(std::move(task)),
post_time(post_time),
delay(delay),
nestability(nestability) {}
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 42f3f42c7b..52ca592f25 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -23,7 +23,7 @@ struct TestPendingTask {
TestPendingTask();
TestPendingTask(TestPendingTask&& other);
TestPendingTask(const tracked_objects::Location& location,
- const Closure& task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
TestNestability nestability);
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index 090a72e96a..4280a0de62 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/test/test_simple_task_runner.h"
+#include <utility>
+
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -16,23 +18,23 @@ TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
bool TestSimpleTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
- pending_tasks_.push_back(
- TestPendingTask(from_here, task, TimeTicks(), delay,
- TestPendingTask::NESTABLE));
+ pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+ TimeTicks(), delay,
+ TestPendingTask::NESTABLE));
return true;
}
bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
- pending_tasks_.push_back(
- TestPendingTask(from_here, task, TimeTicks(), delay,
- TestPendingTask::NON_NESTABLE));
+ pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+ TimeTicks(), delay,
+ TestPendingTask::NON_NESTABLE));
return true;
}
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index d089ba8a0b..f46e065e47 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -7,6 +7,7 @@
#include <deque>
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
@@ -43,10 +44,10 @@ class TestSimpleTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation.
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index d16f8bd225..cddb8981ad 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -29,8 +29,8 @@ namespace {
class PostTaskAndReplyRelay {
public:
PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply)
+ OnceClosure task,
+ OnceClosure reply)
: sequence_checker_(),
from_here_(from_here),
origin_task_runner_(SequencedTaskRunnerHandle::Get()),
@@ -39,12 +39,10 @@ class PostTaskAndReplyRelay {
~PostTaskAndReplyRelay() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- task_.Reset();
- reply_.Reset();
}
void RunTaskAndPostReply() {
- task_.Run();
+ std::move(task_).Run();
origin_task_runner_->PostTask(
from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
base::Unretained(this)));
@@ -54,12 +52,12 @@ class PostTaskAndReplyRelay {
void RunReplyAndSelfDestruct() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- // Force |task_| to be released before |reply_| is to ensure that no one
- // accidentally depends on |task_| keeping one of its arguments alive while
- // |reply_| is executing.
- task_.Reset();
+ // |task_| must already have been released before |reply_| runs, so that no
+ // one accidentally depends on |task_| keeping one of its arguments alive
+ // while |reply_| is executing.
+ DCHECK(!task_);
- reply_.Run();
+ std::move(reply_).Run();
// Cue mission impossible theme.
delete this;
@@ -68,8 +66,8 @@ class PostTaskAndReplyRelay {
const SequenceChecker sequence_checker_;
const tracked_objects::Location from_here_;
const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
- Closure reply_;
- Closure task_;
+ OnceClosure reply_;
+ OnceClosure task_;
};
} // namespace
@@ -78,8 +76,8 @@ namespace internal {
bool PostTaskAndReplyImpl::PostTaskAndReply(
const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
DCHECK(!task.is_null()) << from_here.ToString();
DCHECK(!reply.is_null()) << from_here.ToString();
PostTaskAndReplyRelay* relay =
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index 696b668a4c..00aee6d0ed 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -29,12 +29,12 @@ class BASE_EXPORT PostTaskAndReplyImpl {
// SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
// to be deleted on the sequence or thread that called this.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
private:
virtual bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) = 0;
+ OnceClosure task) = 0;
};
} // namespace internal
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index ce594cd7fb..e9f4aadd3d 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -97,12 +97,15 @@ struct SequencedTask : public TrackingInfo {
~SequencedTask() {}
+ SequencedTask(SequencedTask&&) = default;
+ SequencedTask& operator=(SequencedTask&&) = default;
+
int sequence_token_id;
int trace_id;
int64_t sequence_task_number;
SequencedWorkerPool::WorkerShutdown shutdown_behavior;
tracked_objects::Location posted_from;
- Closure task;
+ OnceClosure task;
// Non-delayed tasks and delayed tasks are managed together by time-to-run
// order. We calculate the time by adding the posted time and the given delay.
@@ -144,7 +147,7 @@ class SequencedWorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -168,13 +171,13 @@ SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
if (delay.is_zero()) {
- return pool_->PostWorkerTaskWithShutdownBehavior(
- from_here, task, shutdown_behavior_);
+ return pool_->PostWorkerTaskWithShutdownBehavior(from_here, std::move(task),
+ shutdown_behavior_);
}
- return pool_->PostDelayedWorkerTask(from_here, task, delay);
+ return pool_->PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -198,13 +201,13 @@ class SequencedWorkerPool::PoolSequencedTaskRunner
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
// SequencedTaskRunner implementation
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
private:
@@ -231,15 +234,16 @@ SequencedWorkerPool::PoolSequencedTaskRunner::
SequencedWorkerPool::PoolSequencedTaskRunner::
~PoolSequencedTaskRunner() = default;
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
- token_, from_here, task, shutdown_behavior_);
+ token_, from_here, std::move(task), shutdown_behavior_);
}
- return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
+ return pool_->PostDelayedSequencedWorkerTask(token_, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PoolSequencedTaskRunner::
@@ -247,13 +251,13 @@ bool SequencedWorkerPool::PoolSequencedTaskRunner::
return pool_->IsRunningSequenceOnCurrentThread(token_);
}
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
// There's no way to run nested tasks, so simply forward to
// PostDelayedTask.
- return PostDelayedTask(from_here, task, delay);
+ return PostDelayedTask(from_here, std::move(task), delay);
}
// Worker ---------------------------------------------------------------------
@@ -352,7 +356,7 @@ class SequencedWorkerPool::Inner {
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
bool RunsTasksOnCurrentThread() const;
@@ -397,8 +401,7 @@ class SequencedWorkerPool::Inner {
// Returns true if the task may run at some point in the future and false if
// it will definitely not run.
// Coalesce upon resolution of http://crbug.com/622400.
- bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
- const TimeDelta& delay);
+ bool PostTaskToTaskScheduler(SequencedTask sequenced, const TimeDelta& delay);
// Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
// and |traits|.
@@ -696,8 +699,10 @@ bool SequencedWorkerPool::Inner::PostTask(
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
+ DCHECK(task);
+
// TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
// revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
// waterfall. https://crbug.com/622400
@@ -710,9 +715,9 @@ bool SequencedWorkerPool::Inner::PostTask(
sequenced.sequence_token_id = sequence_token.id_;
sequenced.shutdown_behavior = shutdown_behavior;
sequenced.posted_from = from_here;
- sequenced.task =
- shutdown_behavior == BLOCK_SHUTDOWN ?
- base::MakeCriticalClosure(task) : task;
+ sequenced.task = shutdown_behavior == BLOCK_SHUTDOWN
+ ? base::MakeCriticalClosure(std::move(task))
+ : std::move(task);
sequenced.time_to_run = TimeTicks::Now() + delay;
int create_thread_id = 0;
@@ -757,13 +762,15 @@ bool SequencedWorkerPool::Inner::PostTask(
// See on top of the file why we don't compile this on Arc++.
#if 0
if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
- if (!PostTaskToTaskScheduler(sequenced, delay))
+ if (!PostTaskToTaskScheduler(std::move(sequenced), delay))
return false;
} else {
#endif
- pending_tasks_.insert(sequenced);
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior =
+ sequenced.shutdown_behavior;
+ pending_tasks_.insert(std::move(sequenced));
- if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
+ if (shutdown_behavior == BLOCK_SHUTDOWN)
blocking_shutdown_pending_task_count_++;
create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
@@ -802,7 +809,7 @@ bool SequencedWorkerPool::Inner::PostTask(
}
bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
- const SequencedTask& sequenced,
+ SequencedTask sequenced,
const TimeDelta& delay) {
#if 1
NOTREACHED();
@@ -838,7 +845,8 @@ bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
.WithPriority(task_priority_)
.WithShutdownBehavior(task_shutdown_behavior);
return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
- ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
+ ->PostDelayedTask(sequenced.posted_from, std::move(sequenced.task),
+ delay);
#endif
}
@@ -1043,7 +1051,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- task.task.Run();
+ std::move(task.task).Run();
stopwatch.Stop();
tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
@@ -1054,7 +1062,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// Also, do it before calling reset_running_task_info() so
// that sequence-checking from within the task's destructor
// still works.
- task.task = Closure();
+ DCHECK(!task.task);
this_worker->reset_running_task_info();
}
@@ -1266,7 +1274,11 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
// refcounted, so we just need to keep a copy of them alive until the lock
// is exited. The calling code can just clear() the vector they passed to
// us once the lock is exited to make this happen.
- delete_these_outside_lock->push_back(*i);
+ //
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i++);
continue;
}
@@ -1277,14 +1289,18 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
status = GET_WORK_WAIT;
if (cleanup_state_ == CLEANUP_RUNNING) {
// Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
- delete_these_outside_lock->push_back(*i);
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i);
}
break;
}
- // Found a runnable task.
- *task = *i;
+ // Found a runnable task. The const_cast is safe here since the object is
+ // erased from |pending_tasks_| soon after the move.
+ *task = std::move(const_cast<SequencedTask&>(*i));
pending_tasks_.erase(i);
if (task->shutdown_behavior == BLOCK_SHUTDOWN) {
blocking_shutdown_pending_task_count_--;
@@ -1562,71 +1578,71 @@ SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
bool SequencedWorkerPool::PostWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ OnceClosure task) {
+ return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ OnceClosure task) {
+ return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task) {
+ OnceClosure task) {
DCHECK(!token_name.empty());
return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ from_here, std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- return PostDelayedWorkerTask(from_here, task, delay);
+ return PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
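In GetWork() above, a SequencedTask now has to be moved rather than copied out of |pending_tasks_|, but std::set only hands out const references, hence the const_cast followed by an immediate erase. The standalone sketch below shows the same pattern on a plain std::set<std::string>; the names are illustrative only.

#include <iostream>
#include <set>
#include <string>
#include <utility>

int main() {
  std::set<std::string> pending = {"alpha", "beta"};

  // std::set exposes only const references to its elements, so a move-only or
  // expensive-to-copy element is moved out through a const_cast. This is
  // tolerable only because the element is erased immediately afterwards,
  // before its now-unspecified value can perturb the set's ordering.
  auto it = pending.begin();
  std::string task = std::move(const_cast<std::string&>(*it));
  pending.erase(it);

  std::cout << task << " " << pending.size() << "\n";  // Prints "alpha 1".
}

Since C++17, std::set::extract() performs the same move-out without a cast, but that was not available to this code at the time.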
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index 0d42de9138..e577e1be11 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -12,7 +12,7 @@
#include <string>
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -276,7 +276,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// Returns true if the task was posted successfully. This may fail during
// shutdown regardless of the specified ShutdownBehavior.
bool PostWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Same as PostWorkerTask but allows a delay to be specified (although doing
// so changes the shutdown behavior). The task will be run after the given
@@ -288,13 +288,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// task will be guaranteed to run to completion before shutdown
// (BLOCK_SHUTDOWN semantics).
bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostWorkerTask but allows specification of the shutdown behavior.
bool PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// Like PostWorkerTask above, but provides sequencing semantics. This means
@@ -310,13 +310,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// shutdown regardless of the specified ShutdownBehavior.
bool PostSequencedWorkerTask(SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Like PostSequencedWorkerTask above, but allows you to specify a named
// token, which saves an extra call to GetNamedSequenceToken.
bool PostNamedSequencedWorkerTask(const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Same as PostSequencedWorkerTask but allows a delay to be specified
// (although doing so changes the shutdown behavior). The task will be run
@@ -330,7 +330,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostSequencedWorkerTask but allows specification of the shutdown
@@ -338,12 +338,12 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// TaskRunner implementation. Forwards to PostDelayedWorkerTask().
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index d47037d79a..26ff10f1f5 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -27,8 +27,8 @@ class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override {
- return WorkerPool::PostTask(from_here, task, task_is_slow_);
+ OnceClosure task) override {
+ return WorkerPool::PostTask(from_here, std::move(task), task_is_slow_);
}
bool task_is_slow_;
@@ -45,7 +45,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -56,7 +56,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// zero because non-zero delays are not supported.
bool PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay);
const bool tasks_are_slow_;
@@ -73,9 +73,9 @@ WorkerPoolTaskRunner::~WorkerPoolTaskRunner() {
bool WorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- return PostDelayedTaskAssertZeroDelay(from_here, task, delay);
+ return PostDelayedTaskAssertZeroDelay(from_here, std::move(task), delay);
}
bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -84,11 +84,11 @@ bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
<< "WorkerPoolTaskRunner does not support non-zero delays";
- return WorkerPool::PostTask(from_here, task, tasks_are_slow_);
+ return WorkerPool::PostTask(from_here, std::move(task), tasks_are_slow_);
}
struct TaskRunnerHolder {
@@ -102,8 +102,8 @@ struct TaskRunnerHolder {
} // namespace
bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow) {
// Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
// do about them because WorkerPool doesn't have a flushing API.
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index 865948e437..d1c666d2f9 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -32,14 +32,15 @@ class BASE_EXPORT WorkerPool {
// false if |task| could not be posted to a worker thread. Regardless of
// return value, ownership of |task| is transferred to the worker pool.
static bool PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow);
+ OnceClosure task,
+ bool task_is_slow);
// Just like TaskRunner::PostTaskAndReply, except the destination
// for |task| is a worker thread and you can specify |task_is_slow| just
// like you can for PostTask above.
static bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow);
// Return true if the current thread is one that this WorkerPool runs tasks
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 0e19a1a0fe..5a5f28814d 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <utility>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/lazy_instance.h"
@@ -47,7 +49,7 @@ class WorkerPoolImpl {
~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool task_is_slow);
private:
@@ -59,9 +61,9 @@ WorkerPoolImpl::WorkerPoolImpl()
kIdleSecondsBeforeExit)) {}
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool /*task_is_slow*/) {
- pool_->PostTask(from_here, task);
+ pool_->PostTask(from_here, std::move(task));
}
base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
@@ -112,9 +114,10 @@ void WorkerThread::ThreadMain() {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool task_is_slow) {
- g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
+ g_lazy_worker_pool.Pointer()->PostTask(from_here, std::move(task),
+ task_is_slow);
return true;
}
@@ -137,12 +140,14 @@ PosixDynamicThreadPool::~PosixDynamicThreadPool() {
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
- const base::Closure& task) {
- PendingTask pending_task(from_here, task);
+ base::OnceClosure task) {
+ PendingTask pending_task(from_here, std::move(task));
AddTask(&pending_task);
}
void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
+ DCHECK(pending_task);
+ DCHECK(pending_task->task);
AutoLock locked(lock_);
pending_tasks_.push(std::move(*pending_task));
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index d65ae8f8cf..0b10adf8f3 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -28,7 +28,7 @@
#include <queue>
#include <string>
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -51,8 +51,7 @@ class BASE_EXPORT PosixDynamicThreadPool
int idle_seconds_before_exit);
// Adds |task| to the thread pool.
- void PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ void PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Worker thread method to wait for up to |idle_seconds_before_exit| for more
// work from the thread pool. Returns NULL if no work is available.
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 577f50043d..6317886b0d 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,7 +34,9 @@ const char kFilteringTraceConfig[] =
" \"excluded_categories\": [],"
" \"filter_args\": {},"
" \"filter_predicate\": \"heap_profiler_predicate\","
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]"
" }"
" ]"
"}";
@@ -122,6 +124,7 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
}
{
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("NotTesting"), kDonut);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
StackFrame frame_cc[] = {t, c, c};
AssertBacktraceEquals(frame_cc);
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 63d40611a6..b9f440adb6 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -5,6 +5,7 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <algorithm>
+#include <limits>
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -12,9 +13,9 @@ namespace base {
namespace trace_event {
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register, AllocationIndex index)
- : register_(alloc_register),
- index_(index) {}
+ const AllocationRegister& alloc_register,
+ AllocationIndex index)
+ : register_(alloc_register), index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
index_ = register_.allocations_.Next(index_ + 1);
@@ -25,12 +26,12 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-AllocationRegister::Allocation
-AllocationRegister::ConstIterator::operator*() const {
+AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
+ const {
return register_.GetAllocation(index_);
}
-size_t AllocationRegister::BacktraceHasher::operator () (
+size_t AllocationRegister::BacktraceHasher::operator()(
const Backtrace& backtrace) const {
const size_t kSampleLength = 10;
@@ -42,7 +43,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
}
size_t tail_start = backtrace.frame_count -
- std::min(backtrace.frame_count - head_end, kSampleLength);
+ std::min(backtrace.frame_count - head_end, kSampleLength);
for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
@@ -55,7 +56,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
return (total_value * 131101) >> 14;
}
-size_t AllocationRegister::AddressHasher::operator () (
+size_t AllocationRegister::AddressHasher::operator()(
const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
@@ -75,34 +76,48 @@ AllocationRegister::AllocationRegister()
AllocationRegister::AllocationRegister(size_t allocation_capacity,
size_t backtrace_capacity)
- : allocations_(allocation_capacity),
- backtraces_(backtrace_capacity) {}
-
-AllocationRegister::~AllocationRegister() {
+ : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
+ Backtrace sentinel = {};
+ sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
+ sentinel.frame_count = 1;
+
+ // Rationale for max / 2: in theory we could just start the sentinel with a
+ // refcount == 0. However, using max / 2 allows short circuiting of the
+ // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast
+ // path. From a functional viewpoint, the sentinel is safe even if we wrap
+ // over refcount because .
+ BacktraceMap::KVPair::second_type sentinel_refcount =
+ std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
+ auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
+ DCHECK(index_and_flag.second);
+ DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
-void AllocationRegister::Insert(const void* address,
+AllocationRegister::~AllocationRegister() {}
+
+bool AllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
DCHECK(address != nullptr);
if (size == 0) {
- return;
+ return false;
}
- AllocationInfo info = {
- size,
- context.type_name,
- InsertBacktrace(context.backtrace)
- };
+ AllocationInfo info = {size, context.type_name,
+ InsertBacktrace(context.backtrace)};
// Try to insert the allocation.
auto index_and_flag = allocations_.Insert(address, info);
- if (!index_and_flag.second) {
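+ // A failed insert that still yields a valid index means |address| was
+ // already present; a failed insert with kInvalidKVIndex means the table is
+ // full and the allocation is dropped.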
+ if (!index_and_flag.second &&
+ index_and_flag.first != AllocationMap::kInvalidKVIndex) {
// |address| is already there - overwrite the allocation info.
auto& old_info = allocations_.Get(index_and_flag.first).second;
RemoveBacktrace(old_info.backtrace_index);
old_info = info;
+ return true;
}
+
+ return index_and_flag.second;
}
void AllocationRegister::Remove(const void* address) {
@@ -140,15 +155,17 @@ AllocationRegister::ConstIterator AllocationRegister::end() const {
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
size_t allocated = sizeof(AllocationRegister);
- size_t resident = sizeof(AllocationRegister)
- + allocations_.EstimateUsedMemory()
- + backtraces_.EstimateUsedMemory();
+ size_t resident = sizeof(AllocationRegister) +
+ allocations_.EstimateUsedMemory() +
+ backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
const Backtrace& backtrace) {
auto index = backtraces_.Insert(backtrace, 0).first;
+ if (index == BacktraceMap::kInvalidKVIndex)
+ return kOutOfStorageBacktraceIndex;
auto& backtrace_and_count = backtraces_.Get(index);
backtrace_and_count.second++;
return index;
@@ -156,7 +173,8 @@ AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
auto& backtrace_and_count = backtraces_.Get(index);
- if (--backtrace_and_count.second == 0) {
+ if (--backtrace_and_count.second == 0 &&
+ index != kOutOfStorageBacktraceIndex) {
// Backtrace is not referenced anymore - remove it.
backtraces_.Remove(index);
}
@@ -165,15 +183,11 @@ void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
AllocationRegister::Allocation AllocationRegister::GetAllocation(
AllocationMap::KVIndex index) const {
const auto& address_and_info = allocations_.Get(index);
- const auto& backtrace_and_count = backtraces_.Get(
- address_and_info.second.backtrace_index);
- return {
- address_and_info.first,
- address_and_info.second.size,
- AllocationContext(
- backtrace_and_count.first,
- address_and_info.second.type_name)
- };
+ const auto& backtrace_and_count =
+ backtraces_.Get(address_and_info.second.backtrace_index);
+ return {address_and_info.first, address_and_info.second.size,
+ AllocationContext(backtrace_and_count.first,
+ address_and_info.second.type_name)};
}
} // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeae..ac9872f001 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
// For implementation simplicity API uses integer index instead
// of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
- static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+ enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
// Capacity controls how many items this hash map can hold, and largely
// affects memory footprint.
- FixedHashMap(size_t capacity)
- : num_cells_(capacity),
- cells_(static_cast<Cell*>(
- AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
- buckets_(static_cast<Bucket*>(
- AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
- free_list_(nullptr),
- next_unused_cell_(0) {}
+ explicit FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ num_inserts_dropped_(0),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
~FixedHashMap() {
FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
}
+ // Returns {kInvalidKVIndex, false} if the table is full.
std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
Cell** p_cell = Lookup(key);
Cell* cell = *p_cell;
@@ -74,7 +76,15 @@ class FixedHashMap {
}
// Get a free cell and link it.
- *p_cell = cell = GetFreeCell();
+ cell = GetFreeCell();
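+ // A null cell means |cells_| is exhausted: count the dropped insert
+ // (saturating the counter) and report failure to the caller instead of
+ // crashing.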
+ if (!cell) {
+ if (num_inserts_dropped_ <
+ std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+ ++num_inserts_dropped_;
+ }
+ return {kInvalidKVIndex, false};
+ }
+ *p_cell = cell;
cell->p_prev = p_cell;
cell->next = nullptr;
@@ -137,6 +147,8 @@ class FixedHashMap {
bits::Align(sizeof(Bucket) * NumBuckets, page_size);
}
+ size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
private:
friend base::trace_event::AllocationRegisterTest;
@@ -175,7 +187,8 @@ class FixedHashMap {
}
// Returns a cell that is not being used to store an entry (either by
- // recycling from the free list or by taking a fresh cell).
+ // recycling from the free list or by taking a fresh cell). May return
+ // nullptr if the hash table has run out of memory.
Cell* GetFreeCell() {
// First try to re-use a cell from the free list.
if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
return cell;
}
- // Otherwise pick the next cell that has not been touched before.
- size_t idx = next_unused_cell_;
- next_unused_cell_++;
-
// If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside
- // of the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
-
- CHECK_LT(next_unused_cell_, num_cells_ + 1)
- << "Allocation Register hash table has too little capacity. Increase "
- "the capacity to run heap profiler in large sessions.";
-
- return &cells_[idx];
+ // was reserved for |cells_|), return nullptr.
+ if (next_unused_cell_ >= num_cells_) {
+ return nullptr;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ return &cells_[next_unused_cell_++];
}
// Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +220,9 @@ class FixedHashMap {
// Number of cells.
size_t const num_cells_;
+ // Number of calls to Insert() that were lost because the hashtable was full.
+ size_t num_inserts_dropped_;
+
// The array of cells. This array is backed by mmapped memory. Lower indices
// are accessed first, higher indices are accessed only when the |free_list_|
// is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +252,8 @@ class TraceEventMemoryOverhead;
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
class BASE_EXPORT AllocationRegister {
public:
// Details about an allocation.
@@ -282,7 +288,10 @@ class BASE_EXPORT AllocationRegister {
// Inserts allocation details into the table. If the address was present
// already, its details are updated. |address| must not be null.
- void Insert(const void* address,
+ //
+ // Returns true if an insert occurred. Inserts may fail because the table
+ // is full.
+ bool Insert(const void* address,
size_t size,
const AllocationContext& context);
@@ -359,6 +368,14 @@ class BASE_EXPORT AllocationRegister {
AllocationMap allocations_;
BacktraceMap backtraces_;
+ // Sentinel used when the |backtraces_| table is full.
+ //
+ // This is a slight abstraction to allow for constant propagation. It
+ // knows that the sentinel will be the first item inserted into the table
+ // and that the first index returned will be 0. The constructor DCHECKs
+ // this assumption.
+ enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 5f5a80af3b..d78de9b548 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -54,10 +54,10 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self,
return ptr;
}
-void* HookllocAligned(const AllocatorDispatch* self,
- size_t alignment,
- size_t size,
- void* context) {
+void* HookAllocAligned(const AllocatorDispatch* self,
+ size_t alignment,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
@@ -129,7 +129,7 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
AllocatorDispatch g_allocator_hooks = {
&HookAlloc, /* alloc_function */
&HookZeroInitAlloc, /* alloc_zero_initialized_function */
- &HookllocAligned, /* alloc_aligned_function */
+ &HookAllocAligned, /* alloc_aligned_function */
&HookRealloc, /* realloc_function */
&HookFree, /* free_function */
&HookGetSizeEstimate, /* get_size_estimate_function */
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 7583763889..2692521c09 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -29,7 +29,8 @@ MemoryAllocatorDump::MemoryAllocatorDump(const std::string& absolute_name,
process_memory_dump_(process_memory_dump),
attributes_(new TracedValue),
guid_(guid),
- flags_(Flags::DEFAULT) {
+ flags_(Flags::DEFAULT),
+ size_(0) {
// The |absolute_name| cannot be empty.
DCHECK(!absolute_name.empty());
@@ -59,6 +60,8 @@ MemoryAllocatorDump::~MemoryAllocatorDump() {
void MemoryAllocatorDump::AddScalar(const char* name,
const char* units,
uint64_t value) {
+ if (strcmp(kNameSize, name) == 0)
+ size_ = value;
SStringPrintf(&string_conversion_buffer_, "%" PRIx64, value);
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeScalar);
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index c781f071bb..99ff114e5c 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
@@ -85,11 +86,21 @@ class BASE_EXPORT MemoryAllocatorDump {
TracedValue* attributes_for_testing() const { return attributes_.get(); }
private:
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ friend class MemoryDumpManager;
+ FRIEND_TEST_ALL_PREFIXES(MemoryAllocatorDumpTest, GetSize);
+
+ // Get the size for this dump.
+ // The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ uint64_t GetSize() const { return size_; }
+
const std::string absolute_name_;
ProcessMemoryDump* const process_memory_dump_; // Not owned (PMD owns this).
std::unique_ptr<TracedValue> attributes_;
MemoryAllocatorDumpGuid guid_;
int flags_; // See enum Flags.
+ uint64_t size_;
// A local buffer for Sprintf conversion on fastpath. Avoids allocating
// temporary strings on each AddScalar() call.
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index 1bf9715917..e1818f6eec 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -172,6 +172,16 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
pmd.AsValueInto(traced_value.get());
}
+TEST(MemoryAllocatorDumpTest, GetSize) {
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
+ MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 1);
+ dump->AddScalar("foo", MemoryAllocatorDump::kUnitsBytes, 2);
+ EXPECT_EQ(1u, dump->GetSize());
+}
+
// DEATH tests are not supported in Android / iOS.
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index 5a54a773c5..6ed1ca8fff 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -4,6 +4,9 @@
#include "base/trace_event/memory_dump_manager.h"
+#include <inttypes.h>
+#include <stdio.h>
+
#include <algorithm>
#include <utility>
@@ -17,6 +20,8 @@
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_piece.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
@@ -80,9 +85,12 @@ const char* const kStrictThreadCheckBlacklist[] = {
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
uint64_t dump_guid,
bool success) {
- TRACE_EVENT_NESTABLE_ASYNC_END1(
- MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
- TRACE_ID_MANGLE(dump_guid), "success", success);
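+ // 20 bytes is enough for "0x", up to 16 hex digits and the trailing '\0'.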
+ char guid_str[20];
+ sprintf(guid_str, "0x%" PRIx64, dump_guid);
+ TRACE_EVENT_NESTABLE_ASYNC_END2(MemoryDumpManager::kTraceCategory,
+ "GlobalMemoryDump", TRACE_ID_LOCAL(dump_guid),
+ "dump_guid", TRACE_STR_COPY(guid_str),
+ "success", success);
if (!wrapped_callback.is_null()) {
wrapped_callback.Run(dump_guid, success);
@@ -155,9 +163,7 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
}
MemoryDumpManager::MemoryDumpManager()
- : delegate_(nullptr),
- is_coordinator_(false),
- memory_tracing_enabled_(0),
+ : memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
dumper_registrations_ignored_for_testing_(false),
heap_profiling_enabled_(false) {
@@ -214,14 +220,13 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
heap_profiling_enabled_ = true;
}
-void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
- bool is_coordinator) {
+void MemoryDumpManager::Initialize(
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate) {
{
AutoLock lock(lock_);
DCHECK(delegate);
DCHECK(!delegate_);
- delegate_ = delegate;
- is_coordinator_ = is_coordinator;
+ delegate_ = std::move(delegate);
EnableHeapProfilingIfNeeded();
}
@@ -243,11 +248,19 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
!(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
// Create trace config with heap profiling filter.
+ std::string filter_string = "*";
+ const char* const kFilteredCategories[] = {
+ TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
+ MemoryDumpManager::kTraceCategory};
+ for (const char* cat : kFilteredCategories)
+ filter_string = filter_string + "," + cat;
+ TraceConfigCategoryFilter category_filter;
+ category_filter.InitializeFromString(filter_string);
+
TraceConfig::EventFilterConfig heap_profiler_filter_config(
HeapProfilerEventFilter::kName);
- heap_profiler_filter_config.AddIncludedCategory("*");
- heap_profiler_filter_config.AddIncludedCategory(
- MemoryDumpManager::kTraceCategory);
+ heap_profiler_filter_config.SetCategoryFilter(category_filter);
+
TraceConfig::EventFilters filters;
filters.push_back(heap_profiler_filter_config);
TraceConfig filtering_trace_config;
@@ -413,7 +426,7 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
}
void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
AutoLock lock(lock_);
dump_providers_for_polling_.insert(mdpinfo);
@@ -421,11 +434,11 @@ void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
// registered. This handles the case where OnTraceLogEnabled() did not notify
// ready since no polling supported mdp has yet been registered.
if (dump_providers_for_polling_.size() == 1)
- dump_scheduler_->NotifyPollingSupported();
+ MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded();
}
void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
mdpinfo->dump_provider->SuspendFastMemoryPolling();
AutoLock lock(lock_);
@@ -456,25 +469,16 @@ void MemoryDumpManager::RequestGlobalDump(
// Creates an async event to keep track of the global dump evolution.
// The |wrapped_callback| will generate the ASYNC_END event and then invoke
// the real |callback| provided by the caller.
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
- TRACE_ID_MANGLE(guid));
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(
+ kTraceCategory, "GlobalMemoryDump", TRACE_ID_LOCAL(guid), "dump_type",
+ MemoryDumpTypeToString(dump_type), "level_of_detail",
+ MemoryDumpLevelOfDetailToString(level_of_detail));
MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
- // Technically there is no need to grab the |lock_| here as the delegate is
- // long-lived and can only be set by Initialize(), which is locked and
- // necessarily happens before memory_tracing_enabled_ == true.
- // Not taking the |lock_|, though, is likely to make TSan barf and, at this point
- // (memory-infra is enabled) we're not in the fast-path anymore.
- MemoryDumpManagerDelegate* delegate;
- {
- AutoLock lock(lock_);
- delegate = delegate_;
- }
-
// The delegate will coordinate the IPC broadcast and at some point invoke
// CreateProcessDump() to get a dump for the current process.
MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
- delegate->RequestGlobalMemoryDump(args, wrapped_callback);
+ delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
}
void MemoryDumpManager::RequestGlobalDump(
@@ -483,10 +487,24 @@ void MemoryDumpManager::RequestGlobalDump(
RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
+bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
+ MemoryDumpProvider* provider) {
+ AutoLock lock(lock_);
+
+ for (const auto& info : dump_providers_) {
+ if (info->dump_provider == provider)
+ return true;
+ }
+ return false;
+}
+
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) {
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
- TRACE_ID_MANGLE(args.dump_guid));
+ char guid_str[20];
+ sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
+ TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
+ TRACE_STR_COPY(guid_str));
// If argument filter is enabled then only background mode dumps should be
// allowed. In case the trace config passed for background tracing session
@@ -515,14 +533,9 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
CHECK(!session_state_ ||
session_state_->IsDumpModeAllowed(args.level_of_detail));
- if (dump_scheduler_)
- dump_scheduler_->NotifyDumpTriggered();
+ MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered();
}
- TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
- TRACE_ID_MANGLE(args.dump_guid),
- TRACE_EVENT_FLAG_FLOW_OUT);
-
// Start the process dump. This involves task runner hops as specified by the
// MemoryDumpProvider(s) in RegisterDumpProvider()).
SetupNextMemoryDump(std::move(pmd_async_state));
@@ -666,11 +679,8 @@ void MemoryDumpManager::InvokeOnMemoryDump(
if (should_dump) {
// Invoke the dump provider.
- TRACE_EVENT_WITH_FLOW1(kTraceCategory,
- "MemoryDumpManager::InvokeOnMemoryDump",
- TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
- TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
- "dump_provider.name", mdpinfo->name);
+ TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
+ "dump_provider.name", mdpinfo->name);
// A stack allocated string with dump provider name is useful to debug
// crashes while invoking dump after a |dump_provider| is not unregistered
@@ -722,6 +732,18 @@ bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
}
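+// Sums the kNameSize scalar of every allocator dump in |pmd| whose absolute
+// name matches |pattern| and returns the total in KB.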
// static
+uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
+ const ProcessMemoryDump* pmd) {
+ uint64_t sum = 0;
+ for (const auto& kv : pmd->allocator_dumps()) {
+ auto name = StringPiece(kv.first);
+ if (MatchPattern(name, pattern))
+ sum += kv.second->GetSize();
+ }
+ return sum / 1024;
+}
+
+// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
HEAP_PROFILER_SCOPED_IGNORE;
@@ -736,9 +758,11 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
return;
}
- TRACE_EVENT_WITH_FLOW0(kTraceCategory,
- "MemoryDumpManager::FinalizeDumpAndAddToTrace",
- TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);
+ TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");
+
+ // The results struct to fill.
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ MemoryDumpCallbackResult result;
for (const auto& kv : pmd_async_state->process_dumps) {
ProcessId pid = kv.first; // kNullProcessId for the current process.
@@ -760,6 +784,30 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
kTraceEventNumArgs, kTraceEventArgNames,
kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
TRACE_EVENT_FLAG_HAS_ID);
+
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ // Don't try to fill the struct in detailed mode since it is hard to avoid
+ // double counting.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::DETAILED)
+ continue;
+
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ if (pid == kNullProcessId) {
+ result.chrome_dump.malloc_total_kb =
+ GetDumpsSumKb("malloc", process_memory_dump);
+ result.chrome_dump.v8_total_kb =
+ GetDumpsSumKb("v8/*", process_memory_dump);
+
+ // partition_alloc reports sizes for both allocated_objects and
+ // partitions. The memory allocated_objects uses is a subset of
+ // the partitions memory so to avoid double counting we only
+ // count partitions memory.
+ result.chrome_dump.partition_alloc_total_kb =
+ GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
+ result.chrome_dump.blink_gc_total_kb =
+ GetDumpsSumKb("blink_gc", process_memory_dump);
+ }
}
bool tracing_still_enabled;
@@ -776,7 +824,7 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
}
TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
- TRACE_ID_MANGLE(dump_guid));
+ TRACE_ID_LOCAL(dump_guid));
}
void MemoryDumpManager::OnTraceLogEnabled() {
@@ -829,18 +877,6 @@ void MemoryDumpManager::OnTraceLogEnabled() {
session_state, &MemoryDumpSessionState::type_name_deduplicator));
}
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
- new MemoryDumpScheduler(this, dump_thread->task_runner()));
- DCHECK_LE(memory_dump_config.triggers.size(), 3u);
- for (const auto& trigger : memory_dump_config.triggers) {
- if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
- NOTREACHED();
- continue;
- }
- dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
- trigger.min_time_between_dumps_ms);
- }
-
{
AutoLock lock(lock_);
@@ -849,7 +885,6 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
- dump_scheduler_ = std::move(dump_scheduler);
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
@@ -858,15 +893,28 @@ void MemoryDumpManager::OnTraceLogEnabled() {
if (mdpinfo->options.is_fast_polling_supported)
dump_providers_for_polling_.insert(mdpinfo);
}
+
+ MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance();
+ dump_scheduler->Setup(this, dump_thread_->task_runner());
+ DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+ trigger.min_time_between_dumps_ms);
+ }
+
// Notify polling supported only if some polling supported provider was
// registered, else RegisterPollingMDPOnDumpThread() will notify when first
// polling MDP registers.
if (!dump_providers_for_polling_.empty())
- dump_scheduler_->NotifyPollingSupported();
+ dump_scheduler->EnablePollingIfNeeded();
// Only coordinator process triggers periodic global memory dumps.
- if (is_coordinator_)
- dump_scheduler_->NotifyPeriodicTriggerSupported();
+ if (delegate_->IsCoordinator())
+ dump_scheduler->EnablePeriodicTriggerIfNeeded();
}
}
@@ -879,14 +927,12 @@ void MemoryDumpManager::OnTraceLogDisabled() {
return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
- std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
- scheduler = std::move(dump_scheduler_);
+ MemoryDumpScheduler::GetInstance()->DisableAllTriggers();
}
- scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
@@ -910,38 +956,6 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
return session_state_->IsDumpModeAllowed(dump_mode);
}
-uint64_t MemoryDumpManager::GetTracingProcessId() const {
- return delegate_->GetTracingProcessId();
-}
-
-MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
- MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode)
- : dump_provider(dump_provider),
- name(name),
- task_runner(std::move(task_runner)),
- options(options),
- consecutive_failures(0),
- disabled(false),
- whitelisted_for_background_mode(whitelisted_for_background_mode) {}
-
-MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
-
-bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
- if (!a || !b)
- return a.get() < b.get();
- // Ensure that unbound providers (task_runner == nullptr) always run last.
- // Rationale: some unbound dump providers are known to be slow, keep them last
- // to avoid skewing timings of the other dump providers.
- return std::tie(a->task_runner, a->dump_provider) >
- std::tie(b->task_runner, b->dump_provider);
-}
-
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 92cc2f401b..e7f5194850 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -9,7 +9,7 @@
#include <map>
#include <memory>
-#include <set>
+#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
@@ -18,10 +18,20 @@
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_provider_info.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
+// Forward declare |MemoryDumpManagerDelegateImplTest| so that we can make it a
+// friend of |MemoryDumpManager| and give it access to |SetInstanceForTesting|.
+namespace memory_instrumentation {
+
+class MemoryDumpManagerDelegateImplTest;
+
+} // namespace memory_instrumentation
+
namespace base {
class SingleThreadTaskRunner;
@@ -54,13 +64,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// On the other side, the MemoryDumpManager will not be fully operational
// (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
// Arguments:
- // is_coordinator: if true this MemoryDumpManager instance will act as a
- // coordinator and schedule periodic dumps (if enabled via TraceConfig);
- // false when the MemoryDumpManager is initialized in a slave process.
// delegate: inversion-of-control interface for embedder-specific behaviors
// (multiprocess handshaking). See the lifetime and thread-safety
// requirements in the |MemoryDumpManagerDelegate| docstring.
- void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
+ void Initialize(std::unique_ptr<MemoryDumpManagerDelegate> delegate);
// (Un)Registers a MemoryDumpProvider instance.
// Args:
@@ -123,6 +130,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Returns true if the dump mode is allowed for current tracing session.
bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+ // Lets tests see if a dump provider is registered.
+ bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
+
// Returns the MemoryDumpSessionState object, which is shared by all the
// ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
// session lifetime.
@@ -135,7 +145,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// retrieved by child processes only when tracing is enabled. This is
// intended to express cross-process sharing of memory dumps on the
// child-process side, without having to know its own child process id.
- uint64_t GetTracingProcessId() const;
+ uint64_t GetTracingProcessId() const { return tracing_process_id_; }
+ void set_tracing_process_id(uint64_t tracing_process_id) {
+ tracing_process_id_ = tracing_process_id;
+ }
// Returns the name for the allocated_objects dump. Use this to declare
// suballocator dumps from other dump providers.
@@ -156,70 +169,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
friend class MemoryDumpScheduler;
-
- // Descriptor used to hold information about registered MDPs.
- // Some important considerations about lifetime of this object:
- // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
- // the |dump_providers_| collection (% unregistration while dumping).
- // - Upon each dump they (actually their scoped_refptr-s) are copied into
- // the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
- // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
- // inside ProcessMemoryDumpAsyncState is removed.
- // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
- // - If UnregisterDumpProvider() is called while a dump is in progress, the
- // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
- // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
- // - The non-const fields of MemoryDumpProviderInfo are safe to access only
- // on tasks running in the |task_runner|, unless the thread has been
- // destroyed.
- struct MemoryDumpProviderInfo
- : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- // Define a total order based on the |task_runner| affinity, so that MDPs
- // belonging to the same SequencedTaskRunner are adjacent in the set.
- struct Comparator {
- bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpProviderInfo>& b) const;
- };
- using OrderedSet =
- std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
-
- MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode);
-
- MemoryDumpProvider* const dump_provider;
-
- // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
- // nullptr in all other cases.
- std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
-
- // Human readable name, for debugging and testing. Not necessarily unique.
- const char* const name;
-
- // The task runner affinity. Can be nullptr, in which case the dump provider
- // will be invoked on |dump_thread_|.
- const scoped_refptr<SequencedTaskRunner> task_runner;
-
- // The |options| arg passed to RegisterDumpProvider().
- const MemoryDumpProvider::Options options;
-
- // For fail-safe logic (auto-disable failing MDPs).
- int consecutive_failures;
-
- // Flagged either by the auto-disable logic or during unregistration.
- bool disabled;
-
- // True if the dump provider is whitelisted for background mode.
- const bool whitelisted_for_background_mode;
-
- private:
- friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
- ~MemoryDumpProviderInfo();
-
- DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
- };
+ friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfil an asynchronous CreateProcessDump()
@@ -285,6 +235,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~MemoryDumpManager() override;
static void SetInstanceForTesting(MemoryDumpManager* instance);
+ static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
static void FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
@@ -348,10 +299,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
std::unordered_set<StringPiece, StringPieceHash>
strict_thread_check_blacklist_;
- MemoryDumpManagerDelegate* delegate_; // Not owned.
-
- // When true, this instance is in charge of coordinating periodic dumps.
- bool is_coordinator_;
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate_;
// Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
// to guard against disabling logging while dumping on another thread.
@@ -361,9 +309,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// dump_providers_enabled_ list) when tracing is not enabled.
subtle::AtomicWord memory_tracing_enabled_;
- // For triggering memory dumps.
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
-
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
std::unique_ptr<Thread> dump_thread_;
@@ -385,17 +330,15 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// safe (i.e. should expect calls from any thread and handle thread hopping).
class BASE_EXPORT MemoryDumpManagerDelegate {
public:
+ MemoryDumpManagerDelegate() {}
+ virtual ~MemoryDumpManagerDelegate() {}
+
virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) = 0;
- // Returns tracing process id of the current process. This is used by
- // MemoryDumpManager::GetTracingProcessId.
- virtual uint64_t GetTracingProcessId() const = 0;
+ virtual bool IsCoordinator() const = 0;
protected:
- MemoryDumpManagerDelegate() {}
- virtual ~MemoryDumpManagerDelegate() {}
-
void CreateProcessDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) {
MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 51d41943fb..e126edd397 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -7,9 +7,11 @@
#include <stdint.h>
#include <memory>
+#include <utility>
#include <vector>
#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/message_loop/message_loop.h"
@@ -30,6 +32,7 @@
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -102,10 +105,10 @@ void OnTraceDataCollected(Closure quit_closure,
// Posts |task| to |task_runner| and blocks until it is executed.
void PostTaskAndWait(const tracked_objects::Location& from_here,
SequencedTaskRunner* task_runner,
- const base::Closure& task) {
+ base::OnceClosure task) {
base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner->PostTask(from_here, task);
+ task_runner->PostTask(from_here, std::move(task));
task_runner->PostTask(
FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
// The SequencedTaskRunner guarantees that |event| will only be signaled after
@@ -113,13 +116,12 @@ void PostTaskAndWait(const tracked_objects::Location& from_here,
event.Wait();
}
-} // namespace
-
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
public:
- MemoryDumpManagerDelegateForTesting() {
+ MemoryDumpManagerDelegateForTesting(bool is_coordinator)
+ : is_coordinator_(is_coordinator) {
ON_CALL(*this, RequestGlobalMemoryDump(_, _))
.WillByDefault(Invoke(
this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
@@ -129,13 +131,13 @@ class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
void(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback));
- uint64_t GetTracingProcessId() const override {
- NOTREACHED();
- return MemoryDumpManager::kInvalidTracingProcessId;
- }
+ bool IsCoordinator() const override { return is_coordinator_; }
// Promote the CreateProcessDump to public so it can be used by test fixtures.
using MemoryDumpManagerDelegate::CreateProcessDump;
+
+ private:
+ bool is_coordinator_;
};
class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -180,19 +182,19 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override {
NOTREACHED();
return false;
}
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override {
num_of_post_tasks_++;
if (enabled_) {
return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
- task);
+ std::move(task));
}
return false;
}
@@ -210,6 +212,8 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned num_of_post_tasks_;
};
+} // namespace
+
class MemoryDumpManagerTest : public testing::Test {
public:
MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -220,13 +224,12 @@ class MemoryDumpManagerTest : public testing::Test {
mdm_.reset(new MemoryDumpManager());
MemoryDumpManager::SetInstanceForTesting(mdm_.get());
ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
- delegate_.reset(new MemoryDumpManagerDelegateForTesting);
}
void TearDown() override {
MemoryDumpManager::SetInstanceForTesting(nullptr);
+ delegate_ = nullptr;
mdm_.reset();
- delegate_.reset();
message_loop_.reset();
TraceLog::DeleteForTesting();
}
@@ -248,7 +251,8 @@ class MemoryDumpManagerTest : public testing::Test {
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
- mdm_->Initialize(delegate_.get(), is_coordinator);
+ delegate_ = new MemoryDumpManagerDelegateForTesting(is_coordinator);
+ mdm_->Initialize(base::WrapUnique(delegate_));
}
void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
@@ -274,7 +278,8 @@ class MemoryDumpManagerTest : public testing::Test {
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
bool IsPeriodicDumpingEnabled() const {
- return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
+ return MemoryDumpScheduler::GetInstance()
+ ->IsPeriodicTimerRunningForTesting();
}
int GetMaxConsecutiveFailuresCount() const {
@@ -283,7 +288,7 @@ class MemoryDumpManagerTest : public testing::Test {
const MemoryDumpProvider::Options kDefaultOptions;
std::unique_ptr<MemoryDumpManager> mdm_;
- std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ MemoryDumpManagerDelegateForTesting* delegate_;
bool last_callback_success_;
private:
@@ -435,7 +440,13 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
-TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+// Flaky on iOS, see crbug.com/706874
+#if defined(OS_IOS)
+#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
+#else
+#define MAYBE_RegistrationConsistency RegistrationConsistency
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
@@ -897,7 +908,6 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// initialization gets NACK-ed cleanly.
{
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
- EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_FALSE(last_callback_success_);
@@ -906,9 +916,9 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// Now late-initialize the MemoryDumpManager and check that the
// RequestGlobalDump completes successfully.
{
+ InitializeMemoryDumpManager(false /* is_coordinator */);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
- InitializeMemoryDumpManager(false /* is_coordinator */);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_TRUE(last_callback_success_);
@@ -1010,7 +1020,13 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
-TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
+// Flaky on iOS, see crbug.com/706961
+#if defined(OS_IOS)
+#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
+#else
+#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
base::WaitableEvent tracing_disabled_event(
WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
diff --git a/base/trace_event/memory_dump_provider_info.cc b/base/trace_event/memory_dump_provider_info.cc
new file mode 100644
index 0000000000..6bb711018b
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_provider_info.h"
+
+#include <tuple>
+
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+ MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
+ : dump_provider(dump_provider),
+ options(options),
+ name(name),
+ task_runner(std::move(task_runner)),
+ whitelisted_for_background_mode(whitelisted_for_background_mode),
+ consecutive_failures(0),
+ disabled(false) {}
+
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpProviderInfo::Comparator::operator()(
+ const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const {
+ if (!a || !b)
+ return a.get() < b.get();
+ // Ensure that unbound providers (task_runner == nullptr) always run last.
+ // Rationale: some unbound dump providers are known to be slow, keep them last
+ // to avoid skewing timings of the other dump providers.
+ return std::tie(a->task_runner, a->dump_provider) >
+ std::tie(b->task_runner, b->dump_provider);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_dump_provider_info.h b/base/trace_event/memory_dump_provider_info.h
new file mode 100644
index 0000000000..ca63a987b2
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Wraps a MemoryDumpProvider (MDP), which is registered via
+// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
+// required to deal with it (which task runner it should be invoked onto,
+// whether it has been disabled, etc.)
+// More importantly, having a refptr to this object guarantees that a MDP that
+// is not thread-bound (hence which can only be unregistered via
+// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
+// refptr is held.
+//
+// Lifetime:
+// At any time, there is at most one instance of this class for each instance
+// of a given MemoryDumpProvider, but there might be several scoped_refptrs
+// holding onto it. Specifically:
+// - In nominal conditions, there is a refptr for each registered MDP in the
+// MDM's |dump_providers_| list.
+// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
+// by MDM::UnregisterDumpProvider().
+// - However, when MDM starts a dump, the list of refptrs is copied into the
+// ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
+// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
+// dump is in progress, the extra refptr is destroyed in
+// MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
+// inside ProcessMemoryDumpAsyncState is erase()-d.
+// - The PeakDetector can keep extra refptrs when enabled.
+struct BASE_EXPORT MemoryDumpProviderInfo
+ : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ public:
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
+ struct Comparator {
+ bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+ };
+ using OrderedSet =
+ std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+ MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
+
+ // It is safe to access the const fields below from any thread as they are
+ // never mutated.
+
+ MemoryDumpProvider* const dump_provider;
+
+ // The |options| arg passed to MDM::RegisterDumpProvider().
+ const MemoryDumpProvider::Options options;
+
+ // Human readable name, not unique (distinct MDP instances might have the same
+ // name). Used for debugging, testing and whitelisting for BACKGROUND mode.
+ const char* const name;
+
+ // The task runner on which the MDP::OnMemoryDump call should be posted.
+ // Can be nullptr, in which case the MDP will be invoked on a background
+ // thread handled by MDM.
+ const scoped_refptr<SequencedTaskRunner> task_runner;
+
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
+ // The fields below, in contrast, are not thread safe and can be mutated only:
+ // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
+ // - By the MDM's background thread (or in any other way that guarantees
+ // sequencing) for non-thread-bound MDPs.
+
+ // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+ // nullptr in all other cases.
+ std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+ // For fail-safe logic (auto-disable failing MDPs).
+ int consecutive_failures;
+
+ // Flagged either by the auto-disable logic or during unregistration.
+ bool disabled;
+
+ private:
+ friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+ ~MemoryDumpProviderInfo();
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index bf72bef5e4..f2744007d7 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -60,5 +60,9 @@ MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
return MemoryDumpLevelOfDetail::LAST;
}
+MemoryDumpCallbackResult::MemoryDumpCallbackResult() {}
+
+MemoryDumpCallbackResult::~MemoryDumpCallbackResult() {}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 90a866fa7a..a8b3f423ca 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -9,10 +9,12 @@
// These are also used in the IPCs for coordinating inter-process memory dumps.
#include <stdint.h>
+#include <map>
#include <string>
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/process/process_handle.h"
namespace base {
namespace trace_event {
@@ -72,6 +74,33 @@ struct MemoryDumpArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// TODO(hjd): Not used yet, see crbug.com/703184
+// Summarises information about memory use as seen by a single process.
+// This information will eventually be passed to a service to be collated
+// and reported.
+struct MemoryDumpCallbackResult {
+ struct OSMemDump {
+ uint32_t resident_set_kb = 0;
+ };
+ struct ChromeMemDump {
+ uint32_t malloc_total_kb = 0;
+ uint32_t partition_alloc_total_kb = 0;
+ uint32_t blink_gc_total_kb = 0;
+ uint32_t v8_total_kb = 0;
+ };
+
+ // These are for the current process.
+ OSMemDump os_dump;
+ ChromeMemDump chrome_dump;
+
+ // In some cases, OS stats can only be dumped from a privileged process to
+ // get around sandboxing/selinux restrictions (see crbug.com/461788).
+ std::map<ProcessId, OSMemDump> extra_processes_dump;
+
+ MemoryDumpCallbackResult();
+ ~MemoryDumpCallbackResult();
+};
+
using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index eaa8d63661..150feb8e79 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -21,108 +21,131 @@ const uint32_t kMemoryTotalsPollingInterval = 25;
uint32_t g_polling_interval_ms_for_testing = 0;
} // namespace
-MemoryDumpScheduler::MemoryDumpScheduler(
- MemoryDumpManager* mdm,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
- : mdm_(mdm), polling_state_(polling_task_runner) {}
+// static
+MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
+ static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
+ return instance;
+}
+MemoryDumpScheduler::MemoryDumpScheduler() : mdm_(nullptr), is_setup_(false) {}
MemoryDumpScheduler::~MemoryDumpScheduler() {}
+void MemoryDumpScheduler::Setup(
+ MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
+ mdm_ = mdm;
+ polling_task_runner_ = polling_task_runner;
+ periodic_state_.reset(new PeriodicTriggerState);
+ polling_state_.reset(new PollingTriggerState);
+ is_setup_ = true;
+}
+
void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
MemoryDumpLevelOfDetail level_of_detail,
uint32_t min_time_between_dumps_ms) {
+ DCHECK(is_setup_);
if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
- DCHECK(!periodic_state_.is_configured);
- DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ DCHECK(!periodic_state_->is_configured);
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
DCHECK_NE(0u, min_time_between_dumps_ms);
- polling_state_.level_of_detail = level_of_detail;
- polling_state_.min_polls_between_dumps =
- (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
- polling_state_.polling_interval_ms;
- polling_state_.current_state = PollingTriggerState::CONFIGURED;
+ polling_state_->level_of_detail = level_of_detail;
+ polling_state_->min_polls_between_dumps =
+ (min_time_between_dumps_ms + polling_state_->polling_interval_ms - 1) /
+ polling_state_->polling_interval_ms;
+ polling_state_->current_state = PollingTriggerState::CONFIGURED;
} else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
- DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
- periodic_state_.is_configured = true;
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
+ periodic_state_->is_configured = true;
DCHECK_NE(0u, min_time_between_dumps_ms);
switch (level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
- periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, periodic_state_->light_dump_period_ms);
+ periodic_state_->light_dump_period_ms = min_time_between_dumps_ms;
break;
case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
- periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms);
+ periodic_state_->heavy_dump_period_ms = min_time_between_dumps_ms;
break;
}
- periodic_state_.min_timer_period_ms = std::min(
- periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
- periodic_state_.min_timer_period_ms);
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
- periodic_state_.min_timer_period_ms);
+ periodic_state_->min_timer_period_ms = std::min(
+ periodic_state_->min_timer_period_ms, min_time_between_dumps_ms);
+ DCHECK_EQ(0u, periodic_state_->light_dump_period_ms %
+ periodic_state_->min_timer_period_ms);
+ DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms %
+ periodic_state_->min_timer_period_ms);
}
}
-void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
- if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
+void MemoryDumpScheduler::EnablePeriodicTriggerIfNeeded() {
+ DCHECK(is_setup_);
+ if (!periodic_state_->is_configured || periodic_state_->timer.IsRunning())
return;
- periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
- periodic_state_.min_timer_period_ms;
- periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
- periodic_state_.min_timer_period_ms;
+ periodic_state_->light_dumps_rate = periodic_state_->light_dump_period_ms /
+ periodic_state_->min_timer_period_ms;
+ periodic_state_->heavy_dumps_rate = periodic_state_->heavy_dump_period_ms /
+ periodic_state_->min_timer_period_ms;
- periodic_state_.dump_count = 0;
- periodic_state_.timer.Start(
+ periodic_state_->dump_count = 0;
+ periodic_state_->timer.Start(
FROM_HERE,
- TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
+ TimeDelta::FromMilliseconds(periodic_state_->min_timer_period_ms),
Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
}
-void MemoryDumpScheduler::NotifyPollingSupported() {
- if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
+void MemoryDumpScheduler::EnablePollingIfNeeded() {
+ DCHECK(is_setup_);
+ if (polling_state_->current_state != PollingTriggerState::CONFIGURED)
return;
- polling_state_.current_state = PollingTriggerState::ENABLED;
- polling_state_.ResetTotals();
+ polling_state_->current_state = PollingTriggerState::ENABLED;
+ polling_state_->ResetTotals();
- polling_state_.polling_task_runner->PostTask(
+ polling_task_runner_->PostTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
}
void MemoryDumpScheduler::NotifyDumpTriggered() {
- if (polling_state_.polling_task_runner &&
- polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
- polling_state_.polling_task_runner->PostTask(
+ if (polling_task_runner_ &&
+ !polling_task_runner_->RunsTasksOnCurrentThread()) {
+ polling_task_runner_->PostTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
return;
}
- if (polling_state_.current_state != PollingTriggerState::ENABLED)
+
+ if (!polling_state_ ||
+ polling_state_->current_state != PollingTriggerState::ENABLED) {
return;
+ }
- polling_state_.ResetTotals();
+ polling_state_->ResetTotals();
}
void MemoryDumpScheduler::DisableAllTriggers() {
- if (periodic_state_.timer.IsRunning())
- periodic_state_.timer.Stop();
- DisablePolling();
-}
+ if (periodic_state_) {
+ if (periodic_state_->timer.IsRunning())
+ periodic_state_->timer.Stop();
+ periodic_state_.reset();
+ }
-void MemoryDumpScheduler::DisablePolling() {
- if (polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
- if (polling_state_.polling_task_runner->PostTask(
- FROM_HERE,
- Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
- return;
+ if (polling_task_runner_) {
+ DCHECK(polling_state_);
+ polling_task_runner_->PostTask(
+ FROM_HERE, Bind(&MemoryDumpScheduler::DisablePollingOnPollingThread,
+ Unretained(this)));
+ polling_task_runner_ = nullptr;
}
- polling_state_.current_state = PollingTriggerState::DISABLED;
- polling_state_.polling_task_runner = nullptr;
+ is_setup_ = false;
+}
+
+void MemoryDumpScheduler::DisablePollingOnPollingThread() {
+ polling_state_->current_state = PollingTriggerState::DISABLED;
+ polling_state_.reset();
}
// static
@@ -131,30 +154,32 @@ void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
}
bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
- return periodic_state_.timer.IsRunning();
+ return periodic_state_->timer.IsRunning();
}
void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (periodic_state_.light_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
+ if (periodic_state_->light_dumps_rate > 0 &&
+ periodic_state_->dump_count % periodic_state_->light_dumps_rate == 0)
level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (periodic_state_.heavy_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
+ if (periodic_state_->heavy_dumps_rate > 0 &&
+ periodic_state_->dump_count % periodic_state_->heavy_dumps_rate == 0)
level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_state_.dump_count;
+ ++periodic_state_->dump_count;
mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}
void MemoryDumpScheduler::PollMemoryOnPollingThread() {
- if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ if (!polling_state_)
return;
+ DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);
+
uint64_t polled_memory = 0;
bool res = mdm_->PollFastMemoryTotal(&polled_memory);
DCHECK(res);
- if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+ if (polling_state_->level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
polled_memory / 1024 / 1024);
}
@@ -166,14 +191,14 @@ void MemoryDumpScheduler::PollMemoryOnPollingThread() {
polled_memory / 1024 / 1024);
mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
- polling_state_.level_of_detail);
+ polling_state_->level_of_detail);
}
// TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
- TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
+ TimeDelta::FromMilliseconds(polling_state_->polling_interval_ms));
}
bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
@@ -184,52 +209,52 @@ bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
return false;
bool should_dump = false;
- ++polling_state_.num_polls_from_last_dump;
- if (polling_state_.last_dump_memory_total == 0) {
+ ++polling_state_->num_polls_from_last_dump;
+ if (polling_state_->last_dump_memory_total == 0) {
     // If it's the first sample then trigger a memory dump.
should_dump = true;
- } else if (polling_state_.min_polls_between_dumps >
- polling_state_.num_polls_from_last_dump) {
+ } else if (polling_state_->min_polls_between_dumps >
+ polling_state_->num_polls_from_last_dump) {
return false;
}
int64_t increase_from_last_dump =
- current_memory_total - polling_state_.last_dump_memory_total;
+ current_memory_total - polling_state_->last_dump_memory_total;
should_dump |=
- increase_from_last_dump > polling_state_.memory_increase_threshold;
+ increase_from_last_dump > polling_state_->memory_increase_threshold;
should_dump |= IsCurrentSamplePeak(current_memory_total);
if (should_dump)
- polling_state_.ResetTotals();
+ polling_state_->ResetTotals();
return should_dump;
}
bool MemoryDumpScheduler::IsCurrentSamplePeak(
uint64_t current_memory_total_bytes) {
uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
- polling_state_.last_memory_totals_kb_index =
- (polling_state_.last_memory_totals_kb_index + 1) %
+ polling_state_->last_memory_totals_kb_index =
+ (polling_state_->last_memory_totals_kb_index + 1) %
PollingTriggerState::kMaxNumMemorySamples;
uint64_t mean = 0;
for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
- if (polling_state_.last_memory_totals_kb[i] == 0) {
+ if (polling_state_->last_memory_totals_kb[i] == 0) {
// Not enough samples to detect peaks.
polling_state_
- .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
current_memory_total_kb;
return false;
}
- mean += polling_state_.last_memory_totals_kb[i];
+ mean += polling_state_->last_memory_totals_kb[i];
}
mean = mean / PollingTriggerState::kMaxNumMemorySamples;
uint64_t variance = 0;
for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
- variance += (polling_state_.last_memory_totals_kb[i] - mean) *
- (polling_state_.last_memory_totals_kb[i] - mean);
+ variance += (polling_state_->last_memory_totals_kb[i] - mean) *
+ (polling_state_->last_memory_totals_kb[i] - mean);
}
variance = variance / PollingTriggerState::kMaxNumMemorySamples;
polling_state_
- .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
current_memory_total_kb;
// If stddev is less than 0.2% then we consider that the process is inactive.
@@ -256,11 +281,9 @@ MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
DCHECK(!timer.IsRunning());
}
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+MemoryDumpScheduler::PollingTriggerState::PollingTriggerState()
: current_state(DISABLED),
level_of_detail(MemoryDumpLevelOfDetail::FIRST),
- polling_task_runner(polling_task_runner),
polling_interval_ms(g_polling_interval_ms_for_testing
? g_polling_interval_ms_for_testing
: kMemoryTotalsPollingInterval),
@@ -270,9 +293,7 @@ MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
memory_increase_threshold(0),
last_memory_totals_kb_index(0) {}
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
- DCHECK(!polling_task_runner);
-}
+MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {}
void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
if (!memory_increase_threshold) {
@@ -282,8 +303,11 @@ void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
// Set threshold to 1% of total system memory.
SystemMemoryInfoKB meminfo;
bool res = GetSystemMemoryInfo(&meminfo);
- if (res)
- memory_increase_threshold = (meminfo.total / 100) * 1024;
+ if (res) {
+ memory_increase_threshold =
+ (static_cast<int64_t>(meminfo.total) / 100) * 1024;
+ }
+ DCHECK_GT(memory_increase_threshold, 0u);
#endif
}
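
Two bits of arithmetic in the scheduler above are easier to follow with concrete numbers (purely illustrative, not values from any real configuration): the poll-count conversion in AddTrigger() is a ceiling division, and ResetTotals() derives its growth threshold as 1% of the total system memory reported in KiB:

    // min_time_between_dumps_ms = 120 with polling_interval_ms = 25:
    // rounds up to 5 polls instead of truncating to 4.
    uint32_t min_polls = (120 + 25 - 1) / 25;  // == 5

    // With 8 GiB of RAM, SystemMemoryInfoKB::total is in KiB, so the 1%
    // threshold comes out in bytes (roughly 82 MiB here).
    int64_t total_kb = 8LL * 1024 * 1024;
    int64_t memory_increase_threshold = (total_kb / 100) * 1024;
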
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
index fd21fce834..ab8441bc20 100644
--- a/base/trace_event/memory_dump_scheduler.h
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -5,6 +5,8 @@
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
+#include <memory>
+
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
@@ -18,42 +20,50 @@ namespace trace_event {
class MemoryDumpManager;
-// Schedules global dump requests based on the triggers added.
+// Schedules global dump requests based on the triggers added. The methods of
+// this class are NOT thread safe; the client is responsible for invoking
+// them in a thread-safe manner.
class BASE_EXPORT MemoryDumpScheduler {
public:
- MemoryDumpScheduler(
- MemoryDumpManager* mdm_,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
- ~MemoryDumpScheduler();
+ static MemoryDumpScheduler* GetInstance();
+
+ // Initializes the scheduler. NOT thread safe.
+ void Setup(MemoryDumpManager* mdm_,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
// Adds triggers for scheduling global dumps. Both periodic and peak triggers
// cannot be added together. At the moment the periodic support is limited to
// at most one periodic trigger per dump mode and peak triggers are limited to
   // at most one. All intervals should be an integral multiple of the smallest
- // interval specified.
+ // interval specified. NOT thread safe.
void AddTrigger(MemoryDumpType trigger_type,
MemoryDumpLevelOfDetail level_of_detail,
uint32_t min_time_between_dumps_ms);
- // Starts periodic dumps.
- void NotifyPeriodicTriggerSupported();
+ // Starts periodic dumps. NOT thread safe and triggers must be added before
+ // enabling.
+ void EnablePeriodicTriggerIfNeeded();
- // Starts polling memory total.
- void NotifyPollingSupported();
+ // Starts polling memory total. NOT thread safe and triggers must be added
+ // before enabling.
+ void EnablePollingIfNeeded();
// Resets time for triggering dump to account for minimum time between the
- // dumps.
+ // dumps. NOT thread safe.
void NotifyDumpTriggered();
- // Disables all triggers.
+ // Disables all triggers. NOT thread safe. This should be called before
+  // the polling thread is stopped, to stop polling cleanly.
void DisableAllTriggers();
private:
friend class MemoryDumpManagerTest;
+ friend class MemoryDumpSchedulerPollingTest;
FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered);
   // Helper class to schedule periodic memory dumps.
- struct PeriodicTriggerState {
+ struct BASE_EXPORT PeriodicTriggerState {
PeriodicTriggerState();
~PeriodicTriggerState();
@@ -71,7 +81,7 @@ class BASE_EXPORT MemoryDumpScheduler {
DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
};
- struct PollingTriggerState {
+ struct BASE_EXPORT PollingTriggerState {
enum State {
CONFIGURED, // Polling trigger was added.
ENABLED, // Polling is running.
@@ -80,8 +90,7 @@ class BASE_EXPORT MemoryDumpScheduler {
static const uint32_t kMaxNumMemorySamples = 50;
- explicit PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ PollingTriggerState();
~PollingTriggerState();
// Helper to clear the tracked memory totals and poll count from last dump.
@@ -90,7 +99,6 @@ class BASE_EXPORT MemoryDumpScheduler {
State current_state;
MemoryDumpLevelOfDetail level_of_detail;
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
uint32_t polling_interval_ms;
   // Minimum number of polls after the last dump at which the next dump can be
@@ -106,8 +114,11 @@ class BASE_EXPORT MemoryDumpScheduler {
DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
};
- // Helper to set polling disabled on the polling thread.
- void DisablePolling();
+ MemoryDumpScheduler();
+ ~MemoryDumpScheduler();
+
+ // Helper to set polling disabled.
+ void DisablePollingOnPollingThread();
// Periodically called by the timer.
void RequestPeriodicGlobalDump();
@@ -129,8 +140,19 @@ class BASE_EXPORT MemoryDumpScheduler {
MemoryDumpManager* mdm_;
- PeriodicTriggerState periodic_state_;
- PollingTriggerState polling_state_;
+ // Accessed on the thread of the client before enabling and only accessed on
+  // the thread that called "EnablePeriodicTriggerIfNeeded()" after enabling.
+ std::unique_ptr<PeriodicTriggerState> periodic_state_;
+
+ // Accessed on the thread of the client before enabling and only accessed on
+ // the polling thread after enabling.
+ std::unique_ptr<PollingTriggerState> polling_state_;
+
+ // Accessed on the thread of the client only.
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner_;
+
+ // True when the scheduler is setup. Accessed on the thread of client only.
+ bool is_setup_;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
};
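
A hedged sketch of the call order the comments above imply; |mdm| and |polling_task_runner| are assumed to be provided by the embedding MemoryDumpManager setup, and the interval is arbitrary:

    MemoryDumpScheduler* scheduler = MemoryDumpScheduler::GetInstance();
    scheduler->Setup(mdm, polling_task_runner);   // NOT thread safe: client thread only.
    scheduler->AddTrigger(MemoryDumpType::PERIODIC_INTERVAL,
                          MemoryDumpLevelOfDetail::LIGHT,
                          /*min_time_between_dumps_ms=*/250);
    scheduler->EnablePeriodicTriggerIfNeeded();   // Triggers must be added first.
    // ... tracing session runs ...
    scheduler->DisableAllTriggers();              // Before the polling thread stops.
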
diff --git a/base/trace_event/memory_dump_scheduler_unittest.cc b/base/trace_event/memory_dump_scheduler_unittest.cc
new file mode 100644
index 0000000000..9af2a3b430
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include <memory>
+
+#include "base/single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpSchedulerPollingTest : public testing::Test {
+ public:
+ static const uint32_t kMinPollsToDump = 5;
+
+ MemoryDumpSchedulerPollingTest()
+ : testing::Test(),
+ num_samples_tracked_(
+ MemoryDumpScheduler::PollingTriggerState::kMaxNumMemorySamples) {}
+
+ void SetUp() override {
+ MemoryDumpScheduler::SetPollingIntervalForTesting(1);
+ uint32_t kMinPollsToDump = 5;
+ mds_ = MemoryDumpScheduler::GetInstance();
+ mds_->Setup(nullptr, nullptr);
+ mds_->AddTrigger(MemoryDumpType::PEAK_MEMORY_USAGE,
+ MemoryDumpLevelOfDetail::LIGHT, kMinPollsToDump);
+ mds_->polling_state_->ResetTotals();
+ mds_->polling_state_->current_state =
+ MemoryDumpScheduler::PollingTriggerState::ENABLED;
+ }
+
+ void TearDown() override {
+ mds_->polling_state_->current_state =
+ MemoryDumpScheduler::PollingTriggerState::DISABLED;
+ }
+
+ protected:
+ bool ShouldTriggerDump(uint64_t total) {
+ return mds_->ShouldTriggerDump(total);
+ }
+
+ uint32_t num_samples_tracked_;
+ MemoryDumpScheduler* mds_;
+};
+
+TEST_F(MemoryDumpSchedulerPollingTest, PeakDetection) {
+ for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
+ // Memory is increased in steps and dumps must be triggered at every step.
+ uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1204;
+ bool did_trigger = ShouldTriggerDump(total);
+ // Dumps must be triggered only at specific iterations.
+ bool should_have_triggered = i == 0;
+ should_have_triggered |=
+ (i > num_samples_tracked_) && (i % (2 * num_samples_tracked_) == 1);
+ if (should_have_triggered) {
+ ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
+ } else {
+ ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
+ }
+ }
+}
+
+TEST_F(MemoryDumpSchedulerPollingTest, SlowGrowthDetection) {
+ for (uint32_t i = 0; i < 15; ++i) {
+    // Record 1GiB of increase in each call. Dumps are triggered when the
+    // increase exceeds 1% of the system's total memory.
+ uint64_t total = static_cast<uint64_t>(i + 1) * 1024 * 1024 * 1024;
+ bool did_trigger = ShouldTriggerDump(total);
+ bool should_have_triggered = i % kMinPollsToDump == 0;
+ if (should_have_triggered) {
+ ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
+ } else {
+ ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
+ }
+ }
+}
+
+TEST_F(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered) {
+ for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
+ uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1204;
+ if (i % num_samples_tracked_ == 0)
+ mds_->NotifyDumpTriggered();
+ bool did_trigger = ShouldTriggerDump(total);
+ // Dumps should never be triggered since NotifyDumpTriggered() is called
+ // frequently.
+ EXPECT_NE(0u, mds_->polling_state_->last_dump_memory_total);
+ EXPECT_GT(num_samples_tracked_ - 1,
+ mds_->polling_state_->last_memory_totals_kb_index);
+ EXPECT_LT(static_cast<int64_t>(
+ total - mds_->polling_state_->last_dump_memory_total),
+ mds_->polling_state_->memory_increase_threshold);
+ ASSERT_FALSE(did_trigger && i) << "Unexpected dump at " << i;
+ }
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index ae74322040..746068a7b1 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -69,10 +69,70 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"net/http_network_session_0x?/stream_factory",
"net/sdch_manager_0x?",
"net/ssl_session_cache",
- "net/url_request_context_0x?",
- "net/url_request_context_0x?/http_cache",
- "net/url_request_context_0x?/http_network_session",
- "net/url_request_context_0x?/sdch_manager",
+ "net/url_request_context",
+ "net/url_request_context/app_request",
+ "net/url_request_context/app_request/0x?",
+ "net/url_request_context/app_request/0x?/http_cache",
+ "net/url_request_context/app_request/0x?/http_cache/memory_backend",
+ "net/url_request_context/app_request/0x?/http_cache/simple_backend",
+ "net/url_request_context/app_request/0x?/http_network_session",
+ "net/url_request_context/app_request/0x?/sdch_manager",
+ "net/url_request_context/extensions",
+ "net/url_request_context/extensions/0x?",
+ "net/url_request_context/extensions/0x?/http_cache",
+ "net/url_request_context/extensions/0x?/http_cache/memory_backend",
+ "net/url_request_context/extensions/0x?/http_cache/simple_backend",
+ "net/url_request_context/extensions/0x?/http_network_session",
+ "net/url_request_context/extensions/0x?/sdch_manager",
+ "net/url_request_context/isolated_media",
+ "net/url_request_context/isolated_media/0x?",
+ "net/url_request_context/isolated_media/0x?/http_cache",
+ "net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/isolated_media/0x?/http_network_session",
+ "net/url_request_context/isolated_media/0x?/sdch_manager",
+ "net/url_request_context/main",
+ "net/url_request_context/main/0x?",
+ "net/url_request_context/main/0x?/http_cache",
+ "net/url_request_context/main/0x?/http_cache/memory_backend",
+ "net/url_request_context/main/0x?/http_cache/simple_backend",
+ "net/url_request_context/main/0x?/http_network_session",
+ "net/url_request_context/main/0x?/sdch_manager",
+ "net/url_request_context/main_media",
+ "net/url_request_context/main_media/0x?",
+ "net/url_request_context/main_media/0x?/http_cache",
+ "net/url_request_context/main_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/main_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/main_media/0x?/http_network_session",
+ "net/url_request_context/main_media/0x?/sdch_manager",
+ "net/url_request_context/proxy",
+ "net/url_request_context/proxy/0x?",
+ "net/url_request_context/proxy/0x?/http_cache",
+ "net/url_request_context/proxy/0x?/http_cache/memory_backend",
+ "net/url_request_context/proxy/0x?/http_cache/simple_backend",
+ "net/url_request_context/proxy/0x?/http_network_session",
+ "net/url_request_context/proxy/0x?/sdch_manager",
+ "net/url_request_context/safe_browsing",
+ "net/url_request_context/safe_browsing/0x?",
+ "net/url_request_context/safe_browsing/0x?/http_cache",
+ "net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
+ "net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
+ "net/url_request_context/safe_browsing/0x?/http_network_session",
+ "net/url_request_context/safe_browsing/0x?/sdch_manager",
+ "net/url_request_context/system",
+ "net/url_request_context/system/0x?",
+ "net/url_request_context/system/0x?/http_cache",
+ "net/url_request_context/system/0x?/http_cache/memory_backend",
+ "net/url_request_context/system/0x?/http_cache/simple_backend",
+ "net/url_request_context/system/0x?/http_network_session",
+ "net/url_request_context/system/0x?/sdch_manager",
+ "net/url_request_context/unknown",
+ "net/url_request_context/unknown/0x?",
+ "net/url_request_context/unknown/0x?/http_cache",
+ "net/url_request_context/unknown/0x?/http_cache/memory_backend",
+ "net/url_request_context/unknown/0x?/http_cache/simple_backend",
+ "net/url_request_context/unknown/0x?/http_network_session",
+ "net/url_request_context/unknown/0x?/sdch_manager",
"web_cache/Image_resources",
"web_cache/CSS stylesheet_resources",
"web_cache/Script_resources",
diff --git a/base/trace_event/memory_peak_detector.cc b/base/trace_event/memory_peak_detector.cc
new file mode 100644
index 0000000000..c361037c2d
--- /dev/null
+++ b/base/trace_event/memory_peak_detector.cc
@@ -0,0 +1,164 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
+ static MemoryPeakDetector* instance = new MemoryPeakDetector();
+ return instance;
+}
+
+MemoryPeakDetector::MemoryPeakDetector()
+ : generation_(0),
+ state_(NOT_INITIALIZED),
+ polling_interval_ms_(0),
+ poll_tasks_count_for_testing_(0) {}
+
+MemoryPeakDetector::~MemoryPeakDetector() {
+ // This is hit only in tests, in which case the test is expected to TearDown()
+ // cleanly and not leave the peak detector running.
+ DCHECK_EQ(NOT_INITIALIZED, state_);
+}
+
+void MemoryPeakDetector::Setup(
+ const GetDumpProvidersFunction& get_dump_providers_function,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ const OnPeakDetectedCallback& on_peak_detected_callback) {
+ DCHECK(!get_dump_providers_function.is_null());
+ DCHECK(task_runner);
+ DCHECK(!on_peak_detected_callback.is_null());
+ DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
+ DCHECK(dump_providers_.empty());
+ get_dump_providers_function_ = get_dump_providers_function;
+ task_runner_ = task_runner;
+ on_peak_detected_callback_ = on_peak_detected_callback;
+ state_ = DISABLED;
+}
+
+void MemoryPeakDetector::TearDown() {
+ if (task_runner_) {
+ task_runner_->PostTask(
+ FROM_HERE,
+ Bind(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
+ }
+ task_runner_ = nullptr;
+}
+
+void MemoryPeakDetector::Start() {
+ task_runner_->PostTask(
+ FROM_HERE, Bind(&MemoryPeakDetector::StartInternal, Unretained(this)));
+}
+
+void MemoryPeakDetector::Stop() {
+ task_runner_->PostTask(
+ FROM_HERE, Bind(&MemoryPeakDetector::StopInternal, Unretained(this)));
+}
+
+void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
+ // It is possible to call this before the first Setup() call, in which case
+  // we want to just make this a no-op. The next Start() will fetch the MDP list.
+ if (!task_runner_)
+ return;
+ task_runner_->PostTask(
+ FROM_HERE,
+ Bind(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
+ Unretained(this)));
+}
+
+void MemoryPeakDetector::StartInternal() {
+ DCHECK_EQ(DISABLED, state_);
+ state_ = ENABLED;
+ polling_interval_ms_ = 1; // TODO(primiano): temporary until next CL.
+
+ // If there are any dump providers available, NotifyMemoryDumpProvidersChanged
+ // will fetch them and start the polling. Otherwise this will remain in the
+ // ENABLED state and the actual polling will start on the next call to
+ // ReloadDumpProvidersAndStartPollingIfNeeded().
+ // Depending on the sandbox model, it is possible that no polling-capable dump
+  // providers will ever be available.
+ ReloadDumpProvidersAndStartPollingIfNeeded();
+}
+
+void MemoryPeakDetector::StopInternal() {
+ DCHECK_NE(NOT_INITIALIZED, state_);
+ state_ = DISABLED;
+ ++generation_;
+ dump_providers_.clear();
+}
+
+void MemoryPeakDetector::TearDownInternal() {
+ StopInternal();
+ get_dump_providers_function_.Reset();
+ on_peak_detected_callback_.Reset();
+ state_ = NOT_INITIALIZED;
+}
+
+void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
+ if (state_ == DISABLED || state_ == NOT_INITIALIZED)
+ return; // Start() will re-fetch the MDP list later.
+
+ DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
+ (state_ == ENABLED && dump_providers_.empty()));
+
+ dump_providers_.clear();
+
+  // This is really MemoryDumpManager::GetDumpProvidersForPolling, modulo testing.
+ get_dump_providers_function_.Run(&dump_providers_);
+
+ if (state_ == ENABLED && !dump_providers_.empty()) {
+ // It's now time to start polling for realz.
+ state_ = RUNNING;
+ task_runner_->PostTask(FROM_HERE,
+ Bind(&MemoryPeakDetector::PollMemoryAndDetectPeak,
+ Unretained(this), ++generation_));
+ } else if (state_ == RUNNING && dump_providers_.empty()) {
+ // Will cause the next PollMemoryAndDetectPeak() task to early return.
+ state_ = ENABLED;
+ ++generation_;
+ }
+}
+
+void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
+ if (state_ != RUNNING || expected_generation != generation_)
+ return;
+
+ // We should never end up in a situation where state_ == RUNNING but all dump
+ // providers are gone.
+ DCHECK(!dump_providers_.empty());
+
+ poll_tasks_count_for_testing_++;
+ uint64_t memory_total = 0;
+ for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
+ dump_providers_) {
+ DCHECK(mdp_info->options.is_fast_polling_supported);
+ uint64_t value = 0;
+ mdp_info->dump_provider->PollFastMemoryTotal(&value);
+ memory_total += value;
+ }
+ ignore_result(memory_total); // TODO(primiano): temporary until next CL.
+
+ // TODO(primiano): Move actual peak detection logic from the
+ // MemoryDumpScheduler in next CLs.
+
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ Bind(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
+ expected_generation),
+ TimeDelta::FromMilliseconds(polling_interval_ms_));
+}
+
+} // namespace trace_event
+} // namespace base
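
Summarizing the state transitions implemented above (derived from StartInternal(), StopInternal(), TearDownInternal() and ReloadDumpProvidersAndStartPollingIfNeeded()):

    NOT_INITIALIZED --Setup()--> DISABLED --Start()--> ENABLED
    ENABLED  --(at least one pollable MDP)--> RUNNING
    RUNNING  --(MDP list becomes empty)--> ENABLED
    ENABLED/RUNNING --Stop()--> DISABLED --TearDown()--> NOT_INITIALIZED
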
diff --git a/base/trace_event/memory_peak_detector.h b/base/trace_event/memory_peak_detector.h
new file mode 100644
index 0000000000..b914295833
--- /dev/null
+++ b/base/trace_event/memory_peak_detector.h
@@ -0,0 +1,139 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+struct MemoryDumpProviderInfo;
+
+// This class is NOT thread-safe; the caller has to ensure linearization of
+// the calls to the public methods. In any case, the public methods do NOT have
+// to be called from the |task_runner| on which the polling tasks run.
+class BASE_EXPORT MemoryPeakDetector {
+ public:
+ using OnPeakDetectedCallback = RepeatingClosure;
+ using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
+ using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
+
+ enum State {
+ NOT_INITIALIZED = 0, // Before Setup()
+ DISABLED, // Before Start() or after Stop().
+ ENABLED, // After Start() but no dump_providers_ are available.
+ RUNNING // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
+ };
+
+ static MemoryPeakDetector* GetInstance();
+
+ // Configures the peak detector, binding the polling tasks on the given
+ // thread. Setup() can be called several times, provided that: (1) Stop()
+ // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
+ // remains the same.
+ // GetDumpProvidersFunction: is the function that will be invoked to get
+ // an updated list of polling-capable dump providers. This is really just
+ // MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
+ // indirection allows easier testing.
+ // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
+ // be periodically called.
+ // OnPeakDetectedCallback: a callback that will be invoked on the
+ // given task runner when a memory peak is detected.
+ void Setup(const GetDumpProvidersFunction&,
+ const scoped_refptr<SequencedTaskRunner>&,
+ const OnPeakDetectedCallback&);
+
+ // Releases the |task_runner_| and the bound callbacks.
+ void TearDown();
+
+ // This posts a task onto the passed task runner which refreshes the list of
+ // dump providers via the GetDumpProvidersFunction. If at least one dump
+  // provider is available, this immediately starts polling on the task runner.
+ // If not, the detector remains in the ENABLED state and will start polling
+ // automatically (i.e. without requiring another call to Start()) on the
+ // next call to NotifyMemoryDumpProvidersChanged().
+ void Start();
+
+ // Stops the polling on the task runner (if was active at all). This doesn't
+ // wait for the task runner to drain pending tasks, so it is possible that
+ // a polling will happen concurrently (or in the immediate future) with the
+ // Stop() call. It is responsibility of the caller to drain or synchronize
+ // with the task runner.
+ void Stop();
+
+ // Used by MemoryDumpManager to notify that the list of polling-capable dump
+ // providers has changed. The peak detector will reload the list on the next
+ // polling task. This function can be called before Setup(), in which
+  // case it will just be a no-op.
+ void NotifyMemoryDumpProvidersChanged();
+
+ private:
+ friend class MemoryPeakDetectorTest;
+
+ MemoryPeakDetector();
+ ~MemoryPeakDetector();
+
+ // All these methods are always called on the |task_runner_|.
+ void StartInternal();
+ void StopInternal();
+ void TearDownInternal();
+ void ReloadDumpProvidersAndStartPollingIfNeeded();
+ void PollMemoryAndDetectPeak(uint32_t expected_generation);
+
+ // It is safe to call these testing methods only on the |task_runner_|.
+ State state_for_testing() const { return state_; }
+ uint32_t poll_tasks_count_for_testing() const {
+ return poll_tasks_count_for_testing_;
+ }
+
+ // The task runner where all the internal calls are posted onto. This field
+  // must NOT be accessed by the tasks posted on the |task_runner_| because
+ // there might still be outstanding tasks on the |task_runner_| while this
+ // refptr is reset. This can only be safely accessed by the public methods
+ // above, which the client of this class is supposed to call sequentially.
+ scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // After the Setup() call, the fields below must be accessed only from
+ // the |task_runner_|.
+
+ // Bound function to get an updated list of polling-capable dump providers.
+ GetDumpProvidersFunction get_dump_providers_function_;
+
+ // The callback to invoke when peaks are detected.
+ OnPeakDetectedCallback on_peak_detected_callback_;
+
+ // List of polling-aware dump providers to invoke upon each poll.
+ DumpProvidersList dump_providers_;
+
+ // The generation is incremented every time the |state_| is changed and causes
+ // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
+  // most recent |generation_|. This allows outstanding PostDelayedTask() calls
+  // that refer to an old sequence, one that was later Stop()-ed or disabled
+  // because of NotifyMemoryDumpProvidersChanged(), to be dropped on the floor.
+ uint32_t generation_;
+
+ State state_;
+ uint32_t polling_interval_ms_;
+ uint32_t poll_tasks_count_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
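
Putting the header above together, a minimal usage sketch, assuming the get-providers function and the peak callback are supplied by the embedder (the two free functions and |polling_task_runner| below are placeholders, not part of this patch):

    // Placeholders; in production the first would be
    // MemoryDumpManager::GetDumpProvidersForPolling and the second the
    // embedder's reaction to a detected peak.
    void GetProvidersForPolling(MemoryPeakDetector::DumpProvidersList* out);
    void OnPeakDetected();

    MemoryPeakDetector* detector = MemoryPeakDetector::GetInstance();
    detector->Setup(Bind(&GetProvidersForPolling),
                    polling_task_runner,            // a SequencedTaskRunner, assumed given.
                    Bind(&OnPeakDetected));         // Invoked on |polling_task_runner|.
    detector->Start();                              // Polls once an MDP list is available.
    detector->NotifyMemoryDumpProvidersChanged();   // After MDP (un)registration.
    detector->Stop();                               // Stops posting poll tasks.
    detector->TearDown();                           // Releases task runner and callbacks.
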
diff --git a/base/trace_event/memory_peak_detector_unittest.cc b/base/trace_event/memory_peak_detector_unittest.cc
new file mode 100644
index 0000000000..9a9b92217c
--- /dev/null
+++ b/base/trace_event/memory_peak_detector_unittest.cc
@@ -0,0 +1,381 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
+ NOTREACHED();
+ return true;
+ }
+
+ MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
+};
+
+// Wrapper to use gmock on a callback.
+struct OnPeakDetectedWrapper {
+ MOCK_METHOD0(OnPeak, void());
+};
+
+} // namespace
+
+class MemoryPeakDetectorTest : public testing::Test {
+ public:
+ struct FriendDeleter {
+ void operator()(MemoryPeakDetector* inst) { delete inst; }
+ };
+
+ MemoryPeakDetectorTest() : testing::Test() {}
+
+ std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
+ return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
+ new MemoryPeakDetector());
+ }
+
+ void RestartThreadAndReinitializePeakDetector() {
+ bg_thread_.reset(new Thread("Peak Detector Test Thread"));
+ bg_thread_->Start();
+ peak_detector_ = NewInstance();
+ peak_detector_->Setup(
+ Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
+ bg_thread_->task_runner(),
+ Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
+ }
+
+ void SetUp() override {
+ get_mdp_call_count_ = 0;
+ RestartThreadAndReinitializePeakDetector();
+ }
+
+ void TearDown() override {
+ peak_detector_->TearDown();
+ bg_thread_->FlushForTesting();
+ EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+ dump_providers_.clear();
+ }
+
+ // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
+ // the result on the current thread.
+ MemoryPeakDetector::State GetPeakDetectorState() {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
+ auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+ MemoryPeakDetector::State* res) {
+ *res = peak_detector->state_for_testing();
+ evt->Signal();
+ };
+ bg_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
+ Unretained(&res)));
+ evt.Wait();
+ return res;
+ }
+
+ // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
+ // and returns the result on the current thread.
+ uint32_t GetNumPollingTasksRan() {
+ uint32_t res = 0;
+ auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+ uint32_t* res) {
+ *res = peak_detector->poll_tasks_count_for_testing();
+ evt->Signal();
+ };
+
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ bg_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
+ Unretained(&res)));
+ evt.Wait();
+ return res;
+ }
+
+ // Called on the |bg_thread_|.
+ void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
+ get_mdp_call_count_++;
+ *mdps = dump_providers_;
+ }
+
+ uint32_t GetNumGetDumpProvidersCalls() {
+ bg_thread_->FlushForTesting();
+ return get_mdp_call_count_;
+ }
+
+ scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
+ std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
+ MemoryDumpProvider::Options opt;
+ opt.is_fast_polling_supported = true;
+ scoped_refptr<MemoryDumpProviderInfo> mdp_info(
+ new MemoryDumpProviderInfo(mdp.get(), "Mock MDP", nullptr, opt, false));
+
+ // The |mdp| instance will be destroyed together with the |mdp_info|.
+ mdp_info->owned_dump_provider = std::move(mdp);
+ return mdp_info;
+ }
+
+ static MockMemoryDumpProvider& GetMockMDP(
+ const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
+ return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
+ }
+
+ protected:
+ MemoryPeakDetector::DumpProvidersList dump_providers_;
+ uint32_t get_mdp_call_count_;
+ std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
+ std::unique_ptr<Thread> bg_thread_;
+ OnPeakDetectedWrapper on_peak_callback_;
+};
+
+TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ peak_detector_->Start();
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, NotifyBeforeInitialize) {
+ peak_detector_->TearDown();
+
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+ RestartThreadAndReinitializePeakDetector();
+
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ evt.Wait(); // Wait for a PollFastMemoryTotal() call.
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_GE(GetNumPollingTasksRan(), 1u);
+}
+
+TEST_F(MemoryPeakDetectorTest, DoubleStop) {
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+
+ peak_detector_->Start();
+ evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_GT(GetNumPollingTasksRan(), 0u);
+}
+
+TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+
+ for (int i = 0; i < 5; ++i) {
+ evt.Reset();
+ peak_detector_->Start();
+ evt.Wait(); // Wait for a PollFastMemoryTotal() call.
+ // Check that calling TearDown implicitly does a Stop().
+ peak_detector_->TearDown();
+
+ // Reinitialize and re-bind to a new task runner.
+ RestartThreadAndReinitializePeakDetector();
+ }
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+
+ // Check that no poll tasks are posted before any dump provider is registered.
+ PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+
+  // Register the MDP after Start() has been issued and expect that the
+ // PeakDetector transitions ENABLED -> RUNNING on the next
+ // NotifyMemoryDumpProvidersChanged() call.
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+
+ evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
+
+  // Now simulate the unregistration and expect that the PeakDetector transitions
+ // back to ENABLED.
+ dump_providers_.clear();
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
+ uint32_t num_poll_tasks = GetNumPollingTasksRan();
+ EXPECT_GT(num_poll_tasks, 0u);
+
+ // At this point, no more polling tasks should be posted.
+ PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+}
+
+// Test that a sequence of Start()/Stop() back-to-back doesn't end up creating
+// several outstanding timer tasks and instead respects the polling_interval_ms.
+TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ dump_providers_.push_back(mdp);
+ const uint32_t kNumPolls = 20;
+ uint32_t polls_done = 0;
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
+ if (++polls_done == kNumPolls)
+ evt.Signal();
+ }));
+
+ const TimeTicks tstart = TimeTicks::Now();
+ for (int i = 0; i < 5; i++) {
+ peak_detector_->Start();
+ peak_detector_->Stop();
+ }
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ evt.Wait(); // Wait for kNumPolls.
+ const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+
+ // TODO(primiano): this will become config.polling_interval_ms in the next CL.
+ const uint32_t polling_interval_ms = 1;
+ EXPECT_GE(time_ms, kNumPolls * polling_interval_ms);
+ peak_detector_->Stop();
+}
+
+TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
+ WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
+ scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
+ EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
+
+ // Register only one MDP and start the detector.
+ dump_providers_.push_back(mdp1);
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ // Wait for one poll task and then register also the other one.
+ evt1.Wait();
+ dump_providers_.push_back(mdp2);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ evt2.Wait();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ // Now unregister the first MDP and check that everything is still running.
+ dump_providers_.erase(dump_providers_.begin());
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ // Now unregister both and check that the detector goes to idle.
+ dump_providers_.clear();
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+ // Now re-register both and check that the detector re-activates posting
+ // new polling tasks.
+ uint32_t num_poll_tasks = GetNumPollingTasksRan();
+ evt1.Reset();
+ evt2.Reset();
+ dump_providers_.push_back(mdp1);
+ dump_providers_.push_back(mdp2);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ evt1.Wait();
+ evt2.Wait();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
+
+ // Stop everything, tear down the MDPs, restart the detector and check that
+  // the detector doesn't accidentally try to re-access them.
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ dump_providers_.clear();
+ mdp1 = nullptr;
+ mdp2 = nullptr;
+
+ num_poll_tasks = GetNumPollingTasksRan();
+ peak_detector_->Start();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+
+ EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 36de107bf8..7ee9a4a101 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -11,11 +11,7 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
-#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
@@ -37,11 +33,6 @@ const char kEnableArgumentFilter[] = "enable-argument-filter";
const char kRecordModeParam[] = "record_mode";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
-const char kSyntheticDelaysParam[] = "synthetic_delays";
-
-const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters that is used to parse memory dump config in trace config
// string.
@@ -148,27 +139,36 @@ TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
return *this;
predicate_name_ = rhs.predicate_name_;
- included_categories_ = rhs.included_categories_;
- excluded_categories_ = rhs.excluded_categories_;
+ category_filter_ = rhs.category_filter_;
+
if (rhs.args_)
args_ = rhs.args_->CreateDeepCopy();
return *this;
}
-void TraceConfig::EventFilterConfig::AddIncludedCategory(
- const std::string& category) {
- included_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
+ const base::DictionaryValue* event_filter) {
+ category_filter_.InitializeFromConfigDict(*event_filter);
+
+ const base::DictionaryValue* args_dict = nullptr;
+ if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+ args_ = args_dict->CreateDeepCopy();
}
-void TraceConfig::EventFilterConfig::AddExcludedCategory(
- const std::string& category) {
- excluded_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::SetCategoryFilter(
+ const TraceConfigCategoryFilter& category_filter) {
+ category_filter_ = category_filter;
}
-void TraceConfig::EventFilterConfig::SetArgs(
- std::unique_ptr<base::DictionaryValue> args) {
- args_ = std::move(args);
+void TraceConfig::EventFilterConfig::ToDict(
+ DictionaryValue* filter_dict) const {
+ filter_dict->SetString(kFilterPredicateParam, predicate_name());
+
+ category_filter_.ToDict(filter_dict);
+
+ if (args_)
+ filter_dict->Set(kFilterArgsParam, args_->CreateDeepCopy());
}
bool TraceConfig::EventFilterConfig::GetArgAsSet(
@@ -186,27 +186,8 @@ bool TraceConfig::EventFilterConfig::GetArgAsSet(
}
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
- CStringTokenizer category_group_tokens(
- category_group_name, category_group_name + strlen(category_group_name),
- ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
-
- for (const auto& excluded_category : excluded_categories_) {
- if (base::MatchPattern(category_group_token, excluded_category)) {
- return false;
- }
- }
-
- for (const auto& included_category : included_categories_) {
- if (base::MatchPattern(category_group_token, included_category)) {
- return true;
- }
- }
- }
-
- return false;
+ const StringPiece& category_group_name) const {
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
TraceConfig::TraceConfig() {
@@ -255,11 +236,8 @@ TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
+ category_filter_(tc.category_filter_),
memory_dump_config_(tc.memory_dump_config_),
- included_categories_(tc.included_categories_),
- disabled_categories_(tc.disabled_categories_),
- excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_),
event_filters_(tc.event_filters_) {}
TraceConfig::~TraceConfig() {
@@ -272,17 +250,14 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
record_mode_ = rhs.record_mode_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
+ category_filter_ = rhs.category_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
- included_categories_ = rhs.included_categories_;
- disabled_categories_ = rhs.disabled_categories_;
- excluded_categories_ = rhs.excluded_categories_;
- synthetic_delays_ = rhs.synthetic_delays_;
event_filters_ = rhs.event_filters_;
return *this;
}
const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
- return synthetic_delays_;
+ return category_filter_.synthetic_delays();
}
std::string TraceConfig::ToString() const {
@@ -298,69 +273,14 @@ TraceConfig::AsConvertableToTraceFormat() const {
}
std::string TraceConfig::ToCategoryFilterString() const {
- std::string filter_string;
- WriteCategoryFilterString(included_categories_, &filter_string, true);
- WriteCategoryFilterString(disabled_categories_, &filter_string, true);
- WriteCategoryFilterString(excluded_categories_, &filter_string, false);
- WriteCategoryFilterString(synthetic_delays_, &filter_string);
- return filter_string;
+ return category_filter_.ToFilterString();
}
bool TraceConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
-
- bool had_enabled_by_default = false;
- DCHECK(category_group_name);
- std::string category_group_name_str = category_group_name;
- StringTokenizer category_group_tokens(category_group_name_str, ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- // Don't allow empty tokens, nor tokens with leading or trailing space.
- DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- category_group_token))
- << "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str()))
- return true;
-
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
- had_enabled_by_default = true;
- }
- // Do a second pass to check for explicitly disabled categories
- // (those explicitly enabled have priority due to first pass).
- category_group_tokens.Reset();
- bool category_group_disabled = false;
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- for (const std::string& category : excluded_categories_) {
- if (MatchPattern(category_group_token, category)) {
- // Current token of category_group_name is present in excluded_list.
- // Flag the exclusion and proceed further to check if any of the
- // remaining categories of category_group_name is not present in the
- // excluded_ list.
- category_group_disabled = true;
- break;
- }
- // One of the category of category_group_name is not present in
- // excluded_ list. So, if it's not a disabled-by-default category,
- // it has to be included_ list. Enable the category_group_name
- // for recording.
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
- category_group_disabled = false;
- }
- }
- // One of the categories present in category_group_name is not present in
- // excluded_ list. Implies this category_group_name group can be enabled
- // for recording, since one of its groups is enabled for recording.
- if (!category_group_disabled)
- break;
- }
- // If the category group is not excluded, and there are no included patterns
- // we consider this category group enabled, as long as it had categories
- // other than disabled-by-default.
- return !category_group_disabled && had_enabled_by_default &&
- included_categories_.empty();
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
void TraceConfig::Merge(const TraceConfig& config) {
@@ -371,28 +291,10 @@ void TraceConfig::Merge(const TraceConfig& config) {
<< "set of options.";
}
- // Keep included patterns only if both filters have an included entry.
- // Otherwise, one of the filter was specifying "*" and we want to honor the
- // broadest filter.
- if (HasIncludedPatterns() && config.HasIncludedPatterns()) {
- included_categories_.insert(included_categories_.end(),
- config.included_categories_.begin(),
- config.included_categories_.end());
- } else {
- included_categories_.clear();
- }
+ category_filter_.Merge(config.category_filter_);
memory_dump_config_.Merge(config.memory_dump_config_);
- disabled_categories_.insert(disabled_categories_.end(),
- config.disabled_categories_.begin(),
- config.disabled_categories_.end());
- excluded_categories_.insert(excluded_categories_.end(),
- config.excluded_categories_.begin(),
- config.excluded_categories_.end());
- synthetic_delays_.insert(synthetic_delays_.end(),
- config.synthetic_delays_.begin(),
- config.synthetic_delays_.end());
event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
config.event_filters().end());
}
@@ -401,10 +303,7 @@ void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
enable_argument_filter_ = false;
- included_categories_.clear();
- disabled_categories_.clear();
- excluded_categories_.clear();
- synthetic_delays_.clear();
+ category_filter_.Clear();
memory_dump_config_.Clear();
event_filters_.clear();
}
@@ -435,19 +334,13 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
- const ListValue* category_list = nullptr;
- if (dict.GetList(kIncludedCategoriesParam, &category_list))
- SetCategoriesFromIncludedList(*category_list);
- if (dict.GetList(kExcludedCategoriesParam, &category_list))
- SetCategoriesFromExcludedList(*category_list);
- if (dict.GetList(kSyntheticDelaysParam, &category_list))
- SetSyntheticDelaysFromList(*category_list);
+ category_filter_.InitializeFromConfigDict(dict);
const base::ListValue* category_event_filters = nullptr;
if (dict.GetList(kEventFiltersParam, &category_event_filters))
SetEventFiltersFromConfigList(*category_event_filters);
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
    // If dump triggers are not set, the client is using the legacy format with
    // just the category enabled. So, use the default periodic dump config.
const DictionaryValue* memory_dump_config = nullptr;
@@ -468,37 +361,8 @@ void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string) {
- if (!category_filter_string.empty()) {
- std::vector<std::string> split = SplitString(
- category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- for (const std::string& category : split) {
- // Ignore empty categories.
- if (category.empty())
- continue;
- // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
- if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
- CompareCase::SENSITIVE) &&
- category.back() == ')') {
- std::string synthetic_category = category.substr(
- strlen(kSyntheticDelayCategoryFilterPrefix),
- category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
- size_t name_length = synthetic_category.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != synthetic_category.size() - 1) {
- synthetic_delays_.push_back(synthetic_category);
- }
- } else if (category.front() == '-') {
- // Excluded categories start with '-'.
- // Remove '-' from category string.
- excluded_categories_.push_back(category.substr(1));
- } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
- }
+ if (!category_filter_string.empty())
+ category_filter_.InitializeFromString(category_filter_string);
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
@@ -523,64 +387,11 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
SetDefaultMemoryDumpConfig();
}
}
-void TraceConfig::SetCategoriesFromIncludedList(
- const ListValue& included_list) {
- included_categories_.clear();
- for (size_t i = 0; i < included_list.GetSize(); ++i) {
- std::string category;
- if (!included_list.GetString(i, &category))
- continue;
- if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
-}
-
-void TraceConfig::SetCategoriesFromExcludedList(
- const ListValue& excluded_list) {
- excluded_categories_.clear();
- for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
- std::string category;
- if (excluded_list.GetString(i, &category))
- excluded_categories_.push_back(category);
- }
-}
-
-void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
- synthetic_delays_.clear();
- for (size_t i = 0; i < list.GetSize(); ++i) {
- std::string delay;
- if (!list.GetString(i, &delay))
- continue;
- // Synthetic delays are of the form "delay;option;option;...".
- size_t name_length = delay.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != delay.size() - 1) {
- synthetic_delays_.push_back(delay);
- }
- }
-}
-
-void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const {
- if (categories.empty())
- return;
-
- auto list = MakeUnique<ListValue>();
- for (const std::string& category : categories)
- list->AppendString(category);
- dict->Set(param, std::move(list));
-}
-
void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config) {
// Set allowed dump modes.
@@ -673,29 +484,7 @@ void TraceConfig::SetEventFiltersFromConfigList(
<< "Invalid predicate name in category event filter.";
EventFilterConfig new_config(predicate_name);
- const base::ListValue* included_list = nullptr;
- CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
- << "Missing included_categories in category event filter.";
-
- for (size_t i = 0; i < included_list->GetSize(); ++i) {
- std::string category;
- if (included_list->GetString(i, &category))
- new_config.AddIncludedCategory(category);
- }
-
- const base::ListValue* excluded_list = nullptr;
- if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
- for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
- std::string category;
- if (excluded_list->GetString(i, &category))
- new_config.AddExcludedCategory(category);
- }
- }
-
- const base::DictionaryValue* args_dict = nullptr;
- if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
- new_config.SetArgs(args_dict->CreateDeepCopy());
-
+ new_config.InitializeFromConfigDict(event_filter);
event_filters_.push_back(new_config);
}
}
@@ -722,50 +511,20 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
- StringList categories(included_categories_);
- categories.insert(categories.end(),
- disabled_categories_.begin(),
- disabled_categories_.end());
- AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
- AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
- AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+ category_filter_.ToDict(dict.get());
if (!event_filters_.empty()) {
std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
for (const EventFilterConfig& filter : event_filters_) {
std::unique_ptr<base::DictionaryValue> filter_dict(
new base::DictionaryValue());
- filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
-
- std::unique_ptr<base::ListValue> included_categories_list(
- new base::ListValue());
- for (const std::string& included_category : filter.included_categories())
- included_categories_list->AppendString(included_category);
-
- filter_dict->Set(kIncludedCategoriesParam,
- std::move(included_categories_list));
-
- if (!filter.excluded_categories().empty()) {
- std::unique_ptr<base::ListValue> excluded_categories_list(
- new base::ListValue());
- for (const std::string& excluded_category :
- filter.excluded_categories())
- excluded_categories_list->AppendString(excluded_category);
-
- filter_dict->Set(kExcludedCategoriesParam,
- std::move(excluded_categories_list));
- }
-
- if (filter.filter_args())
- filter_dict->Set(kFilterArgsParam,
- filter.filter_args()->CreateDeepCopy());
-
+ filter.ToDict(filter_dict.get());
filter_list->Append(std::move(filter_dict));
}
dict->Set(kEventFiltersParam, std::move(filter_list));
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
auto allowed_modes = MakeUnique<ListValue>();
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
@@ -829,59 +588,5 @@ std::string TraceConfig::ToTraceOptionsString() const {
return ret;
}
-void TraceConfig::WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : values) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
- ++token_cnt;
- }
-}
-
-void TraceConfig::WriteCategoryFilterString(const StringList& delays,
- std::string* out) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : delays) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
- category.c_str());
- ++token_cnt;
- }
-}
-
-bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
- // Check the disabled- filters and the disabled-* wildcard first so that a
- // "*" filter does not include the disabled.
- for (const std::string& category : disabled_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
- return false;
-
- for (const std::string& category : included_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- return false;
-}
-
-bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- StringPiece str) {
- return str.empty() || str.front() == ' ' || str.back() == ' ';
-}
-
-bool TraceConfig::HasIncludedPatterns() const {
- return !included_categories_.empty();
-}
-
} // namespace trace_event
} // namespace base
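
The public TraceConfig surface is unchanged by the hunks above; category parsing and matching are simply delegated to the new category_filter_ member. A minimal usage sketch of that unchanged surface follows (illustration only, not part of the diff; the main() harness and iostream output are assumptions, the constructor and query calls are the ones shown in this file):

// Sketch: the legacy category-filter / options constructor still works after
// the refactor; the answers now come from TraceConfigCategoryFilter internally.
#include <iostream>
#include "base/trace_event/trace_config.h"

int main() {
  // Included, excluded ('-'), disabled-by-default and DELAY(...) entries, as
  // parsed by InitializeFromStrings().
  base::trace_event::TraceConfig tc(
      "cat1,-cat2,disabled-by-default-gpu,DELAY(test.Delay1;16)",
      "record-until-full");
  std::cout << tc.ToCategoryFilterString() << "\n";        // rebuilt by the filter
  std::cout << tc.IsCategoryGroupEnabled("cat1") << "\n";  // true -> prints 1
  return 0;
}
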
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 717c261316..13b2f5f0ee 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -17,6 +17,7 @@
#include "base/gtest_prod_util.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_config_category_filter.h"
#include "base/values.h"
namespace base {
@@ -94,26 +95,25 @@ class BASE_EXPORT TraceConfig {
EventFilterConfig& operator=(const EventFilterConfig& rhs);
- void AddIncludedCategory(const std::string& category);
- void AddExcludedCategory(const std::string& category);
- void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ void InitializeFromConfigDict(const base::DictionaryValue* event_filter);
+
+ void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
+
+ void ToDict(DictionaryValue* filter_dict) const;
+
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
base::DictionaryValue* filter_args() const { return args_.get(); }
- const StringList& included_categories() const {
- return included_categories_;
- }
- const StringList& excluded_categories() const {
- return excluded_categories_;
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
}
private:
std::string predicate_name_;
- StringList included_categories_;
- StringList excluded_categories_;
+ TraceConfigCategoryFilter category_filter_;
std::unique_ptr<base::DictionaryValue> args_;
};
typedef std::vector<EventFilterConfig> EventFilters;
@@ -231,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
@@ -241,6 +241,10 @@ class BASE_EXPORT TraceConfig {
// Clears and resets the memory dump config.
void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
+ }
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -254,15 +258,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- IsEmptyOrContainsLeadingOrTrailingWhitespace);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -279,13 +274,6 @@ class BASE_EXPORT TraceConfig {
void InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string);
- void SetCategoriesFromIncludedList(const ListValue& included_list);
- void SetCategoriesFromExcludedList(const ListValue& excluded_list);
- void SetSyntheticDelaysFromList(const ListValue& list);
- void AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const;
-
void SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
@@ -295,32 +283,14 @@ class BASE_EXPORT TraceConfig {
std::string ToTraceOptionsString() const;
- void WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const;
- void WriteCategoryFilterString(const StringList& delays,
- std::string* out) const;
-
- // Returns true if the category is enabled according to this trace config.
- // This tells whether a category is enabled from the TraceConfig's
- // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
- // category is enabled from the tracing runtime's perspective.
- bool IsCategoryEnabled(const char* category_name) const;
-
- static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
-
- bool HasIncludedPatterns() const;
-
TraceRecordMode record_mode_;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
+ TraceConfigCategoryFilter category_filter_;
+
MemoryDumpConfig memory_dump_config_;
- StringList included_categories_;
- StringList disabled_categories_;
- StringList excluded_categories_;
- StringList synthetic_delays_;
EventFilters event_filters_;
};
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
new file mode 100644
index 0000000000..234db18c5c
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.cc
@@ -0,0 +1,297 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config_category_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+const char kSyntheticDelaysParam[] = "synthetic_delays";
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+}
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter(
+ const TraceConfigCategoryFilter& other)
+ : included_categories_(other.included_categories_),
+ disabled_categories_(other.disabled_categories_),
+ excluded_categories_(other.excluded_categories_),
+ synthetic_delays_(other.synthetic_delays_) {}
+
+TraceConfigCategoryFilter::~TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
+ const TraceConfigCategoryFilter& rhs) {
+ included_categories_ = rhs.included_categories_;
+ disabled_categories_ = rhs.disabled_categories_;
+ excluded_categories_ = rhs.excluded_categories_;
+ synthetic_delays_ = rhs.synthetic_delays_;
+ return *this;
+}
+
+void TraceConfigCategoryFilter::InitializeFromString(
+ const StringPiece& category_filter_string) {
+ std::vector<StringPiece> split = SplitStringPiece(
+ category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const StringPiece& category : split) {
+ // Ignore empty categories.
+ if (category.empty())
+ continue;
+ // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+ if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+ CompareCase::SENSITIVE) &&
+ category.back() == ')') {
+ StringPiece synthetic_category = category.substr(
+ strlen(kSyntheticDelayCategoryFilterPrefix),
+ category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+ size_t name_length = synthetic_category.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != synthetic_category.size() - 1) {
+ synthetic_delays_.push_back(synthetic_category.as_string());
+ }
+ } else if (category.front() == '-') {
+ // Excluded categories start with '-'.
+ // Remove '-' from category string.
+ excluded_categories_.push_back(category.substr(1).as_string());
+ } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
+ disabled_categories_.push_back(category.as_string());
+ } else {
+ included_categories_.push_back(category.as_string());
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::InitializeFromConfigDict(
+ const DictionaryValue& dict) {
+ const ListValue* category_list = nullptr;
+ if (dict.GetList(kIncludedCategoriesParam, &category_list))
+ SetCategoriesFromIncludedList(*category_list);
+ if (dict.GetList(kExcludedCategoriesParam, &category_list))
+ SetCategoriesFromExcludedList(*category_list);
+ if (dict.GetList(kSyntheticDelaysParam, &category_list))
+ SetSyntheticDelaysFromList(*category_list);
+}
+
+bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
+ const StringPiece& category_group_name) const {
+ bool had_enabled_by_default = false;
+ DCHECK(!category_group_name.empty());
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
+ while (category_group_tokens.GetNext()) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+ // Don't allow empty tokens, nor tokens with leading or trailing space.
+ DCHECK(IsCategoryNameAllowed(category_group_token))
+ << "Disallowed category string";
+ if (IsCategoryEnabled(category_group_token))
+ return true;
+
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ had_enabled_by_default = true;
+ }
+ // Do a second pass to check for explicitly disabled categories
+ // (those explicitly enabled have priority due to first pass).
+ category_group_tokens.Reset();
+ bool category_group_disabled = false;
+ while (category_group_tokens.GetNext()) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+ for (const std::string& category : excluded_categories_) {
+ if (MatchPattern(category_group_token, category)) {
+ // Current token of category_group_name is present in excluded_list.
+ // Flag the exclusion and proceed further to check if any of the
+ // remaining categories of category_group_name is not present in the
+ // excluded_ list.
+ category_group_disabled = true;
+ break;
+ }
+      // One of the categories of category_group_name is not present in the
+      // excluded_ list. So, if it's not a disabled-by-default category,
+      // it has to be in the included_ list. Enable the category_group_name
+      // for recording.
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ category_group_disabled = false;
+ }
+    // One of the categories present in category_group_name is not present in
+    // the excluded_ list. This implies the category_group_name group can be
+    // enabled for recording, since one of its categories is enabled for
+    // recording.
+ if (!category_group_disabled)
+ break;
+ }
+  // If the category group is not excluded, and there are no included patterns,
+  // we consider this category group enabled, as long as it had categories
+  // other than disabled-by-default ones.
+ return !category_group_disabled && had_enabled_by_default &&
+ included_categories_.empty();
+}
+
+bool TraceConfigCategoryFilter::IsCategoryEnabled(
+ const StringPiece& category_name) const {
+ // Check the disabled- filters and the disabled-* wildcard first so that a
+ // "*" filter does not include the disabled.
+ for (const std::string& category : disabled_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+ return false;
+
+ for (const std::string& category : included_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ return false;
+}
+
+void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
+ // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was specifying "*" and we want to honor the
+ // broadest filter.
+ if (!included_categories_.empty() && !config.included_categories_.empty()) {
+ included_categories_.insert(included_categories_.end(),
+ config.included_categories_.begin(),
+ config.included_categories_.end());
+ } else {
+ included_categories_.clear();
+ }
+
+ disabled_categories_.insert(disabled_categories_.end(),
+ config.disabled_categories_.begin(),
+ config.disabled_categories_.end());
+ excluded_categories_.insert(excluded_categories_.end(),
+ config.excluded_categories_.begin(),
+ config.excluded_categories_.end());
+ synthetic_delays_.insert(synthetic_delays_.end(),
+ config.synthetic_delays_.begin(),
+ config.synthetic_delays_.end());
+}
+
+void TraceConfigCategoryFilter::Clear() {
+ included_categories_.clear();
+ disabled_categories_.clear();
+ excluded_categories_.clear();
+ synthetic_delays_.clear();
+}
+
+void TraceConfigCategoryFilter::ToDict(DictionaryValue* dict) const {
+ StringList categories(included_categories_);
+ categories.insert(categories.end(), disabled_categories_.begin(),
+ disabled_categories_.end());
+ AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
+ AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
+ AddCategoriesToDict(synthetic_delays_, kSyntheticDelaysParam, dict);
+}
+
+std::string TraceConfigCategoryFilter::ToFilterString() const {
+ std::string filter_string;
+ WriteCategoryFilterString(included_categories_, &filter_string, true);
+ WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+ WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+ WriteCategoryFilterString(synthetic_delays_, &filter_string);
+ return filter_string;
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
+ const ListValue& included_list) {
+ included_categories_.clear();
+ for (size_t i = 0; i < included_list.GetSize(); ++i) {
+ std::string category;
+ if (!included_list.GetString(i, &category))
+ continue;
+ if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+ TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+ disabled_categories_.push_back(category);
+ } else {
+ included_categories_.push_back(category);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
+ const ListValue& excluded_list) {
+ excluded_categories_.clear();
+ for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+ std::string category;
+ if (excluded_list.GetString(i, &category))
+ excluded_categories_.push_back(category);
+ }
+}
+
+void TraceConfigCategoryFilter::SetSyntheticDelaysFromList(
+ const ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ std::string delay;
+ if (!list.GetString(i, &delay))
+ continue;
+ // Synthetic delays are of the form "delay;option;option;...".
+ size_t name_length = delay.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != delay.size() - 1) {
+ synthetic_delays_.push_back(delay);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::AddCategoriesToDict(
+ const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const {
+ if (categories.empty())
+ return;
+
+ auto list = MakeUnique<ListValue>();
+ for (const std::string& category : categories)
+ list->AppendString(category);
+ dict->Set(param, std::move(list));
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& values,
+ std::string* out,
+ bool included) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : values) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+ ++token_cnt;
+ }
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& delays,
+ std::string* out) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : delays) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+ category.c_str());
+ ++token_cnt;
+ }
+}
+
+// static
+bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
+ return !str.empty() && str.front() != ' ' && str.back() != ' ';
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
new file mode 100644
index 0000000000..0d7dba0374
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+// Configuration of categories enabled and disabled in TraceConfig.
+class BASE_EXPORT TraceConfigCategoryFilter {
+ public:
+ using StringList = std::vector<std::string>;
+
+ TraceConfigCategoryFilter();
+ TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
+ ~TraceConfigCategoryFilter();
+
+ TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
+
+  // Initializes from a category filter string. See the TraceConfig constructor
+  // for a description of how to write a category filter string.
+ void InitializeFromString(const StringPiece& category_filter_string);
+
+ // Initializes TraceConfigCategoryFilter object from the config dictionary.
+ void InitializeFromConfigDict(const DictionaryValue& dict);
+
+ // Merges this with category filter config.
+ void Merge(const TraceConfigCategoryFilter& config);
+ void Clear();
+
+ // Returns true if at least one category in the list is enabled by this
+ // trace config. This is used to determine if the category filters are
+ // enabled in the TRACE_* macros.
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
+ bool IsCategoryEnabled(const StringPiece& category_name) const;
+
+ void ToDict(DictionaryValue* dict) const;
+
+ std::string ToFilterString() const;
+
+ // Returns true if category name is a valid string.
+ static bool IsCategoryNameAllowed(StringPiece str);
+
+ const StringList& included_categories() const { return included_categories_; }
+ const StringList& excluded_categories() const { return excluded_categories_; }
+ const StringList& synthetic_delays() const { return synthetic_delays_; }
+
+ private:
+ void SetCategoriesFromIncludedList(const ListValue& included_list);
+ void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+ void SetSyntheticDelaysFromList(const ListValue& list);
+
+ void AddCategoriesToDict(const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const;
+
+ void WriteCategoryFilterString(const StringList& values,
+ std::string* out,
+ bool included) const;
+ void WriteCategoryFilterString(const StringList& delays,
+ std::string* out) const;
+
+ StringList included_categories_;
+ StringList disabled_categories_;
+ StringList excluded_categories_;
+ StringList synthetic_delays_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
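
For readers skimming the new header, a short sketch of how the class above can be exercised on its own (illustrative only; the main() harness and printed values are assumptions, the API calls are the ones declared above):

#include <iostream>
#include "base/trace_event/trace_config_category_filter.h"

int main() {
  base::trace_event::TraceConfigCategoryFilter filter;
  filter.InitializeFromString(
      "included,inc_pattern*,-excluded,disabled-by-default-cc");

  std::cout << filter.IsCategoryEnabled("included") << "\n";              // 1
  std::cout << filter.IsCategoryEnabled("inc_pattern_category") << "\n";  // 1 (wildcard)
  std::cout << filter.IsCategoryEnabled("excluded") << "\n";              // 0
  // Round-trips in included / disabled-by-default / excluded / delay order.
  std::cout << filter.ToFilterString() << "\n";
  return 0;
}
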
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 74aa7bdc63..a856c27192 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -304,10 +304,12 @@ TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
CheckDefaultTraceConfigBehavior(tc_asterisk);
// They differ only for internal checking.
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc_empty.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(
+ tc_empty.category_filter().IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(
+ tc_asterisk.category_filter().IsCategoryEnabled("not-excluded-category"));
}
TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
@@ -402,13 +404,15 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
"-exc_pattern*,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
tc.ToCategoryFilterString().c_str());
- EXPECT_TRUE(tc.IsCategoryEnabled("included"));
- EXPECT_TRUE(tc.IsCategoryEnabled("inc_pattern_category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-cc"));
- EXPECT_FALSE(tc.IsCategoryEnabled("excluded"));
- EXPECT_FALSE(tc.IsCategoryEnabled("exc_pattern_category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-others"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-nor-included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("inc_pattern_category"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("disabled-by-default-cc"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("excluded"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("exc_pattern_category"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-others"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("not-excluded-nor-included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
@@ -431,10 +435,12 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
EXPECT_STREQ("event_whitelist_predicate",
event_filter.predicate_name().c_str());
- EXPECT_EQ(1u, event_filter.included_categories().size());
- EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
- EXPECT_EQ(1u, event_filter.excluded_categories().size());
- EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().included_categories().size());
+ EXPECT_STREQ("*",
+ event_filter.category_filter().included_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().excluded_categories().size());
+ EXPECT_STREQ("unfiltered_cat",
+ event_filter.category_filter().excluded_categories()[0].c_str());
EXPECT_TRUE(event_filter.filter_args());
std::string json_out;
@@ -449,8 +455,10 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
- EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
- EXPECT_FALSE(tc2.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc2.category_filter().IsCategoryEnabled(
+ "non-disabled-by-default-pattern"));
+ EXPECT_FALSE(
+ tc2.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
@@ -538,8 +546,9 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
"}";
tc = TraceConfig(invalid_config_string_2);
- EXPECT_TRUE(tc.IsCategoryEnabled("category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("category"));
+ EXPECT_TRUE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
}
@@ -591,27 +600,25 @@ TEST(TraceConfigTest, IsCategoryGroupEnabled) {
EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
}
-TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
- // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
- // categories that are explicitly forbidden.
- // This method is called in a DCHECK to assert that we don't have these types
- // of strings as categories.
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- ""));
- EXPECT_FALSE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "good_category"));
+TEST(TraceConfigTest, IsCategoryNameAllowed) {
+ // Test that IsCategoryNameAllowed actually catches categories that are
+ // explicitly forbidden. This method is called in a DCHECK to assert that we
+ // don't have these types of strings as categories.
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(TraceConfigCategoryFilter::IsCategoryNameAllowed(""));
+ EXPECT_TRUE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("good_category"));
}
TEST(TraceConfigTest, SetTraceOptionValues) {
@@ -637,20 +644,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
EXPECT_EQ(tc_str1, tc2.ToString());
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
+ ASSERT_EQ(2u, tc1.memory_dump_config().triggers.size());
EXPECT_EQ(200u,
- tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc1.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config().triggers[0].level_of_detail);
EXPECT_EQ(2000u,
- tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc1.memory_dump_config_.triggers[1].level_of_detail);
+ tc1.memory_dump_config().triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
- tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+ tc1.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
@@ -658,20 +665,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfig tc3(tc_str3);
EXPECT_EQ(tc_str3, tc3.ToString());
EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc3.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config().triggers[0].level_of_detail);
std::string tc_str4 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
1 /*heavy_period */);
TraceConfig tc4(tc_str4);
EXPECT_EQ(tc_str4, tc4.ToString());
- ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc4.memory_dump_config_.triggers[0].level_of_detail);
+ tc4.memory_dump_config().triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -679,22 +686,22 @@ TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
tc.ToString());
- EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
+ EXPECT_EQ(
+ TraceConfig::MemoryDumpConfig::HeapProfiler ::
+ kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
- EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(2u, tc.memory_dump_config().triggers.size());
+ EXPECT_EQ(
+ TraceConfig::MemoryDumpConfig::HeapProfiler ::
+ kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
} // namespace trace_event
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 82a552aa4e..85e1e16312 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -3088,11 +3088,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"testing_predicate\", "
- " \"included_categories\": [\"filtered_cat\"]"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" "
" ]"
@@ -3111,12 +3115,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a horse");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
+
// This is scoped so we can test the end event being filtered.
{ TRACE_EVENT0("filtered_cat", "another cat whoa"); }
EndTraceAndFlush();
- EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(4u, filter_hits_counter.filter_trace_event_hit_count);
EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
}
@@ -3125,12 +3132,14 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"], "
- " \"excluded_categories\": [\"unfiltered_cat\"], "
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("*") "\"], "
" \"filter_args\": {"
" \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
" }"
@@ -3148,12 +3157,16 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a pony");
EndTraceAndFlush();
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_FALSE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
@@ -3161,12 +3174,16 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"excluded_categories\": [\"excluded_cat\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" ]"
"}",
@@ -3180,6 +3197,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("excluded_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
EndTraceAndFlush();
@@ -3187,6 +3206,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_TRUE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
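
For readability, this is the JSON that the updated EventFiltering config literal above assembles once TRACE_DISABLED_BY_DEFAULT("x") expands to "disabled-by-default-x"; it is only a rendering of the escaped string, not new behavior:

{
  "included_categories": [
    "filtered_cat",
    "unfiltered_cat",
    "disabled-by-default-filtered_cat",
    "disabled-by-default-unfiltered_cat"
  ],
  "event_filters": [
    {
      "filter_predicate": "testing_predicate",
      "included_categories": [
        "filtered_cat",
        "disabled-by-default-filtered_cat"
      ]
    }
  ]
}
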
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 10b090ae57..abb0d36177 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -19,8 +19,10 @@
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
+#include "base/process/process_info.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
@@ -1509,8 +1511,20 @@ void TraceLog::AddMetadataEventsWhileLocked() {
process_name_);
}
+#if !defined(OS_NACL) && !defined(OS_IOS)
+/*
+ Time process_creation_time = CurrentProcessInfo::CreationTime();
+ if (!process_creation_time.is_null()) {
+ TimeDelta process_uptime = Time::Now() - process_creation_time;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_uptime_seconds",
+ "uptime", process_uptime.InSeconds());
+ }
+*/
+#endif // !defined(OS_NACL) && !defined(OS_IOS)
+
if (!process_labels_.empty()) {
- std::vector<std::string> labels;
+ std::vector<base::StringPiece> labels;
for (const auto& it : process_labels_)
labels.push_back(it.second);
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
diff --git a/base/values.cc b/base/values.cc
index 5cc0d693bd..b5e44e68dd 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -69,7 +69,7 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
static_cast<const DictionaryValue&>(node));
default:
- return node.CreateDeepCopy();
+ return MakeUnique<Value>(node);
}
}
@@ -91,11 +91,11 @@ Value::Value(const Value& that) {
InternalCopyConstructFrom(that);
}
-Value::Value(Value&& that) {
+Value::Value(Value&& that) noexcept {
InternalMoveConstructFrom(std::move(that));
}
-Value::Value() : type_(Type::NONE) {}
+Value::Value() noexcept : type_(Type::NONE) {}
Value::Value(Type type) : type_(type) {
// Initialize with the default value.
@@ -149,7 +149,7 @@ Value::Value(const std::string& in_string) : type_(Type::STRING) {
DCHECK(IsStringUTF8(*string_value_));
}
-Value::Value(std::string&& in_string) : type_(Type::STRING) {
+Value::Value(std::string&& in_string) noexcept : type_(Type::STRING) {
string_value_.Init(std::move(in_string));
DCHECK(IsStringUTF8(*string_value_));
}
@@ -168,32 +168,26 @@ Value::Value(const std::vector<char>& in_blob) : type_(Type::BINARY) {
binary_value_.Init(in_blob);
}
-Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
+Value::Value(std::vector<char>&& in_blob) noexcept : type_(Type::BINARY) {
binary_value_.Init(std::move(in_blob));
}
Value& Value::operator=(const Value& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalCopyAssignFromSameType(that);
- } else {
- InternalCleanup();
- InternalCopyConstructFrom(that);
- }
+ if (type_ == that.type_) {
+ InternalCopyAssignFromSameType(that);
+ } else {
+ // This is not a self assignment because the type_ doesn't match.
+ InternalCleanup();
+ InternalCopyConstructFrom(that);
}
return *this;
}
-Value& Value::operator=(Value&& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalMoveAssignFromSameType(std::move(that));
- } else {
- InternalCleanup();
- InternalMoveConstructFrom(std::move(that));
- }
- }
+Value& Value::operator=(Value&& that) noexcept {
+ DCHECK(this != &that) << "attempt to self move assign.";
+ InternalCleanup();
+ InternalMoveConstructFrom(std::move(that));
return *this;
}
@@ -347,112 +341,122 @@ bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
}
Value* Value::DeepCopy() const {
- // This method should only be getting called for null Values--all subclasses
- // need to provide their own implementation;.
- switch (type()) {
- case Type::NONE:
- return CreateNullValue().release();
-
- case Type::BOOLEAN:
- return new Value(bool_value_);
- case Type::INTEGER:
- return new Value(int_value_);
- case Type::DOUBLE:
- return new Value(double_value_);
- case Type::STRING:
- return new Value(*string_value_);
- // For now, make BinaryValues for backward-compatibility. Convert to
- // Value when that code is deleted.
- case Type::BINARY:
- return new Value(*binary_value_);
-
- // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
- // are completely inlined.
- case Type::DICTIONARY: {
- DictionaryValue* result = new DictionaryValue;
-
- for (const auto& current_entry : **dict_ptr_) {
- result->SetWithoutPathExpansion(current_entry.first,
- current_entry.second->CreateDeepCopy());
- }
-
- return result;
- }
-
- case Type::LIST: {
- ListValue* result = new ListValue;
-
- for (const auto& entry : *list_)
- result->Append(entry->CreateDeepCopy());
-
- return result;
- }
-
- default:
- NOTREACHED();
- return nullptr;
- }
+ return new Value(*this);
}
std::unique_ptr<Value> Value::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<Value>(*this);
}
-bool Value::Equals(const Value* other) const {
- if (other->type() != type())
+bool operator==(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
return false;
- switch (type()) {
- case Type::NONE:
+ switch (lhs.type_) {
+ case Value::Type::NONE:
return true;
- case Type::BOOLEAN:
- return bool_value_ == other->bool_value_;
- case Type::INTEGER:
- return int_value_ == other->int_value_;
- case Type::DOUBLE:
- return double_value_ == other->double_value_;
- case Type::STRING:
- return *string_value_ == *(other->string_value_);
- case Type::BINARY:
- return *binary_value_ == *(other->binary_value_);
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ == rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ == rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ == rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ == *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ == *rhs.binary_value_;
// TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
// are completely inlined.
- case Type::DICTIONARY: {
- if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
+ case Value::Type::DICTIONARY:
+ if ((*lhs.dict_ptr_)->size() != (*rhs.dict_ptr_)->size())
return false;
-
- return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
- std::begin(**(other->dict_ptr_)),
- [](const DictStorage::value_type& lhs,
- const DictStorage::value_type& rhs) {
- if (lhs.first != rhs.first)
- return false;
-
- return lhs.second->Equals(rhs.second.get());
+ return std::equal(std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) ==
+ std::tie(v.first, *v.second);
});
- }
- case Type::LIST: {
- if (list_->size() != other->list_->size())
+ case Value::Type::LIST:
+ if (lhs.list_->size() != rhs.list_->size())
return false;
+ return std::equal(
+ std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
+ [](const Value::ListStorage::value_type& u,
+ const Value::ListStorage::value_type& v) { return *u == *v; });
+ }
- return std::equal(std::begin(*list_), std::end(*list_),
- std::begin(*(other->list_)),
- [](const ListStorage::value_type& lhs,
- const ListStorage::value_type& rhs) {
- return lhs->Equals(rhs.get());
- });
- }
+ NOTREACHED();
+ return false;
+}
+
+bool operator!=(const Value& lhs, const Value& rhs) {
+ return !(lhs == rhs);
+}
+
+bool operator<(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
+ return lhs.type_ < rhs.type_;
+
+ switch (lhs.type_) {
+ case Value::Type::NONE:
+ return false;
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ < rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ < rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ < rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ < *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ < *rhs.binary_value_;
+ // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+ // are completely inlined.
+ case Value::Type::DICTIONARY:
+ return std::lexicographical_compare(
+ std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_), std::end(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) < std::tie(v.first, *v.second);
+ });
+ case Value::Type::LIST:
+ return std::lexicographical_compare(
+ std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
+ std::end(*rhs.list_),
+ [](const Value::ListStorage::value_type& u,
+ const Value::ListStorage::value_type& v) { return *u < *v; });
}
NOTREACHED();
return false;
}
+bool operator>(const Value& lhs, const Value& rhs) {
+ return rhs < lhs;
+}
+
+bool operator<=(const Value& lhs, const Value& rhs) {
+ return !(rhs < lhs);
+}
+
+bool operator>=(const Value& lhs, const Value& rhs) {
+ return !(lhs < rhs);
+}
+
+bool Value::Equals(const Value* other) const {
+ DCHECK(other);
+ return *this == *other;
+}
+
// static
bool Value::Equals(const Value* a, const Value* b) {
- if ((a == NULL) && (b == NULL)) return true;
- if ((a == NULL) ^ (b == NULL)) return false;
- return a->Equals(b);
+ if ((a == NULL) && (b == NULL))
+ return true;
+ if ((a == NULL) ^ (b == NULL))
+ return false;
+ return *a == *b;
}
void Value::InternalCopyFundamentalValue(const Value& that) {
@@ -494,14 +498,23 @@ void Value::InternalCopyConstructFrom(const Value& that) {
binary_value_.Init(*that.binary_value_);
return;
// DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // unique_ptrs. This is why the explicit copy of every element is necessary
+ // here.
// TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
// can be copied directly.
case Type::DICTIONARY:
- dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
+ dict_ptr_.Init(MakeUnique<DictStorage>());
+ for (const auto& it : **that.dict_ptr_) {
+ (*dict_ptr_)
+ ->emplace_hint((*dict_ptr_)->end(), it.first,
+ MakeUnique<Value>(*it.second));
+ }
return;
case Type::LIST:
- list_.Init(std::move(*that.CreateDeepCopy()->list_));
+ list_.Init();
+ list_->reserve(that.list_->size());
+ for (const auto& it : *that.list_)
+ list_->push_back(MakeUnique<Value>(*it));
return;
}
}
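
Aside (illustrative, not part of the patch): the comment above hinges on DictStorage holding std::unique_ptr values, which makes the whole container move-only, so copying has to clone element by element. A minimal standalone sketch of that constraint, assuming DictStorage's std::map<std::string, std::unique_ptr<Value>> shape:

#include <map>
#include <memory>
#include <string>

#include "base/memory/ptr_util.h"
#include "base/values.h"

using DictStorage = std::map<std::string, std::unique_ptr<base::Value>>;

DictStorage CloneDict(const DictStorage& src) {
  // DictStorage dst = src;  // Does not compile: unique_ptr is not copyable.
  DictStorage dst;
  for (const auto& entry : src) {
    // Clone each Value explicitly; emplace_hint at end() keeps the insert
    // cheap because |src| is already sorted by key.
    dst.emplace_hint(dst.end(), entry.first,
                     base::MakeUnique<base::Value>(*entry.second));
  }
  return dst;
}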
@@ -533,6 +546,8 @@ void Value::InternalMoveConstructFrom(Value&& that) {
}
void Value::InternalCopyAssignFromSameType(const Value& that) {
+ // TODO(crbug.com/646113): make this a DCHECK once base::Value does not have
+ // subclasses.
CHECK_EQ(type_, that.type_);
switch (type_) {
@@ -550,40 +565,15 @@ void Value::InternalCopyAssignFromSameType(const Value& that) {
*binary_value_ = *that.binary_value_;
return;
// DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // unique_ptrs. This is why the explicit call to the copy constructor is
+ // necessary here.
// TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
// can be copied directly.
case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
+ *dict_ptr_ = std::move(*Value(that).dict_ptr_);
return;
case Type::LIST:
- *list_ = std::move(*that.CreateDeepCopy()->list_);
- return;
- }
-}
-
-void Value::InternalMoveAssignFromSameType(Value&& that) {
- CHECK_EQ(type_, that.type_);
-
- switch (type_) {
- case Type::NONE:
- case Type::BOOLEAN:
- case Type::INTEGER:
- case Type::DOUBLE:
- InternalCopyFundamentalValue(that);
- return;
-
- case Type::STRING:
- *string_value_ = std::move(*that.string_value_);
- return;
- case Type::BINARY:
- *binary_value_ = std::move(*that.binary_value_);
- return;
- case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.dict_ptr_);
- return;
- case Type::LIST:
- *list_ = std::move(*that.list_);
+ *list_ = std::move(*Value(that).list_);
return;
}
}
@@ -1049,8 +1039,7 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
}
}
// All other cases: Make a copy and hook it up.
- SetWithoutPathExpansion(it.key(),
- base::WrapUnique(merge_value->DeepCopy()));
+ SetWithoutPathExpansion(it.key(), MakeUnique<Value>(*merge_value));
}
}
@@ -1067,11 +1056,11 @@ DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
- return static_cast<DictionaryValue*>(Value::DeepCopy());
+ return new DictionaryValue(*this);
}
std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<DictionaryValue>(*this);
}
///////////////////// ListValue ////////////////////
@@ -1237,7 +1226,7 @@ bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
bool ListValue::Remove(const Value& value, size_t* index) {
for (auto it = list_->begin(); it != list_->end(); ++it) {
- if ((*it)->Equals(&value)) {
+ if (**it == value) {
size_t previous_index = it - list_->begin();
list_->erase(it);
@@ -1305,9 +1294,8 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
DCHECK(in_value);
for (const auto& entry : *list_) {
- if (entry->Equals(in_value.get())) {
+ if (*entry == *in_value)
return false;
- }
}
list_->push_back(std::move(in_value));
return true;
@@ -1325,7 +1313,7 @@ bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
ListValue::const_iterator ListValue::Find(const Value& value) const {
return std::find_if(list_->begin(), list_->end(),
[&value](const std::unique_ptr<Value>& entry) {
- return entry->Equals(&value);
+ return *entry == value;
});
}
@@ -1335,11 +1323,11 @@ void ListValue::Swap(ListValue* other) {
}
ListValue* ListValue::DeepCopy() const {
- return static_cast<ListValue*>(Value::DeepCopy());
+ return new ListValue(*this);
}
std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<ListValue>(*this);
}
ValueSerializer::~ValueSerializer() {
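
Aside (illustrative, not part of the patch): with DeepCopy()/CreateDeepCopy() now deprecated in favor of the copy constructors, caller-side migration is mechanical. A minimal sketch, assuming a local base::DictionaryValue named |dict|:

#include <memory>

#include "base/memory/ptr_util.h"
#include "base/values.h"

void MigrationSketch() {
  base::DictionaryValue dict;
  dict.SetInteger("answer", 42);

  // Deprecated idioms that still compile after this change:
  std::unique_ptr<base::DictionaryValue> old_copy = dict.CreateDeepCopy();
  std::unique_ptr<base::Value> old_raw = base::WrapUnique(dict.DeepCopy());

  // Preferred replacements: the copy constructor, usually via MakeUnique.
  auto new_copy = base::MakeUnique<base::DictionaryValue>(dict);
  auto new_value = base::MakeUnique<base::Value>(dict);
}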
diff --git a/base/values.h b/base/values.h
index 35f66df904..925152dbee 100644
--- a/base/values.h
+++ b/base/values.h
@@ -74,8 +74,8 @@ class BASE_EXPORT Value {
size_t size);
Value(const Value& that);
- Value(Value&& that);
- Value(); // A null value.
+ Value(Value&& that) noexcept;
+ Value() noexcept; // A null value.
explicit Value(Type type);
explicit Value(bool in_bool);
explicit Value(int in_int);
@@ -89,16 +89,16 @@ class BASE_EXPORT Value {
// arguments.
explicit Value(const char* in_string);
explicit Value(const std::string& in_string);
- explicit Value(std::string&& in_string);
+ explicit Value(std::string&& in_string) noexcept;
explicit Value(const char16* in_string);
explicit Value(const string16& in_string);
explicit Value(StringPiece in_string);
explicit Value(const std::vector<char>& in_blob);
- explicit Value(std::vector<char>&& in_blob);
+ explicit Value(std::vector<char>&& in_blob) noexcept;
Value& operator=(const Value& that);
- Value& operator=(Value&& that);
+ Value& operator=(Value&& that) noexcept;
~Value();
@@ -157,15 +157,30 @@ class BASE_EXPORT Value {
// to the copy. The caller gets ownership of the copy, of course.
// Subclasses return their own type directly in their overrides;
// this works because C++ supports covariant return types.
+ // DEPRECATED, use Value's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
Value* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<Value> CreateDeepCopy() const;
+ // Comparison operators so that Values can easily be used with standard
+ // library algorithms and associative containers.
+ BASE_EXPORT friend bool operator==(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator!=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>=(const Value& lhs, const Value& rhs);
+
// Compares if two Value objects have equal contents.
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
bool Equals(const Value* other) const;
// Compares if two Value objects have equal contents. Can handle NULLs.
// NULLs are considered equal but different from Value::CreateNullValue().
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
static bool Equals(const Value* a, const Value* b);
protected:
@@ -191,7 +206,6 @@ class BASE_EXPORT Value {
void InternalCopyConstructFrom(const Value& that);
void InternalMoveConstructFrom(Value&& that);
void InternalCopyAssignFromSameType(const Value& that);
- void InternalMoveAssignFromSameType(Value&& that);
void InternalCleanup();
};
@@ -352,6 +366,8 @@ class BASE_EXPORT DictionaryValue : public Value {
DictStorage::const_iterator it_;
};
+ // DEPRECATED, use DictionaryValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
DictionaryValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
@@ -468,6 +484,8 @@ class BASE_EXPORT ListValue : public Value {
const_iterator begin() const { return list_->begin(); }
const_iterator end() const { return list_->end(); }
+ // DEPRECATED, use ListValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
ListValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
std::unique_ptr<ListValue> CreateDeepCopy() const;
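
Aside (illustrative, not part of the patch): the comparison operators declared above are what let Value be used directly with standard algorithms and ordered containers, as the header comment says. A minimal sketch, assuming copies of the Values are acceptable:

#include <algorithm>
#include <set>
#include <vector>

#include "base/values.h"

void ComparisonSketch() {
  std::vector<base::Value> values;
  values.emplace_back(2);
  values.emplace_back(1);
  std::sort(values.begin(), values.end());  // operator< orders by type, then value.

  std::set<base::Value> unique_values(values.begin(), values.end());
  bool has_one = unique_values.count(base::Value(1)) > 0;  // operator< again.
  (void)has_one;
}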
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 3bcdc16e37..6c1f017095 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -8,6 +8,8 @@
#include <limits>
#include <memory>
+#include <string>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -18,6 +20,20 @@
namespace base {
+TEST(ValuesTest, TestNothrow) {
+ static_assert(std::is_nothrow_move_constructible<Value>::value,
+ "IsNothrowMoveConstructible");
+ static_assert(std::is_nothrow_default_constructible<Value>::value,
+ "IsNothrowDefaultConstructible");
+ static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
+ "IsNothrowMoveConstructibleFromString");
+ static_assert(
+ std::is_nothrow_constructible<Value, std::vector<char>&&>::value,
+ "IsNothrowMoveConstructibleFromBlob");
+ static_assert(std::is_nothrow_move_assignable<Value>::value,
+ "IsNothrowMoveAssignable");
+}
+
// Group of tests for the value constructors.
TEST(ValuesTest, ConstructBool) {
Value true_value(true);
@@ -679,7 +695,7 @@ TEST(ValuesTest, DeepCopy) {
scoped_nested_dictionary->SetString("key", "value");
original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
- std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+ auto copy_dict = MakeUnique<DictionaryValue>(original_dict);
ASSERT_TRUE(copy_dict.get());
ASSERT_NE(copy_dict.get(), &original_dict);
@@ -789,10 +805,10 @@ TEST(ValuesTest, Equals) {
std::unique_ptr<Value> null1(Value::CreateNullValue());
std::unique_ptr<Value> null2(Value::CreateNullValue());
EXPECT_NE(null1.get(), null2.get());
- EXPECT_TRUE(null1->Equals(null2.get()));
+ EXPECT_EQ(*null1, *null2);
Value boolean(false);
- EXPECT_FALSE(null1->Equals(&boolean));
+ EXPECT_NE(*null1, boolean);
DictionaryValue dv;
dv.SetBoolean("a", false);
@@ -802,29 +818,29 @@ TEST(ValuesTest, Equals) {
dv.SetString("d2", ASCIIToUTF16("http://google.com"));
dv.Set("e", Value::CreateNullValue());
- std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ auto copy = MakeUnique<DictionaryValue>(dv);
+ EXPECT_EQ(dv, *copy);
std::unique_ptr<ListValue> list(new ListValue);
ListValue* original_list = list.get();
list->Append(Value::CreateNullValue());
list->Append(WrapUnique(new DictionaryValue));
- std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
+ auto list_copy = MakeUnique<Value>(*list);
dv.Set("f", std::move(list));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
copy->Set("f", std::move(list_copy));
- EXPECT_TRUE(dv.Equals(copy.get()));
+ EXPECT_EQ(dv, *copy);
original_list->Append(MakeUnique<Value>(true));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
// Check if Equals detects differences in only the keys.
- copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ copy = MakeUnique<DictionaryValue>(dv);
+ EXPECT_EQ(dv, *copy);
copy->Remove("a", NULL);
copy->SetBoolean("aa", false);
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
}
TEST(ValuesTest, StaticEquals) {
@@ -850,6 +866,126 @@ TEST(ValuesTest, StaticEquals) {
EXPECT_FALSE(Value::Equals(NULL, null1.get()));
}
+TEST(ValuesTest, Comparisons) {
+ // Test None Values.
+ Value null1;
+ Value null2;
+ EXPECT_EQ(null1, null2);
+ EXPECT_FALSE(null1 != null2);
+ EXPECT_FALSE(null1 < null2);
+ EXPECT_FALSE(null1 > null2);
+ EXPECT_LE(null1, null2);
+ EXPECT_GE(null1, null2);
+
+ // Test Bool Values.
+ Value bool1(false);
+ Value bool2(true);
+ EXPECT_FALSE(bool1 == bool2);
+ EXPECT_NE(bool1, bool2);
+ EXPECT_LT(bool1, bool2);
+ EXPECT_FALSE(bool1 > bool2);
+ EXPECT_LE(bool1, bool2);
+ EXPECT_FALSE(bool1 >= bool2);
+
+ // Test Int Values.
+ Value int1(1);
+ Value int2(2);
+ EXPECT_FALSE(int1 == int2);
+ EXPECT_NE(int1, int2);
+ EXPECT_LT(int1, int2);
+ EXPECT_FALSE(int1 > int2);
+ EXPECT_LE(int1, int2);
+ EXPECT_FALSE(int1 >= int2);
+
+ // Test Double Values.
+ Value double1(1.0);
+ Value double2(2.0);
+ EXPECT_FALSE(double1 == double2);
+ EXPECT_NE(double1, double2);
+ EXPECT_LT(double1, double2);
+ EXPECT_FALSE(double1 > double2);
+ EXPECT_LE(double1, double2);
+ EXPECT_FALSE(double1 >= double2);
+
+ // Test String Values.
+ Value string1("1");
+ Value string2("2");
+ EXPECT_FALSE(string1 == string2);
+ EXPECT_NE(string1, string2);
+ EXPECT_LT(string1, string2);
+ EXPECT_FALSE(string1 > string2);
+ EXPECT_LE(string1, string2);
+ EXPECT_FALSE(string1 >= string2);
+
+ // Test Binary Values.
+ Value binary1(std::vector<char>{0x01});
+ Value binary2(std::vector<char>{0x02});
+ EXPECT_FALSE(binary1 == binary2);
+ EXPECT_NE(binary1, binary2);
+ EXPECT_LT(binary1, binary2);
+ EXPECT_FALSE(binary1 > binary2);
+ EXPECT_LE(binary1, binary2);
+ EXPECT_FALSE(binary1 >= binary2);
+
+ // Test Empty List Values.
+ ListValue null_list1;
+ ListValue null_list2;
+ EXPECT_EQ(null_list1, null_list2);
+ EXPECT_FALSE(null_list1 != null_list2);
+ EXPECT_FALSE(null_list1 < null_list2);
+ EXPECT_FALSE(null_list1 > null_list2);
+ EXPECT_LE(null_list1, null_list2);
+ EXPECT_GE(null_list1, null_list2);
+
+ // Test Non Empty List Values.
+ ListValue int_list1;
+ ListValue int_list2;
+ int_list1.AppendInteger(1);
+ int_list2.AppendInteger(2);
+ EXPECT_FALSE(int_list1 == int_list2);
+ EXPECT_NE(int_list1, int_list2);
+ EXPECT_LT(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 > int_list2);
+ EXPECT_LE(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 >= int_list2);
+
+ // Test Empty Dict Values.
+ DictionaryValue null_dict1;
+ DictionaryValue null_dict2;
+ EXPECT_EQ(null_dict1, null_dict2);
+ EXPECT_FALSE(null_dict1 != null_dict2);
+ EXPECT_FALSE(null_dict1 < null_dict2);
+ EXPECT_FALSE(null_dict1 > null_dict2);
+ EXPECT_LE(null_dict1, null_dict2);
+ EXPECT_GE(null_dict1, null_dict2);
+
+ // Test Non Empty Dict Values.
+ DictionaryValue int_dict1;
+ DictionaryValue int_dict2;
+ int_dict1.SetInteger("key", 1);
+ int_dict2.SetInteger("key", 2);
+ EXPECT_FALSE(int_dict1 == int_dict2);
+ EXPECT_NE(int_dict1, int_dict2);
+ EXPECT_LT(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 > int_dict2);
+ EXPECT_LE(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 >= int_dict2);
+
+ // Test Values of different types.
+ std::vector<Value> values = {null1, bool1, int1, double1,
+ string1, binary1, int_dict1, int_list1};
+ for (size_t i = 0; i < values.size(); ++i) {
+ for (size_t j = i + 1; j < values.size(); ++j) {
+ EXPECT_FALSE(values[i] == values[j]);
+ EXPECT_NE(values[i], values[j]);
+ EXPECT_LT(values[i], values[j]);
+ EXPECT_FALSE(values[i] > values[j]);
+ EXPECT_LE(values[i], values[j]);
+ EXPECT_FALSE(values[i] >= values[j]);
+ }
+ }
+}
+
TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
DictionaryValue original_dict;
std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
@@ -885,25 +1021,25 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
scoped_list->Append(std::move(scoped_list_element_1));
original_dict.Set("list", std::move(scoped_list));
- std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
- std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
- std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
- std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
- std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
- std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
- std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
- std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
- std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
-
- EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
- EXPECT_TRUE(original_null->Equals(copy_null.get()));
- EXPECT_TRUE(original_bool->Equals(copy_bool.get()));
- EXPECT_TRUE(original_int->Equals(copy_int.get()));
- EXPECT_TRUE(original_double->Equals(copy_double.get()));
- EXPECT_TRUE(original_string->Equals(copy_string.get()));
- EXPECT_TRUE(original_string16->Equals(copy_string16.get()));
- EXPECT_TRUE(original_binary->Equals(copy_binary.get()));
- EXPECT_TRUE(original_list->Equals(copy_list.get()));
+ auto copy_dict = MakeUnique<Value>(original_dict);
+ auto copy_null = MakeUnique<Value>(*original_null);
+ auto copy_bool = MakeUnique<Value>(*original_bool);
+ auto copy_int = MakeUnique<Value>(*original_int);
+ auto copy_double = MakeUnique<Value>(*original_double);
+ auto copy_string = MakeUnique<Value>(*original_string);
+ auto copy_string16 = MakeUnique<Value>(*original_string16);
+ auto copy_binary = MakeUnique<Value>(*original_binary);
+ auto copy_list = MakeUnique<Value>(*original_list);
+
+ EXPECT_EQ(original_dict, *copy_dict);
+ EXPECT_EQ(*original_null, *copy_null);
+ EXPECT_EQ(*original_bool, *copy_bool);
+ EXPECT_EQ(*original_int, *copy_int);
+ EXPECT_EQ(*original_double, *copy_double);
+ EXPECT_EQ(*original_string, *copy_string);
+ EXPECT_EQ(*original_string16, *copy_string16);
+ EXPECT_EQ(*original_binary, *copy_binary);
+ EXPECT_EQ(*original_list, *copy_list);
}
TEST(ValuesTest, RemoveEmptyChildren) {
@@ -1068,27 +1204,27 @@ TEST(ValuesTest, DictionaryIterator) {
}
Value value1("value1");
- dict.Set("key1", value1.CreateDeepCopy());
+ dict.Set("key1", MakeUnique<Value>(value1));
bool seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
EXPECT_FALSE(seen1);
EXPECT_EQ("key1", it.key());
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
}
EXPECT_TRUE(seen1);
Value value2("value2");
- dict.Set("key2", value2.CreateDeepCopy());
+ dict.Set("key2", MakeUnique<Value>(value2));
bool seen2 = seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
if (it.key() == "key1") {
EXPECT_FALSE(seen1);
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
} else if (it.key() == "key2") {
EXPECT_FALSE(seen2);
- EXPECT_TRUE(value2.Equals(&it.value()));
+ EXPECT_EQ(value2, it.value());
seen2 = true;
} else {
ADD_FAILURE();
@@ -1112,21 +1248,21 @@ TEST(ValuesTest, GetWithNullOutValue) {
DictionaryValue dict_value;
ListValue list_value;
- main_dict.Set("bool", bool_value.CreateDeepCopy());
- main_dict.Set("int", int_value.CreateDeepCopy());
- main_dict.Set("double", double_value.CreateDeepCopy());
- main_dict.Set("string", string_value.CreateDeepCopy());
- main_dict.Set("binary", binary_value.CreateDeepCopy());
- main_dict.Set("dict", dict_value.CreateDeepCopy());
- main_dict.Set("list", list_value.CreateDeepCopy());
-
- main_list.Append(bool_value.CreateDeepCopy());
- main_list.Append(int_value.CreateDeepCopy());
- main_list.Append(double_value.CreateDeepCopy());
- main_list.Append(string_value.CreateDeepCopy());
- main_list.Append(binary_value.CreateDeepCopy());
- main_list.Append(dict_value.CreateDeepCopy());
- main_list.Append(list_value.CreateDeepCopy());
+ main_dict.Set("bool", MakeUnique<Value>(bool_value));
+ main_dict.Set("int", MakeUnique<Value>(int_value));
+ main_dict.Set("double", MakeUnique<Value>(double_value));
+ main_dict.Set("string", MakeUnique<Value>(string_value));
+ main_dict.Set("binary", MakeUnique<Value>(binary_value));
+ main_dict.Set("dict", MakeUnique<Value>(dict_value));
+ main_dict.Set("list", MakeUnique<Value>(list_value));
+
+ main_list.Append(MakeUnique<Value>(bool_value));
+ main_list.Append(MakeUnique<Value>(int_value));
+ main_list.Append(MakeUnique<Value>(double_value));
+ main_list.Append(MakeUnique<Value>(string_value));
+ main_list.Append(MakeUnique<Value>(binary_value));
+ main_list.Append(MakeUnique<Value>(dict_value));
+ main_list.Append(MakeUnique<Value>(list_value));
EXPECT_TRUE(main_dict.Get("bool", NULL));
EXPECT_TRUE(main_dict.Get("int", NULL));
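
Aside (illustrative, not part of the patch): the TestNothrow static_asserts added above mostly matter for container growth; with a noexcept move constructor, std::vector relocates existing Values by moving rather than copying when it reallocates. A minimal sketch:

#include <vector>

#include "base/values.h"

void NothrowMoveSketch() {
  std::vector<base::Value> values;
  values.reserve(1);
  values.emplace_back("first");
  // Forces a reallocation. Because Value's move constructor is noexcept,
  // std::move_if_noexcept moves the existing element instead of deep-copying
  // it, keeping growth cheap even for large strings and blobs.
  values.push_back(base::Value("second"));
}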
diff --git a/base/win/scoped_comptr.h b/base/win/scoped_comptr.h
index 9442672054..d4aaa84201 100644
--- a/base/win/scoped_comptr.h
+++ b/base/win/scoped_comptr.h
@@ -5,19 +5,18 @@
#ifndef BASE_WIN_SCOPED_COMPTR_H_
#define BASE_WIN_SCOPED_COMPTR_H_
+#include <objbase.h>
#include <unknwn.h>
#include "base/logging.h"
-#include "base/memory/ref_counted.h"
namespace base {
namespace win {
+// DEPRECATED: Use Microsoft::WRL::ComPtr instead.
// A fairly minimalistic smart class for COM interface pointers.
-// Uses scoped_refptr for the basic smart pointer functionality
-// and adds a few IUnknown specific services.
template <class Interface, const IID* interface_id = &__uuidof(Interface)>
-class ScopedComPtr : public scoped_refptr<Interface> {
+class ScopedComPtr {
public:
// Utility template to prevent users of ScopedComPtr from calling AddRef
// and/or Release() without going through the ScopedComPtr class.
@@ -28,16 +27,17 @@ class ScopedComPtr : public scoped_refptr<Interface> {
STDMETHOD_(ULONG, Release)() = 0;
};
- typedef scoped_refptr<Interface> ParentClass;
-
ScopedComPtr() {
}
- explicit ScopedComPtr(Interface* p) : ParentClass(p) {
+ explicit ScopedComPtr(Interface* p) : ptr_(p) {
+ if (ptr_)
+ ptr_->AddRef();
}
- ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p)
- : ParentClass(p) {
+ ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p) : ptr_(p.get()) {
+ if (ptr_)
+ ptr_->AddRef();
}
~ScopedComPtr() {
@@ -46,31 +46,37 @@ class ScopedComPtr : public scoped_refptr<Interface> {
static_assert(
sizeof(ScopedComPtr<Interface, interface_id>) == sizeof(Interface*),
"ScopedComPtrSize");
+ Release();
}
+ Interface* get() const { return ptr_; }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
// Explicit Release() of the held object. Useful for reuse of the
// ScopedComPtr instance.
// Note that this function equates to IUnknown::Release and should not
// be confused with e.g. unique_ptr::release().
void Release() {
- if (this->ptr_ != NULL) {
- this->ptr_->Release();
- this->ptr_ = NULL;
+ Interface* temp = ptr_;
+ if (temp) {
+ ptr_ = nullptr;
+ temp->Release();
}
}
// Sets the internal pointer to NULL and returns the held object without
// releasing the reference.
Interface* Detach() {
- Interface* p = this->ptr_;
- this->ptr_ = NULL;
+ Interface* p = ptr_;
+ ptr_ = nullptr;
return p;
}
// Accepts an interface pointer that has already been addref-ed.
void Attach(Interface* p) {
- DCHECK(!this->ptr_);
- this->ptr_ = p;
+ DCHECK(!ptr_);
+ ptr_ = p;
}
// Retrieves the pointer address.
@@ -78,8 +84,8 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// The function DCHECKs on the current value being NULL.
// Usage: Foo(p.Receive());
Interface** Receive() {
- DCHECK(!this->ptr_) << "Object leak. Pointer must be NULL";
- return &this->ptr_;
+ DCHECK(!ptr_) << "Object leak. Pointer must be NULL";
+ return &ptr_;
}
// A convenience for whenever a void pointer is needed as an out argument.
@@ -89,50 +95,51 @@ class ScopedComPtr : public scoped_refptr<Interface> {
template <class Query>
HRESULT QueryInterface(Query** p) {
- DCHECK(p != NULL);
- DCHECK(this->ptr_ != NULL);
+ DCHECK(p);
+ DCHECK(ptr_);
// IUnknown already has a template version of QueryInterface
// so the iid parameter is implicit here. The only thing this
// function adds are the DCHECKs.
- return this->ptr_->QueryInterface(p);
+ return ptr_->QueryInterface(IID_PPV_ARGS(p));
}
// QI for times when the IID is not associated with the type.
HRESULT QueryInterface(const IID& iid, void** obj) {
- DCHECK(obj != NULL);
- DCHECK(this->ptr_ != NULL);
- return this->ptr_->QueryInterface(iid, obj);
+ DCHECK(obj);
+ DCHECK(ptr_);
+ return ptr_->QueryInterface(iid, obj);
}
// Queries |object| for the interface this ScopedComPtr wraps and returns
// the error code from the object->QueryInterface operation.
HRESULT QueryFrom(IUnknown* object) {
- DCHECK(object != NULL);
- return object->QueryInterface(Receive());
+ DCHECK(object);
+ return object->QueryInterface(IID_PPV_ARGS(Receive()));
}
// Convenience wrapper around CoCreateInstance
- HRESULT CreateInstance(const CLSID& clsid, IUnknown* outer = NULL,
+ HRESULT CreateInstance(const CLSID& clsid,
+ IUnknown* outer = nullptr,
DWORD context = CLSCTX_ALL) {
- DCHECK(!this->ptr_);
+ DCHECK(!ptr_);
HRESULT hr = ::CoCreateInstance(clsid, outer, context, *interface_id,
- reinterpret_cast<void**>(&this->ptr_));
+ reinterpret_cast<void**>(&ptr_));
return hr;
}
// Checks if the identity of |other| and this object is the same.
bool IsSameObject(IUnknown* other) {
- if (!other && !this->ptr_)
+ if (!other && !ptr_)
return true;
- if (!other || !this->ptr_)
+ if (!other || !ptr_)
return false;
ScopedComPtr<IUnknown> my_identity;
- QueryInterface(my_identity.Receive());
+ QueryInterface(IID_PPV_ARGS(my_identity.Receive()));
ScopedComPtr<IUnknown> other_identity;
- other->QueryInterface(other_identity.Receive());
+ other->QueryInterface(IID_PPV_ARGS(other_identity.Receive()));
return my_identity == other_identity;
}
@@ -148,20 +155,115 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// by statically casting the ScopedComPtr instance to the wrapped interface
// and then making the call... but generally that shouldn't be necessary.
BlockIUnknownMethods* operator->() const {
- DCHECK(this->ptr_ != NULL);
- return reinterpret_cast<BlockIUnknownMethods*>(this->ptr_);
+ DCHECK(ptr_);
+ return reinterpret_cast<BlockIUnknownMethods*>(ptr_);
+ }
+
+ ScopedComPtr<Interface, interface_id>& operator=(Interface* rhs) {
+ // AddRef first so that self-assignment works correctly.
+ if (rhs)
+ rhs->AddRef();
+ Interface* old_ptr = ptr_;
+ ptr_ = rhs;
+ if (old_ptr)
+ old_ptr->Release();
+ return *this;
}
- // Pull in operator=() from the parent class.
- using scoped_refptr<Interface>::operator=;
+ ScopedComPtr<Interface, interface_id>& operator=(
+ const ScopedComPtr<Interface, interface_id>& rhs) {
+ return *this = rhs.ptr_;
+ }
- // static methods
+ Interface& operator*() const {
+ DCHECK(ptr_);
+ return *ptr_;
+ }
+
+ bool operator==(const ScopedComPtr<Interface, interface_id>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator==(const ScopedComPtr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator==(const U* rhs) const {
+ return ptr_ == rhs;
+ }
+
+ bool operator!=(const ScopedComPtr<Interface, interface_id>& rhs) const {
+ return ptr_ != rhs.get();
+ }
+ template <typename U>
+ bool operator!=(const ScopedComPtr<U>& rhs) const {
+ return ptr_ != rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const U* rhs) const {
+ return ptr_ != rhs;
+ }
+
+ void swap(ScopedComPtr<Interface, interface_id>& r) {
+ Interface* tmp = ptr_;
+ ptr_ = r.ptr_;
+ r.ptr_ = tmp;
+ }
+
+ // static methods
static const IID& iid() {
return *interface_id;
}
+
+ private:
+ Interface* ptr_ = nullptr;
};
+template <typename T, typename U>
+bool operator==(const T* lhs, const ScopedComPtr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const ScopedComPtr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const ScopedComPtr<T>& p) {
+ return out << p.get();
+}
+
+// Helper to make IID_PPV_ARGS work with ScopedComPtr.
+template <typename T>
+void** IID_PPV_ARGS_Helper(base::win::ScopedComPtr<T>* pp) throw() {
+ return pp->ReceiveVoid();
+}
+
} // namespace win
} // namespace base
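
Aside (illustrative, not part of the patch): with the scoped_refptr base class gone, ScopedComPtr now carries its own pointer, AddRef/Release handling, and comparison operators. A minimal usage sketch; CreateFooBar() is a hypothetical factory returning an already AddRef'ed interface, everything else is the API defined above:

#include "base/logging.h"
#include "base/win/scoped_comptr.h"

IUnknown* CreateFooBar();  // Hypothetical; returns an AddRef'ed IUnknown*.

void ScopedComPtrSketch() {
  base::win::ScopedComPtr<IUnknown> ptr;
  ptr.Attach(CreateFooBar());  // Adopts the reference without an extra AddRef.

  base::win::ScopedComPtr<IUnknown> copy = ptr;  // Copy constructor AddRefs.
  DCHECK(copy == ptr);     // Member operator== compares the raw pointers.
  DCHECK(ptr != nullptr);  // nullptr comparisons use the free operators above.

  copy.Release();          // Explicit IUnknown::Release(); note this is not
                           // unique_ptr::release() semantics.
}  // |ptr| drops its reference when it goes out of scope.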