author     Jakub Pawlowski <jpawlowski@google.com>  2017-03-14 10:55:53 -0700
committer  Jakub Pawlowski <jpawlowski@google.com>  2017-12-22 03:06:26 -0800
commit     319afc59a539d6261307aadbdab4d4ee93eaf1ff (patch)
tree       0f21b95ba579352a9829d3868c7365d4f9dde210
parent     8abac493f652a1835c61e538919820aa77658a39 (diff)
download   libchrome-319afc59a539d6261307aadbdab4d4ee93eaf1ff.tar.gz
Uprev the library to r462023 from Chromium, 3rd attempt
This merge was done against r462023, which corresponds to git commit
32eb7c31af9cab6231f0d3d05206072079177605 from Apr 05, 2017.

The first attempt, in commit bf8c17f71511c1e90cd8cccfe71f0852c566bd3b, was
badly squashed, causing automated test failures in system/bt. The next one
broke the Mac build.

Test: manually ran all tests from system/bt that failed on the previous
attempt, plus libchrome_unittest
Change-Id: I60003263418de3078c7be2da9fb1eeaeb786f3d0
-rw-r--r--  Android.bp | 6
-rw-r--r--  base/BUILD.gn | 17
-rw-r--r--  base/allocator/allocator_shim.cc | 6
-rw-r--r--  base/allocator/allocator_shim_internals.h | 21
-rw-r--r--  base/base.isolate | 60
-rw-r--r--  base/callback.h | 90
-rw-r--r--  base/callback_helpers.h | 4
-rw-r--r--  base/callback_unittest.cc | 42
-rw-r--r--  base/containers/mru_cache.h | 2
-rw-r--r--  base/critical_closure.h | 10
-rw-r--r--  base/debug/activity_tracker.cc | 678
-rw-r--r--  base/debug/activity_tracker.h | 263
-rw-r--r--  base/debug/activity_tracker_unittest.cc | 184
-rw-r--r--  base/debug/stack_trace.cc | 19
-rw-r--r--  base/debug/stack_trace.h | 22
-rw-r--r--  base/environment.cc | 2
-rw-r--r--  base/feature_list.cc | 10
-rw-r--r--  base/feature_list.h | 7
-rw-r--r--  base/feature_list_unittest.cc | 3
-rw-r--r--  base/mac/mach_port_broker_unittest.cc | 17
-rw-r--r--  base/memory/shared_memory_mac_unittest.cc | 8
-rw-r--r--  base/memory/shared_memory_unittest.cc | 8
-rw-r--r--  base/memory/singleton_objc.h | 60
-rw-r--r--  base/message_loop/incoming_task_queue.cc | 7
-rw-r--r--  base/message_loop/incoming_task_queue.h | 3
-rw-r--r--  base/message_loop/message_loop.h | 2
-rw-r--r--  base/message_loop/message_loop_task_runner.cc | 12
-rw-r--r--  base/message_loop/message_loop_task_runner.h | 5
-rw-r--r--  base/metrics/persistent_histogram_allocator.cc | 20
-rw-r--r--  base/metrics/persistent_histogram_allocator.h | 9
-rw-r--r--  base/metrics/persistent_memory_allocator.cc | 96
-rw-r--r--  base/metrics/persistent_memory_allocator.h | 47
-rw-r--r--  base/metrics/persistent_memory_allocator_unittest.cc | 16
-rw-r--r--  base/process/process_info_linux.cc | 29
-rw-r--r--  base/process/process_info_mac.cc | 34
-rw-r--r--  base/process/process_info_unittest.cc | 20
-rw-r--r--  base/process/process_metrics.cc | 5
-rw-r--r--  base/process/process_metrics.h | 70
-rw-r--r--  base/process/process_metrics_linux.cc | 64
-rw-r--r--  base/process/process_metrics_mac.cc | 31
-rw-r--r--  base/process/process_metrics_unittest.cc | 40
-rw-r--r--  base/sequenced_task_runner.cc | 7
-rw-r--r--  base/sequenced_task_runner.h | 5
-rw-r--r--  base/strings/string_piece.h | 3
-rw-r--r--  base/strings/string_piece_unittest.cc | 14
-rw-r--r--  base/synchronization/waitable_event.h | 3
-rw-r--r--  base/synchronization/waitable_event_posix.cc | 63
-rw-r--r--  base/synchronization/waitable_event_unittest.cc | 38
-rw-r--r--  base/sys_info.h | 21
-rw-r--r--  base/sys_info_linux.cc | 23
-rw-r--r--  base/sys_info_mac.mm | 19
-rw-r--r--  base/sys_info_posix.cc | 24
-rw-r--r--  base/sys_info_unittest.cc | 122
-rw-r--r--  base/task_runner.cc | 10
-rw-r--r--  base/task_runner.h | 5
-rw-r--r--  base/task_scheduler/task.cc | 22
-rw-r--r--  base/task_scheduler/task.h | 4
-rw-r--r--  base/test/BUILD.gn | 6
-rw-r--r--  base/test/multiprocess_test.cc | 10
-rw-r--r--  base/test/multiprocess_test.h | 27
-rw-r--r--  base/test/multiprocess_test_android.cc | 12
-rw-r--r--  base/test/test_mock_time_task_runner.cc | 22
-rw-r--r--  base/test/test_mock_time_task_runner.h | 5
-rw-r--r--  base/test/test_pending_task.cc | 18
-rw-r--r--  base/test/test_pending_task.h | 2
-rw-r--r--  base/test/test_simple_task_runner.cc | 18
-rw-r--r--  base/test/test_simple_task_runner.h | 5
-rw-r--r--  base/threading/post_task_and_reply_impl.h | 2
-rw-r--r--  base/threading/sequenced_worker_pool.cc | 95
-rw-r--r--  base/threading/sequenced_worker_pool.h | 19
-rw-r--r--  base/threading/worker_pool.cc | 16
-rw-r--r--  base/threading/worker_pool.h | 3
-rw-r--r--  base/threading/worker_pool_posix.cc | 17
-rw-r--r--  base/threading/worker_pool_posix.h | 5
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 5
-rw-r--r--  base/trace_event/malloc_dump_provider.cc | 10
-rw-r--r--  base/trace_event/memory_allocator_dump.cc | 5
-rw-r--r--  base/trace_event/memory_allocator_dump.h | 11
-rw-r--r--  base/trace_event/memory_allocator_dump_unittest.cc | 10
-rw-r--r--  base/trace_event/memory_dump_manager.cc | 178
-rw-r--r--  base/trace_event/memory_dump_manager.h | 42
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc | 37
-rw-r--r--  base/trace_event/memory_dump_request_args.cc | 4
-rw-r--r--  base/trace_event/memory_dump_request_args.h | 29
-rw-r--r--  base/trace_event/memory_dump_scheduler.cc | 196
-rw-r--r--  base/trace_event/memory_dump_scheduler.h | 64
-rw-r--r--  base/trace_event/memory_dump_scheduler_unittest.cc | 101
-rw-r--r--  base/trace_event/memory_infra_background_whitelist.cc | 68
-rw-r--r--  base/trace_event/trace_config.cc | 369
-rw-r--r--  base/trace_event/trace_config.h | 62
-rw-r--r--  base/trace_event/trace_config_category_filter.cc | 298
-rw-r--r--  base/trace_event/trace_config_category_filter.h | 86
-rw-r--r--  base/trace_event/trace_config_unittest.cc | 131
-rw-r--r--  base/trace_event/trace_event_unittest.cc | 37
-rw-r--r--  base/trace_event/trace_log.cc | 14
-rw-r--r--  base/values.cc | 191
-rw-r--r--  base/values.h | 16
-rw-r--r--  base/values_unittest.cc | 160
98 files changed, 3221 insertions, 1617 deletions
diff --git a/Android.bp b/Android.bp
index fdbcd93923..b34182d081 100644
--- a/Android.bp
+++ b/Android.bp
@@ -249,6 +249,7 @@ libchromeCommonSrc = [
"base/trace_event/process_memory_totals.cc",
"base/trace_event/trace_buffer.cc",
"base/trace_event/trace_config.cc",
+ "base/trace_event/trace_config_category_filter.cc",
"base/trace_event/trace_event_argument.cc",
"base/trace_event/trace_event_filter.cc",
"base/trace_event/trace_event_impl.cc",
@@ -272,6 +273,7 @@ libchromeLinuxSrc = [
"base/process/internal_linux.cc",
"base/process/memory_linux.cc",
"base/process/process_handle_linux.cc",
+ "base/process/process_info_linux.cc",
"base/process/process_iterator_linux.cc",
"base/process/process_metrics_linux.cc",
"base/strings/sys_string_conversions_posix.cc",
@@ -300,6 +302,7 @@ libchromeMacSrc = [
"base/process/launch_mac.cc",
"base/process/port_provider_mac.cc",
"base/process/process_handle_mac.cc",
+ "base/process/process_info_mac.cc",
"base/process/process_iterator_mac.cc",
"base/process/process_metrics_mac.cc",
"base/strings/sys_string_conversions_mac.mm",
@@ -491,6 +494,7 @@ cc_test {
"base/pickle_unittest.cc",
"base/posix/file_descriptor_shuffle_unittest.cc",
"base/posix/unix_domain_socket_linux_unittest.cc",
+ "base/process/process_info_unittest.cc",
"base/process/process_metrics_unittest.cc",
"base/profiler/tracked_time_unittest.cc",
"base/rand_util_unittest.cc",
@@ -498,7 +502,7 @@ cc_test {
"base/scoped_generic_unittest.cc",
"base/security_unittest.cc",
"base/sequence_checker_unittest.cc",
- "base/sequence_token_unittest.cc",
+ "base/sequence_token_unittest.cc",
"base/sha1_unittest.cc",
"base/stl_util_unittest.cc",
"base/strings/pattern_unittest.cc",
diff --git a/base/BUILD.gn b/base/BUILD.gn
index f84856de5c..069de984e2 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -276,7 +276,9 @@ component("base") {
"command_line.h",
"compiler_specific.h",
"containers/adapters.h",
+ "containers/flat_map.h",
"containers/flat_set.h",
+ "containers/flat_tree.h",
"containers/hash_tables.h",
"containers/linked_list.h",
"containers/mru_cache.h",
@@ -472,6 +474,7 @@ component("base") {
"mac/scoped_aedesc.h",
"mac/scoped_authorizationref.h",
"mac/scoped_block.h",
+ "mac/scoped_cffiledescriptorref.h",
"mac/scoped_cftyperef.h",
"mac/scoped_dispatch_object.h",
"mac/scoped_ionotificationportref.h",
@@ -854,6 +857,7 @@ component("base") {
"task_scheduler/scheduler_single_thread_task_runner_manager.h",
"task_scheduler/scheduler_worker.cc",
"task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_params.h",
"task_scheduler/scheduler_worker_pool.h",
"task_scheduler/scheduler_worker_pool_impl.cc",
"task_scheduler/scheduler_worker_pool_impl.h",
@@ -1014,6 +1018,8 @@ component("base") {
"trace_event/trace_category.h",
"trace_event/trace_config.cc",
"trace_event/trace_config.h",
+ "trace_event/trace_config_category_filter.cc",
+ "trace_event/trace_config_category_filter.h",
"trace_event/trace_event.h",
"trace_event/trace_event_android.cc",
"trace_event/trace_event_argument.cc",
@@ -1050,6 +1056,7 @@ component("base") {
"version.h",
"vlog.cc",
"vlog.h",
+ "win/current_module.h",
"win/enum_variant.cc",
"win/enum_variant.h",
"win/event_trace_consumer.h",
@@ -1194,6 +1201,7 @@ component("base") {
"process/internal_linux.cc",
"process/memory_linux.cc",
"process/process_handle_linux.cc",
+ "process/process_info_linux.cc",
"process/process_iterator_linux.cc",
"process/process_metrics_linux.cc",
"sys_info_linux.cc",
@@ -1700,6 +1708,7 @@ component("i18n") {
"i18n/time_formatting.h",
"i18n/timezone.cc",
"i18n/timezone.h",
+ "i18n/unicodestring.h",
"i18n/utf8_validator_tables.cc",
"i18n/utf8_validator_tables.h",
]
@@ -1922,7 +1931,10 @@ test("base_unittests") {
"cancelable_callback_unittest.cc",
"command_line_unittest.cc",
"containers/adapters_unittest.cc",
+ "containers/container_test_utils.h",
+ "containers/flat_map_unittest.cc",
"containers/flat_set_unittest.cc",
+ "containers/flat_tree_unittest.cc",
"containers/hash_tables_unittest.cc",
"containers/linked_list_unittest.cc",
"containers/mru_cache_unittest.cc",
@@ -1972,6 +1984,7 @@ test("base_unittests") {
"i18n/time_formatting_unittest.cc",
"i18n/timezone_unittest.cc",
"id_map_unittest.cc",
+ "ios/crb_protocol_observers_unittest.mm",
"ios/device_util_unittest.mm",
"ios/weak_nsobject_unittest.mm",
"json/json_parser_unittest.cc",
@@ -2049,6 +2062,7 @@ test("base_unittests") {
"process/memory_unittest.cc",
"process/memory_unittest_mac.h",
"process/memory_unittest_mac.mm",
+ "process/process_info_unittest.cc",
"process/process_metrics_unittest.cc",
"process/process_metrics_unittest_ios.cc",
"process/process_unittest.cc",
@@ -2154,6 +2168,7 @@ test("base_unittests") {
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_dump_scheduler_unittest.cc",
"trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
"trace_event/trace_category_unittest.cc",
@@ -2524,6 +2539,8 @@ if (is_android) {
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
"android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
"android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildProcessCreationParams.java",
+ "android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
]
# New versions of BuildConfig.java and NativeLibraries.java
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index fbdbdfc8c2..4887142d25 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -23,6 +23,8 @@
#if defined(OS_MACOSX)
#include <malloc/malloc.h>
+
+#include "base/allocator/allocator_interception_mac.h"
#endif
// No calls to malloc / new in this file. They would cause re-entrancy of
@@ -336,9 +338,11 @@ void InitializeAllocatorShim() {
// traversed the shim this will route them to the default malloc zone.
InitializeDefaultDispatchToMacAllocator();
+ MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
+
// This replaces the default malloc zone, causing calls to malloc & friends
// from the codebase to be routed to ShimMalloc() above.
- OverrideMacSymbols();
+ base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
} // namespace allocator
} // namespace base
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
index 82624ee45b..0196f899ae 100644
--- a/base/allocator/allocator_shim_internals.h
+++ b/base/allocator/allocator_shim_internals.h
@@ -18,7 +18,26 @@
#endif
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
-#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+//
+// If an exported symbol is linked into a DSO, it may be preempted by a
+// definition in the main executable. If this happens to an allocator symbol, it
+// will mean that the DSO will use the main executable's allocator. This is
+// normally relatively harmless -- regular allocations should all use the same
+// allocator, but if the DSO tries to hook the allocator it will not see any
+// allocations.
+//
+// However, if LLVM LTO is enabled, the compiler may inline the shim layer
+// symbols into callers. The end result is that allocator calls in DSOs may use
+// either the main executable's allocator or the DSO's allocator, depending on
+// whether the call was inlined. This is arguably a bug in LLVM caused by its
+// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
+// To work around the bug we use noinline to prevent the symbols from being
+// inlined.
+//
+// In the long run we probably want to avoid linking the allocator bits into
+// DSOs altogether. This will save a little space and stop giving DSOs the false
+// impression that they can hook the allocator.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
#endif // __GNUC__
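
The visibility("default") plus noinline combination introduced above is the
heart of this change. A minimal sketch of how such an export macro is
typically applied, assuming a GCC/Clang toolchain (ShimCrMalloc is an
illustrative name, not a symbol from this patch):

#include <cstddef>
#include <cstdlib>

#if defined(__GNUC__)
// Exported so the dynamic linker interposes a single definition everywhere;
// noinline so LTO cannot inline the body into DSO callers and bypass that
// interposition (see llvm.org/PR23501).
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
#else
#define SHIM_ALWAYS_EXPORT
#endif

extern "C" SHIM_ALWAYS_EXPORT void* ShimCrMalloc(std::size_t size) {
  return std::malloc(size);  // Illustrative body; a real shim dispatches on.
}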
diff --git a/base/base.isolate b/base/base.isolate
deleted file mode 100644
index 079d07d810..0000000000
--- a/base/base.isolate
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'includes': [
- # While the target 'base' doesn't depend on ../third_party/icu/icu.gyp
- # itself, virtually all targets using it has to include icu. The only
- # exception is the Windows sandbox (?).
- '../third_party/icu/icu.isolate',
- # Sanitizer-instrumented third-party libraries (if enabled).
- '../third_party/instrumented_libraries/instrumented_libraries.isolate',
- # MSVS runtime libraries.
- '../build/config/win/msvs_dependencies.isolate',
- ],
- 'conditions': [
- ['use_custom_libcxx==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/libc++.so',
- ],
- },
- }],
- ['OS=="mac" and asan==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
- ],
- },
- }],
- ['OS=="win" and asan==1 and component=="shared_library"', {
- 'variables': {
- 'files': [
- # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
- # but since the version (x.y.z) changes, just grab the whole dir.
- '../third_party/llvm-build/Release+Asserts/lib/clang/',
- ],
- },
- }],
- ['OS=="linux" and (asan==1 or lsan==1 or msan==1 or tsan==1)', {
- 'variables': {
- 'files': [
- # For llvm-symbolizer.
- '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
- ],
- },
- }],
- ['asan==1 or lsan==1 or msan==1 or tsan==1', {
- 'variables': {
- 'files': [
- '../tools/valgrind/asan/',
- '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
- ],
- },
- }],
- # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
- ['asan==0 or lsan==0 or msan==0 or tsan==0', {
- 'variables': {},
- }],
- ],
-}
diff --git a/base/callback.h b/base/callback.h
index 40bd5208a8..c91e1a88d3 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -21,71 +21,6 @@ namespace base {
namespace internal {
-template <typename CallbackType>
-struct IsOnceCallback : std::false_type {};
-
-template <typename Signature>
-struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
-
-// RunMixin provides different variants of `Run()` function to `Callback<>`
-// based on the type of callback.
-template <typename CallbackType>
-class RunMixin;
-
-// Specialization for OnceCallback.
-template <typename R, typename... Args>
-class RunMixin<OnceCallback<R(Args...)>> {
- private:
- using CallbackType = OnceCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... /* args */) const & {
- // Note: even though this static_assert will trivially always fail, it
- // cannot be simply replaced with static_assert(false, ...) because:
- // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
- // argument does not evaluate to true.
- // - Per [temp.res]/p8, if no valid specialization can be generated for a
- // template definition, and that template is not instantiated, the
- // template definition is ill-formed, no diagnostic required.
- // These two clauses, taken together, would allow a conforming C++ compiler
- // to immediately reject static_assert(false, ...), even inside an
- // uninstantiated template.
- static_assert(!IsOnceCallback<CallbackType>::value,
- "OnceCallback::Run() may only be invoked on a non-const "
- "rvalue, i.e. std::move(callback).Run().");
- }
-
- R Run(Args... args) && {
- // Move the callback instance into a local variable before the invocation;
- // this ensures the internal state is cleared after the invocation.
- // It's not safe to touch |this| after the invocation, since running the
- // bound function may destroy |this|.
- CallbackType cb = static_cast<CallbackType&&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
-// Specialization for RepeatingCallback.
-template <typename R, typename... Args>
-class RunMixin<RepeatingCallback<R(Args...)>> {
- private:
- using CallbackType = RepeatingCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... args) const {
- const CallbackType& cb = static_cast<const CallbackType&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
template <typename From, typename To>
struct IsCallbackConvertible : std::false_type {};
@@ -100,14 +35,14 @@ template <typename R,
internal::CopyMode copy_mode,
internal::RepeatMode repeat_mode>
class Callback<R(Args...), copy_mode, repeat_mode>
- : public internal::CallbackBase<copy_mode>,
- public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
+ : public internal::CallbackBase<copy_mode> {
public:
static_assert(repeat_mode != internal::RepeatMode::Once ||
copy_mode == internal::CopyMode::MoveOnly,
"OnceCallback must be MoveOnly.");
using RunType = R(Args...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
@@ -135,7 +70,26 @@ class Callback<R(Args...), copy_mode, repeat_mode>
return this->EqualsInternal(other);
}
- friend class internal::RunMixin<Callback>;
+ R Run(Args... args) const & {
+ static_assert(repeat_mode == internal::RepeatMode::Repeating,
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
+ return f(this->bind_state_.get(), std::forward<Args>(args)...);
+ }
+
+ R Run(Args... args) && {
+ // Move the callback instance into a local variable before the invocation;
+ // this ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ Callback cb = std::move(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
};
} // namespace base
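
The ref-qualified Run() overloads folded into Callback above are what enforce
the once/repeating contract at call sites. A short usage sketch, assuming the
Bind()/BindOnce() API at this revision (it mirrors the NullAfterMoveRun test
added below):

base::OnceClosure once = base::BindOnce([] {});
std::move(once).Run();  // OK: the rvalue overload runs and clears the state.
// once.Run();          // Does not compile: the lvalue Run() static_asserts.

base::Closure repeating = base::Bind([] {});
repeating.Run();  // OK: const-lvalue overload; the callback stays runnable.
repeating.Run();  // Still valid.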
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index ec3d6cbf16..6e0aee8882 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -8,8 +8,8 @@
// generated). Instead, consider adding methods here.
//
// ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
-// copy) after the original callback is Reset(). This can be handy if Run()
-// reads/writes the variable holding the Callback.
+// move or copy) after the original callback is Reset(). This can be handy if
+// Run() reads/writes the variable holding the Callback.
#ifndef BASE_CALLBACK_HELPERS_H_
#define BASE_CALLBACK_HELPERS_H_
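
A brief usage sketch of the ResetAndReturn() helper described in the comment
above; the surrounding class and member name are illustrative, not from this
patch:

#include "base/callback.h"
#include "base/callback_helpers.h"

class Downloader {
 public:
  void OnComplete() {
    // |done_callback_| is already null by the time Run() executes, so
    // re-entrant code that reads or reassigns the member sees it reset.
    base::ResetAndReturn(&done_callback_).Run();
  }

 private:
  base::Closure done_callback_;
};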
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index a41736946a..f76adbcdd2 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -21,24 +21,13 @@ void NopInvokeFunc() {}
// based on a type we declared in the anonymous namespace above to remove any
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
-struct FakeBindState1 : internal::BindStateBase {
- FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
- private:
- ~FakeBindState1() {}
- static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState1*>(self);
- }
- static bool IsCancelled(const internal::BindStateBase*) {
- return false;
- }
-};
+struct FakeBindState : internal::BindStateBase {
+ FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
-struct FakeBindState2 : internal::BindStateBase {
- FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
- ~FakeBindState2() {}
+ ~FakeBindState() {}
static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState2*>(self);
+ delete static_cast<const FakeBindState*>(self);
}
static bool IsCancelled(const internal::BindStateBase*) {
return false;
@@ -50,9 +39,7 @@ namespace {
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1()),
- callback_b_(new FakeBindState2()) {
- }
+ : callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
~CallbackTest() override {}
@@ -94,7 +81,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1());
+ Callback<void()> callback_c(new FakeBindState());
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -148,6 +135,23 @@ TEST_F(CallbackTest, ResetAndReturn) {
ASSERT_TRUE(tfr.cb_already_run);
}
+TEST_F(CallbackTest, NullAfterMoveRun) {
+ Closure cb = Bind([] {});
+ ASSERT_TRUE(cb);
+ std::move(cb).Run();
+ ASSERT_FALSE(cb);
+
+ const Closure cb2 = Bind([] {});
+ ASSERT_TRUE(cb2);
+ std::move(cb2).Run();
+ ASSERT_TRUE(cb2);
+
+ OnceClosure cb3 = BindOnce([] {});
+ ASSERT_TRUE(cb3);
+ std::move(cb3).Run();
+ ASSERT_FALSE(cb3);
+}
+
class CallbackOwner : public base::RefCounted<CallbackOwner> {
public:
explicit CallbackOwner(bool* deleted) {
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 4005489d4b..7c684a9690 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -105,8 +105,6 @@ class MRUCacheBase {
// Retrieves the contents of the given key, or end() if not found. This method
// has the side effect of moving the requested item to the front of the
// recency list.
- //
- // TODO(brettw) We may want a const version of this function in the future.
iterator Get(const KeyType& key) {
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter == index_.end())
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 1b10cde7ce..35ce2b5c46 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -5,6 +5,8 @@
#ifndef BASE_CRITICAL_CLOSURE_H_
#define BASE_CRITICAL_CLOSURE_H_
+#include <utility>
+
#include "base/callback.h"
#include "base/macros.h"
#include "build/build_config.h"
@@ -27,7 +29,7 @@ bool IsMultiTaskingSupported();
// |ios::ScopedCriticalAction|.
class CriticalClosure {
public:
- explicit CriticalClosure(const Closure& closure);
+ explicit CriticalClosure(Closure closure);
~CriticalClosure();
void Run();
@@ -55,13 +57,13 @@ class CriticalClosure {
// background running time, |MakeCriticalClosure| should be applied on them
// before posting.
#if defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline Closure MakeCriticalClosure(Closure closure) {
DCHECK(internal::IsMultiTaskingSupported());
return base::Bind(&internal::CriticalClosure::Run,
- Owned(new internal::CriticalClosure(closure)));
+ Owned(new internal::CriticalClosure(std::move(closure))));
}
#else // defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline Closure MakeCriticalClosure(Closure closure) {
// No-op for platforms where the application does not need to acquire
// background time for closures to finish when it goes into the background.
return closure;
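
Switching MakeCriticalClosure() from const Closure& to a by-value parameter
enables the usual sink-argument idiom: callers that are done with their
closure pass an rvalue and pay only a move. A minimal illustration (Holder is
a hypothetical type, not from the patch):

#include <utility>

#include "base/bind.h"
#include "base/callback.h"

// Hypothetical sink: accept the closure by value, then move it into storage.
struct Holder {
  explicit Holder(base::Closure closure) : closure_(std::move(closure)) {}
  base::Closure closure_;
};

void Example() {
  base::Closure cb = base::Bind([] {});
  Holder copied(cb);            // Lvalue argument: one copy; |cb| stays set.
  Holder moved(std::move(cb));  // Rvalue argument: one move; |cb| is now null.
}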
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index 40e9b9537c..6b492f0e15 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -23,6 +23,7 @@
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
namespace base {
@@ -30,18 +31,13 @@ namespace debug {
namespace {
-// A number that identifies the memory as having been initialized. It's
-// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
-// A version number is added on so that major structure changes won't try to
-// read an older version (since the cookie won't match).
-const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
-
// The minimum depth a stack should support.
const int kMinStackDepth = 2;
// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1 << 10; // 1 KiB
+const size_t kProcessDataSize = 4 << 10; // 4 KiB
const size_t kGlobalDataSize = 16 << 10; // 16 KiB
const size_t kMaxUserDataNameLength =
static_cast<size_t>(std::numeric_limits<uint8_t>::max());
@@ -49,6 +45,13 @@ const size_t kMaxUserDataNameLength =
// A constant used to indicate that module information is changing.
const uint32_t kModuleInformationChanging = 0x80000000;
+// The key used to record process information.
+const char kProcessPhaseDataKey[] = "process-phase";
+
+// An atomically incrementing number, used to check for recreations of objects
+// in the same memory space.
+StaticAtomicSequenceNumber g_next_id;
+
union ThreadRef {
int64_t as_id;
#if defined(OS_WIN)
@@ -64,6 +67,33 @@ union ThreadRef {
#endif
};
+// Get the next non-zero identifier. It is only unique within a process.
+uint32_t GetNextDataId() {
+ uint32_t id;
+ while ((id = g_next_id.GetNext()) == 0)
+ ;
+ return id;
+}
+
+// Finds and reuses a specific allocation or creates a new one.
+PersistentMemoryAllocator::Reference AllocateFrom(
+ PersistentMemoryAllocator* allocator,
+ uint32_t from_type,
+ size_t size,
+ uint32_t to_type) {
+ PersistentMemoryAllocator::Iterator iter(allocator);
+ PersistentMemoryAllocator::Reference ref;
+ while ((ref = iter.GetNextOfType(from_type)) != 0) {
+ DCHECK_LE(size, allocator->GetAllocSize(ref));
+ // This can fail if another thread has just taken it. It is assumed that
+ // the memory is cleared during the "free" operation.
+ if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
+ return ref;
+ }
+
+ return allocator->Allocate(size, to_type);
+}
+
// Determines the previous aligned index.
size_t RoundDownToAlignment(size_t index, size_t alignment) {
return index & (0 - alignment);
@@ -74,8 +104,43 @@ size_t RoundUpToAlignment(size_t index, size_t alignment) {
return (index + (alignment - 1)) & (0 - alignment);
}
+// Converts "tick" timing into wall time.
+Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
+ return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
+}
+
} // namespace
+OwningProcess::OwningProcess() {}
+OwningProcess::~OwningProcess() {}
+
+void OwningProcess::Release_Initialize() {
+ uint32_t old_id = data_id.load(std::memory_order_acquire);
+ DCHECK_EQ(0U, old_id);
+ process_id = GetCurrentProcId();
+ create_stamp = Time::Now().ToInternalValue();
+ data_id.store(GetNextDataId(), std::memory_order_release);
+}
+
+void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
+ DCHECK_NE(0U, data_id);
+ process_id = pid;
+ create_stamp = stamp;
+}
+
+// static
+bool OwningProcess::GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp) {
+ const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
+ uint32_t id = info->data_id.load(std::memory_order_acquire);
+ if (id == 0)
+ return false;
+
+ *out_id = static_cast<ProcessId>(info->process_id);
+ *out_stamp = info->create_stamp;
+ return id == info->data_id.load(std::memory_order_seq_cst);
+}
// It doesn't matter what is contained in this (though it will be all zeros)
// as only the address of it is important.
@@ -246,32 +311,33 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const {
return ref_value_;
}
+// These are required because std::atomic is (currently) not a POD type and
+// thus clang requires explicit out-of-line constructors and destructors even
+// when they do nothing.
ActivityUserData::ValueInfo::ValueInfo() {}
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
ActivityUserData::ValueInfo::~ValueInfo() {}
+ActivityUserData::MemoryHeader::MemoryHeader() {}
+ActivityUserData::MemoryHeader::~MemoryHeader() {}
+ActivityUserData::FieldHeader::FieldHeader() {}
+ActivityUserData::FieldHeader::~FieldHeader() {}
-StaticAtomicSequenceNumber ActivityUserData::next_id_;
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {}
ActivityUserData::ActivityUserData(void* memory, size_t size)
: memory_(reinterpret_cast<char*>(memory)),
available_(RoundDownToAlignment(size, kMemoryAlignment)),
- id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+ header_(reinterpret_cast<MemoryHeader*>(memory)) {
// It's possible that no user data is being stored.
if (!memory_)
return;
- DCHECK_LT(kMemoryAlignment, available_);
- if (id_->load(std::memory_order_relaxed) == 0) {
- // Generate a new ID and store it in the first 32-bit word of memory_.
- // |id_| must be non-zero for non-sink instances.
- uint32_t id;
- while ((id = next_id_.GetNext()) == 0)
- ;
- id_->store(id, std::memory_order_relaxed);
- DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
- }
- memory_ += kMemoryAlignment;
- available_ -= kMemoryAlignment;
+ static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
+ DCHECK_LT(sizeof(MemoryHeader), available_);
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
+ header_->owner.Release_Initialize();
+ memory_ += sizeof(MemoryHeader);
+ available_ -= sizeof(MemoryHeader);
// If there is already data present, load that. This allows the same class
// to be used for analysis through snapshots.
@@ -280,6 +346,75 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
ActivityUserData::~ActivityUserData() {}
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+ DCHECK(output_snapshot->empty());
+
+ // Find any new data that may have been added by an active instance of this
+ // class that is adding records.
+ ImportExistingData();
+
+ for (const auto& entry : values_) {
+ TypedValue value;
+ value.type_ = entry.second.type;
+ DCHECK_GE(entry.second.extent,
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+
+ switch (entry.second.type) {
+ case RAW_VALUE:
+ case STRING_VALUE:
+ value.long_value_ =
+ std::string(reinterpret_cast<char*>(entry.second.memory),
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+ break;
+ case RAW_VALUE_REFERENCE:
+ case STRING_VALUE_REFERENCE: {
+ ReferenceRecord* ref =
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+ value.ref_value_ = StringPiece(
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+ static_cast<size_t>(ref->size));
+ } break;
+ case BOOL_VALUE:
+ case CHAR_VALUE:
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+ break;
+ case SIGNED_VALUE:
+ case UNSIGNED_VALUE:
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+ break;
+ case END_OF_VALUES: // Included for completeness purposes.
+ NOTREACHED();
+ }
+ auto inserted = output_snapshot->insert(
+ std::make_pair(entry.second.name.as_string(), std::move(value)));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ }
+
+ return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() const {
+ // The |memory_| pointer advances as elements are written but the |header_|
+ // value is always at the start of the block so just return that.
+ return header_;
+}
+
+void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
+ int64_t stamp) {
+ if (!header_)
+ return;
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ActivityUserData::GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp) {
+ const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
void ActivityUserData::Set(StringPiece name,
ValueType type,
const void* memory,
@@ -308,13 +443,13 @@ void ActivityUserData::Set(StringPiece name,
// following field will be aligned properly.
size_t name_size = name.length();
size_t name_extent =
- RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
- sizeof(Header);
+ RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
+ sizeof(FieldHeader);
size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
// The "base size" is the size of the header and (padded) string key. Stop
// now if there's not room enough for even this.
- size_t base_size = sizeof(Header) + name_extent;
+ size_t base_size = sizeof(FieldHeader) + name_extent;
if (base_size > available_)
return;
@@ -338,7 +473,7 @@ void ActivityUserData::Set(StringPiece name,
}
// Allocate a chunk of memory.
- Header* header = reinterpret_cast<Header*>(memory_);
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
memory_ += full_size;
available_ -= full_size;
@@ -348,9 +483,9 @@ void ActivityUserData::Set(StringPiece name,
DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
header->name_size = static_cast<uint8_t>(name_size);
header->record_size = full_size;
- char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
void* value_memory =
- reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+ reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
memcpy(name_memory, name.data(), name_size);
header->type.store(type, std::memory_order_release);
@@ -364,7 +499,7 @@ void ActivityUserData::Set(StringPiece name,
info->name = persistent_name;
info->memory = value_memory;
info->size_ptr = &header->value_size;
- info->extent = full_size - sizeof(Header) - name_extent;
+ info->extent = full_size - sizeof(FieldHeader) - name_extent;
info->type = type;
}
@@ -389,8 +524,8 @@ void ActivityUserData::SetReference(StringPiece name,
}
void ActivityUserData::ImportExistingData() const {
- while (available_ > sizeof(Header)) {
- Header* header = reinterpret_cast<Header*>(memory_);
+ while (available_ > sizeof(FieldHeader)) {
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
ValueType type =
static_cast<ValueType>(header->type.load(std::memory_order_acquire));
if (type == END_OF_VALUES)
@@ -398,8 +533,8 @@ void ActivityUserData::ImportExistingData() const {
if (header->record_size > available_)
return;
- size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
- kMemoryAlignment);
+ size_t value_offset = RoundUpToAlignment(
+ sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
if (header->record_size == value_offset &&
header->value_size.load(std::memory_order_relaxed) == 1) {
value_offset -= 1;
@@ -408,7 +543,7 @@ void ActivityUserData::ImportExistingData() const {
return;
ValueInfo info;
- info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+ info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
info.type = type;
info.memory = memory_ + value_offset;
info.size_ptr = &header->value_size;
@@ -422,60 +557,6 @@ void ActivityUserData::ImportExistingData() const {
}
}
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
- DCHECK(output_snapshot);
- DCHECK(output_snapshot->empty());
-
- // Find any new data that may have been added by an active instance of this
- // class that is adding records.
- ImportExistingData();
-
- for (const auto& entry : values_) {
- TypedValue value;
- value.type_ = entry.second.type;
- DCHECK_GE(entry.second.extent,
- entry.second.size_ptr->load(std::memory_order_relaxed));
-
- switch (entry.second.type) {
- case RAW_VALUE:
- case STRING_VALUE:
- value.long_value_ =
- std::string(reinterpret_cast<char*>(entry.second.memory),
- entry.second.size_ptr->load(std::memory_order_relaxed));
- break;
- case RAW_VALUE_REFERENCE:
- case STRING_VALUE_REFERENCE: {
- ReferenceRecord* ref =
- reinterpret_cast<ReferenceRecord*>(entry.second.memory);
- value.ref_value_ = StringPiece(
- reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
- static_cast<size_t>(ref->size));
- } break;
- case BOOL_VALUE:
- case CHAR_VALUE:
- value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
- break;
- case SIGNED_VALUE:
- case UNSIGNED_VALUE:
- value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
- break;
- case END_OF_VALUES: // Included for completeness purposes.
- NOTREACHED();
- }
- auto inserted = output_snapshot->insert(
- std::make_pair(entry.second.name.as_string(), std::move(value)));
- DCHECK(inserted.second); // True if inserted, false if existed.
- }
-
- return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() {
- // The |memory_| pointer advances as elements are written but the |id_|
- // value is always at the start of the block so just return that.
- return id_;
-}
-
// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
// so there is no issue moving between 32 and 64-bit builds.
@@ -485,27 +566,16 @@ struct ThreadActivityTracker::Header {
GlobalActivityTracker::kTypeIdActivityTracker;
// Expected size for 32/64-bit check.
- static constexpr size_t kExpectedInstanceSize = 80;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
+ 72;
- // This unique number indicates a valid initialization of the memory.
- std::atomic<uint32_t> cookie;
+ // This information uniquely identifies a process.
+ OwningProcess owner;
- // The number of Activity slots (spaces that can hold an Activity) that
- // immediately follow this structure in memory.
- uint32_t stack_slots;
-
- // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
- // These identifiers are not guaranteed to mean anything but are unique, in
- // combination, among all active trackers. It would be nice to always have
- // the process_id be a 64-bit value but the necessity of having it atomic
- // (for the memory barriers it provides) limits it to the natural word size
- // of the machine.
-#ifdef ARCH_CPU_64_BITS
- std::atomic<int64_t> process_id;
-#else
- std::atomic<int32_t> process_id;
- int32_t process_id_padding;
-#endif
+ // The thread-id (thread_ref.as_id) to which this data belongs. This number
+ // is not guaranteed to mean anything but combined with the process-id from
+ // OwningProcess is unique among all active trackers.
ThreadRef thread_ref;
// The start-time and start-ticks when the data was created. Each activity
@@ -514,12 +584,19 @@ struct ThreadActivityTracker::Header {
int64_t start_time;
int64_t start_ticks;
+ // The number of Activity slots (spaces that can hold an Activity) that
+ // immediately follow this structure in memory.
+ uint32_t stack_slots;
+
+ // Some padding to keep everything 64-bit aligned.
+ uint32_t padding;
+
// The current depth of the stack. This may be greater than the number of
// slots. If the depth exceeds the number of slots, the newest entries
// won't be recorded.
std::atomic<uint32_t> current_depth;
- // A memory location used to indicate if changes have been made to the stack
+ // A memory location used to indicate if changes have been made to the data
// that would invalidate an in-progress read of its contents. The active
// tracker will zero the value whenever something gets popped from the
// stack. A monitoring tracker can write a non-zero value here, copy the
@@ -527,7 +604,11 @@ struct ThreadActivityTracker::Header {
// the contents didn't change while being copied. This can handle concurrent
// snapshot operations only if each snapshot writes a different bit (which
// is not the current implementation so no parallel snapshots allowed).
- std::atomic<uint32_t> stack_unchanged;
+ std::atomic<uint32_t> data_unchanged;
+
+ // The last "exception" activity. This can't be stored on the stack because
+ // that could get popped as things unwind.
+ Activity last_exception;
// The name of the thread (up to a maximum length). Dynamic-length names
// are not practical since the memory has to come from the same persistent
@@ -596,15 +677,16 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
"ActivityData.data is not 64-bit aligned");
// Provided memory should either be completely initialized or all zeros.
- if (header_->cookie.load(std::memory_order_relaxed) == 0) {
+ if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
// This is a new file. Double-check other fields and then initialize.
- DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header_->owner.process_id);
+ DCHECK_EQ(0, header_->owner.create_stamp);
DCHECK_EQ(0, header_->thread_ref.as_id);
DCHECK_EQ(0, header_->start_time);
DCHECK_EQ(0, header_->start_ticks);
DCHECK_EQ(0U, header_->stack_slots);
DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
- DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
DCHECK_EQ(0, stack_[0].time_internal);
DCHECK_EQ(0U, stack_[0].origin_address);
DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -616,7 +698,6 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
header_->thread_ref.as_handle =
PlatformThread::CurrentHandle().platform_handle();
#endif
- header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
header_->start_time = base::Time::Now().ToInternalValue();
header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
@@ -626,7 +707,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
// This is done last so as to guarantee that everything above is "released"
// by the time this value gets written.
- header_->cookie.store(kHeaderCookie, std::memory_order_release);
+ header_->owner.Release_Initialize();
valid_ = true;
DCHECK(IsValid());
@@ -719,40 +800,28 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
- // have written a non-zero value into |stack_unchanged|; clearing it here
+ // have written a non-zero value into |data_unchanged|; clearing it here
// will let that thread detect that something did change. This needs to
// happen after the atomic |depth| operation above so a "release" store
// is required.
- header_->stack_unchanged.store(0, std::memory_order_release);
+ header_->data_unchanged.store(0, std::memory_order_release);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
ActivityId id,
ActivityTrackerMemoryAllocator* allocator) {
- // User-data is only stored for activities actually held in the stack.
- if (id < stack_slots_) {
- // Don't allow user data for lock acquisition as recursion may occur.
- if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
- NOTREACHED();
- return MakeUnique<ActivityUserData>(nullptr, 0);
- }
-
- // Get (or reuse) a block of memory and create a real UserData object
- // on it.
- PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
- void* memory =
- allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
- if (memory) {
- std::unique_ptr<ActivityUserData> user_data =
- MakeUnique<ActivityUserData>(memory, kUserDataSize);
- stack_[id].user_data_ref = ref;
- stack_[id].user_data_id = user_data->id();
- return user_data;
- }
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>();
}
- // Return a dummy object that will still accept (but ignore) Set() calls.
- return MakeUnique<ActivityUserData>(nullptr, 0);
+ // User-data is only stored for activities actually held in the stack.
+ if (id >= stack_slots_)
+ return MakeUnique<ActivityUserData>();
+
+ // Create and return a real UserData object.
+ return CreateUserDataForActivity(&stack_[id], allocator);
}
bool ThreadActivityTracker::HasUserData(ActivityId id) {
@@ -770,12 +839,27 @@ void ThreadActivityTracker::ReleaseUserData(
}
}
+void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Fill the reusable exception activity.
+ Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
+ data);
+
+ // The data has changed meaning that some other thread trying to copy the
+ // contents for reporting purposes could get bad data.
+ header_->data_unchanged.store(0, std::memory_order_relaxed);
+}
+
bool ThreadActivityTracker::IsValid() const {
- if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
- header_->process_id.load(std::memory_order_relaxed) == 0 ||
- header_->thread_ref.as_id == 0 ||
- header_->start_time == 0 ||
- header_->start_ticks == 0 ||
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
+ header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
+ header_->start_time == 0 || header_->start_ticks == 0 ||
header_->stack_slots != stack_slots_ ||
header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
return false;
@@ -806,20 +890,20 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->activity_stack.reserve(stack_slots_);
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
- // Remember the process and thread IDs to ensure they aren't replaced
- // during the snapshot operation. Use "acquire" to ensure that all the
- // non-atomic fields of the structure are valid (at least at the current
- // moment in time).
- const int64_t starting_process_id =
- header_->process_id.load(std::memory_order_acquire);
+ // Remember the data IDs to ensure nothing is replaced during the snapshot
+ // operation. Use "acquire" so that all the non-atomic fields of the
+ // structure are valid (at least at the current moment in time).
+ const uint32_t starting_id =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
- // Write a non-zero value to |stack_unchanged| so it's possible to detect
+ // Write a non-zero value to |data_unchanged| so it's possible to detect
// at the end that nothing has changed since copying the data began. A
// "cst" operation is required to ensure it occurs before everything else.
// Using "cst" memory ordering is relatively expensive but this is only
// done during analysis so doesn't directly affect the worker threads.
- header_->stack_unchanged.store(1, std::memory_order_seq_cst);
+ header_->data_unchanged.store(1, std::memory_order_seq_cst);
// Fetching the current depth also "acquires" the contents of the stack.
depth = header_->current_depth.load(std::memory_order_acquire);
@@ -831,29 +915,25 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
count * sizeof(Activity));
}
+ // Capture the last exception.
+ memcpy(&output_snapshot->last_exception, &header_->last_exception,
+ sizeof(Activity));
+
+ // TODO(bcwhite): Snapshot other things here.
+
// Retry if something changed during the copy. A "cst" operation ensures
// it must happen after all the above operations.
- if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
+ if (!header_->data_unchanged.load(std::memory_order_seq_cst))
continue;
// Stack copied. Record its full depth.
output_snapshot->activity_stack_depth = depth;
- // TODO(bcwhite): Snapshot other things here.
-
- // Get the general thread information. Loading of "process_id" is guaranteed
- // to be last so that it's possible to detect below if any content has
- // changed while reading it. It's technically possible for a thread to end,
- // have its data cleared, a new thread get created with the same IDs, and
- // it perform an action which starts tracking all in the time since the
- // ID reads above but the chance is so unlikely that it's not worth the
- // effort and complexity of protecting against it (perhaps with an
- // "unchanged" field like is done for the stack).
+ // Get the general thread information.
output_snapshot->thread_name =
std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
output_snapshot->thread_id = header_->thread_ref.as_id;
- output_snapshot->process_id =
- header_->process_id.load(std::memory_order_seq_cst);
+ output_snapshot->process_id = header_->owner.process_id;
// All characters of the thread-name buffer were copied so as to not break
// if the trailing NUL were missing. Now limit the length if the actual
@@ -861,9 +941,10 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->thread_name.resize(
strlen(output_snapshot->thread_name.c_str()));
- // If the process or thread ID has changed then the tracker has exited and
- // the memory reused by a new one. Try again.
- if (output_snapshot->process_id != starting_process_id ||
+ // If the data ID has changed then the tracker has exited and the memory
+ // reused by a new one. Try again.
+ if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
+ output_snapshot->process_id != starting_process_id ||
output_snapshot->thread_id != starting_thread_id) {
continue;
}
@@ -879,10 +960,14 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
const int64_t start_ticks = header_->start_ticks;
for (Activity& activity : output_snapshot->activity_stack) {
activity.time_internal =
- (start_time +
- TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
+ WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
.ToInternalValue();
}
+ output_snapshot->last_exception.time_internal =
+ WallTimeFromTickTime(start_ticks,
+ output_snapshot->last_exception.time_internal,
+ start_time)
+ .ToInternalValue();
// Success!
return true;
@@ -892,11 +977,48 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
return false;
}
+const void* ThreadActivityTracker::GetBaseAddress() {
+ return header_;
+}
+
+void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
+ int64_t stamp) {
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp) {
+ const Header* header = reinterpret_cast<const Header*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}
+std::unique_ptr<ActivityUserData>
+ThreadActivityTracker::CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator) {
+ DCHECK_EQ(0U, activity->user_data_ref);
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+ void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ activity->user_data_ref = ref;
+ activity->user_data_id = user_data->id();
+ return user_data;
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>();
+}
+
// The instantiation of the GlobalActivityTracker object.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
@@ -979,6 +1101,9 @@ bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
pickle_size = pickler.size();
changes.store(0, std::memory_order_relaxed);
+ // Initialize the owner info.
+ owner.Release_Initialize();
+
// Now set those fields that can change.
return UpdateFrom(info);
}
@@ -1047,21 +1172,22 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
user_data_ =
tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
} else {
- user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ user_data_ = MakeUnique<ActivityUserData>();
}
}
return *user_data_;
}
-GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
+GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
+ size_t size)
: ActivityUserData(memory, size) {}
-GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
-void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
- ValueType type,
- const void* memory,
- size_t size) {
+void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
AutoLock lock(data_lock_);
ActivityUserData::Set(name, type, memory, size);
}
@@ -1186,6 +1312,174 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
delete tracker;
}
+void GlobalActivityTracker::SetBackgroundTaskRunner(
+ const scoped_refptr<TaskRunner>& runner) {
+ AutoLock lock(global_tracker_lock_);
+ background_task_runner_ = runner;
+}
+
+void GlobalActivityTracker::SetProcessExitCallback(
+ ProcessExitCallback callback) {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback_ = callback;
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ DCHECK_NE(GetCurrentProcId(), process_id);
+
+ base::AutoLock lock(global_tracker_lock_);
+ if (base::ContainsKey(known_processes_, process_id)) {
+ // TODO(bcwhite): Measure this in UMA.
+ NOTREACHED() << "Process #" << process_id
+ << " was previously recorded as \"launched\""
+ << " with no corresponding exit.";
+ known_processes_.erase(process_id);
+ }
+
+#if defined(OS_WIN)
+ known_processes_.insert(std::make_pair(process_id, UTF16ToUTF8(cmd)));
+#else
+ known_processes_.insert(std::make_pair(process_id, cmd));
+#endif
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ if (exe.find(FILE_PATH_LITERAL(" "))) {
+ RecordProcessLaunch(process_id,
+ FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
+ FILE_PATH_LITERAL("\" ") + args);
+ } else {
+ RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
+ }
+}
+
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
+ int exit_code) {
+ DCHECK_NE(GetCurrentProcId(), process_id);
+
+ scoped_refptr<TaskRunner> task_runner;
+ std::string command_line;
+ {
+ base::AutoLock lock(global_tracker_lock_);
+ task_runner = background_task_runner_;
+ auto found = known_processes_.find(process_id);
+ if (found != known_processes_.end()) {
+ command_line = std::move(found->second);
+ known_processes_.erase(found);
+ } else {
+ DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
+ }
+ }
+
+ // Use the current time to differentiate the process that just exited
+ // from any that might be created in the future with the same ID.
+ int64_t now_stamp = Time::Now().ToInternalValue();
+
+ // The persistent allocator is thread-safe so run the iteration and
+ // adjustments on a worker thread if one was provided.
+ if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
+ task_runner->PostTask(
+ FROM_HERE,
+ Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
+ process_id, now_stamp, exit_code, Passed(&command_line)));
+ return;
+ }
+
+ CleanupAfterProcess(process_id, now_stamp, exit_code,
+ std::move(command_line));
+}
+
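RecordProcessExit() above snapshots the recorded command line under the lock and then runs CleanupAfterProcess() either inline or, when one has been supplied, on the background runner. A sketch of opting into background cleanup, assuming the caller already owns a suitable TaskRunner:

  #include "base/debug/activity_tracker.h"
  #include "base/memory/ref_counted.h"
  #include "base/task_runner.h"

  void UseBackgroundCleanup(scoped_refptr<base::TaskRunner> runner) {
    base::debug::GlobalActivityTracker* tracker =
        base::debug::GlobalActivityTracker::Get();
    if (tracker)
      tracker->SetBackgroundTaskRunner(runner);  // Later cleanups post here.
  }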
+void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
+ process_data().SetInt(kProcessPhaseDataKey, phase);
+}
+
+void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line) {
+  // The process may not have exited cleanly so it's necessary to go through
+ // all the data structures it may have allocated in the persistent memory
+ // segment and mark them as "released". This will allow them to be reused
+ // later on.
+
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ PersistentMemoryAllocator::Reference ref;
+
+ ProcessExitCallback process_exit_callback;
+ {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback = process_exit_callback_;
+ }
+ if (process_exit_callback) {
+    // Find the process's user-data record so the process phase can be passed
+ // to the callback.
+ ActivityUserData::Snapshot process_data_snapshot;
+ while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
+ ProcessId found_id;
+ int64_t create_stamp;
+ if (ActivityUserData::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ if (found_id == process_id && create_stamp < exit_stamp) {
+ const ActivityUserData process_data(const_cast<void*>(memory),
+ allocator_->GetAllocSize(ref));
+ process_data.CreateSnapshot(&process_data_snapshot);
+ break; // No need to look for any others.
+ }
+ }
+ }
+ iter.Reset(); // So it starts anew when used below.
+
+    // Record the process's phase at exit so the callback doesn't need to go
+ // searching based on a private key value.
+ ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
+ auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
+ if (phase != process_data_snapshot.end())
+ exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
+
+ // Perform the callback.
+ process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
+ std::move(command_line),
+ std::move(process_data_snapshot));
+ }
+
+ // Find all allocations associated with the exited process and free them.
+ uint32_t type;
+ while ((ref = iter.GetNext(&type)) != 0) {
+ switch (type) {
+ case kTypeIdActivityTracker:
+ case kTypeIdUserDataRecord:
+ case kTypeIdProcessDataRecord:
+ case ModuleInfoRecord::kPersistentTypeId: {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, type, PersistentMemoryAllocator::kSizeAny);
+ ProcessId found_id;
+ int64_t create_stamp;
+
+ // By convention, the OwningProcess structure is always the first
+ // field of the structure so there's no need to handle all the
+ // cases separately.
+ if (OwningProcess::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ // Only change the type to be "free" if the process ID matches and
+ // the creation time is before the exit time (so PID re-use doesn't
+ // cause the erasure of something that is in-use). Memory is cleared
+ // here, rather than when it's needed, so as to limit the impact at
+ // that critical time.
+ if (found_id == process_id && create_stamp < exit_stamp)
+ allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
+ }
+ } break;
+ }
+ }
+}
+
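The switch above leans on the convention that a record's "free" type id is the bitwise complement of its live id (kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker and so on in the header), which is why a single ChangeType(ref, ~type, type, /*clear=*/true) call retires all four record kinds. The same idiom in isolation, assuming |allocator| and |ref| refer to a live user-data record:

  uint32_t type = base::debug::GlobalActivityTracker::kTypeIdUserDataRecord;
  // Retire the record: flip it to the complement type and zero the contents.
  allocator->ChangeType(ref, ~type, type, /*clear=*/true);
  DCHECK_EQ(base::debug::GlobalActivityTracker::kTypeIdUserDataRecordFree,
            allocator->GetType(ref));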
void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
// Allocate at least one extra byte so the string is NUL terminated. All
// memory returned by the allocator is guaranteed to be zeroed.
@@ -1249,12 +1543,20 @@ GlobalActivityTracker::GlobalActivityTracker(
kTypeIdUserDataRecordFree,
kUserDataSize,
kCachedUserDataMemories,
- /*make_iterable=*/false),
+ /*make_iterable=*/true),
+ process_data_(allocator_->GetAsArray<char>(
+ AllocateFrom(allocator_.get(),
+ kTypeIdProcessDataRecordFree,
+ kProcessDataSize,
+ kTypeIdProcessDataRecord),
+ kTypeIdProcessDataRecord,
+ kProcessDataSize),
+ kProcessDataSize),
global_data_(
allocator_->GetAsArray<char>(
allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
kTypeIdGlobalDataRecord,
- PersistentMemoryAllocator::kSizeAny),
+ kGlobalDataSize),
kGlobalDataSize) {
// Ensure the passed memory is valid and empty (iterator finds nothing).
uint32_t type;
@@ -1264,10 +1566,15 @@ GlobalActivityTracker::GlobalActivityTracker(
DCHECK(!g_tracker_);
subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
- // The global records must be iterable in order to be found by an analyzer.
+ // The data records must be iterable in order to be found by an analyzer.
+ allocator_->MakeIterable(allocator_->GetAsReference(
+ process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
allocator_->MakeIterable(allocator_->GetAsReference(
global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
+ // Note that this process has launched.
+ SetProcessPhase(PROCESS_LAUNCHED);
+
// Fetch and record all activated field trials.
FieldTrial::ActiveGroups active_groups;
FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -1297,6 +1604,23 @@ void GlobalActivityTracker::ReturnTrackerMemory(
thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}
+void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
+ const void* origin,
+ uint32_t code) {
+ // Get an existing tracker for this thread. It's not possible to create
+ // one at this point because such would involve memory allocations and
+  // one at this point because doing so would involve memory allocations and
+ // within an exception handler. In most cases various operations will
+ // have already created the tracker so this shouldn't generally be a
+ // problem.
+ ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+ if (!tracker)
+ return;
+
+ tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
+ ActivityData::ForException(code));
+}
+
// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
delete reinterpret_cast<ManagedActivityTracker*>(value);
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
index 719a31865c..e6eb197881 100644
--- a/base/debug/activity_tracker.h
+++ b/base/debug/activity_tracker.h
@@ -23,12 +23,15 @@
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local_storage.h"
@@ -41,7 +44,6 @@ class FilePath;
class Lock;
class PlatformThreadHandle;
class Process;
-class StaticAtomicSequenceNumber;
class WaitableEvent;
namespace debug {
@@ -56,11 +58,48 @@ enum : int {
kActivityCallStackSize = 10,
};
+// A class for keeping all information needed to verify that a structure is
+// associated with a given process.
+struct OwningProcess {
+ OwningProcess();
+ ~OwningProcess();
+
+ // Initializes structure with the current process id and the current time.
+ // These can uniquely identify a process. A unique non-zero data_id will be
+  // set, making it possible to tell using atomic reads if the data has changed.
+ void Release_Initialize();
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from memory without loading the entire structure for analysis. This will
+ // return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp);
+
+ // SHA1(base::debug::OwningProcess): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
+
+ // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+ static constexpr size_t kExpectedInstanceSize = 24;
+
+ std::atomic<uint32_t> data_id;
+ uint32_t padding;
+ int64_t process_id;
+ int64_t create_stamp;
+};
+
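Because this structure sits at the start of each tracked allocation, an analyzer can attribute a record to a process without mapping the full structure. A minimal read sketch, assuming |memory| points at a record that begins with an OwningProcess:

  base::ProcessId owner_pid;
  int64_t create_stamp;
  if (base::debug::OwningProcess::GetOwningProcessId(memory, &owner_pid,
                                                     &create_stamp)) {
    // The record belongs to |owner_pid|; compare |create_stamp| against an
    // exit timestamp so that PID re-use doesn't misattribute it.
  }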
// The data associated with an activity is dependent upon the activity type.
// This union defines all of the various fields. All fields must be explicitly
// sized types to ensure no interoperability problems between 32-bit and
// 64-bit systems.
union ActivityData {
+ // Expected size for 32/64-bit check.
+ // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
+ // static constexpr size_t kExpectedInstanceSize = 8;
+
// Generic activities don't have any defined structure.
struct {
uint32_t id; // An arbitrary identifier used for association.
@@ -81,6 +120,9 @@ union ActivityData {
struct {
int64_t process_id; // A unique identifier for a process.
} process;
+ struct {
+ uint32_t code; // An "exception code" number.
+ } exception;
// These methods create an ActivityData object from the appropriate
// parameters. Objects of this type should always be created this way to
@@ -126,6 +168,12 @@ union ActivityData {
data.process.process_id = id;
return data;
}
+
+ static ActivityData ForException(const uint32_t code) {
+ ActivityData data;
+ data.exception.code = code;
+ return data;
+ }
};
// A "null" activity-data that can be passed to indicate "do not change".
@@ -237,6 +285,9 @@ struct Activity {
ACT_PROCESS_START = ACT_PROCESS,
ACT_PROCESS_WAIT,
+  // Exception activities indicate the occurrence of something unexpected.
+ ACT_EXCEPTION = 14 << 4,
+
// Generic activities are user defined and can be anything.
ACT_GENERIC = 15 << 4,
@@ -293,7 +344,9 @@ struct Activity {
// This class manages arbitrary user data that can be associated with activities
// done by a thread by supporting key/value pairs of any type. This can provide
// additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread.
+// global data. All updates must be done from the same thread, though other
+// threads can read it concurrently if they create new objects using the same
+// memory.
class BASE_EXPORT ActivityUserData {
public:
// List of known value types. REFERENCE types must immediately follow the non-
@@ -348,6 +401,9 @@ class BASE_EXPORT ActivityUserData {
using Snapshot = std::map<std::string, TypedValue>;
+ // Initialize the object either as a "sink" that just accepts and discards
+ // data or an active one that writes to a given (zeroed) memory block.
+ ActivityUserData();
ActivityUserData(void* memory, size_t size);
virtual ~ActivityUserData();
@@ -355,7 +411,7 @@ class BASE_EXPORT ActivityUserData {
// contents have been overwritten by another thread. The return value is
// always non-zero unless it's actually just a data "sink".
uint32_t id() const {
- return memory_ ? id_->load(std::memory_order_relaxed) : 0;
+ return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
}
// Writes a |value| (as part of a key/value pair) that will be included with
@@ -409,7 +465,17 @@ class BASE_EXPORT ActivityUserData {
bool CreateSnapshot(Snapshot* output_snapshot) const;
// Gets the base memory address used for storing data.
- const void* GetBaseAddress();
+ const void* GetBaseAddress() const;
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from tracker memory without loading the entire structure for analysis. This
+ // will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp);
protected:
virtual void Set(StringPiece name,
@@ -422,20 +488,31 @@ class BASE_EXPORT ActivityUserData {
enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
- // A structure used to reference data held outside of persistent memory.
- struct ReferenceRecord {
- uint64_t address;
- uint64_t size;
+  // A structure that defines the header stored at the start of the memory.
+ struct MemoryHeader {
+ MemoryHeader();
+ ~MemoryHeader();
+
+ OwningProcess owner; // Information about the creating process.
};
// Header to a key/value record held in persistent memory.
- struct Header {
+ struct FieldHeader {
+ FieldHeader();
+ ~FieldHeader();
+
std::atomic<uint8_t> type; // Encoded ValueType
uint8_t name_size; // Length of "name" key.
std::atomic<uint16_t> value_size;  // Actual size of the stored value.
uint16_t record_size; // Total storage of name, value, header.
};
+ // A structure used to reference data held outside of persistent memory.
+ struct ReferenceRecord {
+ uint64_t address;
+ uint64_t size;
+ };
+
// This record is used to hold known values in a map so that they can be
// found and overwritten later.
struct ValueInfo {
@@ -470,12 +547,8 @@ class BASE_EXPORT ActivityUserData {
mutable char* memory_;
mutable size_t available_;
- // A pointer to the unique ID for this instance.
- std::atomic<uint32_t>* const id_;
-
- // This ID is used to create unique indentifiers for user data so that it's
- // possible to tell if the information has been overwritten.
- static StaticAtomicSequenceNumber next_id_;
+ // A pointer to the memory header for this instance.
+ MemoryHeader* const header_;
DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
};
@@ -525,6 +598,9 @@ class BASE_EXPORT ThreadActivityTracker {
// The current total depth of the activity stack, including those later
// entries not recorded in the |activity_stack| vector.
uint32_t activity_stack_depth = 0;
+
+ // The last recorded "exception" activity.
+ Activity last_exception;
};
// This is the base class for having the compiler manage an activity on the
@@ -608,6 +684,12 @@ class BASE_EXPORT ThreadActivityTracker {
void ReleaseUserData(ActivityId id,
ActivityTrackerMemoryAllocator* allocator);
+ // Save an exception. |origin| is the location of the exception.
+ void RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+
// Returns whether the current data is valid or not. It is not valid if
// corruption has been detected in the header or other data structures.
bool IsValid() const;
@@ -618,6 +700,19 @@ class BASE_EXPORT ThreadActivityTracker {
// implementation does not support concurrent snapshot operations.
bool CreateSnapshot(Snapshot* output_snapshot) const;
+ // Gets the base memory address used for storing data.
+ const void* GetBaseAddress();
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from tracker memory without loading the entire structure for analysis. This
+ // will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ ProcessId* out_id,
+ int64_t* out_stamp);
+
// Calculates the memory size required for a given stack depth, including
// the internal header structure for the stack.
static size_t SizeForStackDepth(int stack_depth);
@@ -625,6 +720,10 @@ class BASE_EXPORT ThreadActivityTracker {
private:
friend class ActivityTrackerTest;
+ std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator);
+
Header* const header_; // Pointer to the Header structure.
Activity* const stack_; // The stack of activities.
const uint32_t stack_slots_; // The total number of stack slots.
@@ -649,15 +748,45 @@ class BASE_EXPORT GlobalActivityTracker {
// will be safely ignored. These are public so that an external process
// can recognize records of this type within an allocator.
enum : uint32_t {
- kTypeIdActivityTracker = 0x5D7381AF + 3, // SHA1(ActivityTracker) v3
- kTypeIdUserDataRecord = 0x615EDDD7 + 2, // SHA1(UserDataRecord) v2
+ kTypeIdActivityTracker = 0x5D7381AF + 4, // SHA1(ActivityTracker) v4
+ kTypeIdUserDataRecord = 0x615EDDD7 + 3, // SHA1(UserDataRecord) v3
kTypeIdGlobalLogMessage = 0x4CF434F9 + 1, // SHA1(GlobalLogMessage) v1
- kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
+ kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
+ kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 0x200,
kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
+ kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
+ };
+
+ // An enumeration of common process life stages. All entries are given an
+ // explicit number so they are known and remain constant; this allows for
+ // cross-version analysis either locally or on a server.
+ enum ProcessPhase : int {
+ // The phases are generic and may have meaning to the tracker.
+ PROCESS_PHASE_UNKNOWN = 0,
+ PROCESS_LAUNCHED = 1,
+ PROCESS_LAUNCH_FAILED = 2,
+ PROCESS_EXITED_CLEANLY = 10,
+ PROCESS_EXITED_WITH_CODE = 11,
+
+ // Add here whatever is useful for analysis.
+ PROCESS_SHUTDOWN_STARTED = 100,
+ PROCESS_MAIN_LOOP_STARTED = 101,
};
+ // A callback made when a process exits to allow immediate analysis of its
+ // data. Note that the system may reuse the |process_id| so when fetching
+ // records it's important to ensure that what is returned was created before
+ // the |exit_stamp|. Movement of |process_data| information is allowed.
+ using ProcessExitCallback =
+ Callback<void(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ ProcessPhase exit_phase,
+ std::string&& command_line,
+ ActivityUserData::Snapshot&& process_data)>;
+
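Any handler with the matching signature can be registered; the unit-test changes below bind a member function with Unretained(). A condensed sketch using a hypothetical free function:

  void OnChildExit(int64_t process_id,
                   int64_t exit_stamp,
                   int exit_code,
                   base::debug::GlobalActivityTracker::ProcessPhase exit_phase,
                   std::string&& command_line,
                   base::debug::ActivityUserData::Snapshot&& process_data) {
    // Inspect the exit phase and key/value snapshot captured at exit time.
  }

  // Registration, once the GlobalActivityTracker exists:
  base::debug::GlobalActivityTracker::Get()->SetProcessExitCallback(
      base::Bind(&OnChildExit));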
// This structure contains information about a loaded module, as shown to
// users of the tracker.
struct BASE_EXPORT ModuleInfo {
@@ -789,6 +918,50 @@ class BASE_EXPORT GlobalActivityTracker {
// Releases the activity-tracker for the current thread (for testing only).
void ReleaseTrackerForCurrentThreadForTesting();
+ // Sets a task-runner that can be used for background work.
+ void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
+
+ // Sets an optional callback to be called when a process exits.
+ void SetProcessExitCallback(ProcessExitCallback callback);
+
+ // Manages process lifetimes. These are called by the process that launched
+ // and reaped the subprocess, not the subprocess itself. If it is expensive
+ // to generate the parameters, Get() the global tracker and call these
+ // conditionally rather than using the static versions.
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& cmd);
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args);
+ void RecordProcessExit(ProcessId process_id, int exit_code);
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, cmd);
+ }
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, exe, args);
+ }
+ static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessExit(process_id, exit_code);
+ }
+
+ // Sets the "phase" of the current process, useful for knowing what it was
+ // doing when it last reported.
+ void SetProcessPhase(ProcessPhase phase);
+ static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->SetProcessPhase(phase);
+ }
+
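Call sites mark life-cycle milestones with the static helper so a post-mortem shows the last phase the process reached; a one-line usage sketch:

  base::debug::GlobalActivityTracker::SetProcessPhaseIfEnabled(
      base::debug::GlobalActivityTracker::PROCESS_MAIN_LOOP_STARTED);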
// Records a log message. The current implementation does NOT recycle these
// so only store critical messages such as FATAL ones.
void RecordLogMessage(StringPiece message);
@@ -818,7 +991,19 @@ class BASE_EXPORT GlobalActivityTracker {
tracker->RecordFieldTrial(trial_name, group_name);
}
+ // Record exception information for the current thread.
+ ALWAYS_INLINE
+ void RecordException(const void* origin, uint32_t code) {
+ return RecordExceptionImpl(::tracked_objects::GetProgramCounter(), origin,
+ code);
+ }
+
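Since RecordException() only writes through a thread tracker that already exists, it is callable from an exception handler without allocating. A sketch mirroring the ExceptionTest added further down; the handler name and code value are illustrative:

  void HandleFault(const void* fault_address) {
    base::debug::GlobalActivityTracker* tracker =
        base::debug::GlobalActivityTracker::Get();
    if (tracker)
      tracker->RecordException(fault_address, /*code=*/42);
  }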
+ // Accesses the process data record for storing arbitrary key/value pairs.
+ // Updates to this are thread-safe.
+ ActivityUserData& process_data() { return process_data_; }
+
// Accesses the global data record for storing arbitrary key/value pairs.
+ // Updates to this are thread-safe.
ActivityUserData& global_data() { return global_data_; }
private:
@@ -837,10 +1022,10 @@ class BASE_EXPORT GlobalActivityTracker {
// A wrapper around ActivityUserData that is thread-safe and thus can be used
// in the global scope without the requirement of being called from only one
// thread.
- class GlobalUserData : public ActivityUserData {
+ class ThreadSafeUserData : public ActivityUserData {
public:
- GlobalUserData(void* memory, size_t size);
- ~GlobalUserData() override;
+ ThreadSafeUserData(void* memory, size_t size);
+ ~ThreadSafeUserData() override;
private:
void Set(StringPiece name,
@@ -850,7 +1035,7 @@ class BASE_EXPORT GlobalActivityTracker {
Lock data_lock_;
- DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
+ DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
};
// State of a module as stored in persistent memory. This supports a single
@@ -862,7 +1047,8 @@ class BASE_EXPORT GlobalActivityTracker {
static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
// Expected size for 32/64-bit check by PersistentMemoryAllocator.
- static constexpr size_t kExpectedInstanceSize = 56;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + 56;
// The atomic unfortunately makes this a "complex" class on some compilers
// and thus requires an out-of-line constructor & destructor even though
@@ -870,6 +1056,7 @@ class BASE_EXPORT GlobalActivityTracker {
ModuleInfoRecord();
~ModuleInfoRecord();
+ OwningProcess owner; // The process that created this record.
uint64_t address; // The base address of the module.
uint64_t load_time; // Time of last load/unload.
uint64_t size; // The size of the module in bytes.
@@ -928,11 +1115,20 @@ class BASE_EXPORT GlobalActivityTracker {
// It is called during the destruction of a ManagedActivityTracker object.
void ReturnTrackerMemory(ManagedActivityTracker* tracker);
+ // Records exception information.
+ void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
+
// Releases the activity-tracker associated with a thread. It is called
// automatically when a thread is joined and thus there is nothing more to
// be tracked. |value| is a pointer to a ManagedActivityTracker.
static void OnTLSDestroy(void* value);
+ // Does process-exit work. This can be run on any thread.
+ void CleanupAfterProcess(ProcessId process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line);
+
// The persistent-memory allocator from which the memory for all trackers
// is taken.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -955,9 +1151,9 @@ class BASE_EXPORT GlobalActivityTracker {
ActivityTrackerMemoryAllocator user_data_allocator_;
base::Lock user_data_allocator_lock_;
- // An object for holding global arbitrary key value pairs. Values must always
- // be written from the main UI thread.
- GlobalUserData global_data_;
+ // An object for holding arbitrary key value pairs with thread-safe access.
+ ThreadSafeUserData process_data_;
+ ThreadSafeUserData global_data_;
// A map of global module information, keyed by module path.
std::map<const std::string, ModuleInfoRecord*> modules_;
@@ -966,6 +1162,21 @@ class BASE_EXPORT GlobalActivityTracker {
// The active global activity tracker.
static subtle::AtomicWord g_tracker_;
+ // A lock that is used to protect access to the following fields.
+ base::Lock global_tracker_lock_;
+
+ // The collection of processes being tracked and their command-lines.
+ std::map<int64_t, std::string> known_processes_;
+
+ // A task-runner that can be used for doing background processing.
+ scoped_refptr<TaskRunner> background_task_runner_;
+
+ // A callback performed when a subprocess exits, including its exit-code
+ // and the phase it was in when that occurred. This will be called via
+ // the |background_task_runner_| if one is set or whatever thread reaped
+ // the process otherwise.
+ ProcessExitCallback process_exit_callback_;
+
DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
};
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
index aced4fb36a..116c13d623 100644
--- a/base/debug/activity_tracker_unittest.cc
+++ b/base/debug/activity_tracker_unittest.cc
@@ -84,45 +84,73 @@ class ActivityTrackerTest : public testing::Test {
return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
}
+ void HandleProcessExit(int64_t id,
+ int64_t stamp,
+ int code,
+ GlobalActivityTracker::ProcessPhase phase,
+ std::string&& command,
+ ActivityUserData::Snapshot&& data) {
+ exit_id = id;
+ exit_stamp = stamp;
+ exit_code = code;
+ exit_phase = phase;
+ exit_command = std::move(command);
+ exit_data = std::move(data);
+ }
+
static void DoNothing() {}
+
+ int64_t exit_id = 0;
+ int64_t exit_stamp;
+ int exit_code;
+ GlobalActivityTracker::ProcessPhase exit_phase;
+ std::string exit_command;
+ ActivityUserData::Snapshot exit_data;
};
TEST_F(ActivityTrackerTest, UserDataTest) {
char buffer[256];
memset(buffer, 0, sizeof(buffer));
ActivityUserData data(buffer, sizeof(buffer));
- const size_t space = sizeof(buffer) - 8;
+ size_t space = sizeof(buffer) - sizeof(ActivityUserData::MemoryHeader);
ASSERT_EQ(space, data.available_);
data.SetInt("foo", 1);
- ASSERT_EQ(space - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetUint("b", 1U); // Small names fit beside header in a word.
- ASSERT_EQ(space - 24 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 10);
- ASSERT_EQ(space - 24 - 16 - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "it's been fun");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ space -= 32;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 20);
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "but we're done together");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "bye");
- ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetChar("d", 'x');
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8, data.available_);
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
data.SetBool("ee", true);
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
data.SetString("f", "");
- ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16 - 8, data.available_);
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
}
TEST_F(ActivityTrackerTest, PushPopTest) {
@@ -222,6 +250,28 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
}
+TEST_F(ActivityTrackerTest, ExceptionTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ ThreadActivityTracker::Snapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.last_exception.activity_type);
+
+ char origin;
+ global->RecordException(&origin, 42);
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ EXPECT_EQ(Activity::ACT_EXCEPTION, snapshot.last_exception.activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin),
+ snapshot.last_exception.origin_address);
+ EXPECT_EQ(42U, snapshot.last_exception.data.exception.code);
+}
+
TEST_F(ActivityTrackerTest, CreateWithFileTest) {
const char temp_name[] = "CreateWithFileTest";
ScopedTempDir temp_dir;
@@ -250,6 +300,16 @@ TEST_F(ActivityTrackerTest, CreateWithFileTest) {
// GlobalActivityTracker tests below.
+TEST_F(ActivityTrackerTest, BasicTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ // Ensure the data repositories have backing store, indicated by non-zero ID.
+ EXPECT_NE(0U, global->process_data().id());
+ EXPECT_NE(0U, global->global_data().id());
+ EXPECT_NE(global->process_data().id(), global->global_data().id());
+}
+
class SimpleActivityThread : public SimpleThread {
public:
SimpleActivityThread(const std::string& name,
@@ -336,5 +396,107 @@ TEST_F(ActivityTrackerTest, ThreadDeathTest) {
EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
}
+TEST_F(ActivityTrackerTest, ProcessDeathTest) {
+ // This doesn't actually create and destroy a process. Instead, it uses for-
+ // testing interfaces to simulate data created by other processes.
+ const ProcessId other_process_id = GetCurrentProcId() + 1;
+
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
+
+ // Get callbacks for process exit.
+ global->SetProcessExitCallback(
+ Bind(&ActivityTrackerTest::HandleProcessExit, Unretained(this)));
+
+  // Pretend that another process has started.
+ global->RecordProcessLaunch(other_process_id, FILE_PATH_LITERAL("foo --bar"));
+
+ // Do some activities.
+ PendingTask task(FROM_HERE, base::Bind(&DoNothing));
+ ScopedTaskRunActivity activity(task);
+ ActivityUserData& user_data = activity.user_data();
+ ASSERT_NE(0U, user_data.id());
+
+ // Get the memory-allocator references to that data.
+ PersistentMemoryAllocator::Reference proc_data_ref =
+ global->allocator()->GetAsReference(
+ global->process_data().GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdProcessDataRecord);
+ ASSERT_TRUE(proc_data_ref);
+ PersistentMemoryAllocator::Reference tracker_ref =
+ global->allocator()->GetAsReference(
+ thread->GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdActivityTracker);
+ ASSERT_TRUE(tracker_ref);
+ PersistentMemoryAllocator::Reference user_data_ref =
+ global->allocator()->GetAsReference(
+ user_data.GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdUserDataRecord);
+ ASSERT_TRUE(user_data_ref);
+
+ // Make a copy of the thread-tracker state so it can be restored later.
+ const size_t tracker_size = global->allocator()->GetAllocSize(tracker_ref);
+ std::unique_ptr<char[]> tracker_copy(new char[tracker_size]);
+ memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
+
+ // Change the objects to appear to be owned by another process.
+ ProcessId owning_id;
+ int64_t stamp;
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ global->process_data().SetOwningProcessIdForTesting(other_process_id, stamp);
+ thread->SetOwningProcessIdForTesting(other_process_id, stamp);
+ user_data.SetOwningProcessIdForTesting(other_process_id, stamp);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+
+ // Check that process exit will perform callback and free the allocations.
+ ASSERT_EQ(0, exit_id);
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecord,
+ global->allocator()->GetType(proc_data_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdActivityTracker,
+ global->allocator()->GetType(tracker_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdUserDataRecord,
+ global->allocator()->GetType(user_data_ref));
+ global->RecordProcessExit(other_process_id, 0);
+ EXPECT_EQ(other_process_id, exit_id);
+ EXPECT_EQ("foo --bar", exit_command);
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecordFree,
+ global->allocator()->GetType(proc_data_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdActivityTrackerFree,
+ global->allocator()->GetType(tracker_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdUserDataRecordFree,
+ global->allocator()->GetType(user_data_ref));
+
+ // Restore memory contents and types so things don't crash when doing real
+ // process clean-up.
+ memcpy(const_cast<void*>(thread->GetBaseAddress()), tracker_copy.get(),
+ tracker_size);
+ global->allocator()->ChangeType(
+ proc_data_ref, GlobalActivityTracker::kTypeIdProcessDataRecord,
+      GlobalActivityTracker::kTypeIdProcessDataRecordFree, false);
+ global->allocator()->ChangeType(
+ tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+ GlobalActivityTracker::kTypeIdActivityTrackerFree, false);
+ global->allocator()->ChangeType(
+ user_data_ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+ GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
+}
+
} // namespace debug
} // namespace base
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 1996dfca18..08dcacfa30 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -35,7 +35,7 @@ namespace debug {
namespace {
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
// GCC and LLVM generate slightly different frames on ARM, see
@@ -144,7 +144,7 @@ void* LinkStackFrames(void* fpp, void* parent_fp) {
return prev_parent_fp;
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
} // namespace
@@ -227,6 +227,18 @@ std::string StackTrace::ToString() const {
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial) {
+// TODO(699863): Merge the frame-pointer based stack unwinder into the
+// base::debug::StackTrace platform-specific implementation files.
+#if defined(OS_WIN)
+ StackTrace stack(max_depth);
+ size_t count = 0;
+ const void* const* frames = stack.Addresses(&count);
+ if (count < skip_initial)
+ return 0u;
+ count -= skip_initial;
+ memcpy(out_trace, frames + skip_initial, count * sizeof(void*));
+ return count;
+#elif defined(OS_POSIX)
// Usage of __builtin_frame_address() enables frame pointers in this
// function even if they are not enabled globally. So 'fp' will always
// be valid.
@@ -260,8 +272,10 @@ size_t TraceStackFramePointers(const void** out_trace,
}
return depth;
+#endif
}
+#if !defined(OS_WIN)
ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
: fp_(fp),
parent_fp_(parent_fp),
@@ -272,6 +286,7 @@ ScopedStackFrameLinker::~ScopedStackFrameLinker() {
CHECK_EQ(parent_fp_, previous_parent_fp)
<< "Stack frame's parent pointer has changed!";
}
+#endif // !defined(OS_WIN)
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 4c9b73e87d..ab1d2ebe6a 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -23,13 +23,23 @@ struct _EXCEPTION_POINTERS;
struct _CONTEXT;
#endif
-#if defined(OS_POSIX) && ( \
- defined(__i386__) || defined(__x86_64__) || \
- (defined(__arm__) && !defined(__thumb__)))
+// TODO(699863): Clean up HAVE_TRACE_STACK_FRAME_POINTERS.
+#if defined(OS_POSIX)
+
+#if defined(__i386__) || defined(__x86_64__)
#define HAVE_TRACE_STACK_FRAME_POINTERS 1
-#else
+#elif defined(__arm__) && !defined(__thumb__)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+#else // defined(__arm__) && !defined(__thumb__)
#define HAVE_TRACE_STACK_FRAME_POINTERS 0
-#endif
+#endif // defined(__arm__) && !defined(__thumb__)
+
+#elif defined(OS_WIN)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+
+#else // defined(OS_WIN)
+#define HAVE_TRACE_STACK_FRAME_POINTERS 0
+#endif // defined(OS_WIN)
namespace base {
namespace debug {
@@ -122,6 +132,7 @@ BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial);
+#if !defined(OS_WIN)
// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
// Both frame pointers must come from __builtin_frame_address().
@@ -171,6 +182,7 @@ class BASE_EXPORT ScopedStackFrameLinker {
DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
};
+#endif // !defined(OS_WIN)
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
diff --git a/base/environment.cc b/base/environment.cc
index 534a7a8812..8b1d8fc312 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -42,7 +42,7 @@ class EnvironmentImpl : public Environment {
alternate_case_var = ToLowerASCII(variable_name);
else
return false;
- return GetVarImpl(alternate_case_var.c_str(), result);
+ return GetVarImpl(alternate_case_var, result);
}
bool SetVar(StringPiece variable_name,
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 353136c12b..61043ceb73 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -228,9 +228,9 @@ FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
}
// static
-std::vector<std::string> FeatureList::SplitFeatureListString(
- const std::string& input) {
- return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
+ base::StringPiece input) {
+ return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
}
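With the switch to StringPiece, the returned pieces alias |input| instead of owning copies, so the backing string must outlive them. A minimal caller sketch:

  std::string feature_list = "FeatureA,FeatureB";
  std::vector<base::StringPiece> features =
      base::FeatureList::SplitFeatureListString(feature_list);
  // |features| are views into |feature_list|; keep it alive while they are
  // in use.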
// static
@@ -340,7 +340,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
const std::string& feature_list,
OverrideState overridden_state) {
for (const auto& value : SplitFeatureListString(feature_list)) {
- StringPiece feature_name(value);
+ StringPiece feature_name = value;
base::FieldTrial* trial = nullptr;
// The entry may be of the form FeatureName<FieldTrialName - in which case,
@@ -348,7 +348,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
std::string::size_type pos = feature_name.find('<');
if (pos != std::string::npos) {
feature_name.set(value.data(), pos);
- trial = base::FieldTrialList::Find(value.substr(pos + 1));
+ trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
}
RegisterOverride(feature_name, overridden_state, trial);
diff --git a/base/feature_list.h b/base/feature_list.h
index 09e8408aa8..c9f4a7b0c4 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -156,9 +156,10 @@ class BASE_EXPORT FeatureList {
// called after the singleton instance has been registered via SetInstance().
static FieldTrial* GetFieldTrial(const Feature& feature);
- // Splits a comma-separated string containing feature names into a vector.
- static std::vector<std::string> SplitFeatureListString(
- const std::string& input);
+ // Splits a comma-separated string containing feature names into a vector. The
+ // resulting pieces point to parts of |input|.
+ static std::vector<base::StringPiece> SplitFeatureListString(
+ base::StringPiece input);
// Initializes and sets an instance of FeatureList with feature overrides via
// command-line flags |enable_features| and |disable_features| if one has not
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index fb3b320ae9..5fbd294dcf 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -14,6 +14,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,7 +34,7 @@ struct Feature kFeatureOffByDefault {
};
std::string SortFeatureListString(const std::string& feature_list) {
- std::vector<std::string> features =
+ std::vector<base::StringPiece> features =
FeatureList::SplitFeatureListString(feature_list);
std::sort(features.begin(), features.end());
return JoinString(features, ",");
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
index bff8eb6a9b..cb4b82ca47 100644
--- a/base/mac/mach_port_broker_unittest.cc
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -95,21 +95,21 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
- broker_.AddPlaceholderForPid(test_child_process.Handle());
+ broker_.AddPlaceholderForPid(spawn_result.process.Handle());
broker_.GetLock().Release();
WaitForTaskPort();
- EXPECT_EQ(test_child_process.Handle(), received_process_);
+ EXPECT_EQ(spawn_result.process.Handle(), received_process_);
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
@@ -117,17 +117,18 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
+
broker_.GetLock().Release();
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
} // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
index c7d20ec049..4ccee89deb 100644
--- a/base/memory/shared_memory_mac_unittest.cc
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -204,7 +204,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// similar tests.
service_name_ = CreateRandomServiceName();
server_port_.reset(BecomeMachServer(service_name_.c_str()));
- child_process_ = SpawnChild(name);
+ spawn_child_ = SpawnChild(name);
client_port_.reset(ReceiveMachPort(server_port_.get()));
}
@@ -221,7 +221,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// process.
mac::ScopedMachSendRight client_port_;
- base::Process child_process_;
+ base::SpawnChildResult spawn_child_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
};
@@ -237,7 +237,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
@@ -277,7 +277,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
SendMachPort(
client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index 19dedccb47..d87fad01d3 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -682,16 +682,16 @@ TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
// Start |kNumTasks| processes, each of which atomically increments the first
// word by 1.
- Process processes[kNumTasks];
+ SpawnChildResult children[kNumTasks];
for (int index = 0; index < kNumTasks; ++index) {
- processes[index] = SpawnChild("SharedMemoryTestMain");
- ASSERT_TRUE(processes[index].IsValid());
+ children[index] = SpawnChild("SharedMemoryTestMain");
+ ASSERT_TRUE(children[index].process.IsValid());
}
// Check that each process exited correctly.
int exit_code = 0;
for (int index = 0; index < kNumTasks; ++index) {
- EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+ EXPECT_TRUE(children[index].process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
diff --git a/base/memory/singleton_objc.h b/base/memory/singleton_objc.h
deleted file mode 100644
index 6df3f7757e..0000000000
--- a/base/memory/singleton_objc.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Support for using the Singleton<T> pattern with Objective-C objects. A
-// SingletonObjC is the same as a Singleton, except the default traits are
-// appropriate for Objective-C objects. A typical Objective-C object of type
-// NSExampleType can be maintained as a singleton and accessed with:
-//
-// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
-//
-// The first time this is used, it will create exampleSingleton as the result
-// of [[NSExampleType alloc] init]. Subsequent calls will return the same
-// NSExampleType* object. The object will be released by calling
-// -[NSExampleType release] when Singleton's atexit routines run
-// (see singleton.h).
-//
-// For Objective-C objects initialized through means other than the
-// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
-// as needed:
-//
-// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
-// static Foo* New() {
-// return [[Foo alloc] initWithName:@"selecty"];
-// }
-// };
-// ...
-// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
-
-#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
-#define BASE_MEMORY_SINGLETON_OBJC_H_
-
-#import <Foundation/Foundation.h>
-#include "base/memory/singleton.h"
-
-// Singleton traits usable to manage traditional Objective-C objects, which
-// are instantiated by sending |alloc| and |init| messages, and are deallocated
-// in a memory-managed environment when their retain counts drop to 0 by
-// sending |release| messages.
-template<typename Type>
-struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
- static Type* New() {
- return [[Type alloc] init];
- }
-
- static void Delete(Type* object) {
- [object release];
- }
-};
-
-// Exactly like Singleton, but without the DefaultSingletonObjCTraits as the
-// default trait class. This makes it straightforward for Objective-C++ code
-// to hold Objective-C objects as singletons.
-template<typename Type,
- typename Traits = DefaultSingletonObjCTraits<Type>,
- typename DifferentiatingType = Type>
-class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
-};
-
-#endif // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index fed1494c04..762e6100b3 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -5,6 +5,7 @@
#include "base/message_loop/incoming_task_queue.h"
#include <limits>
+#include <utility>
#include "base/location.h"
#include "base/message_loop/message_loop.h"
@@ -60,7 +61,7 @@ IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
bool IncomingTaskQueue::AddToIncomingQueue(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay,
bool nestable) {
DLOG_IF(WARNING,
@@ -68,8 +69,8 @@ bool IncomingTaskQueue::AddToIncomingQueue(
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
- PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
- nestable);
+ PendingTask pending_task(from_here, std::move(task),
+ CalculateDelayedRuntime(delay), nestable);
#if defined(OS_WIN)
// We consider the task needs a high resolution timer if the delay is
// more than 0 and less than 32ms. This caps the relative error to
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index 157e47fa14..a912dc2ee1 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -35,7 +36,7 @@ class BASE_EXPORT IncomingTaskQueue
// returns false. In all cases, the ownership of |task| is transferred to the
// called method.
bool AddToIncomingQueue(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay,
bool nestable);
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index bfef261c38..6b4765bd1b 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -320,6 +320,8 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Runs the specified PendingTask.
void RunTask(PendingTask* pending_task);
+ bool nesting_allowed() const { return allow_nesting_; }
+
// Disallow nesting. After this is called, running a nested RunLoop or calling
// Add/RemoveNestingObserver() on this MessageLoop will crash.
void DisallowNesting() { allow_nesting_ = false; }
diff --git a/base/message_loop/message_loop_task_runner.cc b/base/message_loop/message_loop_task_runner.cc
index c9b5ffe3f7..ddfdeb2b65 100644
--- a/base/message_loop/message_loop_task_runner.cc
+++ b/base/message_loop/message_loop_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_loop_task_runner.h"
+#include <utility>
+
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/incoming_task_queue.h"
@@ -24,18 +26,20 @@ void MessageLoopTaskRunner::BindToCurrentThread() {
bool MessageLoopTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ Closure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, true);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ true);
}
bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ Closure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, false);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ false);
}
bool MessageLoopTaskRunner::RunsTasksOnCurrentThread() const {
diff --git a/base/message_loop/message_loop_task_runner.h b/base/message_loop/message_loop_task_runner.h
index 5e70b128b2..11ee8a6bf7 100644
--- a/base/message_loop/message_loop_task_runner.h
+++ b/base/message_loop/message_loop_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -31,10 +32,10 @@ class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ Closure task,
base::TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ Closure task,
base::TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 29910036c7..5f44b67311 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -785,24 +785,6 @@ void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
#endif // !defined(OS_NACL)
// static
-void GlobalHistogramAllocator::CreateWithSharedMemory(
- std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t /*id*/,
- StringPiece /*name*/) {
- if ((!memory->memory() && !memory->Map(size)) ||
- !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
- NOTREACHED();
- return;
- }
-
- DCHECK_LE(memory->mapped_size(), size);
- Set(WrapUnique(
- new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
- std::move(memory), 0, StringPiece(), /*readonly=*/false))));
-}
-
-// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
const SharedMemoryHandle& handle,
size_t size) {
@@ -905,6 +887,8 @@ bool GlobalHistogramAllocator::WriteToPersistentLocation() {
}
void GlobalHistogramAllocator::DeletePersistentLocation() {
+ memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+
#if defined(OS_NACL)
NOTREACHED();
#else
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 2eb28dfaf5..851d7ef5a4 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -431,15 +431,6 @@ class BASE_EXPORT GlobalHistogramAllocator
FilePath* out_active_path);
#endif
- // Create a global allocator using a block of shared |memory| of the
- // specified |size|. The allocator takes ownership of the shared memory
- // and releases it upon destruction, though the memory will continue to
- // live if other processes have access to it.
- static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t id,
- StringPiece name);
-
// Create a global allocator using a block of shared memory accessed
// through the given |handle| and |size|. The allocator takes ownership
// of the handle and closes it upon destruction, though the memory will
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index f70b396917..abcc532242 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -18,6 +18,7 @@
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/threading/thread_restrictions.h"
namespace {
@@ -32,7 +33,7 @@ const uint32_t kGlobalCookie = 0x408305DC;
// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
compatible manner until the memory segment is completely re-initialized.
-const uint32_t kGlobalVersion = 1;
+const uint32_t kGlobalVersion = 2;
// Constant values placed in the block headers to indicate its state.
const uint32_t kBlockCookieFree = 0;
@@ -43,7 +44,7 @@ const uint32_t kBlockCookieAllocated = 0xC8799269;
// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
// types rather than combined bitfield.
-// Flags stored in the flags_ field of the SharedMetaData structure below.
+// Flags stored in the flags_ field of the SharedMetadata structure below.
enum : int {
kFlagCorrupt = 1 << 0,
kFlagFull = 1 << 1
@@ -100,7 +101,9 @@ struct PersistentMemoryAllocator::BlockHeader {
};
// The shared metadata exists once at the top of the memory segment to
-// describe the state of the allocator to all processes.
+// describe the state of the allocator to all processes. The size of this
+// structure must be a multiple of 64 bits to ensure compatibility between
+// architectures.
struct PersistentMemoryAllocator::SharedMetadata {
uint32_t cookie; // Some value that indicates complete initialization.
uint32_t size; // Total size of memory segment.
@@ -108,10 +111,15 @@ struct PersistentMemoryAllocator::SharedMetadata {
uint32_t version; // Version code so upgrades don't break.
uint64_t id; // Arbitrary ID number given by creator.
uint32_t name; // Reference to stored name string.
+ uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
// Above is read-only after first construction. Below may be changed and
// so must be marked "volatile" to provide correct inter-process behavior.
+ // State of the memory, plus some padding to keep alignment.
+ volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
+ uint8_t padding2[3];
+
// Bitfield of information flags. Access to this should be done through
// the CheckFlag() and SetFlag() methods defined above.
volatile std::atomic<uint32_t> flags;
@@ -121,6 +129,7 @@ struct PersistentMemoryAllocator::SharedMetadata {
// The "iterable" queue is an M&S Queue as described here, append-only:
// https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+ // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
volatile BlockHeader queue; // Empty block for linked-list head/tail.
};
@@ -312,7 +321,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
// definitions and so cannot be moved to the global scope.
static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
"struct is not portable across different natural word widths");
- static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+ static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
"struct is not portable across different natural word widths");
static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
@@ -384,12 +393,13 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
if (name_cstr)
memcpy(name_cstr, name.data(), name.length());
}
+
+ shared_meta()->memory_state.store(MEMORY_INITIALIZED,
+ std::memory_order_release);
} else {
- if (shared_meta()->size == 0 ||
- shared_meta()->version == 0 ||
+ if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
- shared_meta()->tailptr == 0 ||
- shared_meta()->queue.cookie == 0 ||
+ shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
SetCorrupt();
}
@@ -470,6 +480,19 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
HistogramBase::kUmaTargetedHistogramFlag);
}
+void PersistentMemoryAllocator::Flush(bool sync) {
+ FlushPartial(used(), sync);
+}
+
+void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
+ shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
+ FlushPartial(sizeof(SharedMetadata), false);
+}
+
+uint8_t PersistentMemoryAllocator::GetMemoryState() const {
+ return shared_meta()->memory_state.load(std::memory_order_relaxed);
+}
+
size_t PersistentMemoryAllocator::used() const {
return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
mem_size_);
@@ -816,8 +839,12 @@ const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
+ // Handle special cases.
+ if (ref == kReferenceQueue && queue_ok)
+ return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+
// Validation of parameters.
- if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+ if (ref < sizeof(SharedMetadata))
return nullptr;
if (ref % kAllocAlignment != 0)
return nullptr;
@@ -827,17 +854,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
// Validation of referenced block-header.
if (!free_ok) {
- uint32_t freeptr = std::min(
- shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
- if (ref + size > freeptr)
- return nullptr;
const volatile BlockHeader* const block =
reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
- if (block->size < size)
+ if (block->cookie != kBlockCookieAllocated)
return nullptr;
- if (ref + block->size > freeptr)
+ if (block->size < size)
return nullptr;
- if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+ if (ref + block->size > mem_size_)
return nullptr;
if (type_id != 0 &&
block->type_id.load(std::memory_order_relaxed) != type_id) {
@@ -849,6 +872,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}
+void PersistentMemoryAllocator::FlushPartial(size_t /*length*/, bool /*sync*/) {
+ // Generally there is nothing to do as every write is done through volatile
+ // memory with atomic instructions to guarantee consistency. This (virtual)
+ // method exists so that derived classes can do special things, such as
+ // tell the OS to write changes to disk now rather than when convenient.
+}
+
void PersistentMemoryAllocator::RecordError(int error) const {
if (errors_histogram_)
errors_histogram_->Add(error);
@@ -989,7 +1019,12 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
id,
name,
read_only),
- mapped_file_(std::move(file)) {}
+ mapped_file_(std::move(file)) {
+ // Ensure the on-disk copy of the data reflects the fully-initialized
+ // memory, as there is no guarantee about the order in which the OS will
+ // auto-flush the pages in the future.
+ Flush(true);
+}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
@@ -999,6 +1034,33 @@ bool FilePersistentMemoryAllocator::IsFileAcceptable(
bool read_only) {
return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}
+
+void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+ if (sync)
+ ThreadRestrictions::AssertIOAllowed();
+ if (IsReadonly())
+ return;
+
+#if defined(OS_WIN)
+ // Windows doesn't support a synchronous flush.
+ BOOL success = ::FlushViewOfFile(data(), length);
+ DPCHECK(success);
+#elif defined(OS_MACOSX)
+ // On OSX, "invalidate" removes all cached pages, forcing a re-read from
+ // disk. That's not applicable to "flush" so omit it.
+ int result =
+ ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
+ DCHECK_NE(EINVAL, result);
+#elif defined(OS_POSIX)
+ // On POSIX, "invalidate" forces _other_ processes to recognize what has
+ // been written to disk and so is applicable to "flush".
+ int result = ::msync(const_cast<void*>(data()), length,
+ MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
+ DCHECK_NE(EINVAL, result);
+#else
+#error Unsupported OS.
+#endif
+}
#endif // !defined(OS_NACL)
} // namespace base
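
For illustration, the POSIX branch of FlushPartial() above reduces to an
msync() over the mapped region. A minimal standalone sketch of that pattern,
independent of base/ (the path, length, and helper name are made up):

    #include <cstddef>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Map |length| bytes of a file and force them to disk, mirroring the
    // sync/async choice FlushPartial() makes above.
    bool FlushMappedFile(const char* path, size_t length, bool sync) {
      int fd = open(path, O_RDWR);
      if (fd < 0)
        return false;
      void* mem =
          mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      close(fd);  // The mapping remains valid after the descriptor is closed.
      if (mem == MAP_FAILED)
        return false;
      // MS_SYNC blocks until the write-out completes; MS_ASYNC only
      // schedules it.
      int result = msync(mem, length, sync ? MS_SYNC : MS_ASYNC);
      munmap(mem, length);
      return result == 0;
    }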
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index b38f284ff4..94a7744bfb 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -96,6 +96,29 @@ class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
+ // These states are used to indicate the overall condition of the memory
+ // segment irrespective of what is stored within it. Because the data is
+ // often persistent and thus needs to be readable by different versions of
+ // a program, these values are fixed and can never change.
+ enum MemoryState : uint8_t {
+ // Persistent memory starts all zeros and so shows "uninitialized".
+ MEMORY_UNINITIALIZED = 0,
+
+ // The header has been written and the memory is ready for use.
+ MEMORY_INITIALIZED = 1,
+
+ // The data should be considered deleted. This would be set when the
+ // allocator is being cleaned up. If file-backed, the file is likely
+ // to be deleted but since deletion can fail for a variety of reasons,
+ // having this extra status means a future reader can realize what
+ // should have happened.
+ MEMORY_DELETED = 2,
+
+ // Outside code can create states starting with this number; these too
+ // must never change between code versions.
+ MEMORY_USER_DEFINED = 100,
+ };
+
// Iterator for going through all iterable memory records in an allocator.
// Like the allocator itself, iterators are lock-free and thread-secure.
// That means that multiple threads can share an iterator and the same
@@ -280,7 +303,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
const char* Name() const;
// Is this segment open only for read?
- bool IsReadonly() { return readonly_; }
+ bool IsReadonly() const { return readonly_; }
+
+ // Manage the saved state of the memory.
+ void SetMemoryState(uint8_t memory_state);
+ uint8_t GetMemoryState() const;
// Create internal histograms for tracking memory use and allocation sizes
// for allocator of |name| (which can simply be the result of Name()). This
@@ -293,6 +320,17 @@ class BASE_EXPORT PersistentMemoryAllocator {
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
+ // Flushes the persistent memory to any backing store. This typically does
+ // nothing but is used by the FilePersistentMemoryAllocator to inform the
+ // OS that all the data should be sent to the disk immediately. This is
+ // useful in the rare case where something has just been stored that needs
+ // to survive a hard shutdown of the machine like from a power failure.
+ // The |sync| parameter indicates if this call should block until the flush
+ // is complete but is only advisory and may or may not have an effect
+ // depending on the capabilities of the OS. Synchronous flushes are allowed
+ // only from threads that are allowed to do I/O.
+ void Flush(bool sync);
+
// Direct access to underlying memory segment. If the segment is shared
// across threads or processes, reading data through these values does
// not guarantee consistency. Use with care. Do not write.
@@ -580,6 +618,9 @@ class BASE_EXPORT PersistentMemoryAllocator {
uint64_t id, base::StringPiece name,
bool readonly);
+ // Implementation of Flush that accepts how much to flush.
+ virtual void FlushPartial(size_t length, bool sync);
+
volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
const MemoryType mem_type_; // Type of memory allocation.
const uint32_t mem_size_; // Size of entire memory segment.
@@ -715,6 +756,10 @@ class BASE_EXPORT FilePersistentMemoryAllocator
// the rest.
static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+ protected:
+ // PersistentMemoryAllocator:
+ void FlushPartial(size_t length, bool sync) override;
+
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
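
As an illustration of how the new memory-state API composes with Flush(), a
hypothetical consumer attaching to an existing segment might look like this
(the helper names are made up; only the base/ calls come from this patch):

    // Skip segments whose creator marked them deleted; the backing file may
    // have survived a failed unlink.
    bool ShouldReadSegment(const base::PersistentMemoryAllocator& allocator) {
      return allocator.GetMemoryState() !=
             base::PersistentMemoryAllocator::MEMORY_DELETED;
    }

    // Record a deletion and push the header to the backing store so a future
    // reader observes it even after a hard shutdown.
    void MarkSegmentDeleted(base::PersistentMemoryAllocator* allocator) {
      allocator->SetMemoryState(
          base::PersistentMemoryAllocator::MEMORY_DELETED);
      allocator->Flush(true);  // Sync: only from threads allowed to do I/O.
    }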
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index d12e00f6d6..c3027ecc12 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -100,6 +100,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_TRUE(allocator_->used_histogram_);
EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
allocator_->used_histogram_->histogram_name());
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
+ allocator_->GetMemoryState());
// Get base memory info for later comparison.
PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -254,6 +256,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
allocator_->Delete(obj2);
PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
+
+ // Ensure that the memory state can be set.
+ allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
+ allocator_->GetMemoryState());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -691,8 +698,8 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
const size_t mmlength = mmfile->length();
EXPECT_GE(meminfo1.total, mmlength);
- FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
- EXPECT_TRUE(file.IsReadonly());
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
+ EXPECT_FALSE(file.IsReadonly());
EXPECT_EQ(TEST_ID, file.Id());
EXPECT_FALSE(file.IsFull());
EXPECT_FALSE(file.IsCorrupt());
@@ -713,6 +720,11 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
EXPECT_GE(meminfo1.free, meminfo2.free);
EXPECT_EQ(mmlength, meminfo2.total);
EXPECT_EQ(0U, meminfo2.free);
+
+ // There's no way of knowing if Flush actually does anything but at least
+ // verify that it runs without CHECK violations.
+ file.Flush(false);
+ file.Flush(true);
}
TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
diff --git a/base/process/process_info_linux.cc b/base/process/process_info_linux.cc
new file mode 100644
index 0000000000..2f227484f5
--- /dev/null
+++ b/base/process/process_info_linux.cc
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// static
+const Time CurrentProcessInfo::CreationTime() {
+ int64_t start_ticks =
+ internal::ReadProcSelfStatsAndGetFieldAsInt64(internal::VM_STARTTIME);
+ if (!start_ticks)
+ return Time();
+ TimeDelta start_offset = internal::ClockTicksToTimeDelta(start_ticks);
+ Time boot_time = internal::GetBootTime();
+ if (boot_time.is_null())
+ return Time();
+ return Time(boot_time + start_offset);
+}
+
+} // namespace base
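
The computation above is boot time plus a start offset, where the /proc
starttime field counts clock ticks since boot. A self-contained sketch of the
same arithmetic without the base/ helpers (parsing and error handling
omitted):

    #include <unistd.h>

    // Convert a starttime value (field 22 of /proc/<pid>/stat, in clock ticks
    // since boot) to seconds since the epoch, given the boot time in seconds.
    double CreationTimeSeconds(long start_ticks, double boot_time_seconds) {
      const long ticks_per_second = sysconf(_SC_CLK_TCK);  // Typically 100.
      return boot_time_seconds +
             static_cast<double>(start_ticks) / ticks_per_second;
    }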
diff --git a/base/process/process_info_mac.cc b/base/process/process_info_mac.cc
new file mode 100644
index 0000000000..27b9623d9e
--- /dev/null
+++ b/base/process/process_info_mac.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/free_deleter.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// static
+const Time CurrentProcessInfo::CreationTime() {
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
+ size_t len = 0;
+ if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0)
+ return Time();
+
+ std::unique_ptr<struct kinfo_proc, base::FreeDeleter> proc(
+ static_cast<struct kinfo_proc*>(malloc(len)));
+ if (sysctl(mib, arraysize(mib), proc.get(), &len, NULL, 0) < 0)
+ return Time();
+ return Time::FromTimeVal(proc->kp_proc.p_un.__p_starttime);
+}
+
+} // namespace base
diff --git a/base/process/process_info_unittest.cc b/base/process/process_info_unittest.cc
new file mode 100644
index 0000000000..a757774fda
--- /dev/null
+++ b/base/process/process_info_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if !defined(OS_IOS)
+TEST(ProcessInfoTest, CreationTime) {
+ Time creation_time = CurrentProcessInfo::CreationTime();
+ ASSERT_FALSE(creation_time.is_null());
+}
+#endif // !defined(OS_IOS)
+
+} // namespace base
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a38930a208..ad555aedff 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -12,6 +12,11 @@
namespace base {
+SystemMemoryInfoKB::SystemMemoryInfoKB() = default;
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+ default;
+
SystemMetrics::SystemMetrics() {
committed_memory_ = 0;
}
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 71d6042e00..33cb64e244 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -188,6 +188,10 @@ class BASE_EXPORT ProcessMetrics {
// Returns the number of file descriptors currently open by the process, or
// -1 on error.
int GetOpenFdCount() const;
+
+ // Returns the soft limit of file descriptors that can be opened by the
+ // process, or -1 on error.
+ int GetOpenFdSoftLimit() const;
#endif // defined(OS_LINUX)
private:
@@ -264,11 +268,13 @@ BASE_EXPORT void SetFdLimit(unsigned int max_descriptors);
// Data about system-wide memory consumption. Values are in KB. Available on
// Windows, Mac, Linux, Android and Chrome OS.
//
-// Total/free memory are available on all platforms that implement
+// Total memory is available on all platforms that implement
// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
-// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
+// inactive_file/dirty/reclaimable/pswpin/pswpout/pgmajfault are available on
// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
+// Speculative/file_backed/purgeable are Mac and iOS only.
+// Free is absent on Windows (see "avail_phys" below).
struct BASE_EXPORT SystemMemoryInfoKB {
SystemMemoryInfoKB();
SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
@@ -276,44 +282,64 @@ struct BASE_EXPORT SystemMemoryInfoKB {
// Serializes the platform specific fields to value.
std::unique_ptr<Value> ToValue() const;
- int total;
- int free;
+ int total = 0;
-#if defined(OS_LINUX)
+#if !defined(OS_WIN)
+ int free = 0;
+#endif
+
+#if defined(OS_WIN)
+ // "This is the amount of physical memory that can be immediately reused
+ // without having to write its contents to disk first. It is the sum of the
+ // size of the standby, free, and zero lists." (MSDN).
+ // Standby: unmodified pages of physical RAM (file-backed memory) that are
+ // not actively being used.
+ int avail_phys = 0;
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
// This provides an estimate of available memory as described here:
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
// NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
// be 0 in earlier kernel versions.
- int available;
+ // Note: it includes _all_ file-backed memory (active + inactive).
+ int available = 0;
#endif
#if !defined(OS_MACOSX)
- int swap_total;
- int swap_free;
+ int swap_total = 0;
+ int swap_free = 0;
#endif
#if defined(OS_ANDROID) || defined(OS_LINUX)
- int buffers;
- int cached;
- int active_anon;
- int inactive_anon;
- int active_file;
- int inactive_file;
- int dirty;
+ int buffers = 0;
+ int cached = 0;
+ int active_anon = 0;
+ int inactive_anon = 0;
+ int active_file = 0;
+ int inactive_file = 0;
+ int dirty = 0;
+ int reclaimable = 0;
// vmstats data.
- unsigned long pswpin;
- unsigned long pswpout;
- unsigned long pgmajfault;
+ unsigned long pswpin = 0;
+ unsigned long pswpout = 0;
+ unsigned long pgmajfault = 0;
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_CHROMEOS)
- int shmem;
- int slab;
+ int shmem = 0;
+ int slab = 0;
// Gem data will be -1 if not supported.
- int gem_objects;
- long long gem_size;
+ int gem_objects = -1;
+ long long gem_size = -1;
#endif // defined(OS_CHROMEOS)
+
+#if defined(OS_MACOSX)
+ int speculative = 0;
+ int file_backed = 0;
+ int purgeable = 0;
+#endif // defined(OS_MACOSX)
};
// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index 5d542cc675..ba0dfa76b9 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -311,6 +311,32 @@ int ProcessMetrics::GetOpenFdCount() const {
return total_count;
}
+
+int ProcessMetrics::GetOpenFdSoftLimit() const {
+ // Use /proc/<pid>/limits to read the open fd limit.
+ FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
+
+ std::string limits_contents;
+ if (!ReadFileToString(fd_path, &limits_contents))
+ return -1;
+
+ for (const auto& line :
+ base::SplitStringPiece(limits_contents, "\n", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY)) {
+ if (line.starts_with("Max open files")) {
+ auto tokens = base::SplitStringPiece(line, " ", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ if (tokens.size() > 3) {
+ int limit = -1;
+ if (StringToInt(tokens[3], &limit))
+ return limit;
+ return -1;
+ }
+ }
+ }
+ return -1;
+}
+
#endif // defined(OS_LINUX)
ProcessMetrics::ProcessMetrics(ProcessHandle process)
@@ -532,45 +558,12 @@ const size_t kDiskWeightedIOTime = 13;
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() {
- total = 0;
- free = 0;
-#if defined(OS_LINUX)
- available = 0;
-#endif
- buffers = 0;
- cached = 0;
- active_anon = 0;
- inactive_anon = 0;
- active_file = 0;
- inactive_file = 0;
- swap_total = 0;
- swap_free = 0;
- dirty = 0;
-
- pswpin = 0;
- pswpout = 0;
- pgmajfault = 0;
-
-#ifdef OS_CHROMEOS
- shmem = 0;
- slab = 0;
- gem_objects = -1;
- gem_size = -1;
-#endif
-}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
std::unique_ptr<DictionaryValue> res(new DictionaryValue());
res->SetInteger("total", total);
res->SetInteger("free", free);
-#if defined(OS_LINUX)
res->SetInteger("available", available);
-#endif
res->SetInteger("buffers", buffers);
res->SetInteger("cached", cached);
res->SetInteger("active_anon", active_anon);
@@ -581,6 +574,7 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("swap_free", swap_free);
res->SetInteger("swap_used", swap_total - swap_free);
res->SetInteger("dirty", dirty);
+ res->SetInteger("reclaimable", reclaimable);
res->SetInteger("pswpin", pswpin);
res->SetInteger("pswpout", pswpout);
res->SetInteger("pgmajfault", pgmajfault);
@@ -628,10 +622,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->total;
else if (tokens[0] == "MemFree:")
target = &meminfo->free;
-#if defined(OS_LINUX)
else if (tokens[0] == "MemAvailable:")
target = &meminfo->available;
-#endif
else if (tokens[0] == "Buffers:")
target = &meminfo->buffers;
else if (tokens[0] == "Cached:")
@@ -650,6 +642,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->swap_free;
else if (tokens[0] == "Dirty:")
target = &meminfo->dirty;
+ else if (tokens[0] == "SReclaimable:")
+ target = &meminfo->reclaimable;
#if defined(OS_CHROMEOS)
// Chrome OS has a tweaked kernel that allows us to query Shmem, which is
// usually video memory otherwise invisible to the OS.
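
A possible caller of the new soft-limit accessor, pairing it with the
existing descriptor count to estimate headroom (the helper name and threshold
are made up):

    // Returns true when the process has consumed most of its fd soft limit.
    bool IsNearFdLimit(const base::ProcessMetrics& metrics) {
      int open_fds = metrics.GetOpenFdCount();
      int soft_limit = metrics.GetOpenFdSoftLimit();
      if (open_fds < 0 || soft_limit < 0)
        return false;  // One of the values was unavailable.
      return open_fds >= soft_limit * 9 / 10;  // Within 10% of the limit.
    }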
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index 51f5fd4e16..d94024f0c5 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -16,6 +16,7 @@
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_port.h"
#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
#include "base/sys_info.h"
#if !defined(TASK_POWER_INFO)
@@ -80,11 +81,6 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these (and spinning
// up ps to fetch each stats seems dangerous to put in a base api for anyone to
@@ -392,7 +388,6 @@ size_t GetSystemCommitCharge() {
return (data.active_count * PAGE_SIZE) / 1024;
}
-// On Mac, We only get total memory and free memory from the system.
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
struct host_basic_info hostinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
@@ -405,17 +400,25 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
- vm_statistics_data_t vm_info;
- count = HOST_VM_INFO_COUNT;
+ vm_statistics64_data_t vm_info;
+ count = HOST_VM_INFO64_COUNT;
- if (host_statistics(host.get(), HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
+ if (host_statistics64(host.get(), HOST_VM_INFO64,
+ reinterpret_cast<host_info64_t>(&vm_info),
+ &count) != KERN_SUCCESS) {
return false;
}
-
- meminfo->free = static_cast<int>(
- (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
+ DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+ static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
+ meminfo->free = saturated_cast<int>(
+ PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+ meminfo->speculative =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+ meminfo->file_backed =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+ meminfo->purgeable =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
return true;
}
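
Since the static_assert guarantees PAGE_SIZE is a multiple of 1024,
PAGE_SIZE / 1024 is exact and the page counts are scaled without an
intermediate overflow. A worked example, assuming 4096-byte pages:

    // PAGE_SIZE / 1024 == 4, so each page contributes 4 KB.
    // free = (free_count - speculative_count) * 4
    // e.g. free_count = 1000000, speculative_count = 50000
    //   -> (1000000 - 50000) * 4 = 3800000 KB (about 3.6 GiB)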
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index b0bd7ea80b..21ad8ceea9 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -17,6 +17,7 @@
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
#include "base/test/multiprocess_test.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
@@ -106,6 +107,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
std::string valid_input1 =
"MemTotal: 3981504 kB\n"
"MemFree: 140764 kB\n"
+ "MemAvailable: 535413 kB\n"
"Buffers: 116480 kB\n"
"Cached: 406160 kB\n"
"SwapCached: 21304 kB\n"
@@ -171,6 +173,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
EXPECT_EQ(meminfo.total, 3981504);
EXPECT_EQ(meminfo.free, 140764);
+ EXPECT_EQ(meminfo.available, 535413);
EXPECT_EQ(meminfo.buffers, 116480);
EXPECT_EQ(meminfo.cached, 406160);
EXPECT_EQ(meminfo.active_anon, 2972352);
@@ -180,18 +183,29 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_EQ(meminfo.swap_total, 5832280);
EXPECT_EQ(meminfo.swap_free, 3672368);
EXPECT_EQ(meminfo.dirty, 184);
+ EXPECT_EQ(meminfo.reclaimable, 30936);
#if defined(OS_CHROMEOS)
EXPECT_EQ(meminfo.shmem, 140204);
EXPECT_EQ(meminfo.slab, 54212);
#endif
+ EXPECT_EQ(355725,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ // Simulate as if there is no MemAvailable.
+ meminfo.available = 0;
+ EXPECT_EQ(374448,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ meminfo = {};
EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
EXPECT_EQ(meminfo.total, 255908);
EXPECT_EQ(meminfo.free, 69936);
+ EXPECT_EQ(meminfo.available, 0);
EXPECT_EQ(meminfo.buffers, 15812);
EXPECT_EQ(meminfo.cached, 115124);
EXPECT_EQ(meminfo.swap_total, 524280);
EXPECT_EQ(meminfo.swap_free, 524200);
EXPECT_EQ(meminfo.dirty, 4);
+ EXPECT_EQ(69936,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
}
TEST_F(SystemMetricsTest, ParseVmstat) {
@@ -341,15 +355,19 @@ TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
-#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
- defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
SystemMemoryInfoKB info;
EXPECT_TRUE(GetSystemMemoryInfo(&info));
// Ensure each field received a value.
EXPECT_GT(info.total, 0);
+#if defined(OS_WIN)
+ EXPECT_GT(info.avail_phys, 0);
+#else
EXPECT_GT(info.free, 0);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_GT(info.buffers, 0);
EXPECT_GT(info.cached, 0);
@@ -360,7 +378,9 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
// All the values should be less than the total amount of memory.
+#if !defined(OS_WIN)
EXPECT_LT(info.free, info.total);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_LT(info.buffers, info.total);
EXPECT_LT(info.cached, info.total);
@@ -370,6 +390,10 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
EXPECT_LT(info.inactive_file, info.total);
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_MACOSX) || defined(OS_IOS)
+ EXPECT_GT(info.file_backed, 0);
+#endif
+
#if defined(OS_CHROMEOS)
// Chrome OS exposes shmem.
EXPECT_GT(info.shmem, 0);
@@ -378,8 +402,8 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
// and gem_size cannot be tested here.
#endif
}
-#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
- // defined(OS_LINUX) || defined(OS_ANDROID)
+#endif // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+ // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST(ProcessMetricsTest, ParseProcStatCPU) {
@@ -494,13 +518,13 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
const FilePath temp_path = temp_dir.GetPath();
CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
- Process child = SpawnMultiProcessTestChild(
+ SpawnChildResult spawn_child = SpawnMultiProcessTestChild(
ChildMainString, child_command_line, LaunchOptions());
- ASSERT_TRUE(child.IsValid());
+ ASSERT_TRUE(spawn_child.process.IsValid());
WaitForEvent(temp_path, kSignalClosed);
std::unique_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(child.Handle()));
+ ProcessMetrics::CreateProcessMetrics(spawn_child.process.Handle()));
// Try a couple times to observe the child with 0 fds open.
// Sometimes we've seen that the child can have 1 remaining
// fd shortly after receiving the signal. Potentially this
@@ -514,7 +538,7 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
}
EXPECT_EQ(0, open_fds);
- ASSERT_TRUE(child.Terminate(0, true));
+ ASSERT_TRUE(spawn_child.process.Terminate(0, true));
}
#endif // !defined(__ANDROID__)
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index dc11ebc3f1..fa19ae50c1 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -4,14 +4,17 @@
#include "base/sequenced_task_runner.h"
+#include <utility>
+
#include "base/bind.h"
namespace base {
bool SequencedTaskRunner::PostNonNestableTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
+ Closure task) {
+ return PostNonNestableDelayedTask(from_here, std::move(task),
+ base::TimeDelta());
}
bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index 6b2726ed4f..b92bd997e1 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_SEQUENCED_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/sequenced_task_runner_helpers.h"
#include "base/task_runner.h"
@@ -109,11 +110,11 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
// below.
bool PostNonNestableTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ Closure task);
virtual bool PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
base::TimeDelta delay) = 0;
// Submits a non-nestable task to delete the given object. Returns
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index eaec14de5d..5333640fee 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -245,6 +245,9 @@ template <typename STRING_TYPE> class BasicStringPiece {
return r;
}
+ // This is the style of conversion preferred by std::string_view in C++17.
+ explicit operator STRING_TYPE() const { return as_string(); }
+
STRING_TYPE as_string() const {
// std::string doesn't like to take a NULL pointer even with a 0 size.
return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
diff --git a/base/strings/string_piece_unittest.cc b/base/strings/string_piece_unittest.cc
index f05aa152b5..7dfd71116b 100644
--- a/base/strings/string_piece_unittest.cc
+++ b/base/strings/string_piece_unittest.cc
@@ -295,6 +295,8 @@ TYPED_TEST(CommonStringPieceTest, CheckFind) {
ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
+ ASSERT_EQ(a.rfind(d), static_cast<size_t>(TypeParam(a).rfind(TypeParam())));
+ ASSERT_EQ(a.rfind(e), TypeParam(a).rfind(TypeParam()));
ASSERT_EQ(a.rfind(d, 12), 12U);
ASSERT_EQ(a.rfind(e, 17), 17U);
ASSERT_EQ(a.rfind(g), Piece::npos);
@@ -518,6 +520,12 @@ TYPED_TEST(CommonStringPieceTest, CheckCustom) {
ASSERT_TRUE(c == s3);
TypeParam s4(e.as_string());
ASSERT_TRUE(s4.empty());
+
+ // operator STRING_TYPE()
+ TypeParam s5(TypeParam(a).c_str(), 7); // Note, has an embedded NULL
+ ASSERT_TRUE(c == s5);
+ TypeParam s6(e);
+ ASSERT_TRUE(s6.empty());
}
TEST(StringPieceTest, CheckCustom) {
@@ -591,7 +599,11 @@ TYPED_TEST(CommonStringPieceTest, CheckNULL) {
ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
ASSERT_EQ(s.size(), 0U);
- TypeParam str = s.as_string();
+ TypeParam str(s);
+ ASSERT_EQ(str.length(), 0U);
+ ASSERT_EQ(str, TypeParam());
+
+ str = s.as_string();
ASSERT_EQ(str.length(), 0U);
ASSERT_EQ(str, TypeParam());
}
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 761965f03a..e8caffeec3 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -112,6 +112,9 @@ class BASE_EXPORT WaitableEvent {
// You MUST NOT delete any of the WaitableEvent objects while this wait is
// happening, however WaitMany's return "happens after" the |Signal| call
// that caused it has completed, like |Wait|.
+ //
+ // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+ // index among them is returned.
static size_t WaitMany(WaitableEvent** waitables, size_t count);
// For asynchronous waiting, see WaitableEventWatcher
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 5dfff468ad..846fa06700 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <algorithm>
+#include <limits>
#include <vector>
#include "base/debug/activity_tracker.h"
@@ -266,12 +267,10 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
SyncWaiter sw;
const size_t r = EnqueueMany(&waitables[0], count, &sw);
- if (r) {
+ if (r < count) {
// One of the events is already signaled. The SyncWaiter has not been
- // enqueued anywhere. EnqueueMany returns the count of remaining waitables
- // when the signaled one was seen, so the index of the signaled event is
- // @count - @r.
- return waitables[count - r].second;
+ // enqueued anywhere.
+ return waitables[r].second;
}
// At this point, we hold the locks on all the WaitableEvents and we have
@@ -319,38 +318,50 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
}
// -----------------------------------------------------------------------------
-// If return value == 0:
+// If return value == count:
// The locks of the WaitableEvents have been taken in order and the Waiter has
// been enqueued in the wait-list of each. None of the WaitableEvents are
// currently signaled
// else:
// None of the WaitableEvent locks are held. The Waiter has not been enqueued
-// in any of them and the return value is the index of the first WaitableEvent
-// which was signaled, from the end of the array.
+// in any of them and the return value is the index of the WaitableEvent which
+// was signaled with the lowest input index from the original WaitMany call.
// -----------------------------------------------------------------------------
// static
-size_t WaitableEvent::EnqueueMany
- (std::pair<WaitableEvent*, size_t>* waitables,
- size_t count, Waiter* waiter) {
- if (!count)
- return 0;
-
- waitables[0].first->kernel_->lock_.Acquire();
- if (waitables[0].first->kernel_->signaled_) {
- if (!waitables[0].first->kernel_->manual_reset_)
- waitables[0].first->kernel_->signaled_ = false;
- waitables[0].first->kernel_->lock_.Release();
- return count;
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+ size_t count,
+ Waiter* waiter) {
+ size_t winner = count;
+ size_t winner_index = count;
+ for (size_t i = 0; i < count; ++i) {
+ auto& kernel = waitables[i].first->kernel_;
+ kernel->lock_.Acquire();
+ if (kernel->signaled_ && waitables[i].second < winner) {
+ winner = waitables[i].second;
+ winner_index = i;
}
+ }
- const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
- if (r) {
- waitables[0].first->kernel_->lock_.Release();
- } else {
- waitables[0].first->Enqueue(waiter);
+ // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+ // and return.
+ if (winner == count) {
+ for (size_t i = 0; i < count; ++i)
+ waitables[i].first->Enqueue(waiter);
+ return count;
+ }
+
+ // Unlock in reverse order and possibly clear the chosen winner's signal
+ // before returning its index.
+ for (auto* w = waitables + count - 1; w >= waitables; --w) {
+ auto& kernel = w->first->kernel_;
+ if (w->second == winner) {
+ if (!kernel->manual_reset_)
+ kernel->signaled_ = false;
}
+ kernel->lock_.Release();
+ }
- return r;
+ return winner_index;
}
// -----------------------------------------------------------------------------
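
A small sketch of the semantics documented above: when several events are
signaled at once, WaitMany now reports the lowest input index (event setup
mirrors the unit tests further below):

    base::WaitableEvent a(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                          base::WaitableEvent::InitialState::NOT_SIGNALED);
    base::WaitableEvent b(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                          base::WaitableEvent::InitialState::NOT_SIGNALED);
    base::WaitableEvent* waitables[] = {&a, &b};

    b.Signal();
    a.Signal();
    // Both are signaled; the lowest index wins and |a| is auto-reset.
    size_t index = base::WaitableEvent::WaitMany(waitables, 2);
    // index == 0; a second call would now return 1.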
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index c0e280aa97..3aa1af1619 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <algorithm>
+
#include "base/compiler_specific.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -78,6 +80,42 @@ TEST(WaitableEventTest, WaitManyShortcut) {
delete ev[i];
}
+TEST(WaitableEventTest, WaitManyLeftToRight) {
+ WaitableEvent* ev[5];
+ for (size_t i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
+
+ // Test for consistent left-to-right return behavior across all permutations
+ // of the input array. This is to verify that only the indices -- and not
+ // the WaitableEvents' addresses -- are relevant in determining who wins when
+ // multiple events are signaled.
+
+ std::sort(ev, ev + 5);
+ do {
+ ev[0]->Signal();
+ ev[1]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[2]->Signal();
+ EXPECT_EQ(1u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[3]->Signal();
+ ev[4]->Signal();
+ ev[0]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(3u, WaitableEvent::WaitMany(ev, 5));
+ ev[2]->Signal();
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(4u, WaitableEvent::WaitMany(ev, 5));
+ } while (std::next_permutation(ev, ev + 5));
+
+ for (size_t i = 0; i < 5; ++i)
+ delete ev[i];
+}
+
class WaitableEventSignaler : public PlatformThread::Delegate {
public:
WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
diff --git a/base/sys_info.h b/base/sys_info.h
index e35feff735..18bdaf0096 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -13,11 +13,18 @@
#include "base/base_export.h"
#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
+namespace debug {
+FORWARD_DECLARE_TEST(SystemMetricsTest, ParseMeminfo);
+}
+
+struct SystemMemoryInfoKB;
+
class BASE_EXPORT SysInfo {
public:
// Return the number of logical processors/cores on the current machine.
@@ -28,6 +35,9 @@ class BASE_EXPORT SysInfo {
// Return the number of bytes of currently available physical memory on the
// machine.
+ // (That is, the amount of memory that can be allocated without any
+ // significant impact on the system. Allocating it may cause the OS to
+ // free inactive and/or speculative file-backed memory.)
static int64_t AmountOfAvailablePhysicalMemory();
// Return the number of bytes of virtual memory of this process. A return
@@ -70,8 +80,6 @@ class BASE_EXPORT SysInfo {
static std::string OperatingSystemVersion();
// Retrieves detailed numeric values for the OS version.
- // TODO(port): Implement a Linux version of this method and enable the
- // corresponding unit test.
// DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
// for OS version-specific feature checks and workarounds. If you must use
// an OS version check instead of a feature check, use the base::mac::IsOS*
@@ -147,6 +155,15 @@ class BASE_EXPORT SysInfo {
// Low-end device refers to devices having less than 512M memory in the
// current implementation.
static bool IsLowEndDevice();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
+ FRIEND_TEST_ALL_PREFIXES(debug::SystemMetricsTest, ParseMeminfo);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ static int64_t AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& meminfo);
+#endif
};
} // namespace base
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 298d245ecf..0cd05b363a 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -13,6 +13,7 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
+#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_info_internal.h"
#include "build/build_config.h"
@@ -42,13 +43,29 @@ base::LazyInstance<
namespace base {
// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+ return g_lazy_physical_memory.Get().value();
+}
+
+// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- return AmountOfMemory(_SC_AVPHYS_PAGES);
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
+ return 0;
+ return AmountOfAvailablePhysicalMemory(info);
}
// static
-int64_t SysInfo::AmountOfPhysicalMemory() {
- return g_lazy_physical_memory.Get().value();
+int64_t SysInfo::AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& info) {
+ // See details here:
+ // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ // The fallback logic (when there is no MemAvailable) would be more precise
+ // if we had info about zones watermarks (/proc/zoneinfo).
+ int64_t res_kb = info.available != 0
+ ? info.available - info.active_file
+ : info.free + info.reclaimable + info.inactive_file;
+ return res_kb * 1024;
}
// static
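
Plugging in the figures from the ParseMeminfo unit-test fixture makes both
branches concrete (active_file and inactive_file are implied by the expected
totals, since those fixture lines are not shown here):

    // MemAvailable path: 535413 (available) - 179688 (active_file) = 355725 KB
    // Fallback path:     140764 (free) + 30936 (reclaimable)
    //                    + 202748 (inactive_file)                  = 374448 KB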
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
index aab1103d4c..1141bd5577 100644
--- a/base/sys_info_mac.mm
+++ b/base/sys_info_mac.mm
@@ -19,6 +19,7 @@
#include "base/mac/scoped_mach_port.h"
#import "base/mac/sdk_forward_declarations.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
namespace base {
@@ -83,20 +84,12 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- base::mac::ScopedMachSendRight host(mach_host_self());
- vm_statistics_data_t vm_info;
- mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
-
- if (host_statistics(host.get(),
- HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
- NOTREACHED();
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
return 0;
- }
-
- return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
- PAGE_SIZE;
+ // Ideally we would also count inactive file-backed memory, but Mac OS
+ // does not expose that information.
+ return static_cast<int64_t>(info.free + info.speculative) * 1024;
}
// static
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index cbdfa3f7a9..7d3714663b 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -183,6 +183,30 @@ std::string SysInfo::OperatingSystemVersion() {
}
#endif
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+ int32_t* minor_version,
+ int32_t* bugfix_version) {
+ struct utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ *major_version = 0;
+ *minor_version = 0;
+ *bugfix_version = 0;
+ return;
+ }
+ int num_read = sscanf(info.release, "%d.%d.%d", major_version, minor_version,
+ bugfix_version);
+ if (num_read < 1)
+ *major_version = 0;
+ if (num_read < 2)
+ *minor_version = 0;
+ if (num_read < 3)
+ *bugfix_version = 0;
+}
+#endif
+
// static
std::string SysInfo::OperatingSystemArchitecture() {
struct utsname info;
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index c3b8507707..94b5a84971 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -6,6 +6,7 @@
#include "base/environment.h"
#include "base/files/file_util.h"
+#include "base/process/process_metrics.h"
#include "base/sys_info.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -13,46 +14,71 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
-typedef PlatformTest SysInfoTest;
-using base::FilePath;
+namespace base {
+
+using SysInfoTest = PlatformTest;
TEST_F(SysInfoTest, NumProcs) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
+ EXPECT_GE(SysInfo::NumberOfProcessors(), 1);
}
TEST_F(SysInfoTest, AmountOfMem) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemory(), 0);
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemoryMB(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemory(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemoryMB(), 0);
// The maximal amount of virtual memory can be zero, which means unlimited.
- EXPECT_GE(base::SysInfo::AmountOfVirtualMemory(), 0);
+ EXPECT_GE(SysInfo::AmountOfVirtualMemory(), 0);
}
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST_F(SysInfoTest, AmountOfAvailablePhysicalMemory) {
+ // Note: info is in _K_bytes.
+ SystemMemoryInfoKB info;
+ ASSERT_TRUE(GetSystemMemoryInfo(&info));
+ EXPECT_GT(info.free, 0);
+
+ if (info.available != 0) {
+ // If there is MemAvailable from kernel.
+ EXPECT_LT(info.available, info.total);
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.available);
+ // Simulate as if there is no MemAvailable.
+ info.available = 0;
+ }
+
+ // There is no MemAvailable. Check the fallback logic.
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.total);
+}
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
}
TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
}
-#if defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
int32_t os_major_version = -1;
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_GT(os_major_version, -1);
EXPECT_GT(os_minor_version, -1);
EXPECT_GT(os_bugfix_version, -1);
@@ -60,18 +86,18 @@ TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
#endif
TEST_F(SysInfoTest, Uptime) {
- base::TimeDelta up_time_1 = base::SysInfo::Uptime();
+ TimeDelta up_time_1 = SysInfo::Uptime();
// Uptime() is implemented internally using TimeTicks::Now(), which documents
// system resolution as being 1-15ms. Sleep a little longer than that.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
- base::TimeDelta up_time_2 = base::SysInfo::Uptime();
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+ TimeDelta up_time_2 = SysInfo::Uptime();
EXPECT_GT(up_time_1.InMicroseconds(), 0);
EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
}
#if defined(OS_MACOSX) && !defined(OS_IOS)
TEST_F(SysInfoTest, HardwareModelName) {
- std::string hardware_model = base::SysInfo::HardwareModelName();
+ std::string hardware_model = SysInfo::HardwareModelName();
EXPECT_FALSE(hardware_model.empty());
}
#endif
@@ -85,10 +111,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbers) {
const char kLsbRelease[] =
"FOO=1234123.34.5\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -101,10 +127,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbersFirst) {
const char kLsbRelease[] =
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
"FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -115,10 +141,10 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
const char kLsbRelease[] = "FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(0, os_major_version);
EXPECT_EQ(0, os_minor_version);
EXPECT_EQ(0, os_bugfix_version);
@@ -127,43 +153,45 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
// Use a fake time that can be safely displayed as a string.
- const base::Time lsb_release_time(base::Time::FromDoubleT(12345.6));
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
- base::Time parsed_lsb_release_time = base::SysInfo::GetLsbReleaseTime();
+ const Time lsb_release_time(Time::FromDoubleT(12345.6));
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+ Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
parsed_lsb_release_time.ToDoubleT());
}
TEST_F(SysInfoTest, IsRunningOnChromeOS) {
- base::SysInfo::SetChromeOSVersionInfoForTest("", base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest("", Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease1[] =
"CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease2[] =
"CHROMEOS_RELEASE_NAME=Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease3[] =
"CHROMEOS_RELEASE_NAME=Chromium OS\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
}
TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
}
#endif // OS_CHROMEOS
+
+} // namespace base
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 35c0a23274..8502510eb0 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -23,7 +23,7 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override;
+ Closure task) override;
// Non-owning.
TaskRunner* destination_;
@@ -36,15 +36,15 @@ PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
bool PostTaskAndReplyTaskRunner::PostTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return destination_->PostTask(from_here, task);
+ Closure task) {
+ return destination_->PostTask(from_here, std::move(task));
}
} // namespace
bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostDelayedTask(from_here, task, base::TimeDelta());
+ Closure task) {
+ return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
}
bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
diff --git a/base/task_runner.h b/base/task_runner.h
index be3039d372..d6a387109a 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -61,8 +61,7 @@ class BASE_EXPORT TaskRunner
// will not be run.
//
// Equivalent to PostDelayedTask(from_here, task, 0).
- bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ bool PostTask(const tracked_objects::Location& from_here, Closure task);
// Like PostTask, but tries to run the posted task only after
// |delay_ms| has passed.
@@ -70,7 +69,7 @@ class BASE_EXPORT TaskRunner
// It is valid for an implementation to ignore |delay_ms|; that is,
// to have PostDelayedTask behave the same as PostTask.
virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
base::TimeDelta delay) = 0;
// Returns true if the current thread is a thread on which a task
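
With Closure now taken by value, a caller can std::move() a one-shot callback
into PostTask instead of paying for a copy; a minimal sketch (DoWork is made
up):

    void DoWork();

    void PostWork(base::TaskRunner* runner) {
      base::Closure task = base::Bind(&DoWork);
      // Moving hands the callback's reference to the runner without a copy.
      runner->PostTask(FROM_HERE, std::move(task));
    }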
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index 3780c16dcb..44aaa6016d 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -4,22 +4,28 @@
#include "base/task_scheduler/task.h"
+#include <utility>
+
+#include "base/critical_closure.h"
+
namespace base {
namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ Closure task,
const TaskTraits& traits,
TimeDelta delay)
- : PendingTask(posted_from,
- task,
- delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
- false), // Not nestable.
+ : PendingTask(
+ posted_from,
+ traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN
+ ? MakeCriticalClosure(std::move(task))
+ : std::move(task),
+ delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+ false), // Not nestable.
// Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
// being scheduled by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
- traits(!delay.is_zero() &&
- traits.shutdown_behavior() ==
- TaskShutdownBehavior::BLOCK_SHUTDOWN
+ traits(!delay.is_zero() && traits.shutdown_behavior() ==
+ TaskShutdownBehavior::BLOCK_SHUTDOWN
? TaskTraits(traits).WithShutdownBehavior(
TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
: traits),
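The rewritten Task constructor moves `task` inside a conditional expression. Since only one arm of `?:` is evaluated, writing `std::move(task)` in both arms moves from the parameter exactly once. A sketch of the same pattern in isolation, with a hypothetical stand-in for MakeCriticalClosure():

    #include <functional>
    #include <utility>

    using Closure = std::function<void()>;

    // Stand-in for base::MakeCriticalClosure(); the real one keeps the
    // process alive while the wrapped task runs.
    Closure MakeCriticalClosure(Closure task) {
      return [t = std::move(task)] { t(); };
    }

    Closure WrapIfBlocking(Closure task, bool block_shutdown) {
      // Only one arm of ?: is evaluated, so |task| is moved from once.
      return block_shutdown ? MakeCriticalClosure(std::move(task))
                            : std::move(task);
    }

    int main() {
      Closure c = WrapIfBlocking([] {}, /*block_shutdown=*/true);
      c();
    }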
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index c5b9bdb53b..1f3b775a23 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -6,7 +6,7 @@
#define BASE_TASK_SCHEDULER_TASK_H_
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -28,7 +28,7 @@ struct BASE_EXPORT Task : public PendingTask {
// behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
// automatically adjusted to SKIP_ON_SHUTDOWN.
Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ Closure task,
const TaskTraits& traits,
TimeDelta delay);
~Task();
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 844707ebd1..a1c47de5a2 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -353,7 +353,6 @@ if (is_android) {
]
srcjar_deps = [ ":test_support_java_aidl" ]
java_files = [
- "android/java/src/org/chromium/base/FileDescriptorInfo.java",
"android/java/src/org/chromium/base/MainReturnCodeResult.java",
"android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
"android/java/src/org/chromium/base/MultiprocessTestClientService.java",
@@ -367,7 +366,10 @@ if (is_android) {
android_aidl("test_support_java_aidl") {
testonly = true
- import_include = [ "android/java/src" ]
+ import_include = [
+ "android/java/src",
+ "//base/android/java/src",
+ ]
sources = [
"android/java/src/org/chromium/base/ITestClient.aidl",
]
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index fcc4d123ed..c8fd3eddad 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -13,7 +13,7 @@
namespace base {
#if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
-Process SpawnMultiProcessTestChild(
+SpawnChildResult SpawnMultiProcessTestChild(
const std::string& procname,
const CommandLine& base_command_line,
const LaunchOptions& options) {
@@ -24,7 +24,9 @@ Process SpawnMultiProcessTestChild(
if (!command_line.HasSwitch(switches::kTestChildProcess))
command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
- return LaunchProcess(command_line, options);
+ SpawnChildResult result;
+ result.process = LaunchProcess(command_line, options);
+ return result;
}
bool WaitForMultiprocessTestChildExit(const Process& process,
@@ -54,7 +56,7 @@ MultiProcessTest::MultiProcessTest() {
// Don't compile on Arc++.
#if 0
-Process MultiProcessTest::SpawnChild(const std::string& procname) {
+SpawnChildResult MultiProcessTest::SpawnChild(const std::string& procname) {
LaunchOptions options;
#if defined(OS_WIN)
options.start_hidden = true;
@@ -62,7 +64,7 @@ Process MultiProcessTest::SpawnChild(const std::string& procname) {
return SpawnChildWithOptions(procname, options);
}
-Process MultiProcessTest::SpawnChildWithOptions(
+SpawnChildResult MultiProcessTest::SpawnChildWithOptions(
const std::string& procname,
const LaunchOptions& options) {
return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index bf9663759e..f0027d9458 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -17,6 +17,17 @@ namespace base {
class CommandLine;
+struct SpawnChildResult {
+ SpawnChildResult() {}
+ SpawnChildResult(SpawnChildResult&& other) = default;
+
+ SpawnChildResult& operator=(SpawnChildResult&& other) = default;
+
+ Process process;
+
+ DISALLOW_COPY_AND_ASSIGN(SpawnChildResult);
+};
+
// Helpers to spawn a child for a multiprocess test and execute a designated
// function. Use these when you already have another base class for your test
// fixture, but you want (some) of your tests to be multiprocess (otherwise you
@@ -33,9 +44,10 @@ class CommandLine;
// // Maybe set some options (e.g., |start_hidden| on Windows)....
//
// // Start a child process and run |a_test_func|.
-// base::Process test_child_process =
+// SpawnChildResult result =
// base::SpawnMultiProcessTestChild("a_test_func", command_line,
// options);
+// base::Process test_child_process = std::move(result.process);
//
// // Do stuff involving |test_child_process| and the child process....
//
@@ -61,10 +73,9 @@ class CommandLine;
// |command_line| should be as provided by
// |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments
// added. Note: On Windows, you probably want to set |options.start_hidden|.
-Process SpawnMultiProcessTestChild(
- const std::string& procname,
- const CommandLine& command_line,
- const LaunchOptions& options);
+SpawnChildResult SpawnMultiProcessTestChild(const std::string& procname,
+ const CommandLine& command_line,
+ const LaunchOptions& options);
// Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you
// may add any flags needed for your child process.
@@ -121,13 +132,13 @@ class MultiProcessTest : public PlatformTest {
// }
//
// Returns the child process.
- Process SpawnChild(const std::string& procname);
+ SpawnChildResult SpawnChild(const std::string& procname);
// Run a child process using the given launch options.
//
// Note: On Windows, you probably want to set |options.start_hidden|.
- Process SpawnChildWithOptions(const std::string& procname,
- const LaunchOptions& options);
+ SpawnChildResult SpawnChildWithOptions(const std::string& procname,
+ const LaunchOptions& options);
// Set up the command line used to spawn the child process.
// Override this to add things to the command line (calling this first in the
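SpawnChildResult exists because base::Process is movable but not copyable, so the wrapper must default its move operations while suppressing copies. Roughly equivalent code without the DISALLOW_COPY_AND_ASSIGN macro (the Process stand-in below is hypothetical):

    #include <memory>
    #include <utility>

    // Stand-in for base::Process: movable but not copyable.
    struct Process {
      std::unique_ptr<int> handle;
    };

    struct SpawnChildResult {
      SpawnChildResult() = default;
      SpawnChildResult(SpawnChildResult&&) = default;
      SpawnChildResult& operator=(SpawnChildResult&&) = default;
      // What DISALLOW_COPY_AND_ASSIGN expands to, roughly:
      SpawnChildResult(const SpawnChildResult&) = delete;
      SpawnChildResult& operator=(const SpawnChildResult&) = delete;

      Process process;
    };

    int main() {
      SpawnChildResult r;
      SpawnChildResult moved = std::move(r);  // compiles: move is defaulted
      // SpawnChildResult copy = moved;       // would not compile
      (void)moved;
    }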
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index c74f013da1..a1b8fcbfc0 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -25,9 +25,10 @@ namespace base {
// - All options except |fds_to_remap| are ignored.
//
// NOTE: This MUST NOT run on the main thread of the NativeTest application.
-Process SpawnMultiProcessTestChild(const std::string& procname,
- const CommandLine& base_command_line,
- const LaunchOptions& options) {
+SpawnChildResult SpawnMultiProcessTestChild(
+ const std::string& procname,
+ const CommandLine& base_command_line,
+ const LaunchOptions& options) {
JNIEnv* env = android::AttachCurrentThread();
DCHECK(env);
@@ -54,7 +55,10 @@ Process SpawnMultiProcessTestChild(const std::string& procname,
android::ToJavaArrayOfStrings(env, command_line.argv());
jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
env, android::GetApplicationContext(), j_argv, fds);
- return Process(pid);
+
+ SpawnChildResult result;
+ result.process = Process(pid);
+ return result;
}
bool WaitForMultiprocessTestChildExit(const Process& process,
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
index f4bd7244b4..a431189231 100644
--- a/base/test/test_mock_time_task_runner.cc
+++ b/base/test/test_mock_time_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/test/test_mock_time_task_runner.h"
+#include <utility>
+
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
@@ -79,7 +81,7 @@ struct TestMockTimeTaskRunner::TestOrderedPendingTask
: public base::TestPendingTask {
TestOrderedPendingTask();
TestOrderedPendingTask(const tracked_objects::Location& location,
- const Closure& task,
+ Closure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
@@ -104,12 +106,16 @@ TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
const tracked_objects::Location& location,
- const Closure& task,
+ Closure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
TestNestability nestability)
- : base::TestPendingTask(location, task, post_time, delay, nestability),
+ : base::TestPendingTask(location,
+ std::move(task),
+ post_time,
+ delay,
+ nestability),
ordinal(ordinal) {}
TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() {
@@ -234,20 +240,20 @@ bool TestMockTimeTaskRunner::RunsTasksOnCurrentThread() const {
bool TestMockTimeTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
AutoLock scoped_lock(tasks_lock_);
- tasks_.push(TestOrderedPendingTask(from_here, task, now_ticks_, delay,
- next_task_ordinal_++,
+ tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_,
+ delay, next_task_ordinal_++,
TestPendingTask::NESTABLE));
return true;
}
bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
- return PostDelayedTask(from_here, task, delay);
+ return PostDelayedTask(from_here, std::move(task), delay);
}
bool TestMockTimeTaskRunner::IsElapsingStopped() {
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
index 54ebbdb7a8..5c61a36f69 100644
--- a/base/test/test_mock_time_task_runner.h
+++ b/base/test/test_mock_time_task_runner.h
@@ -12,6 +12,7 @@
#include <queue>
#include <vector>
+#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
@@ -140,10 +141,10 @@ class TestMockTimeTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner:
bool RunsTasksOnCurrentThread() const override;
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
protected:
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 98bc0179b8..63bdfffacd 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <string>
-
#include "base/test/test_pending_task.h"
+#include <string>
+#include <utility>
+
namespace base {
TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
-TestPendingTask::TestPendingTask(
- const tracked_objects::Location& location,
- const Closure& task,
- TimeTicks post_time,
- TimeDelta delay,
- TestNestability nestability)
+TestPendingTask::TestPendingTask(const tracked_objects::Location& location,
+ Closure task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ TestNestability nestability)
: location(location),
- task(task),
+ task(std::move(task)),
post_time(post_time),
delay(delay),
nestability(nestability) {}
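Note the `task(std::move(task))` form in the constructor above: inside a mem-initializer the name `task` resolves to the constructor parameter, which is then moved into the member of the same name. A self-contained sketch, again with std::function standing in for Closure:

    #include <functional>
    #include <utility>

    using Closure = std::function<void()>;

    struct PendingTask {
      // |task| in the mem-initializer names the parameter, which is moved
      // into the member of the same name.
      explicit PendingTask(Closure task) : task(std::move(task)) {}
      Closure task;
    };

    int main() {
      PendingTask p([] {});
      p.task();
    }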
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 42f3f42c7b..4497ba18ca 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -23,7 +23,7 @@ struct TestPendingTask {
TestPendingTask();
TestPendingTask(TestPendingTask&& other);
TestPendingTask(const tracked_objects::Location& location,
- const Closure& task,
+ Closure task,
TimeTicks post_time,
TimeDelta delay,
TestNestability nestability);
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index 090a72e96a..df0334097b 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/test/test_simple_task_runner.h"
+#include <utility>
+
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -16,23 +18,23 @@ TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
bool TestSimpleTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
- pending_tasks_.push_back(
- TestPendingTask(from_here, task, TimeTicks(), delay,
- TestPendingTask::NESTABLE));
+ pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+ TimeTicks(), delay,
+ TestPendingTask::NESTABLE));
return true;
}
bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
- pending_tasks_.push_back(
- TestPendingTask(from_here, task, TimeTicks(), delay,
- TestPendingTask::NON_NESTABLE));
+ pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+ TimeTicks(), delay,
+ TestPendingTask::NON_NESTABLE));
return true;
}
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index d089ba8a0b..ac609f1f7f 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -7,6 +7,7 @@
#include <deque>
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
@@ -43,10 +44,10 @@ class TestSimpleTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation.
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index 696b668a4c..a02c32ec8c 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -34,7 +34,7 @@ class BASE_EXPORT PostTaskAndReplyImpl {
private:
virtual bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) = 0;
+ Closure task) = 0;
};
} // namespace internal
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index ce594cd7fb..866a8b3b3b 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -144,7 +144,7 @@ class SequencedWorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -168,13 +168,13 @@ SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
if (delay.is_zero()) {
- return pool_->PostWorkerTaskWithShutdownBehavior(
- from_here, task, shutdown_behavior_);
+ return pool_->PostWorkerTaskWithShutdownBehavior(from_here, std::move(task),
+ shutdown_behavior_);
}
- return pool_->PostDelayedWorkerTask(from_here, task, delay);
+ return pool_->PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -198,13 +198,13 @@ class SequencedWorkerPool::PoolSequencedTaskRunner
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
// SequencedTaskRunner implementation
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
private:
@@ -231,15 +231,16 @@ SequencedWorkerPool::PoolSequencedTaskRunner::
SequencedWorkerPool::PoolSequencedTaskRunner::
~PoolSequencedTaskRunner() = default;
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ Closure task,
+ TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
- token_, from_here, task, shutdown_behavior_);
+ token_, from_here, std::move(task), shutdown_behavior_);
}
- return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
+ return pool_->PostDelayedSequencedWorkerTask(token_, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PoolSequencedTaskRunner::
@@ -247,13 +248,13 @@ bool SequencedWorkerPool::PoolSequencedTaskRunner::
return pool_->IsRunningSequenceOnCurrentThread(token_);
}
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ Closure task,
+ TimeDelta delay) {
// There's no way to run nested tasks, so simply forward to
// PostDelayedTask.
- return PostDelayedTask(from_here, task, delay);
+ return PostDelayedTask(from_here, std::move(task), delay);
}
// Worker ---------------------------------------------------------------------
@@ -352,7 +353,7 @@ class SequencedWorkerPool::Inner {
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay);
bool RunsTasksOnCurrentThread() const;
@@ -696,7 +697,7 @@ bool SequencedWorkerPool::Inner::PostTask(
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
// TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
// revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
@@ -710,9 +711,9 @@ bool SequencedWorkerPool::Inner::PostTask(
sequenced.sequence_token_id = sequence_token.id_;
sequenced.shutdown_behavior = shutdown_behavior;
sequenced.posted_from = from_here;
- sequenced.task =
- shutdown_behavior == BLOCK_SHUTDOWN ?
- base::MakeCriticalClosure(task) : task;
+ sequenced.task = shutdown_behavior == BLOCK_SHUTDOWN
+ ? base::MakeCriticalClosure(std::move(task))
+ : std::move(task);
sequenced.time_to_run = TimeTicks::Now() + delay;
int create_thread_id = 0;
@@ -1043,7 +1044,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- task.task.Run();
+ std::move(task.task).Run();
stopwatch.Stop();
tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
@@ -1054,7 +1055,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// Also, do it before calling reset_running_task_info() so
// that sequence-checking from within the task's destructor
// still works.
- task.task = Closure();
+ DCHECK(!task.task);
this_worker->reset_running_task_info();
}
@@ -1562,71 +1563,71 @@ SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
bool SequencedWorkerPool::PostWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ Closure task) {
+ return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ Closure task) {
+ return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task) {
+ Closure task) {
DCHECK(!token_name.empty());
return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ from_here, std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
- return PostDelayedWorkerTask(from_here, task, delay);
+ return PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
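Two details in the ThreadLoop hunk deserve a note: the task is now invoked as `std::move(task.task).Run()`, and the explicit `task.task = Closure()` reset becomes `DCHECK(!task.task)`. This matches Chromium's move-only callback direction, where running a callback through an rvalue lets it consume its own bound state. A sketch with a hypothetical mini-callback (not base's Callback) that only allows rvalue Run():

    #include <cassert>
    #include <functional>
    #include <utility>

    class OnceClosure {
     public:
      explicit OnceClosure(std::function<void()> f) : f_(std::move(f)) {}
      explicit operator bool() const { return static_cast<bool>(f_); }
      void Run() && {            // callable only on rvalues
        auto f = std::move(f_);  // consume the bound state first
        f_ = nullptr;
        f();
      }

     private:
      std::function<void()> f_;
    };

    int main() {
      OnceClosure task([] {});
      std::move(task).Run();
      assert(!task);  // the run consumed the callback; no manual reset needed
    }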
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index 0d42de9138..8cdeb0b5db 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -12,7 +12,7 @@
#include <string>
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -275,8 +275,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
//
// Returns true if the task was posted successfully. This may fail during
// shutdown regardless of the specified ShutdownBehavior.
- bool PostWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ bool PostWorkerTask(const tracked_objects::Location& from_here, Closure task);
// Same as PostWorkerTask but allows a delay to be specified (although doing
// so changes the shutdown behavior). The task will be run after the given
@@ -288,13 +287,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// task will be guaranteed to run to completion before shutdown
// (BLOCK_SHUTDOWN semantics).
bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay);
// Same as PostWorkerTask but allows specification of the shutdown behavior.
bool PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
WorkerShutdown shutdown_behavior);
// Like PostWorkerTask above, but provides sequencing semantics. This means
@@ -310,13 +309,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// shutdown regardless of the specified ShutdownBehavior.
bool PostSequencedWorkerTask(SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task);
+ Closure task);
// Like PostSequencedWorkerTask above, but allows you to specify a named
// token, which saves an extra call to GetNamedSequenceToken.
bool PostNamedSequencedWorkerTask(const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task);
+ Closure task);
// Same as PostSequencedWorkerTask but allows a delay to be specified
// (although doing so changes the shutdown behavior). The task will be run
@@ -330,7 +329,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay);
// Same as PostSequencedWorkerTask but allows specification of the shutdown
@@ -338,12 +337,12 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
WorkerShutdown shutdown_behavior);
// TaskRunner implementation. Forwards to PostDelayedWorkerTask().
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index d47037d79a..bc313ce25b 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -27,8 +27,8 @@ class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override {
- return WorkerPool::PostTask(from_here, task, task_is_slow_);
+ Closure task) override {
+ return WorkerPool::PostTask(from_here, std::move(task), task_is_slow_);
}
bool task_is_slow_;
@@ -45,7 +45,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -56,7 +56,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// zero because non-zero delays are not supported.
bool PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
base::TimeDelta delay);
const bool tasks_are_slow_;
@@ -73,9 +73,9 @@ WorkerPoolTaskRunner::~WorkerPoolTaskRunner() {
bool WorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) {
- return PostDelayedTaskAssertZeroDelay(from_here, task, delay);
+ return PostDelayedTaskAssertZeroDelay(from_here, std::move(task), delay);
}
bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -84,11 +84,11 @@ bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
base::TimeDelta delay) {
DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
<< "WorkerPoolTaskRunner does not support non-zero delays";
- return WorkerPool::PostTask(from_here, task, tasks_are_slow_);
+ return WorkerPool::PostTask(from_here, std::move(task), tasks_are_slow_);
}
struct TaskRunnerHolder {
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index 865948e437..d97dbd6a69 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -32,7 +32,8 @@ class BASE_EXPORT WorkerPool {
// false if |task| could not be posted to a worker thread. Regardless of
// return value, ownership of |task| is transferred to the worker pool.
static bool PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow);
+ Closure task,
+ bool task_is_slow);
// Just like TaskRunner::PostTaskAndReply, except the destination
// for |task| is a worker thread and you can specify |task_is_slow| just
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 0e19a1a0fe..2133ba98e2 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <utility>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/lazy_instance.h"
@@ -47,7 +49,7 @@ class WorkerPoolImpl {
~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::Closure task,
bool task_is_slow);
private:
@@ -59,9 +61,9 @@ WorkerPoolImpl::WorkerPoolImpl()
kIdleSecondsBeforeExit)) {}
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::Closure task,
bool /*task_is_slow*/) {
- pool_->PostTask(from_here, task);
+ pool_->PostTask(from_here, std::move(task));
}
base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
@@ -112,9 +114,10 @@ void WorkerThread::ThreadMain() {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::Closure task,
bool task_is_slow) {
- g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
+ g_lazy_worker_pool.Pointer()->PostTask(from_here, std::move(task),
+ task_is_slow);
return true;
}
@@ -137,8 +140,8 @@ PosixDynamicThreadPool::~PosixDynamicThreadPool() {
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
- const base::Closure& task) {
- PendingTask pending_task(from_here, task);
+ base::Closure task) {
+ PendingTask pending_task(from_here, std::move(task));
AddTask(&pending_task);
}
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index d65ae8f8cf..cfa50c21dd 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -28,7 +28,7 @@
#include <queue>
#include <string>
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -51,8 +51,7 @@ class BASE_EXPORT PosixDynamicThreadPool
int idle_seconds_before_exit);
// Adds |task| to the thread pool.
- void PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ void PostTask(const tracked_objects::Location& from_here, Closure task);
// Worker thread method to wait for up to |idle_seconds_before_exit| for more
// work from the thread pool. Returns NULL if no work is available.
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 577f50043d..6317886b0d 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,7 +34,9 @@ const char kFilteringTraceConfig[] =
" \"excluded_categories\": [],"
" \"filter_args\": {},"
" \"filter_predicate\": \"heap_profiler_predicate\","
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]"
" }"
" ]"
"}";
@@ -122,6 +124,7 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
}
{
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("NotTesting"), kDonut);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
StackFrame frame_cc[] = {t, c, c};
AssertBacktraceEquals(frame_cc);
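The trace-config string above splices TRACE_DISABLED_BY_DEFAULT("Testing") directly between literals, relying on compile-time concatenation of adjacent string literals. A sketch, with an assumed (simplified) expansion of the macro:

    #include <cstdio>

    // Assumed expansion for illustration; the real macro in trace_event
    // headers likewise yields a single string literal.
    #define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name

    static const char kIncluded[] =
        "\"included_categories\": ["
        "\"*\","
        "\"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]";

    int main() {
      std::puts(kIncluded);  // ["*","disabled-by-default-Testing"]
    }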
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 5f5a80af3b..d78de9b548 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -54,10 +54,10 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self,
return ptr;
}
-void* HookllocAligned(const AllocatorDispatch* self,
- size_t alignment,
- size_t size,
- void* context) {
+void* HookAllocAligned(const AllocatorDispatch* self,
+ size_t alignment,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
@@ -129,7 +129,7 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
AllocatorDispatch g_allocator_hooks = {
&HookAlloc, /* alloc_function */
&HookZeroInitAlloc, /* alloc_zero_initialized_function */
- &HookllocAligned, /* alloc_aligned_function */
+ &HookAllocAligned, /* alloc_aligned_function */
&HookRealloc, /* realloc_function */
&HookFree, /* free_function */
&HookGetSizeEstimate, /* get_size_estimate_function */
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 7583763889..2692521c09 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -29,7 +29,8 @@ MemoryAllocatorDump::MemoryAllocatorDump(const std::string& absolute_name,
process_memory_dump_(process_memory_dump),
attributes_(new TracedValue),
guid_(guid),
- flags_(Flags::DEFAULT) {
+ flags_(Flags::DEFAULT),
+ size_(0) {
// The |absolute_name| cannot be empty.
DCHECK(!absolute_name.empty());
@@ -59,6 +60,8 @@ MemoryAllocatorDump::~MemoryAllocatorDump() {
void MemoryAllocatorDump::AddScalar(const char* name,
const char* units,
uint64_t value) {
+ if (strcmp(kNameSize, name) == 0)
+ size_ = value;
SStringPrintf(&string_conversion_buffer_, "%" PRIx64, value);
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeScalar);
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index c781f071bb..99ff114e5c 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
@@ -85,11 +86,21 @@ class BASE_EXPORT MemoryAllocatorDump {
TracedValue* attributes_for_testing() const { return attributes_.get(); }
private:
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ friend class MemoryDumpManager;
+ FRIEND_TEST_ALL_PREFIXES(MemoryAllocatorDumpTest, GetSize);
+
+ // Get the size for this dump.
+ // The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ uint64_t GetSize() const { return size_; }

+
const std::string absolute_name_;
ProcessMemoryDump* const process_memory_dump_; // Not owned (PMD owns this).
std::unique_ptr<TracedValue> attributes_;
MemoryAllocatorDumpGuid guid_;
int flags_; // See enum Flags.
+ uint64_t size_;
// A local buffer for Sprintf conversion on fastpath. Avoids allocating
// temporary strings on each AddScalar() call.
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index 1bf9715917..e1818f6eec 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -172,6 +172,16 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
pmd.AsValueInto(traced_value.get());
}
+TEST(MemoryAllocatorDumpTest, GetSize) {
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
+ MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 1);
+ dump->AddScalar("foo", MemoryAllocatorDump::kUnitsBytes, 2);
+ EXPECT_EQ(1u, dump->GetSize());
+}
+
// DEATH tests are not supported in Android / iOS.
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
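The new GetSize() test only works because AddScalar() now snoops on the well-known kNameSize entry and caches its value. A stripped-down sketch of that interception (hypothetical class; the real AddScalar also serializes every scalar into the attributes dictionary):

    #include <cstdint>
    #include <cstring>

    class MemoryAllocatorDump {
     public:
      static constexpr const char* kNameSize = "size";

      void AddScalar(const char* name, const char* units, uint64_t value) {
        (void)units;  // recorded alongside the value in the real class
        if (std::strcmp(kNameSize, name) == 0)
          size_ = value;  // cache the "size" scalar for GetSize()
        // ... serialize (name, units, value) as before ...
      }
      uint64_t GetSize() const { return size_; }

     private:
      uint64_t size_ = 0;
    };

    int main() {
      MemoryAllocatorDump dump;
      dump.AddScalar(MemoryAllocatorDump::kNameSize, "bytes", 1);
      dump.AddScalar("foo", "bytes", 2);  // ignored by the size cache
      return dump.GetSize() == 1 ? 0 : 1;
    }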
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index 5a54a773c5..a74b95634d 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -4,6 +4,9 @@
#include "base/trace_event/memory_dump_manager.h"
+#include <inttypes.h>
+#include <stdio.h>
+
#include <algorithm>
#include <utility>
@@ -17,6 +20,8 @@
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_piece.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
@@ -80,9 +85,12 @@ const char* const kStrictThreadCheckBlacklist[] = {
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
uint64_t dump_guid,
bool success) {
- TRACE_EVENT_NESTABLE_ASYNC_END1(
- MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
- TRACE_ID_MANGLE(dump_guid), "success", success);
+ char guid_str[20];
+ sprintf(guid_str, "0x%" PRIx64, dump_guid);
+ TRACE_EVENT_NESTABLE_ASYNC_END2(MemoryDumpManager::kTraceCategory,
+ "GlobalMemoryDump", TRACE_ID_LOCAL(dump_guid),
+ "dump_guid", TRACE_STR_COPY(guid_str),
+ "success", success);
if (!wrapped_callback.is_null()) {
wrapped_callback.Run(dump_guid, success);
@@ -155,9 +163,7 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
}
MemoryDumpManager::MemoryDumpManager()
- : delegate_(nullptr),
- is_coordinator_(false),
- memory_tracing_enabled_(0),
+ : memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
dumper_registrations_ignored_for_testing_(false),
heap_profiling_enabled_(false) {
@@ -214,14 +220,13 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
heap_profiling_enabled_ = true;
}
-void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
- bool is_coordinator) {
+void MemoryDumpManager::Initialize(
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate) {
{
AutoLock lock(lock_);
DCHECK(delegate);
DCHECK(!delegate_);
- delegate_ = delegate;
- is_coordinator_ = is_coordinator;
+ delegate_ = std::move(delegate);
EnableHeapProfilingIfNeeded();
}
@@ -243,11 +248,19 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
!(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
// Create trace config with heap profiling filter.
+ std::string filter_string = "*";
+ const char* const kFilteredCategories[] = {
+ TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
+ MemoryDumpManager::kTraceCategory};
+ for (const char* cat : kFilteredCategories)
+ filter_string = filter_string + "," + cat;
+ TraceConfigCategoryFilter category_filter;
+ category_filter.InitializeFromString(filter_string);
+
TraceConfig::EventFilterConfig heap_profiler_filter_config(
HeapProfilerEventFilter::kName);
- heap_profiler_filter_config.AddIncludedCategory("*");
- heap_profiler_filter_config.AddIncludedCategory(
- MemoryDumpManager::kTraceCategory);
+ heap_profiler_filter_config.SetCategoryFilter(category_filter);
+
TraceConfig::EventFilters filters;
filters.push_back(heap_profiler_filter_config);
TraceConfig filtering_trace_config;
@@ -421,7 +434,7 @@ void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
// registered. This handles the case where OnTraceLogEnabled() did not notify
// ready since no polling supported mdp has yet been registered.
if (dump_providers_for_polling_.size() == 1)
- dump_scheduler_->NotifyPollingSupported();
+ MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded();
}
void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
@@ -456,25 +469,16 @@ void MemoryDumpManager::RequestGlobalDump(
// Creates an async event to keep track of the global dump evolution.
// The |wrapped_callback| will generate the ASYNC_END event and then invoke
// the real |callback| provided by the caller.
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
- TRACE_ID_MANGLE(guid));
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(
+ kTraceCategory, "GlobalMemoryDump", TRACE_ID_LOCAL(guid), "dump_type",
+ MemoryDumpTypeToString(dump_type), "level_of_detail",
+ MemoryDumpLevelOfDetailToString(level_of_detail));
MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
- // Technically there is no need to grab the |lock_| here as the delegate is
- // long-lived and can only be set by Initialize(), which is locked and
- // necessarily happens before memory_tracing_enabled_ == true.
- // Not taking the |lock_|, though, is likely to make TSan barf and, at this point
- // (memory-infra is enabled) we're not in the fast-path anymore.
- MemoryDumpManagerDelegate* delegate;
- {
- AutoLock lock(lock_);
- delegate = delegate_;
- }
-
// The delegate will coordinate the IPC broadcast and at some point invoke
// CreateProcessDump() to get a dump for the current process.
MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
- delegate->RequestGlobalMemoryDump(args, wrapped_callback);
+ delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
}
void MemoryDumpManager::RequestGlobalDump(
@@ -483,10 +487,24 @@ void MemoryDumpManager::RequestGlobalDump(
RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
+bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
+ MemoryDumpProvider* provider) {
+ AutoLock lock(lock_);
+
+ for (const auto& info : dump_providers_) {
+ if (info->dump_provider == provider)
+ return true;
+ }
+ return false;
+}
+
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) {
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
- TRACE_ID_MANGLE(args.dump_guid));
+ char guid_str[20];
+ sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
+ TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
+ TRACE_STR_COPY(guid_str));
// If argument filter is enabled then only background mode dumps should be
// allowed. In case the trace config passed for background tracing session
@@ -515,14 +533,9 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
CHECK(!session_state_ ||
session_state_->IsDumpModeAllowed(args.level_of_detail));
- if (dump_scheduler_)
- dump_scheduler_->NotifyDumpTriggered();
+ MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered();
}
- TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
- TRACE_ID_MANGLE(args.dump_guid),
- TRACE_EVENT_FLAG_FLOW_OUT);
-
// Start the process dump. This involves task runner hops as specified by the
// MemoryDumpProvider(s) in RegisterDumpProvider()).
SetupNextMemoryDump(std::move(pmd_async_state));
@@ -666,11 +679,8 @@ void MemoryDumpManager::InvokeOnMemoryDump(
if (should_dump) {
// Invoke the dump provider.
- TRACE_EVENT_WITH_FLOW1(kTraceCategory,
- "MemoryDumpManager::InvokeOnMemoryDump",
- TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
- TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
- "dump_provider.name", mdpinfo->name);
+ TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
+ "dump_provider.name", mdpinfo->name);
// A stack allocated string with dump provider name is useful to debug
// crashes while invoking dump after a |dump_provider| is not unregistered
@@ -722,6 +732,18 @@ bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
}
// static
+uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
+ const ProcessMemoryDump* pmd) {
+ uint64_t sum = 0;
+ for (const auto& kv : pmd->allocator_dumps()) {
+ auto name = StringPiece(kv.first);
+ if (MatchPattern(name, pattern))
+ sum += kv.second->GetSize();
+ }
+ return sum / 1024;
+}
+
+// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
HEAP_PROFILER_SCOPED_IGNORE;
@@ -736,9 +758,11 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
return;
}
- TRACE_EVENT_WITH_FLOW0(kTraceCategory,
- "MemoryDumpManager::FinalizeDumpAndAddToTrace",
- TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);
+ TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");
+
+ // The results struct to fill.
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ MemoryDumpCallbackResult result;
for (const auto& kv : pmd_async_state->process_dumps) {
ProcessId pid = kv.first; // kNullProcessId for the current process.
@@ -760,6 +784,30 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
kTraceEventNumArgs, kTraceEventArgNames,
kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
TRACE_EVENT_FLAG_HAS_ID);
+
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ // Don't try to fill the struct in detailed mode since it is hard to avoid
+ // double counting.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::DETAILED)
+ continue;
+
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ if (pid == kNullProcessId) {
+ result.chrome_dump.malloc_total_kb =
+ GetDumpsSumKb("malloc", process_memory_dump);
+ result.chrome_dump.v8_total_kb =
+ GetDumpsSumKb("v8/*", process_memory_dump);
+
+ // partition_alloc reports sizes for both allocated_objects and
+ // partitions. The memory allocated_objects uses is a subset of
+ // the partitions memory so to avoid double counting we only
+ // count partitions memory.
+ result.chrome_dump.partition_alloc_total_kb =
+ GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
+ result.chrome_dump.blink_gc_total_kb =
+ GetDumpsSumKb("blink_gc", process_memory_dump);
+ }
}
bool tracing_still_enabled;
@@ -776,7 +824,7 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
}
TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
- TRACE_ID_MANGLE(dump_guid));
+ TRACE_ID_LOCAL(dump_guid));
}
void MemoryDumpManager::OnTraceLogEnabled() {
@@ -829,18 +877,6 @@ void MemoryDumpManager::OnTraceLogEnabled() {
session_state, &MemoryDumpSessionState::type_name_deduplicator));
}
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
- new MemoryDumpScheduler(this, dump_thread->task_runner()));
- DCHECK_LE(memory_dump_config.triggers.size(), 3u);
- for (const auto& trigger : memory_dump_config.triggers) {
- if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
- NOTREACHED();
- continue;
- }
- dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
- trigger.min_time_between_dumps_ms);
- }
-
{
AutoLock lock(lock_);
@@ -849,7 +885,6 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
- dump_scheduler_ = std::move(dump_scheduler);
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
@@ -858,15 +893,28 @@ void MemoryDumpManager::OnTraceLogEnabled() {
if (mdpinfo->options.is_fast_polling_supported)
dump_providers_for_polling_.insert(mdpinfo);
}
+
+ MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance();
+ dump_scheduler->Setup(this, dump_thread_->task_runner());
+ DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+ trigger.min_time_between_dumps_ms);
+ }
+
// Notify polling supported only if some polling supported provider was
// registered, else RegisterPollingMDPOnDumpThread() will notify when first
// polling MDP registers.
if (!dump_providers_for_polling_.empty())
- dump_scheduler_->NotifyPollingSupported();
+ dump_scheduler->EnablePollingIfNeeded();
// Only coordinator process triggers periodic global memory dumps.
- if (is_coordinator_)
- dump_scheduler_->NotifyPeriodicTriggerSupported();
+ if (delegate_->IsCoordinator())
+ dump_scheduler->EnablePeriodicTriggerIfNeeded();
}
}
@@ -879,14 +927,12 @@ void MemoryDumpManager::OnTraceLogDisabled() {
return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
- std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
- scheduler = std::move(dump_scheduler_);
+ MemoryDumpScheduler::GetInstance()->DisableAllTriggers();
}
- scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
@@ -910,10 +956,6 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
return session_state_->IsDumpModeAllowed(dump_mode);
}
-uint64_t MemoryDumpManager::GetTracingProcessId() const {
- return delegate_->GetTracingProcessId();
-}
-
MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
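GetDumpsSumKb() walks the allocator dumps of a ProcessMemoryDump, sums the size of each dump whose name matches a glob such as "v8/*", and reports KiB. A self-contained sketch with a plain map and a simplified trailing-'*' matcher in place of base::MatchPattern:

    #include <cstdint>
    #include <map>
    #include <string>

    // Simplified matcher: only supports a trailing '*' wildcard.
    static bool MatchesGlob(const std::string& name, const std::string& pat) {
      if (!pat.empty() && pat.back() == '*')
        return name.compare(0, pat.size() - 1, pat, 0, pat.size() - 1) == 0;
      return name == pat;
    }

    static uint32_t GetDumpsSumKb(const std::string& pattern,
                                  const std::map<std::string, uint64_t>& dumps) {
      uint64_t sum = 0;
      for (const auto& kv : dumps)
        if (MatchesGlob(kv.first, pattern))
          sum += kv.second;  // bytes, from each dump's "size" scalar
      return static_cast<uint32_t>(sum / 1024);
    }

    int main() {
      std::map<std::string, uint64_t> dumps = {
          {"v8/heap", 2048}, {"v8/code", 1024}, {"malloc", 4096}};
      return GetDumpsSumKb("v8/*", dumps) == 3 ? 0 : 1;
    }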
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 92cc2f401b..ebee048691 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -18,10 +18,19 @@
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
+// Forward declare |MemoryDumpManagerDelegateImplTest| so that we can make it a
+// friend of |MemoryDumpManager| and give it access to |SetInstanceForTesting|.
+namespace memory_instrumentation {
+
+class MemoryDumpManagerDelegateImplTest;
+
+} // namespace memory_instrumentation
+
namespace base {
class SingleThreadTaskRunner;
@@ -54,13 +63,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// On the other side, the MemoryDumpManager will not be fully operational
// (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
// Arguments:
- // is_coordinator: if true this MemoryDumpManager instance will act as a
- // coordinator and schedule periodic dumps (if enabled via TraceConfig);
- // false when the MemoryDumpManager is initialized in a slave process.
// delegate: inversion-of-control interface for embedder-specific behaviors
// (multiprocess handshaking). See the lifetime and thread-safety
// requirements in the |MemoryDumpManagerDelegate| docstring.
- void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
+ void Initialize(std::unique_ptr<MemoryDumpManagerDelegate> delegate);
// (Un)Registers a MemoryDumpProvider instance.
// Args:
@@ -123,6 +129,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Returns true if the dump mode is allowed for current tracing session.
bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+ // Lets tests see if a dump provider is registered.
+ bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
+
// Returns the MemoryDumpSessionState object, which is shared by all the
// ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
// session lifetime.
@@ -135,7 +144,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// retrieved by child processes only when tracing is enabled. This is
// intended to express cross-process sharing of memory dumps on the
// child-process side, without having to know its own child process id.
- uint64_t GetTracingProcessId() const;
+ uint64_t GetTracingProcessId() const { return tracing_process_id_; }
+ void set_tracing_process_id(uint64_t tracing_process_id) {
+ tracing_process_id_ = tracing_process_id;
+ }
// Returns the name for the allocated_objects dump. Use this to declare
// suballocator dumps from other dump providers.
@@ -156,6 +168,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
friend class MemoryDumpScheduler;
+ friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
// Descriptor used to hold information about registered MDPs.
// Some important considerations about lifetime of this object:
@@ -285,6 +298,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~MemoryDumpManager() override;
static void SetInstanceForTesting(MemoryDumpManager* instance);
+ static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
static void FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
@@ -348,10 +362,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
std::unordered_set<StringPiece, StringPieceHash>
strict_thread_check_blacklist_;
- MemoryDumpManagerDelegate* delegate_; // Not owned.
-
- // When true, this instance is in charge of coordinating periodic dumps.
- bool is_coordinator_;
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate_;
// Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
// to guard against disabling logging while dumping on another thread.
@@ -361,9 +372,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// dump_providers_enabled_ list) when tracing is not enabled.
subtle::AtomicWord memory_tracing_enabled_;
- // For triggering memory dumps.
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
-
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
std::unique_ptr<Thread> dump_thread_;
@@ -385,17 +393,15 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// safe (i.e. should expect calls from any thread and handle thread hopping).
class BASE_EXPORT MemoryDumpManagerDelegate {
public:
+ MemoryDumpManagerDelegate() {}
+ virtual ~MemoryDumpManagerDelegate() {}
+
virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) = 0;
- // Returns tracing process id of the current process. This is used by
- // MemoryDumpManager::GetTracingProcessId.
- virtual uint64_t GetTracingProcessId() const = 0;
+ virtual bool IsCoordinator() const = 0;
protected:
- MemoryDumpManagerDelegate() {}
- virtual ~MemoryDumpManagerDelegate() {}
-
void CreateProcessDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) {
MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
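Initialize() now takes the delegate as a std::unique_ptr by value, so ownership transfers to the MemoryDumpManager, and the separate raw pointer plus is_coordinator flag collapse into delegate_->IsCoordinator(). A sketch of the ownership-transfer shape with hypothetical minimal interfaces:

    #include <memory>
    #include <utility>

    class Delegate {
     public:
      virtual ~Delegate() = default;
      virtual bool IsCoordinator() const = 0;
    };

    class Manager {
     public:
      void Initialize(std::unique_ptr<Delegate> delegate) {
        delegate_ = std::move(delegate);  // the manager now owns the delegate
      }
      bool coordinator() const {
        return delegate_ && delegate_->IsCoordinator();
      }

     private:
      std::unique_ptr<Delegate> delegate_;
    };

    class CoordinatorDelegate : public Delegate {
      bool IsCoordinator() const override { return true; }
    };

    int main() {
      Manager m;
      m.Initialize(std::make_unique<CoordinatorDelegate>());
      return m.coordinator() ? 0 : 1;
    }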
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 51d41943fb..e037fd4982 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -7,9 +7,11 @@
#include <stdint.h>
#include <memory>
+#include <utility>
#include <vector>
#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/message_loop/message_loop.h"
@@ -102,10 +104,10 @@ void OnTraceDataCollected(Closure quit_closure,
// Posts |task| to |task_runner| and blocks until it is executed.
void PostTaskAndWait(const tracked_objects::Location& from_here,
SequencedTaskRunner* task_runner,
- const base::Closure& task) {
+ base::Closure task) {
base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner->PostTask(from_here, task);
+ task_runner->PostTask(from_here, std::move(task));
task_runner->PostTask(
FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
// The SequencedTaskRunner guarantees that |event| will only be signaled after
@@ -119,7 +121,8 @@ void PostTaskAndWait(const tracked_objects::Location& from_here,
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
public:
- MemoryDumpManagerDelegateForTesting() {
+ MemoryDumpManagerDelegateForTesting(bool is_coordinator)
+ : is_coordinator_(is_coordinator) {
ON_CALL(*this, RequestGlobalMemoryDump(_, _))
.WillByDefault(Invoke(
this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
@@ -129,13 +132,13 @@ class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
void(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback));
- uint64_t GetTracingProcessId() const override {
- NOTREACHED();
- return MemoryDumpManager::kInvalidTracingProcessId;
- }
+ bool IsCoordinator() const override { return is_coordinator_; }
// Promote the CreateProcessDump to public so it can be used by test fixtures.
using MemoryDumpManagerDelegate::CreateProcessDump;
+
+ private:
+ bool is_coordinator_;
};
class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -180,19 +183,19 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override {
NOTREACHED();
return false;
}
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ Closure task,
TimeDelta delay) override {
num_of_post_tasks_++;
if (enabled_) {
return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
- task);
+ std::move(task));
}
return false;
}
@@ -220,13 +223,12 @@ class MemoryDumpManagerTest : public testing::Test {
mdm_.reset(new MemoryDumpManager());
MemoryDumpManager::SetInstanceForTesting(mdm_.get());
ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
- delegate_.reset(new MemoryDumpManagerDelegateForTesting);
}
void TearDown() override {
MemoryDumpManager::SetInstanceForTesting(nullptr);
+ delegate_ = nullptr;
mdm_.reset();
- delegate_.reset();
message_loop_.reset();
TraceLog::DeleteForTesting();
}
@@ -248,7 +250,8 @@ class MemoryDumpManagerTest : public testing::Test {
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
- mdm_->Initialize(delegate_.get(), is_coordinator);
+ delegate_ = new MemoryDumpManagerDelegateForTesting(is_coordinator);
+ mdm_->Initialize(base::WrapUnique(delegate_));
}
void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
@@ -274,7 +277,8 @@ class MemoryDumpManagerTest : public testing::Test {
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
bool IsPeriodicDumpingEnabled() const {
- return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
+ return MemoryDumpScheduler::GetInstance()
+ ->IsPeriodicTimerRunningForTesting();
}
int GetMaxConsecutiveFailuresCount() const {
@@ -283,7 +287,7 @@ class MemoryDumpManagerTest : public testing::Test {
const MemoryDumpProvider::Options kDefaultOptions;
std::unique_ptr<MemoryDumpManager> mdm_;
- std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ MemoryDumpManagerDelegateForTesting* delegate_;
bool last_callback_success_;
private:
@@ -897,7 +901,6 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// initialization gets NACK-ed cleanly.
{
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
- EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_FALSE(last_callback_success_);
@@ -906,9 +909,9 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// Now late-initialize the MemoryDumpManager and check that the
// RequestGlobalDump completes successfully.
{
+ InitializeMemoryDumpManager(false /* is_coordinator */);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
- InitializeMemoryDumpManager(false /* is_coordinator */);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_TRUE(last_callback_success_);
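
Several hunks above migrate task-posting signatures from const Closure& to
by-value Closure so callers can transfer ownership of the bound state. A
minimal sketch of the calling pattern (the helper name is illustrative):

  // Taking |task| by value lets callers std::move() it in, so the closure's
  // bound state is handed off rather than copied.
  void RunOn(base::SequencedTaskRunner* runner, base::Closure task) {
    runner->PostTask(FROM_HERE, std::move(task));
  }

  // Caller side:
  //   base::Closure work = base::Bind(&DoWork);
  //   RunOn(runner.get(), std::move(work));
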
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index bf72bef5e4..f2744007d7 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -60,5 +60,9 @@ MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
return MemoryDumpLevelOfDetail::LAST;
}
+MemoryDumpCallbackResult::MemoryDumpCallbackResult() {}
+
+MemoryDumpCallbackResult::~MemoryDumpCallbackResult() {}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 90a866fa7a..a8b3f423ca 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -9,10 +9,12 @@
// These are also used in the IPCs for coordinating inter-process memory dumps.
#include <stdint.h>
+#include <map>
#include <string>
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/process/process_handle.h"
namespace base {
namespace trace_event {
@@ -72,6 +74,33 @@ struct MemoryDumpArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// TODO(hjd): Not used yet, see crbug.com/703184
+// Summarizes information about memory use as seen by a single process.
+// This information will eventually be passed to a service to be collated
+// and reported.
+struct MemoryDumpCallbackResult {
+ struct OSMemDump {
+ uint32_t resident_set_kb = 0;
+ };
+ struct ChromeMemDump {
+ uint32_t malloc_total_kb = 0;
+ uint32_t partition_alloc_total_kb = 0;
+ uint32_t blink_gc_total_kb = 0;
+ uint32_t v8_total_kb = 0;
+ };
+
+ // These are for the current process.
+ OSMemDump os_dump;
+ ChromeMemDump chrome_dump;
+
+ // In some cases, OS stats can only be dumped from a privileged process to
+ // get around sandboxing/selinux restrictions (see crbug.com/461788).
+ std::map<ProcessId, OSMemDump> extra_processes_dump;
+
+ MemoryDumpCallbackResult();
+ ~MemoryDumpCallbackResult();
+};
+
using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
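
Nothing consumes MemoryDumpCallbackResult yet (see the TODO above), but a
sketch of how a process-local summary might be filled in, with placeholder
values, looks like:

  base::trace_event::MemoryDumpCallbackResult result;
  result.os_dump.resident_set_kb = 120 * 1024;     // RSS of this process.
  result.chrome_dump.malloc_total_kb = 64 * 1024;
  result.chrome_dump.v8_total_kb = 8 * 1024;

  // A privileged process can also report OS stats on behalf of sandboxed
  // children that cannot read them (crbug.com/461788). |child_pid| is a
  // hypothetical base::ProcessId.
  base::trace_event::MemoryDumpCallbackResult::OSMemDump child_dump;
  child_dump.resident_set_kb = 30 * 1024;
  result.extra_processes_dump[child_pid] = child_dump;
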
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index eaa8d63661..66ea6c9f1a 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -21,108 +21,131 @@ const uint32_t kMemoryTotalsPollingInterval = 25;
uint32_t g_polling_interval_ms_for_testing = 0;
} // namespace
-MemoryDumpScheduler::MemoryDumpScheduler(
- MemoryDumpManager* mdm,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
- : mdm_(mdm), polling_state_(polling_task_runner) {}
+// static
+MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
+ static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
+ return instance;
+}
+MemoryDumpScheduler::MemoryDumpScheduler() : mdm_(nullptr), is_setup_(false) {}
MemoryDumpScheduler::~MemoryDumpScheduler() {}
+void MemoryDumpScheduler::Setup(
+ MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
+ mdm_ = mdm;
+ polling_task_runner_ = polling_task_runner;
+ periodic_state_.reset(new PeriodicTriggerState);
+ polling_state_.reset(new PollingTriggerState);
+ is_setup_ = true;
+}
+
void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
MemoryDumpLevelOfDetail level_of_detail,
uint32_t min_time_between_dumps_ms) {
+ DCHECK(is_setup_);
if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
- DCHECK(!periodic_state_.is_configured);
- DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ DCHECK(!periodic_state_->is_configured);
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
DCHECK_NE(0u, min_time_between_dumps_ms);
- polling_state_.level_of_detail = level_of_detail;
- polling_state_.min_polls_between_dumps =
- (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
- polling_state_.polling_interval_ms;
- polling_state_.current_state = PollingTriggerState::CONFIGURED;
+ polling_state_->level_of_detail = level_of_detail;
+ polling_state_->min_polls_between_dumps =
+ (min_time_between_dumps_ms + polling_state_->polling_interval_ms - 1) /
+ polling_state_->polling_interval_ms;
+ polling_state_->current_state = PollingTriggerState::CONFIGURED;
} else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
- DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
- periodic_state_.is_configured = true;
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
+ periodic_state_->is_configured = true;
DCHECK_NE(0u, min_time_between_dumps_ms);
switch (level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
- periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, periodic_state_->light_dump_period_ms);
+ periodic_state_->light_dump_period_ms = min_time_between_dumps_ms;
break;
case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
- periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms);
+ periodic_state_->heavy_dump_period_ms = min_time_between_dumps_ms;
break;
}
- periodic_state_.min_timer_period_ms = std::min(
- periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
- periodic_state_.min_timer_period_ms);
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
- periodic_state_.min_timer_period_ms);
+ periodic_state_->min_timer_period_ms = std::min(
+ periodic_state_->min_timer_period_ms, min_time_between_dumps_ms);
+ DCHECK_EQ(0u, periodic_state_->light_dump_period_ms %
+ periodic_state_->min_timer_period_ms);
+ DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms %
+ periodic_state_->min_timer_period_ms);
}
}
-void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
- if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
+void MemoryDumpScheduler::EnablePeriodicTriggerIfNeeded() {
+ DCHECK(is_setup_);
+ if (!periodic_state_->is_configured || periodic_state_->timer.IsRunning())
return;
- periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
- periodic_state_.min_timer_period_ms;
- periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
- periodic_state_.min_timer_period_ms;
+ periodic_state_->light_dumps_rate = periodic_state_->light_dump_period_ms /
+ periodic_state_->min_timer_period_ms;
+ periodic_state_->heavy_dumps_rate = periodic_state_->heavy_dump_period_ms /
+ periodic_state_->min_timer_period_ms;
- periodic_state_.dump_count = 0;
- periodic_state_.timer.Start(
+ periodic_state_->dump_count = 0;
+ periodic_state_->timer.Start(
FROM_HERE,
- TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
+ TimeDelta::FromMilliseconds(periodic_state_->min_timer_period_ms),
Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
}
-void MemoryDumpScheduler::NotifyPollingSupported() {
- if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
+void MemoryDumpScheduler::EnablePollingIfNeeded() {
+ DCHECK(is_setup_);
+ if (polling_state_->current_state != PollingTriggerState::CONFIGURED)
return;
- polling_state_.current_state = PollingTriggerState::ENABLED;
- polling_state_.ResetTotals();
+ polling_state_->current_state = PollingTriggerState::ENABLED;
+ polling_state_->ResetTotals();
- polling_state_.polling_task_runner->PostTask(
+ polling_task_runner_->PostTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
}
void MemoryDumpScheduler::NotifyDumpTriggered() {
- if (polling_state_.polling_task_runner &&
- polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
- polling_state_.polling_task_runner->PostTask(
+ if (polling_task_runner_ &&
+ !polling_task_runner_->RunsTasksOnCurrentThread()) {
+ polling_task_runner_->PostTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
return;
}
- if (polling_state_.current_state != PollingTriggerState::ENABLED)
+
+ if (!polling_state_ ||
+ polling_state_->current_state != PollingTriggerState::ENABLED) {
return;
+ }
- polling_state_.ResetTotals();
+ polling_state_->ResetTotals();
}
void MemoryDumpScheduler::DisableAllTriggers() {
- if (periodic_state_.timer.IsRunning())
- periodic_state_.timer.Stop();
- DisablePolling();
-}
+ if (periodic_state_) {
+ if (periodic_state_->timer.IsRunning())
+ periodic_state_->timer.Stop();
+ periodic_state_.reset();
+ }
-void MemoryDumpScheduler::DisablePolling() {
- if (polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
- if (polling_state_.polling_task_runner->PostTask(
- FROM_HERE,
- Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
- return;
+ if (polling_task_runner_) {
+ DCHECK(polling_state_);
+ polling_task_runner_->PostTask(
+ FROM_HERE, Bind(&MemoryDumpScheduler::DisablePollingOnPollingThread,
+ Unretained(this)));
+ polling_task_runner_ = nullptr;
}
- polling_state_.current_state = PollingTriggerState::DISABLED;
- polling_state_.polling_task_runner = nullptr;
+ is_setup_ = false;
+}
+
+void MemoryDumpScheduler::DisablePollingOnPollingThread() {
+ polling_state_->current_state = PollingTriggerState::DISABLED;
+ polling_state_.reset();
}
// static
@@ -131,30 +154,30 @@ void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
}
bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
- return periodic_state_.timer.IsRunning();
+ return periodic_state_->timer.IsRunning();
}
void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (periodic_state_.light_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
+ if (periodic_state_->light_dumps_rate > 0 &&
+ periodic_state_->dump_count % periodic_state_->light_dumps_rate == 0)
level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (periodic_state_.heavy_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
+ if (periodic_state_->heavy_dumps_rate > 0 &&
+ periodic_state_->dump_count % periodic_state_->heavy_dumps_rate == 0)
level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_state_.dump_count;
+ ++periodic_state_->dump_count;
mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}
void MemoryDumpScheduler::PollMemoryOnPollingThread() {
- if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ if (polling_state_->current_state != PollingTriggerState::ENABLED)
return;
uint64_t polled_memory = 0;
bool res = mdm_->PollFastMemoryTotal(&polled_memory);
DCHECK(res);
- if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+ if (polling_state_->level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
polled_memory / 1024 / 1024);
}
@@ -166,14 +189,14 @@ void MemoryDumpScheduler::PollMemoryOnPollingThread() {
polled_memory / 1024 / 1024);
mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
- polling_state_.level_of_detail);
+ polling_state_->level_of_detail);
}
// TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
- TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
+ TimeDelta::FromMilliseconds(polling_state_->polling_interval_ms));
}
bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
@@ -184,52 +207,52 @@ bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
return false;
bool should_dump = false;
- ++polling_state_.num_polls_from_last_dump;
- if (polling_state_.last_dump_memory_total == 0) {
+ ++polling_state_->num_polls_from_last_dump;
+ if (polling_state_->last_dump_memory_total == 0) {
    // If it's the first sample, trigger a memory dump.
should_dump = true;
- } else if (polling_state_.min_polls_between_dumps >
- polling_state_.num_polls_from_last_dump) {
+ } else if (polling_state_->min_polls_between_dumps >
+ polling_state_->num_polls_from_last_dump) {
return false;
}
int64_t increase_from_last_dump =
- current_memory_total - polling_state_.last_dump_memory_total;
+ current_memory_total - polling_state_->last_dump_memory_total;
should_dump |=
- increase_from_last_dump > polling_state_.memory_increase_threshold;
+ increase_from_last_dump > polling_state_->memory_increase_threshold;
should_dump |= IsCurrentSamplePeak(current_memory_total);
if (should_dump)
- polling_state_.ResetTotals();
+ polling_state_->ResetTotals();
return should_dump;
}
bool MemoryDumpScheduler::IsCurrentSamplePeak(
uint64_t current_memory_total_bytes) {
uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
- polling_state_.last_memory_totals_kb_index =
- (polling_state_.last_memory_totals_kb_index + 1) %
+ polling_state_->last_memory_totals_kb_index =
+ (polling_state_->last_memory_totals_kb_index + 1) %
PollingTriggerState::kMaxNumMemorySamples;
uint64_t mean = 0;
for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
- if (polling_state_.last_memory_totals_kb[i] == 0) {
+ if (polling_state_->last_memory_totals_kb[i] == 0) {
// Not enough samples to detect peaks.
polling_state_
- .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
current_memory_total_kb;
return false;
}
- mean += polling_state_.last_memory_totals_kb[i];
+ mean += polling_state_->last_memory_totals_kb[i];
}
mean = mean / PollingTriggerState::kMaxNumMemorySamples;
uint64_t variance = 0;
for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
- variance += (polling_state_.last_memory_totals_kb[i] - mean) *
- (polling_state_.last_memory_totals_kb[i] - mean);
+ variance += (polling_state_->last_memory_totals_kb[i] - mean) *
+ (polling_state_->last_memory_totals_kb[i] - mean);
}
variance = variance / PollingTriggerState::kMaxNumMemorySamples;
polling_state_
- .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
current_memory_total_kb;
  // If the stddev is less than 0.2%, we consider the process to be inactive.
@@ -256,11 +279,9 @@ MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
DCHECK(!timer.IsRunning());
}
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+MemoryDumpScheduler::PollingTriggerState::PollingTriggerState()
: current_state(DISABLED),
level_of_detail(MemoryDumpLevelOfDetail::FIRST),
- polling_task_runner(polling_task_runner),
polling_interval_ms(g_polling_interval_ms_for_testing
? g_polling_interval_ms_for_testing
: kMemoryTotalsPollingInterval),
@@ -270,9 +291,7 @@ MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
memory_increase_threshold(0),
last_memory_totals_kb_index(0) {}
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
- DCHECK(!polling_task_runner);
-}
+MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {}
void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
if (!memory_increase_threshold) {
@@ -282,8 +301,11 @@ void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
// Set threshold to 1% of total system memory.
SystemMemoryInfoKB meminfo;
bool res = GetSystemMemoryInfo(&meminfo);
- if (res)
- memory_increase_threshold = (meminfo.total / 100) * 1024;
+ if (res) {
+ memory_increase_threshold =
+ (static_cast<int64_t>(meminfo.total) / 100) * 1024;
+ }
+ DCHECK_GT(memory_increase_threshold, 0u);
#endif
}
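
With the scheduler converted to a lazily created singleton, the expected call
sequence on the client thread, per the header comments, is roughly the
following sketch (the mdm and task-runner values are placeholders owned by
the caller):

  auto* scheduler = base::trace_event::MemoryDumpScheduler::GetInstance();
  scheduler->Setup(mdm, polling_task_runner);
  scheduler->AddTrigger(base::trace_event::MemoryDumpType::PERIODIC_INTERVAL,
                        base::trace_event::MemoryDumpLevelOfDetail::LIGHT,
                        250 /* min_time_between_dumps_ms */);
  scheduler->EnablePeriodicTriggerIfNeeded();
  // ... tracing session runs ...
  scheduler->DisableAllTriggers();  // Before the polling thread is stopped.
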
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
index fd21fce834..ab8441bc20 100644
--- a/base/trace_event/memory_dump_scheduler.h
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -5,6 +5,8 @@
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
+#include <memory>
+
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
@@ -18,42 +20,50 @@ namespace trace_event {
class MemoryDumpManager;
-// Schedules global dump requests based on the triggers added.
+// Schedules global dump requests based on the triggers added. The methods of
+// this class are NOT thread safe; the client is responsible for invoking
+// them safely.
class BASE_EXPORT MemoryDumpScheduler {
public:
- MemoryDumpScheduler(
- MemoryDumpManager* mdm_,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
- ~MemoryDumpScheduler();
+ static MemoryDumpScheduler* GetInstance();
+
+ // Initializes the scheduler. NOT thread safe.
+ void Setup(MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
   // Adds triggers for scheduling global dumps. Periodic and peak triggers
   // cannot both be added. At the moment the periodic support is limited to
   // at most one periodic trigger per dump mode and peak triggers are limited
   // to at most one. All intervals should be an integral multiple of the smallest
- // interval specified.
+ // interval specified. NOT thread safe.
void AddTrigger(MemoryDumpType trigger_type,
MemoryDumpLevelOfDetail level_of_detail,
uint32_t min_time_between_dumps_ms);
- // Starts periodic dumps.
- void NotifyPeriodicTriggerSupported();
+ // Starts periodic dumps. NOT thread safe and triggers must be added before
+ // enabling.
+ void EnablePeriodicTriggerIfNeeded();
- // Starts polling memory total.
- void NotifyPollingSupported();
+ // Starts polling memory total. NOT thread safe and triggers must be added
+ // before enabling.
+ void EnablePollingIfNeeded();
   // Resets the dump-trigger timing to account for the minimum time between the
- // dumps.
+ // dumps. NOT thread safe.
void NotifyDumpTriggered();
- // Disables all triggers.
+ // Disables all triggers. NOT thread safe. This should be called before the
+ // polling thread is stopped so that polling stops cleanly.
void DisableAllTriggers();
private:
friend class MemoryDumpManagerTest;
+ friend class MemoryDumpSchedulerPollingTest;
FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered);
  // Helper class to schedule periodic memory dumps.
- struct PeriodicTriggerState {
+ struct BASE_EXPORT PeriodicTriggerState {
PeriodicTriggerState();
~PeriodicTriggerState();
@@ -71,7 +81,7 @@ class BASE_EXPORT MemoryDumpScheduler {
DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
};
- struct PollingTriggerState {
+ struct BASE_EXPORT PollingTriggerState {
enum State {
CONFIGURED, // Polling trigger was added.
ENABLED, // Polling is running.
@@ -80,8 +90,7 @@ class BASE_EXPORT MemoryDumpScheduler {
static const uint32_t kMaxNumMemorySamples = 50;
- explicit PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ PollingTriggerState();
~PollingTriggerState();
// Helper to clear the tracked memory totals and poll count from last dump.
@@ -90,7 +99,6 @@ class BASE_EXPORT MemoryDumpScheduler {
State current_state;
MemoryDumpLevelOfDetail level_of_detail;
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
uint32_t polling_interval_ms;
  // Minimum number of polls after the last dump at which the next dump can be
@@ -106,8 +114,11 @@ class BASE_EXPORT MemoryDumpScheduler {
DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
};
- // Helper to set polling disabled on the polling thread.
- void DisablePolling();
+ MemoryDumpScheduler();
+ ~MemoryDumpScheduler();
+
+ // Helper to disable polling on the polling thread.
+ void DisablePollingOnPollingThread();
// Periodically called by the timer.
void RequestPeriodicGlobalDump();
@@ -129,8 +140,19 @@ class BASE_EXPORT MemoryDumpScheduler {
MemoryDumpManager* mdm_;
- PeriodicTriggerState periodic_state_;
- PollingTriggerState polling_state_;
+ // Accessed on the thread of the client before enabling and only accessed on
+ // the thread that called "EnablePeriodicTriggerIfNeeded()" after enabling.
+ std::unique_ptr<PeriodicTriggerState> periodic_state_;
+
+ // Accessed on the thread of the client before enabling and only accessed on
+ // the polling thread after enabling.
+ std::unique_ptr<PollingTriggerState> polling_state_;
+
+ // Accessed on the thread of the client only.
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner_;
+
+ // True when the scheduler is set up. Accessed on the client thread only.
+ bool is_setup_;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
};
diff --git a/base/trace_event/memory_dump_scheduler_unittest.cc b/base/trace_event/memory_dump_scheduler_unittest.cc
new file mode 100644
index 0000000000..9af2a3b430
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include <memory>
+
+#include "base/single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpSchedulerPollingTest : public testing::Test {
+ public:
+ static const uint32_t kMinPollsToDump = 5;
+
+ MemoryDumpSchedulerPollingTest()
+ : testing::Test(),
+ num_samples_tracked_(
+ MemoryDumpScheduler::PollingTriggerState::kMaxNumMemorySamples) {}
+
+ void SetUp() override {
+ MemoryDumpScheduler::SetPollingIntervalForTesting(1);
+ mds_ = MemoryDumpScheduler::GetInstance();
+ mds_->Setup(nullptr, nullptr);
+ mds_->AddTrigger(MemoryDumpType::PEAK_MEMORY_USAGE,
+ MemoryDumpLevelOfDetail::LIGHT, kMinPollsToDump);
+ mds_->polling_state_->ResetTotals();
+ mds_->polling_state_->current_state =
+ MemoryDumpScheduler::PollingTriggerState::ENABLED;
+ }
+
+ void TearDown() override {
+ mds_->polling_state_->current_state =
+ MemoryDumpScheduler::PollingTriggerState::DISABLED;
+ }
+
+ protected:
+ bool ShouldTriggerDump(uint64_t total) {
+ return mds_->ShouldTriggerDump(total);
+ }
+
+ uint32_t num_samples_tracked_;
+ MemoryDumpScheduler* mds_;
+};
+
+TEST_F(MemoryDumpSchedulerPollingTest, PeakDetection) {
+ for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
+ // Memory is increased in steps and dumps must be triggered at every step.
+ uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1024;
+ bool did_trigger = ShouldTriggerDump(total);
+ // Dumps must be triggered only at specific iterations.
+ bool should_have_triggered = i == 0;
+ should_have_triggered |=
+ (i > num_samples_tracked_) && (i % (2 * num_samples_tracked_) == 1);
+ if (should_have_triggered) {
+ ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
+ } else {
+ ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
+ }
+ }
+}
+
+TEST_F(MemoryDumpSchedulerPollingTest, SlowGrowthDetection) {
+ for (uint32_t i = 0; i < 15; ++i) {
+ // Record a 1GiB increase on each call. Dumps are triggered when the
+ // increase exceeds 1% of the system's total memory.
+ uint64_t total = static_cast<uint64_t>(i + 1) * 1024 * 1024 * 1024;
+ bool did_trigger = ShouldTriggerDump(total);
+ bool should_have_triggered = i % kMinPollsToDump == 0;
+ if (should_have_triggered) {
+ ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
+ } else {
+ ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
+ }
+ }
+}
+
+TEST_F(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered) {
+ for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
+ uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1024;
+ if (i % num_samples_tracked_ == 0)
+ mds_->NotifyDumpTriggered();
+ bool did_trigger = ShouldTriggerDump(total);
+ // Dumps should never be triggered since NotifyDumpTriggered() is called
+ // frequently.
+ EXPECT_NE(0u, mds_->polling_state_->last_dump_memory_total);
+ EXPECT_GT(num_samples_tracked_ - 1,
+ mds_->polling_state_->last_memory_totals_kb_index);
+ EXPECT_LT(static_cast<int64_t>(
+ total - mds_->polling_state_->last_dump_memory_total),
+ mds_->polling_state_->memory_increase_threshold);
+ ASSERT_FALSE(did_trigger && i) << "Unexpected dump at " << i;
+ }
+}
+
+} // namespace trace_event
+} // namespace base
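
For reference, the peak check these tests exercise keeps the last
kMaxNumMemorySamples totals and compares the standard deviation against 0.2%
of the mean (the actual threshold code is elided from the hunk above). A
standalone sketch of the same arithmetic, using one integer-only way to
express the 0.2% rule, not the class itself:

  #include <cstdint>
  #include <vector>

  // Returns true if the recent samples are "flat", i.e. stddev < 0.2% of the
  // mean. Since (0.002 * mean)^2 == mean^2 / 250000, the check can stay in
  // integer math.
  bool SamplesLookInactive(const std::vector<uint64_t>& samples_kb) {
    uint64_t mean = 0;
    for (uint64_t s : samples_kb)
      mean += s;
    mean /= samples_kb.size();

    uint64_t variance = 0;
    for (uint64_t s : samples_kb)
      variance += (s - mean) * (s - mean);  // Unsigned wrap squares correctly.
    variance /= samples_kb.size();

    // stddev < 0.002 * mean  <=>  variance * 250000 < mean * mean.
    return variance * 250000 < mean * mean;
  }
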
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index ae74322040..746068a7b1 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -69,10 +69,70 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"net/http_network_session_0x?/stream_factory",
"net/sdch_manager_0x?",
"net/ssl_session_cache",
- "net/url_request_context_0x?",
- "net/url_request_context_0x?/http_cache",
- "net/url_request_context_0x?/http_network_session",
- "net/url_request_context_0x?/sdch_manager",
+ "net/url_request_context",
+ "net/url_request_context/app_request",
+ "net/url_request_context/app_request/0x?",
+ "net/url_request_context/app_request/0x?/http_cache",
+ "net/url_request_context/app_request/0x?/http_cache/memory_backend",
+ "net/url_request_context/app_request/0x?/http_cache/simple_backend",
+ "net/url_request_context/app_request/0x?/http_network_session",
+ "net/url_request_context/app_request/0x?/sdch_manager",
+ "net/url_request_context/extensions",
+ "net/url_request_context/extensions/0x?",
+ "net/url_request_context/extensions/0x?/http_cache",
+ "net/url_request_context/extensions/0x?/http_cache/memory_backend",
+ "net/url_request_context/extensions/0x?/http_cache/simple_backend",
+ "net/url_request_context/extensions/0x?/http_network_session",
+ "net/url_request_context/extensions/0x?/sdch_manager",
+ "net/url_request_context/isolated_media",
+ "net/url_request_context/isolated_media/0x?",
+ "net/url_request_context/isolated_media/0x?/http_cache",
+ "net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/isolated_media/0x?/http_network_session",
+ "net/url_request_context/isolated_media/0x?/sdch_manager",
+ "net/url_request_context/main",
+ "net/url_request_context/main/0x?",
+ "net/url_request_context/main/0x?/http_cache",
+ "net/url_request_context/main/0x?/http_cache/memory_backend",
+ "net/url_request_context/main/0x?/http_cache/simple_backend",
+ "net/url_request_context/main/0x?/http_network_session",
+ "net/url_request_context/main/0x?/sdch_manager",
+ "net/url_request_context/main_media",
+ "net/url_request_context/main_media/0x?",
+ "net/url_request_context/main_media/0x?/http_cache",
+ "net/url_request_context/main_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/main_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/main_media/0x?/http_network_session",
+ "net/url_request_context/main_media/0x?/sdch_manager",
+ "net/url_request_context/proxy",
+ "net/url_request_context/proxy/0x?",
+ "net/url_request_context/proxy/0x?/http_cache",
+ "net/url_request_context/proxy/0x?/http_cache/memory_backend",
+ "net/url_request_context/proxy/0x?/http_cache/simple_backend",
+ "net/url_request_context/proxy/0x?/http_network_session",
+ "net/url_request_context/proxy/0x?/sdch_manager",
+ "net/url_request_context/safe_browsing",
+ "net/url_request_context/safe_browsing/0x?",
+ "net/url_request_context/safe_browsing/0x?/http_cache",
+ "net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
+ "net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
+ "net/url_request_context/safe_browsing/0x?/http_network_session",
+ "net/url_request_context/safe_browsing/0x?/sdch_manager",
+ "net/url_request_context/system",
+ "net/url_request_context/system/0x?",
+ "net/url_request_context/system/0x?/http_cache",
+ "net/url_request_context/system/0x?/http_cache/memory_backend",
+ "net/url_request_context/system/0x?/http_cache/simple_backend",
+ "net/url_request_context/system/0x?/http_network_session",
+ "net/url_request_context/system/0x?/sdch_manager",
+ "net/url_request_context/unknown",
+ "net/url_request_context/unknown/0x?",
+ "net/url_request_context/unknown/0x?/http_cache",
+ "net/url_request_context/unknown/0x?/http_cache/memory_backend",
+ "net/url_request_context/unknown/0x?/http_cache/simple_backend",
+ "net/url_request_context/unknown/0x?/http_network_session",
+ "net/url_request_context/unknown/0x?/sdch_manager",
"web_cache/Image_resources",
"web_cache/CSS stylesheet_resources",
"web_cache/Script_resources",
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 36de107bf8..3df09992b1 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -11,11 +11,7 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
-#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
@@ -37,11 +33,6 @@ const char kEnableArgumentFilter[] = "enable-argument-filter";
const char kRecordModeParam[] = "record_mode";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
-const char kSyntheticDelaysParam[] = "synthetic_delays";
-
-const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters that are used to parse the memory dump config in a trace
// config string.
@@ -148,27 +139,36 @@ TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
return *this;
predicate_name_ = rhs.predicate_name_;
- included_categories_ = rhs.included_categories_;
- excluded_categories_ = rhs.excluded_categories_;
+ category_filter_ = rhs.category_filter_;
+
if (rhs.args_)
args_ = rhs.args_->CreateDeepCopy();
return *this;
}
-void TraceConfig::EventFilterConfig::AddIncludedCategory(
- const std::string& category) {
- included_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
+ const base::DictionaryValue* event_filter) {
+ category_filter_.InitializeFromConfigDict(*event_filter);
+
+ const base::DictionaryValue* args_dict = nullptr;
+ if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+ args_ = args_dict->CreateDeepCopy();
}
-void TraceConfig::EventFilterConfig::AddExcludedCategory(
- const std::string& category) {
- excluded_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::SetCategoryFilter(
+ const TraceConfigCategoryFilter& category_filter) {
+ category_filter_ = category_filter;
}
-void TraceConfig::EventFilterConfig::SetArgs(
- std::unique_ptr<base::DictionaryValue> args) {
- args_ = std::move(args);
+void TraceConfig::EventFilterConfig::ToDict(
+ DictionaryValue* filter_dict) const {
+ filter_dict->SetString(kFilterPredicateParam, predicate_name());
+
+ category_filter_.ToDict(filter_dict);
+
+ if (args_)
+ filter_dict->Set(kFilterArgsParam, args_->CreateDeepCopy());
}
bool TraceConfig::EventFilterConfig::GetArgAsSet(
@@ -187,26 +187,7 @@ bool TraceConfig::EventFilterConfig::GetArgAsSet(
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
const char* category_group_name) const {
- CStringTokenizer category_group_tokens(
- category_group_name, category_group_name + strlen(category_group_name),
- ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
-
- for (const auto& excluded_category : excluded_categories_) {
- if (base::MatchPattern(category_group_token, excluded_category)) {
- return false;
- }
- }
-
- for (const auto& included_category : included_categories_) {
- if (base::MatchPattern(category_group_token, included_category)) {
- return true;
- }
- }
- }
-
- return false;
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
TraceConfig::TraceConfig() {
@@ -255,11 +236,8 @@ TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
+ category_filter_(tc.category_filter_),
memory_dump_config_(tc.memory_dump_config_),
- included_categories_(tc.included_categories_),
- disabled_categories_(tc.disabled_categories_),
- excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_),
event_filters_(tc.event_filters_) {}
TraceConfig::~TraceConfig() {
@@ -272,17 +250,14 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
record_mode_ = rhs.record_mode_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
+ category_filter_ = rhs.category_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
- included_categories_ = rhs.included_categories_;
- disabled_categories_ = rhs.disabled_categories_;
- excluded_categories_ = rhs.excluded_categories_;
- synthetic_delays_ = rhs.synthetic_delays_;
event_filters_ = rhs.event_filters_;
return *this;
}
const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
- return synthetic_delays_;
+ return category_filter_.synthetic_delays();
}
std::string TraceConfig::ToString() const {
@@ -298,69 +273,14 @@ TraceConfig::AsConvertableToTraceFormat() const {
}
std::string TraceConfig::ToCategoryFilterString() const {
- std::string filter_string;
- WriteCategoryFilterString(included_categories_, &filter_string, true);
- WriteCategoryFilterString(disabled_categories_, &filter_string, true);
- WriteCategoryFilterString(excluded_categories_, &filter_string, false);
- WriteCategoryFilterString(synthetic_delays_, &filter_string);
- return filter_string;
+ return category_filter_.ToFilterString();
}
bool TraceConfig::IsCategoryGroupEnabled(
const char* category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
-
- bool had_enabled_by_default = false;
- DCHECK(category_group_name);
- std::string category_group_name_str = category_group_name;
- StringTokenizer category_group_tokens(category_group_name_str, ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- // Don't allow empty tokens, nor tokens with leading or trailing space.
- DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- category_group_token))
- << "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str()))
- return true;
-
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
- had_enabled_by_default = true;
- }
- // Do a second pass to check for explicitly disabled categories
- // (those explicitly enabled have priority due to first pass).
- category_group_tokens.Reset();
- bool category_group_disabled = false;
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- for (const std::string& category : excluded_categories_) {
- if (MatchPattern(category_group_token, category)) {
- // Current token of category_group_name is present in excluded_list.
- // Flag the exclusion and proceed further to check if any of the
- // remaining categories of category_group_name is not present in the
- // excluded_ list.
- category_group_disabled = true;
- break;
- }
- // One of the category of category_group_name is not present in
- // excluded_ list. So, if it's not a disabled-by-default category,
- // it has to be included_ list. Enable the category_group_name
- // for recording.
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
- category_group_disabled = false;
- }
- }
- // One of the categories present in category_group_name is not present in
- // excluded_ list. Implies this category_group_name group can be enabled
- // for recording, since one of its groups is enabled for recording.
- if (!category_group_disabled)
- break;
- }
- // If the category group is not excluded, and there are no included patterns
- // we consider this category group enabled, as long as it had categories
- // other than disabled-by-default.
- return !category_group_disabled && had_enabled_by_default &&
- included_categories_.empty();
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
void TraceConfig::Merge(const TraceConfig& config) {
@@ -371,28 +291,10 @@ void TraceConfig::Merge(const TraceConfig& config) {
<< "set of options.";
}
- // Keep included patterns only if both filters have an included entry.
- // Otherwise, one of the filter was specifying "*" and we want to honor the
- // broadest filter.
- if (HasIncludedPatterns() && config.HasIncludedPatterns()) {
- included_categories_.insert(included_categories_.end(),
- config.included_categories_.begin(),
- config.included_categories_.end());
- } else {
- included_categories_.clear();
- }
+ category_filter_.Merge(config.category_filter_);
memory_dump_config_.Merge(config.memory_dump_config_);
- disabled_categories_.insert(disabled_categories_.end(),
- config.disabled_categories_.begin(),
- config.disabled_categories_.end());
- excluded_categories_.insert(excluded_categories_.end(),
- config.excluded_categories_.begin(),
- config.excluded_categories_.end());
- synthetic_delays_.insert(synthetic_delays_.end(),
- config.synthetic_delays_.begin(),
- config.synthetic_delays_.end());
event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
config.event_filters().end());
}
@@ -401,10 +303,7 @@ void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
enable_argument_filter_ = false;
- included_categories_.clear();
- disabled_categories_.clear();
- excluded_categories_.clear();
- synthetic_delays_.clear();
+ category_filter_.Clear();
memory_dump_config_.Clear();
event_filters_.clear();
}
@@ -435,19 +334,13 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
- const ListValue* category_list = nullptr;
- if (dict.GetList(kIncludedCategoriesParam, &category_list))
- SetCategoriesFromIncludedList(*category_list);
- if (dict.GetList(kExcludedCategoriesParam, &category_list))
- SetCategoriesFromExcludedList(*category_list);
- if (dict.GetList(kSyntheticDelaysParam, &category_list))
- SetSyntheticDelaysFromList(*category_list);
+ category_filter_.InitializeFromConfigDict(dict);
const base::ListValue* category_event_filters = nullptr;
if (dict.GetList(kEventFiltersParam, &category_event_filters))
SetEventFiltersFromConfigList(*category_event_filters);
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
    // If dump triggers are not set, the client is using the legacy config with
    // just the category enabled. So, use the default periodic dump config.
const DictionaryValue* memory_dump_config = nullptr;
@@ -468,37 +361,8 @@ void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string) {
- if (!category_filter_string.empty()) {
- std::vector<std::string> split = SplitString(
- category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- for (const std::string& category : split) {
- // Ignore empty categories.
- if (category.empty())
- continue;
- // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
- if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
- CompareCase::SENSITIVE) &&
- category.back() == ')') {
- std::string synthetic_category = category.substr(
- strlen(kSyntheticDelayCategoryFilterPrefix),
- category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
- size_t name_length = synthetic_category.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != synthetic_category.size() - 1) {
- synthetic_delays_.push_back(synthetic_category);
- }
- } else if (category.front() == '-') {
- // Excluded categories start with '-'.
- // Remove '-' from category string.
- excluded_categories_.push_back(category.substr(1));
- } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
- }
+ if (!category_filter_string.empty())
+ category_filter_.InitializeFromString(category_filter_string);
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
@@ -523,64 +387,11 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
SetDefaultMemoryDumpConfig();
}
}
-void TraceConfig::SetCategoriesFromIncludedList(
- const ListValue& included_list) {
- included_categories_.clear();
- for (size_t i = 0; i < included_list.GetSize(); ++i) {
- std::string category;
- if (!included_list.GetString(i, &category))
- continue;
- if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
-}
-
-void TraceConfig::SetCategoriesFromExcludedList(
- const ListValue& excluded_list) {
- excluded_categories_.clear();
- for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
- std::string category;
- if (excluded_list.GetString(i, &category))
- excluded_categories_.push_back(category);
- }
-}
-
-void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
- synthetic_delays_.clear();
- for (size_t i = 0; i < list.GetSize(); ++i) {
- std::string delay;
- if (!list.GetString(i, &delay))
- continue;
- // Synthetic delays are of the form "delay;option;option;...".
- size_t name_length = delay.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != delay.size() - 1) {
- synthetic_delays_.push_back(delay);
- }
- }
-}
-
-void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const {
- if (categories.empty())
- return;
-
- auto list = MakeUnique<ListValue>();
- for (const std::string& category : categories)
- list->AppendString(category);
- dict->Set(param, std::move(list));
-}
-
void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config) {
// Set allowed dump modes.
@@ -673,29 +484,7 @@ void TraceConfig::SetEventFiltersFromConfigList(
<< "Invalid predicate name in category event filter.";
EventFilterConfig new_config(predicate_name);
- const base::ListValue* included_list = nullptr;
- CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
- << "Missing included_categories in category event filter.";
-
- for (size_t i = 0; i < included_list->GetSize(); ++i) {
- std::string category;
- if (included_list->GetString(i, &category))
- new_config.AddIncludedCategory(category);
- }
-
- const base::ListValue* excluded_list = nullptr;
- if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
- for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
- std::string category;
- if (excluded_list->GetString(i, &category))
- new_config.AddExcludedCategory(category);
- }
- }
-
- const base::DictionaryValue* args_dict = nullptr;
- if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
- new_config.SetArgs(args_dict->CreateDeepCopy());
-
+ new_config.InitializeFromConfigDict(event_filter);
event_filters_.push_back(new_config);
}
}
@@ -722,50 +511,20 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
- StringList categories(included_categories_);
- categories.insert(categories.end(),
- disabled_categories_.begin(),
- disabled_categories_.end());
- AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
- AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
- AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+ category_filter_.ToDict(dict.get());
if (!event_filters_.empty()) {
std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
for (const EventFilterConfig& filter : event_filters_) {
std::unique_ptr<base::DictionaryValue> filter_dict(
new base::DictionaryValue());
- filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
-
- std::unique_ptr<base::ListValue> included_categories_list(
- new base::ListValue());
- for (const std::string& included_category : filter.included_categories())
- included_categories_list->AppendString(included_category);
-
- filter_dict->Set(kIncludedCategoriesParam,
- std::move(included_categories_list));
-
- if (!filter.excluded_categories().empty()) {
- std::unique_ptr<base::ListValue> excluded_categories_list(
- new base::ListValue());
- for (const std::string& excluded_category :
- filter.excluded_categories())
- excluded_categories_list->AppendString(excluded_category);
-
- filter_dict->Set(kExcludedCategoriesParam,
- std::move(excluded_categories_list));
- }
-
- if (filter.filter_args())
- filter_dict->Set(kFilterArgsParam,
- filter.filter_args()->CreateDeepCopy());
-
+ filter.ToDict(filter_dict.get());
filter_list->Append(std::move(filter_dict));
}
dict->Set(kEventFiltersParam, std::move(filter_list));
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
auto allowed_modes = MakeUnique<ListValue>();
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
@@ -829,59 +588,5 @@ std::string TraceConfig::ToTraceOptionsString() const {
return ret;
}
-void TraceConfig::WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : values) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
- ++token_cnt;
- }
-}
-
-void TraceConfig::WriteCategoryFilterString(const StringList& delays,
- std::string* out) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : delays) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
- category.c_str());
- ++token_cnt;
- }
-}
-
-bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
- // Check the disabled- filters and the disabled-* wildcard first so that a
- // "*" filter does not include the disabled.
- for (const std::string& category : disabled_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
- return false;
-
- for (const std::string& category : included_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- return false;
-}
-
-bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- StringPiece str) {
- return str.empty() || str.front() == ' ' || str.back() == ' ';
-}
-
-bool TraceConfig::HasIncludedPatterns() const {
- return !included_categories_.empty();
-}
-
} // namespace trace_event
} // namespace base
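
The filter-string grammar that moved into TraceConfigCategoryFilter is
unchanged: comma-separated tokens that are included as-is, excluded with a
'-' prefix, treated as disabled-by-default when so prefixed, or parsed as
synthetic delays of the form DELAY(name;options). A sketch with illustrative
category names, assuming the two-argument TraceConfig constructor that
forwards to InitializeFromStrings():

  base::trace_event::TraceConfig config(
      "cc,toplevel,-ipc,disabled-by-default-memory-infra,"
      "DELAY(gpu.PresentingFrame;16)",
      "record-until-full");
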
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 717c261316..29edc9a8ec 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -17,6 +17,7 @@
#include "base/gtest_prod_util.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_config_category_filter.h"
#include "base/values.h"
namespace base {
@@ -94,26 +95,25 @@ class BASE_EXPORT TraceConfig {
EventFilterConfig& operator=(const EventFilterConfig& rhs);
- void AddIncludedCategory(const std::string& category);
- void AddExcludedCategory(const std::string& category);
- void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ void InitializeFromConfigDict(const base::DictionaryValue* event_filter);
+
+ void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
+
+ void ToDict(DictionaryValue* filter_dict) const;
+
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
bool IsCategoryGroupEnabled(const char* category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
base::DictionaryValue* filter_args() const { return args_.get(); }
- const StringList& included_categories() const {
- return included_categories_;
- }
- const StringList& excluded_categories() const {
- return excluded_categories_;
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
}
private:
std::string predicate_name_;
- StringList included_categories_;
- StringList excluded_categories_;
+ TraceConfigCategoryFilter category_filter_;
std::unique_ptr<base::DictionaryValue> args_;
};
typedef std::vector<EventFilterConfig> EventFilters;
@@ -241,6 +241,10 @@ class BASE_EXPORT TraceConfig {
// Clears and resets the memory dump config.
void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
+ }
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -254,15 +258,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- IsEmptyOrContainsLeadingOrTrailingWhitespace);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -279,13 +274,6 @@ class BASE_EXPORT TraceConfig {
void InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string);
- void SetCategoriesFromIncludedList(const ListValue& included_list);
- void SetCategoriesFromExcludedList(const ListValue& excluded_list);
- void SetSyntheticDelaysFromList(const ListValue& list);
- void AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const;
-
void SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
@@ -295,32 +283,14 @@ class BASE_EXPORT TraceConfig {
std::string ToTraceOptionsString() const;
- void WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const;
- void WriteCategoryFilterString(const StringList& delays,
- std::string* out) const;
-
- // Returns true if the category is enabled according to this trace config.
- // This tells whether a category is enabled from the TraceConfig's
- // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
- // category is enabled from the tracing runtime's perspective.
- bool IsCategoryEnabled(const char* category_name) const;
-
- static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
-
- bool HasIncludedPatterns() const;
-
TraceRecordMode record_mode_;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
+ TraceConfigCategoryFilter category_filter_;
+
MemoryDumpConfig memory_dump_config_;
- StringList included_categories_;
- StringList disabled_categories_;
- StringList excluded_categories_;
- StringList synthetic_delays_;
EventFilters event_filters_;
};
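
EventFilterConfig now parses its category lists through the shared filter via
InitializeFromConfigDict(). A sketch of the dictionary shape it consumes,
assuming the single-string TraceConfig constructor that forwards to
InitializeFromConfigString(); the snake_case keys are assumed values of the
k*Param constants, and the predicate name and argument values are
illustrative:

  const char kTraceConfig[] = R"({
    "record_mode": "record-until-full",
    "event_filters": [{
      "filter_predicate": "event_whitelist_predicate",
      "included_categories": ["benchmark"],
      "excluded_categories": ["toplevel"],
      "filter_args": {"event_name_whitelist": ["MyEvent"]}
    }]
  })";
  base::trace_event::TraceConfig config(kTraceConfig);
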
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
new file mode 100644
index 0000000000..dc30e0ea99
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.cc
@@ -0,0 +1,298 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config_category_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+const char kSyntheticDelaysParam[] = "synthetic_delays";
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+}
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter(
+ const TraceConfigCategoryFilter& other)
+ : included_categories_(other.included_categories_),
+ disabled_categories_(other.disabled_categories_),
+ excluded_categories_(other.excluded_categories_),
+ synthetic_delays_(other.synthetic_delays_) {}
+
+TraceConfigCategoryFilter::~TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
+ const TraceConfigCategoryFilter& rhs) {
+ included_categories_ = rhs.included_categories_;
+ disabled_categories_ = rhs.disabled_categories_;
+ excluded_categories_ = rhs.excluded_categories_;
+ synthetic_delays_ = rhs.synthetic_delays_;
+ return *this;
+}
+
+void TraceConfigCategoryFilter::InitializeFromString(
+ const StringPiece& category_filter_string) {
+ std::vector<std::string> split =
+ SplitString(category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const std::string& category : split) {
+ // Ignore empty categories.
+ if (category.empty())
+ continue;
+ // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+ if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+ CompareCase::SENSITIVE) &&
+ category.back() == ')') {
+ std::string synthetic_category = category.substr(
+ strlen(kSyntheticDelayCategoryFilterPrefix),
+ category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+ size_t name_length = synthetic_category.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != synthetic_category.size() - 1) {
+ synthetic_delays_.push_back(synthetic_category);
+ }
+ } else if (category.front() == '-') {
+ // Excluded categories start with '-'.
+ // Remove '-' from category string.
+ excluded_categories_.push_back(category.substr(1));
+ } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+ TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+ disabled_categories_.push_back(category);
+ } else {
+ included_categories_.push_back(category);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::InitializeFromConfigDict(
+ const DictionaryValue& dict) {
+ const ListValue* category_list = nullptr;
+ if (dict.GetList(kIncludedCategoriesParam, &category_list))
+ SetCategoriesFromIncludedList(*category_list);
+ if (dict.GetList(kExcludedCategoriesParam, &category_list))
+ SetCategoriesFromExcludedList(*category_list);
+ if (dict.GetList(kSyntheticDelaysParam, &category_list))
+ SetSyntheticDelaysFromList(*category_list);
+}
+
+bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
+ const char* category_group_name) const {
+ bool had_enabled_by_default = false;
+ DCHECK(category_group_name);
+ std::string category_group_name_str = category_group_name;
+ StringTokenizer category_group_tokens(category_group_name_str, ",");
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+ // Don't allow empty tokens, nor tokens with leading or trailing space.
+ DCHECK(IsCategoryNameAllowed(category_group_token))
+ << "Disallowed category string";
+ if (IsCategoryEnabled(category_group_token.c_str()))
+ return true;
+
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ had_enabled_by_default = true;
+ }
+  // Do a second pass to check for explicitly disabled categories
+  // (categories explicitly enabled in the first pass take priority).
+ category_group_tokens.Reset();
+ bool category_group_disabled = false;
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+ for (const std::string& category : excluded_categories_) {
+ if (MatchPattern(category_group_token, category)) {
+        // The current token of category_group_name is in the excluded
+        // list. Flag the exclusion and keep checking whether any of the
+        // remaining categories of category_group_name is absent from the
+        // excluded list.
+ category_group_disabled = true;
+ break;
+ }
+      // This category of category_group_name is not in the excluded
+      // list. If it is also not a disabled-by-default category, it must
+      // be in the included list, so enable the category_group_name
+      // for recording.
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ category_group_disabled = false;
+ }
+    // At least one of the categories in category_group_name is not in
+    // the excluded list, so this category group can be enabled for
+    // recording.
+ if (!category_group_disabled)
+ break;
+ }
+  // If the category group is not excluded and there are no included
+  // patterns, we consider this category group enabled, as long as it
+  // contains categories other than disabled-by-default ones.
+ return !category_group_disabled && had_enabled_by_default &&
+ included_categories_.empty();
+}
+
+bool TraceConfigCategoryFilter::IsCategoryEnabled(
+ const char* category_name) const {
+  // Check the disabled- filters and the disabled-* wildcard first so
+  // that a "*" filter does not include disabled-by-default categories.
+ for (const std::string& category : disabled_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+ return false;
+
+ for (const std::string& category : included_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ return false;
+}
+
+void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
+  // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was effectively specifying "*" and we
+  // want to honor the broadest filter.
+ if (!included_categories_.empty() && !config.included_categories_.empty()) {
+ included_categories_.insert(included_categories_.end(),
+ config.included_categories_.begin(),
+ config.included_categories_.end());
+ } else {
+ included_categories_.clear();
+ }
+
+ disabled_categories_.insert(disabled_categories_.end(),
+ config.disabled_categories_.begin(),
+ config.disabled_categories_.end());
+ excluded_categories_.insert(excluded_categories_.end(),
+ config.excluded_categories_.begin(),
+ config.excluded_categories_.end());
+ synthetic_delays_.insert(synthetic_delays_.end(),
+ config.synthetic_delays_.begin(),
+ config.synthetic_delays_.end());
+}
+
+void TraceConfigCategoryFilter::Clear() {
+ included_categories_.clear();
+ disabled_categories_.clear();
+ excluded_categories_.clear();
+ synthetic_delays_.clear();
+}
+
+void TraceConfigCategoryFilter::ToDict(DictionaryValue* dict) const {
+ StringList categories(included_categories_);
+ categories.insert(categories.end(), disabled_categories_.begin(),
+ disabled_categories_.end());
+ AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
+ AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
+ AddCategoriesToDict(synthetic_delays_, kSyntheticDelaysParam, dict);
+}
+
+std::string TraceConfigCategoryFilter::ToFilterString() const {
+ std::string filter_string;
+ WriteCategoryFilterString(included_categories_, &filter_string, true);
+ WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+ WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+ WriteCategoryFilterString(synthetic_delays_, &filter_string);
+ return filter_string;
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
+ const ListValue& included_list) {
+ included_categories_.clear();
+ for (size_t i = 0; i < included_list.GetSize(); ++i) {
+ std::string category;
+ if (!included_list.GetString(i, &category))
+ continue;
+ if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+ TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+ disabled_categories_.push_back(category);
+ } else {
+ included_categories_.push_back(category);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
+ const ListValue& excluded_list) {
+ excluded_categories_.clear();
+ for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+ std::string category;
+ if (excluded_list.GetString(i, &category))
+ excluded_categories_.push_back(category);
+ }
+}
+
+void TraceConfigCategoryFilter::SetSyntheticDelaysFromList(
+ const ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ std::string delay;
+ if (!list.GetString(i, &delay))
+ continue;
+ // Synthetic delays are of the form "delay;option;option;...".
+ size_t name_length = delay.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != delay.size() - 1) {
+ synthetic_delays_.push_back(delay);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::AddCategoriesToDict(
+ const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const {
+ if (categories.empty())
+ return;
+
+ auto list = MakeUnique<ListValue>();
+ for (const std::string& category : categories)
+ list->AppendString(category);
+ dict->Set(param, std::move(list));
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& values,
+ std::string* out,
+ bool included) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : values) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+ ++token_cnt;
+ }
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& delays,
+ std::string* out) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : delays) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+ category.c_str());
+ ++token_cnt;
+ }
+}
+
+// static
+bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
+ return !str.empty() && str.front() != ' ' && str.back() != ' ';
+}
+
+} // namespace trace_event
+} // namespace base
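
A minimal usage sketch of the TraceConfigCategoryFilter introduced above
(not part of the patch; category names are illustrative). It exercises the
filter-string syntax parsed by InitializeFromString(): plain entries are
included, '-' prefixed entries are excluded, and DELAY(...) entries are
synthetic delays.

  #include "base/trace_event/trace_config_category_filter.h"

  void SketchCategoryFilter() {
    base::trace_event::TraceConfigCategoryFilter filter;
    filter.InitializeFromString("cc,-blink,DELAY(gpu.Delay;16)");

    filter.IsCategoryEnabled("cc");     // true: matches an included entry.
    filter.IsCategoryEnabled("blink");  // false: excluded, not included.

    // Serializes back to "cc,-blink,DELAY(gpu.Delay;16)".
    std::string round_trip = filter.ToFilterString();
  }
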
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
new file mode 100644
index 0000000000..df8c3a5b2a
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+// Configuration of categories enabled and disabled in TraceConfig.
+class BASE_EXPORT TraceConfigCategoryFilter {
+ public:
+ using StringList = std::vector<std::string>;
+
+ TraceConfigCategoryFilter();
+ TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
+ ~TraceConfigCategoryFilter();
+
+ TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
+
+  // Initializes from a category filter string. See the TraceConfig
+  // constructor for a description of the category filter string format.
+ void InitializeFromString(const StringPiece& category_filter_string);
+
+  // Initializes the TraceConfigCategoryFilter from the config dictionary.
+ void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Merges the given category filter config into this one.
+ void Merge(const TraceConfigCategoryFilter& config);
+ void Clear();
+
+  // Returns true if at least one category in the given group is enabled
+  // by this filter. This is used to determine whether the TRACE_* macros
+  // are enabled for a category group.
+ bool IsCategoryGroupEnabled(const char* category_group_name) const;
+
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
+ bool IsCategoryEnabled(const char* category_name) const;
+
+ void ToDict(DictionaryValue* dict) const;
+
+ std::string ToFilterString() const;
+
+  // Returns true if the category name is non-empty and not space-padded.
+ static bool IsCategoryNameAllowed(StringPiece str);
+
+ const StringList& included_categories() const { return included_categories_; }
+ const StringList& excluded_categories() const { return excluded_categories_; }
+ const StringList& synthetic_delays() const { return synthetic_delays_; }
+
+ private:
+ void SetCategoriesFromIncludedList(const ListValue& included_list);
+ void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+ void SetSyntheticDelaysFromList(const ListValue& list);
+
+ void AddCategoriesToDict(const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const;
+
+ void WriteCategoryFilterString(const StringList& values,
+ std::string* out,
+ bool included) const;
+ void WriteCategoryFilterString(const StringList& delays,
+ std::string* out) const;
+
+ StringList included_categories_;
+ StringList disabled_categories_;
+ StringList excluded_categories_;
+ StringList synthetic_delays_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
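
A sketch of the Merge() semantics implemented in the .cc above, with
illustrative category names: an empty included list means "match
everything", so merging with such a filter clears the included patterns to
honor the broadest filter, while the other lists are concatenated.

  base::trace_event::TraceConfigCategoryFilter a;
  a.InitializeFromString("cc");    // included: {"cc"}
  base::trace_event::TraceConfigCategoryFilter b;
  b.InitializeFromString("-net");  // included: {} (matches everything)

  a.Merge(b);
  // a.included_categories() is now empty, since b matched everything;
  // "net" stays in a.excluded_categories().
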
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 74aa7bdc63..a856c27192 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -304,10 +304,12 @@ TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
CheckDefaultTraceConfigBehavior(tc_asterisk);
// They differ only for internal checking.
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc_empty.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(
+ tc_empty.category_filter().IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(
+ tc_asterisk.category_filter().IsCategoryEnabled("not-excluded-category"));
}
TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
@@ -402,13 +404,15 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
"-exc_pattern*,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
tc.ToCategoryFilterString().c_str());
- EXPECT_TRUE(tc.IsCategoryEnabled("included"));
- EXPECT_TRUE(tc.IsCategoryEnabled("inc_pattern_category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-cc"));
- EXPECT_FALSE(tc.IsCategoryEnabled("excluded"));
- EXPECT_FALSE(tc.IsCategoryEnabled("exc_pattern_category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-others"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-nor-included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("inc_pattern_category"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("disabled-by-default-cc"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("excluded"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("exc_pattern_category"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-others"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("not-excluded-nor-included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
@@ -431,10 +435,12 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
EXPECT_STREQ("event_whitelist_predicate",
event_filter.predicate_name().c_str());
- EXPECT_EQ(1u, event_filter.included_categories().size());
- EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
- EXPECT_EQ(1u, event_filter.excluded_categories().size());
- EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().included_categories().size());
+ EXPECT_STREQ("*",
+ event_filter.category_filter().included_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().excluded_categories().size());
+ EXPECT_STREQ("unfiltered_cat",
+ event_filter.category_filter().excluded_categories()[0].c_str());
EXPECT_TRUE(event_filter.filter_args());
std::string json_out;
@@ -449,8 +455,10 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
- EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
- EXPECT_FALSE(tc2.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc2.category_filter().IsCategoryEnabled(
+ "non-disabled-by-default-pattern"));
+ EXPECT_FALSE(
+ tc2.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
@@ -538,8 +546,9 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
"}";
tc = TraceConfig(invalid_config_string_2);
- EXPECT_TRUE(tc.IsCategoryEnabled("category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("category"));
+ EXPECT_TRUE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
}
@@ -591,27 +600,25 @@ TEST(TraceConfigTest, IsCategoryGroupEnabled) {
EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
}
-TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
- // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
- // categories that are explicitly forbidden.
- // This method is called in a DCHECK to assert that we don't have these types
- // of strings as categories.
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- ""));
- EXPECT_FALSE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "good_category"));
+TEST(TraceConfigTest, IsCategoryNameAllowed) {
+ // Test that IsCategoryNameAllowed actually catches categories that are
+ // explicitly forbidden. This method is called in a DCHECK to assert that we
+ // don't have these types of strings as categories.
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(TraceConfigCategoryFilter::IsCategoryNameAllowed(""));
+ EXPECT_TRUE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("good_category"));
}
TEST(TraceConfigTest, SetTraceOptionValues) {
@@ -637,20 +644,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
EXPECT_EQ(tc_str1, tc2.ToString());
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
+ ASSERT_EQ(2u, tc1.memory_dump_config().triggers.size());
EXPECT_EQ(200u,
- tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc1.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config().triggers[0].level_of_detail);
EXPECT_EQ(2000u,
- tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc1.memory_dump_config_.triggers[1].level_of_detail);
+ tc1.memory_dump_config().triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
- tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+ tc1.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
@@ -658,20 +665,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfig tc3(tc_str3);
EXPECT_EQ(tc_str3, tc3.ToString());
EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc3.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config().triggers[0].level_of_detail);
std::string tc_str4 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
1 /*heavy_period */);
TraceConfig tc4(tc_str4);
EXPECT_EQ(tc_str4, tc4.ToString());
- ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc4.memory_dump_config_.triggers[0].level_of_detail);
+ tc4.memory_dump_config().triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -679,22 +686,22 @@ TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
tc.ToString());
- EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
+ EXPECT_EQ(
+      TraceConfig::MemoryDumpConfig::HeapProfiler::
+          kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
- EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(2u, tc.memory_dump_config().triggers.size());
+ EXPECT_EQ(
+      TraceConfig::MemoryDumpConfig::HeapProfiler::
+          kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
} // namespace trace_event
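
The test migration above follows from the new public accessors: category
checks and the memory dump configuration no longer require FRIEND_TEST
access. A minimal sketch, with illustrative filter strings:

  base::trace_event::TraceConfig tc("included,-excluded", "");
  tc.category_filter().IsCategoryEnabled("included");  // true
  tc.category_filter().IsCategoryEnabled("excluded");  // false
  size_t trigger_count = tc.memory_dump_config().triggers.size();
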
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 82a552aa4e..85e1e16312 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -3088,11 +3088,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"testing_predicate\", "
- " \"included_categories\": [\"filtered_cat\"]"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" "
" ]"
@@ -3111,12 +3115,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a horse");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
+
// This is scoped so we can test the end event being filtered.
{ TRACE_EVENT0("filtered_cat", "another cat whoa"); }
EndTraceAndFlush();
- EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(4u, filter_hits_counter.filter_trace_event_hit_count);
EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
}
@@ -3125,12 +3132,14 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"], "
- " \"excluded_categories\": [\"unfiltered_cat\"], "
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("*") "\"], "
" \"filter_args\": {"
" \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
" }"
@@ -3148,12 +3157,16 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a pony");
EndTraceAndFlush();
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_FALSE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
@@ -3161,12 +3174,16 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"excluded_categories\": [\"excluded_cat\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" ]"
"}",
@@ -3180,6 +3197,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("excluded_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
EndTraceAndFlush();
@@ -3187,6 +3206,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_TRUE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
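
The filtering tests above exercise the new behavior that event filters can
target disabled-by-default categories in their included lists. A
hypothetical config in the same shape as the test strings:

  const char kSketchConfig[] =
      "{"
      "  \"included_categories\": [\"my_cat\","
      "    \"" TRACE_DISABLED_BY_DEFAULT("my_cat") "\"],"
      "  \"event_filters\": ["
      "    {"
      "      \"filter_predicate\": \"event_whitelist_predicate\","
      "      \"included_categories\":"
      "        [\"" TRACE_DISABLED_BY_DEFAULT("my_cat") "\"],"
      "      \"filter_args\": {"
      "        \"event_name_whitelist\": [\"interesting_event\"]"
      "      }"
      "    }"
      "  ]"
      "}";
  base::trace_event::TraceConfig tc(kSketchConfig);
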
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 10b090ae57..d798a9539b 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -19,8 +19,10 @@
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
+#include "base/process/process_info.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
@@ -1509,8 +1511,18 @@ void TraceLog::AddMetadataEventsWhileLocked() {
process_name_);
}
+#if !defined(OS_NACL) && !defined(OS_IOS)
+ Time process_creation_time = CurrentProcessInfo::CreationTime();
+ if (!process_creation_time.is_null()) {
+ TimeDelta process_uptime = Time::Now() - process_creation_time;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_uptime_seconds",
+ "uptime", process_uptime.InSeconds());
+ }
+#endif // !defined(OS_NACL) && !defined(OS_IOS)
+
if (!process_labels_.empty()) {
- std::vector<std::string> labels;
+ std::vector<base::StringPiece> labels;
for (const auto& it : process_labels_)
labels.push_back(it.second);
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
diff --git a/base/values.cc b/base/values.cc
index 5cc0d693bd..fa90a57dbb 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -91,7 +91,7 @@ Value::Value(const Value& that) {
InternalCopyConstructFrom(that);
}
-Value::Value(Value&& that) {
+Value::Value(Value&& that) noexcept {
InternalMoveConstructFrom(std::move(that));
}
@@ -173,27 +173,21 @@ Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
}
Value& Value::operator=(const Value& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalCopyAssignFromSameType(that);
- } else {
- InternalCleanup();
- InternalCopyConstructFrom(that);
- }
+ if (type_ == that.type_) {
+ InternalCopyAssignFromSameType(that);
+ } else {
+    // This is not a self-assignment because the type_ doesn't match.
+ InternalCleanup();
+ InternalCopyConstructFrom(that);
}
return *this;
}
Value& Value::operator=(Value&& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalMoveAssignFromSameType(std::move(that));
- } else {
- InternalCleanup();
- InternalMoveConstructFrom(std::move(that));
- }
- }
+ DCHECK(this != &that) << "attempt to self move assign.";
+ InternalCleanup();
+ InternalMoveConstructFrom(std::move(that));
return *this;
}
@@ -398,61 +392,115 @@ std::unique_ptr<Value> Value::CreateDeepCopy() const {
return WrapUnique(DeepCopy());
}
-bool Value::Equals(const Value* other) const {
- if (other->type() != type())
+bool operator==(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
return false;
- switch (type()) {
- case Type::NONE:
+ switch (lhs.type_) {
+ case Value::Type::NONE:
return true;
- case Type::BOOLEAN:
- return bool_value_ == other->bool_value_;
- case Type::INTEGER:
- return int_value_ == other->int_value_;
- case Type::DOUBLE:
- return double_value_ == other->double_value_;
- case Type::STRING:
- return *string_value_ == *(other->string_value_);
- case Type::BINARY:
- return *binary_value_ == *(other->binary_value_);
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ == rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ == rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ == rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ == *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ == *rhs.binary_value_;
// TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
// are completely inlined.
- case Type::DICTIONARY: {
- if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
+ case Value::Type::DICTIONARY:
+ if ((*lhs.dict_ptr_)->size() != (*rhs.dict_ptr_)->size())
return false;
-
- return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
- std::begin(**(other->dict_ptr_)),
- [](const DictStorage::value_type& lhs,
- const DictStorage::value_type& rhs) {
- if (lhs.first != rhs.first)
- return false;
-
- return lhs.second->Equals(rhs.second.get());
+ return std::equal(std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) ==
+ std::tie(v.first, *v.second);
});
- }
- case Type::LIST: {
- if (list_->size() != other->list_->size())
+ case Value::Type::LIST:
+ if (lhs.list_->size() != rhs.list_->size())
return false;
+ return std::equal(
+ std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
+ [](const Value::ListStorage::value_type& u,
+ const Value::ListStorage::value_type& v) { return *u == *v; });
+ }
- return std::equal(std::begin(*list_), std::end(*list_),
- std::begin(*(other->list_)),
- [](const ListStorage::value_type& lhs,
- const ListStorage::value_type& rhs) {
- return lhs->Equals(rhs.get());
- });
- }
+ NOTREACHED();
+ return false;
+}
+
+bool operator!=(const Value& lhs, const Value& rhs) {
+ return !(lhs == rhs);
+}
+
+bool operator<(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
+ return lhs.type_ < rhs.type_;
+
+ switch (lhs.type_) {
+ case Value::Type::NONE:
+ return false;
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ < rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ < rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ < rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ < *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ < *rhs.binary_value_;
+ // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+ // are completely inlined.
+ case Value::Type::DICTIONARY:
+ return std::lexicographical_compare(
+ std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_), std::end(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) < std::tie(v.first, *v.second);
+ });
+ case Value::Type::LIST:
+ return std::lexicographical_compare(
+ std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
+ std::end(*rhs.list_),
+ [](const Value::ListStorage::value_type& u,
+ const Value::ListStorage::value_type& v) { return *u < *v; });
}
NOTREACHED();
return false;
}
+bool operator>(const Value& lhs, const Value& rhs) {
+ return rhs < lhs;
+}
+
+bool operator<=(const Value& lhs, const Value& rhs) {
+ return !(rhs < lhs);
+}
+
+bool operator>=(const Value& lhs, const Value& rhs) {
+ return !(lhs < rhs);
+}
+
+bool Value::Equals(const Value* other) const {
+ DCHECK(other);
+ return *this == *other;
+}
+
// static
bool Value::Equals(const Value* a, const Value* b) {
- if ((a == NULL) && (b == NULL)) return true;
- if ((a == NULL) ^ (b == NULL)) return false;
- return a->Equals(b);
+ if ((a == NULL) && (b == NULL))
+ return true;
+ if ((a == NULL) ^ (b == NULL))
+ return false;
+ return *a == *b;
}
void Value::InternalCopyFundamentalValue(const Value& that) {
@@ -533,6 +581,8 @@ void Value::InternalMoveConstructFrom(Value&& that) {
}
void Value::InternalCopyAssignFromSameType(const Value& that) {
+ // TODO(crbug.com/646113): make this a DCHECK once base::Value does not have
+ // subclasses.
CHECK_EQ(type_, that.type_);
switch (type_) {
@@ -562,32 +612,6 @@ void Value::InternalCopyAssignFromSameType(const Value& that) {
}
}
-void Value::InternalMoveAssignFromSameType(Value&& that) {
- CHECK_EQ(type_, that.type_);
-
- switch (type_) {
- case Type::NONE:
- case Type::BOOLEAN:
- case Type::INTEGER:
- case Type::DOUBLE:
- InternalCopyFundamentalValue(that);
- return;
-
- case Type::STRING:
- *string_value_ = std::move(*that.string_value_);
- return;
- case Type::BINARY:
- *binary_value_ = std::move(*that.binary_value_);
- return;
- case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.dict_ptr_);
- return;
- case Type::LIST:
- *list_ = std::move(*that.list_);
- return;
- }
-}
-
void Value::InternalCleanup() {
switch (type_) {
case Type::NONE:
@@ -1237,7 +1261,7 @@ bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
bool ListValue::Remove(const Value& value, size_t* index) {
for (auto it = list_->begin(); it != list_->end(); ++it) {
- if ((*it)->Equals(&value)) {
+ if (**it == value) {
size_t previous_index = it - list_->begin();
list_->erase(it);
@@ -1305,9 +1329,8 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
DCHECK(in_value);
for (const auto& entry : *list_) {
- if (entry->Equals(in_value.get())) {
+ if (*entry == *in_value)
return false;
- }
}
list_->push_back(std::move(in_value));
return true;
@@ -1325,7 +1348,7 @@ bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
ListValue::const_iterator ListValue::Find(const Value& value) const {
return std::find_if(list_->begin(), list_->end(),
[&value](const std::unique_ptr<Value>& entry) {
- return entry->Equals(&value);
+ return *entry == value;
});
}
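
A short sketch of migrating call sites from the now-deprecated Equals() to
the comparison operators added above:

  base::Value a(1);
  base::Value b(1);
  bool equal_old = a.Equals(&b);     // deprecated, still supported.
  bool equal_new = (a == b);         // preferred.
  bool less = (a < base::Value(2));  // ordering is by type, then value.
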
diff --git a/base/values.h b/base/values.h
index 35f66df904..95d5d1c2ba 100644
--- a/base/values.h
+++ b/base/values.h
@@ -74,7 +74,7 @@ class BASE_EXPORT Value {
size_t size);
Value(const Value& that);
- Value(Value&& that);
+ Value(Value&& that) noexcept;
Value(); // A null value.
explicit Value(Type type);
explicit Value(bool in_bool);
@@ -161,11 +161,24 @@ class BASE_EXPORT Value {
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<Value> CreateDeepCopy() const;
+ // Comparison operators so that Values can easily be used with standard
+ // library algorithms and associative containers.
+ BASE_EXPORT friend bool operator==(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator!=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>=(const Value& lhs, const Value& rhs);
+
// Compares if two Value objects have equal contents.
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
bool Equals(const Value* other) const;
// Compares if two Value objects have equal contents. Can handle NULLs.
// NULLs are considered equal but different from Value::CreateNullValue().
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
static bool Equals(const Value* a, const Value* b);
protected:
@@ -191,7 +204,6 @@ class BASE_EXPORT Value {
void InternalCopyConstructFrom(const Value& that);
void InternalMoveConstructFrom(Value&& that);
void InternalCopyAssignFromSameType(const Value& that);
- void InternalMoveAssignFromSameType(Value&& that);
void InternalCleanup();
};
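
As the new header comment notes, the ordering operators make Value usable
with standard library algorithms and associative containers. A minimal
sketch:

  #include <set>
  #include "base/values.h"

  void SketchValueOrdering() {
    std::set<base::Value> ordered;   // keyed by the new operator<.
    ordered.insert(base::Value(2));
    ordered.insert(base::Value(1));
    ordered.insert(base::Value(1));  // duplicate: size stays at 2.
    bool present = ordered.count(base::Value(1)) == 1;  // true.
  }
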
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 3bcdc16e37..9a7eb2f270 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -789,10 +789,10 @@ TEST(ValuesTest, Equals) {
std::unique_ptr<Value> null1(Value::CreateNullValue());
std::unique_ptr<Value> null2(Value::CreateNullValue());
EXPECT_NE(null1.get(), null2.get());
- EXPECT_TRUE(null1->Equals(null2.get()));
+ EXPECT_EQ(*null1, *null2);
Value boolean(false);
- EXPECT_FALSE(null1->Equals(&boolean));
+ EXPECT_NE(*null1, boolean);
DictionaryValue dv;
dv.SetBoolean("a", false);
@@ -803,7 +803,7 @@ TEST(ValuesTest, Equals) {
dv.Set("e", Value::CreateNullValue());
std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ EXPECT_EQ(dv, *copy);
std::unique_ptr<ListValue> list(new ListValue);
ListValue* original_list = list.get();
@@ -812,19 +812,19 @@ TEST(ValuesTest, Equals) {
std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
dv.Set("f", std::move(list));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
copy->Set("f", std::move(list_copy));
- EXPECT_TRUE(dv.Equals(copy.get()));
+ EXPECT_EQ(dv, *copy);
original_list->Append(MakeUnique<Value>(true));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
// Check if Equals detects differences in only the keys.
copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ EXPECT_EQ(dv, *copy);
copy->Remove("a", NULL);
copy->SetBoolean("aa", false);
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
}
TEST(ValuesTest, StaticEquals) {
@@ -850,6 +850,126 @@ TEST(ValuesTest, StaticEquals) {
EXPECT_FALSE(Value::Equals(NULL, null1.get()));
}
+TEST(ValuesTest, Comparisons) {
+ // Test None Values.
+ Value null1;
+ Value null2;
+ EXPECT_EQ(null1, null2);
+ EXPECT_FALSE(null1 != null2);
+ EXPECT_FALSE(null1 < null2);
+ EXPECT_FALSE(null1 > null2);
+ EXPECT_LE(null1, null2);
+ EXPECT_GE(null1, null2);
+
+ // Test Bool Values.
+ Value bool1(false);
+ Value bool2(true);
+ EXPECT_FALSE(bool1 == bool2);
+ EXPECT_NE(bool1, bool2);
+ EXPECT_LT(bool1, bool2);
+ EXPECT_FALSE(bool1 > bool2);
+ EXPECT_LE(bool1, bool2);
+ EXPECT_FALSE(bool1 >= bool2);
+
+ // Test Int Values.
+ Value int1(1);
+ Value int2(2);
+ EXPECT_FALSE(int1 == int2);
+ EXPECT_NE(int1, int2);
+ EXPECT_LT(int1, int2);
+ EXPECT_FALSE(int1 > int2);
+ EXPECT_LE(int1, int2);
+ EXPECT_FALSE(int1 >= int2);
+
+ // Test Double Values.
+ Value double1(1.0);
+ Value double2(2.0);
+ EXPECT_FALSE(double1 == double2);
+ EXPECT_NE(double1, double2);
+ EXPECT_LT(double1, double2);
+ EXPECT_FALSE(double1 > double2);
+ EXPECT_LE(double1, double2);
+ EXPECT_FALSE(double1 >= double2);
+
+ // Test String Values.
+ Value string1("1");
+ Value string2("2");
+ EXPECT_FALSE(string1 == string2);
+ EXPECT_NE(string1, string2);
+ EXPECT_LT(string1, string2);
+ EXPECT_FALSE(string1 > string2);
+ EXPECT_LE(string1, string2);
+ EXPECT_FALSE(string1 >= string2);
+
+ // Test Binary Values.
+ Value binary1(std::vector<char>{0x01});
+ Value binary2(std::vector<char>{0x02});
+ EXPECT_FALSE(binary1 == binary2);
+ EXPECT_NE(binary1, binary2);
+ EXPECT_LT(binary1, binary2);
+ EXPECT_FALSE(binary1 > binary2);
+ EXPECT_LE(binary1, binary2);
+ EXPECT_FALSE(binary1 >= binary2);
+
+ // Test Empty List Values.
+ ListValue null_list1;
+ ListValue null_list2;
+ EXPECT_EQ(null_list1, null_list2);
+ EXPECT_FALSE(null_list1 != null_list2);
+ EXPECT_FALSE(null_list1 < null_list2);
+ EXPECT_FALSE(null_list1 > null_list2);
+ EXPECT_LE(null_list1, null_list2);
+ EXPECT_GE(null_list1, null_list2);
+
+ // Test Non Empty List Values.
+ ListValue int_list1;
+ ListValue int_list2;
+ int_list1.AppendInteger(1);
+ int_list2.AppendInteger(2);
+ EXPECT_FALSE(int_list1 == int_list2);
+ EXPECT_NE(int_list1, int_list2);
+ EXPECT_LT(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 > int_list2);
+ EXPECT_LE(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 >= int_list2);
+
+ // Test Empty Dict Values.
+ DictionaryValue null_dict1;
+ DictionaryValue null_dict2;
+ EXPECT_EQ(null_dict1, null_dict2);
+ EXPECT_FALSE(null_dict1 != null_dict2);
+ EXPECT_FALSE(null_dict1 < null_dict2);
+ EXPECT_FALSE(null_dict1 > null_dict2);
+ EXPECT_LE(null_dict1, null_dict2);
+ EXPECT_GE(null_dict1, null_dict2);
+
+ // Test Non Empty Dict Values.
+ DictionaryValue int_dict1;
+ DictionaryValue int_dict2;
+ int_dict1.SetInteger("key", 1);
+ int_dict2.SetInteger("key", 2);
+ EXPECT_FALSE(int_dict1 == int_dict2);
+ EXPECT_NE(int_dict1, int_dict2);
+ EXPECT_LT(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 > int_dict2);
+ EXPECT_LE(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 >= int_dict2);
+
+ // Test Values of different types.
+ std::vector<Value> values = {null1, bool1, int1, double1,
+ string1, binary1, int_dict1, int_list1};
+ for (size_t i = 0; i < values.size(); ++i) {
+ for (size_t j = i + 1; j < values.size(); ++j) {
+ EXPECT_FALSE(values[i] == values[j]);
+ EXPECT_NE(values[i], values[j]);
+ EXPECT_LT(values[i], values[j]);
+ EXPECT_FALSE(values[i] > values[j]);
+ EXPECT_LE(values[i], values[j]);
+ EXPECT_FALSE(values[i] >= values[j]);
+ }
+ }
+}
+
TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
DictionaryValue original_dict;
std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
@@ -895,15 +1015,15 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
- EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
- EXPECT_TRUE(original_null->Equals(copy_null.get()));
- EXPECT_TRUE(original_bool->Equals(copy_bool.get()));
- EXPECT_TRUE(original_int->Equals(copy_int.get()));
- EXPECT_TRUE(original_double->Equals(copy_double.get()));
- EXPECT_TRUE(original_string->Equals(copy_string.get()));
- EXPECT_TRUE(original_string16->Equals(copy_string16.get()));
- EXPECT_TRUE(original_binary->Equals(copy_binary.get()));
- EXPECT_TRUE(original_list->Equals(copy_list.get()));
+ EXPECT_EQ(original_dict, *copy_dict);
+ EXPECT_EQ(*original_null, *copy_null);
+ EXPECT_EQ(*original_bool, *copy_bool);
+ EXPECT_EQ(*original_int, *copy_int);
+ EXPECT_EQ(*original_double, *copy_double);
+ EXPECT_EQ(*original_string, *copy_string);
+ EXPECT_EQ(*original_string16, *copy_string16);
+ EXPECT_EQ(*original_binary, *copy_binary);
+ EXPECT_EQ(*original_list, *copy_list);
}
TEST(ValuesTest, RemoveEmptyChildren) {
@@ -1073,7 +1193,7 @@ TEST(ValuesTest, DictionaryIterator) {
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
EXPECT_FALSE(seen1);
EXPECT_EQ("key1", it.key());
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
}
EXPECT_TRUE(seen1);
@@ -1084,11 +1204,11 @@ TEST(ValuesTest, DictionaryIterator) {
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
if (it.key() == "key1") {
EXPECT_FALSE(seen1);
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
} else if (it.key() == "key2") {
EXPECT_FALSE(seen2);
- EXPECT_TRUE(value2.Equals(&it.value()));
+ EXPECT_EQ(value2, it.value());
seen2 = true;
} else {
ADD_FAILURE();