Diffstat (limited to 'base/trace_event')
-rw-r--r--  base/trace_event/category_registry.cc | 156
-rw-r--r--  base/trace_event/category_registry.h | 93
-rw-r--r--  base/trace_event/common/trace_event_common.h | 163
-rw-r--r--  base/trace_event/etw_manifest/etw_manifest.gyp | 41
-rw-r--r--  base/trace_event/event_name_filter.cc | 26
-rw-r--r--  base/trace_event/event_name_filter.h | 46
-rw-r--r--  base/trace_event/event_name_filter_unittest.cc | 41
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker.cc | 58
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker.h | 27
-rw-r--r--  base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 97
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.cc | 8
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.h | 32
-rw-r--r--  base/trace_event/heap_profiler_event_filter.cc | 67
-rw-r--r--  base/trace_event/heap_profiler_event_filter.h | 40
-rw-r--r--  base/trace_event/heap_profiler_heap_dump_writer.cc | 3
-rw-r--r--  base/trace_event/heap_profiler_stack_frame_deduplicator.cc | 20
-rw-r--r--  base/trace_event/heap_profiler_stack_frame_deduplicator.h | 2
-rw-r--r--  base/trace_event/heap_profiler_type_name_deduplicator.cc | 28
-rw-r--r--  base/trace_event/malloc_dump_provider.cc | 178
-rw-r--r--  base/trace_event/malloc_dump_provider.h | 2
-rw-r--r--  base/trace_event/memory_allocator_dump.h | 6
-rw-r--r--  base/trace_event/memory_dump_manager.cc | 314
-rw-r--r--  base/trace_event/memory_dump_manager.h | 65
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc | 200
-rw-r--r--  base/trace_event/memory_dump_provider.h | 20
-rw-r--r--  base/trace_event/memory_dump_request_args.cc | 17
-rw-r--r--  base/trace_event/memory_dump_request_args.h | 18
-rw-r--r--  base/trace_event/memory_dump_scheduler.cc | 304
-rw-r--r--  base/trace_event/memory_dump_scheduler.h | 141
-rw-r--r--  base/trace_event/memory_dump_session_state.cc | 15
-rw-r--r--  base/trace_event/memory_dump_session_state.h | 22
-rw-r--r--  base/trace_event/memory_infra_background_whitelist.cc | 71
-rw-r--r--  base/trace_event/memory_usage_estimator.cc | 14
-rw-r--r--  base/trace_event/memory_usage_estimator.h | 549
-rw-r--r--  base/trace_event/memory_usage_estimator_unittest.cc | 244
-rw-r--r--  base/trace_event/process_memory_dump.cc | 24
-rw-r--r--  base/trace_event/process_memory_dump.h | 1
-rw-r--r--  base/trace_event/trace_buffer.cc | 13
-rw-r--r--  base/trace_event/trace_category.h | 109
-rw-r--r--  base/trace_event/trace_config.cc | 249
-rw-r--r--  base/trace_event/trace_config.h | 63
-rw-r--r--  base/trace_event/trace_config_memory_test_util.h | 181
-rw-r--r--  base/trace_event/trace_config_unittest.cc | 178
-rw-r--r--  base/trace_event/trace_event.gypi | 107
-rw-r--r--  base/trace_event/trace_event.h | 407
-rw-r--r--  base/trace_event/trace_event_argument.cc | 36
-rw-r--r--  base/trace_event/trace_event_argument_unittest.cc | 10
-rw-r--r--  base/trace_event/trace_event_filter.cc | 21
-rw-r--r--  base/trace_event/trace_event_filter.h | 51
-rw-r--r--  base/trace_event/trace_event_filter_test_utils.cc | 61
-rw-r--r--  base/trace_event/trace_event_filter_test_utils.h | 53
-rw-r--r--  base/trace_event/trace_event_impl.cc | 68
-rw-r--r--  base/trace_event/trace_event_impl.h | 5
-rw-r--r--  base/trace_event/trace_event_memory_overhead.cc | 20
-rw-r--r--  base/trace_event/trace_event_synthetic_delay.h | 3
-rw-r--r--  base/trace_event/trace_event_unittest.cc | 770
-rw-r--r--  base/trace_event/trace_log.cc | 750
-rw-r--r--  base/trace_event/trace_log.h | 140
-rw-r--r--  base/trace_event/trace_log_constants.cc | 3
-rw-r--r--  base/trace_event/trace_sampling_thread.cc | 107
-rw-r--r--  base/trace_event/trace_sampling_thread.h | 54
61 files changed, 4560 insertions(+), 2052 deletions(-)
diff --git a/base/trace_event/category_registry.cc b/base/trace_event/category_registry.cc
new file mode 100644
index 0000000000..e7c14606d6
--- /dev/null
+++ b/base/trace_event/category_registry.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/category_registry.h"
+
+#include <string.h>
+
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/trace_event/trace_category.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+constexpr size_t kMaxCategories = 200;
+const int kNumBuiltinCategories = 4;
+
+// |g_categories| might end up creating dynamic initializers if not POD.
+static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
+
+// These entries must be kept consistent with the kCategory* consts below.
+TraceCategory g_categories[kMaxCategories] = {
+ {0, 0, "tracing categories exhausted; must increase kMaxCategories"},
+ {0, 0, "tracing already shutdown"}, // See kCategoryAlreadyShutdown below.
+ {0, 0, "__metadata"}, // See kCategoryMetadata below.
+ {0, 0, "toplevel"}, // Warmup the toplevel category.
+};
+
+base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
+
+bool IsValidCategoryPtr(const TraceCategory* category) {
+ // If any of these are hit, something has cached a corrupt category pointer.
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
+ return ptr % sizeof(void*) == 0 &&
+ ptr >= reinterpret_cast<uintptr_t>(&g_categories[0]) &&
+ ptr <= reinterpret_cast<uintptr_t>(&g_categories[kMaxCategories - 1]);
+}
+
+} // namespace
+
+// static
+TraceCategory* const CategoryRegistry::kCategoryExhausted = &g_categories[0];
+TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
+ &g_categories[1];
+TraceCategory* const CategoryRegistry::kCategoryMetadata = &g_categories[2];
+
+// static
+void CategoryRegistry::Initialize() {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ for (size_t i = 0; i < kMaxCategories; ++i) {
+ ANNOTATE_BENIGN_RACE(g_categories[i].state_ptr(),
+ "trace_event category enabled");
+ // If this DCHECK is hit in a test it means that ResetForTesting() is not
+ // called and the categories state leaks between test fixtures.
+ DCHECK(!g_categories[i].is_enabled());
+ }
+}
+
+// static
+void CategoryRegistry::ResetForTesting() {
+ // reset_for_testing clears up only the enabled state and filters. The
+ // categories themselves cannot be cleared up because the static pointers
+ // injected by the macros still point to them and cannot be reset.
+ for (size_t i = 0; i < kMaxCategories; ++i)
+ g_categories[i].reset_for_testing();
+}
+
+// static
+TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
+ DCHECK(!strchr(category_name, '"'))
+ << "Category names may not contain double quote";
+
+  // |g_categories| is append-only; avoid using a lock for the fast path.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_categories[i].name(), category_name) == 0) {
+ return &g_categories[i];
+ }
+ }
+ return nullptr;
+}
+
+bool CategoryRegistry::GetOrCreateCategoryLocked(
+ const char* category_name,
+ CategoryInitializerFn category_initializer_fn,
+ TraceCategory** category) {
+  // This is the slow path: the lock is not held in the fast path
+ // (GetCategoryByName), so more than one thread could have reached here trying
+ // to add the same category.
+ *category = GetCategoryByName(category_name);
+ if (*category)
+ return false;
+
+ // Create a new category.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ if (category_index >= kMaxCategories) {
+ NOTREACHED() << "must increase kMaxCategories";
+ *category = kCategoryExhausted;
+ return false;
+ }
+
+ // TODO(primiano): this strdup should be removed. The only documented reason
+ // for it was TraceWatchEvent, which is gone. However, something might have
+ // ended up relying on this. Needs some auditing before removal.
+ const char* category_name_copy = strdup(category_name);
+ ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
+
+ *category = &g_categories[category_index];
+ DCHECK(!(*category)->is_valid());
+ DCHECK(!(*category)->is_enabled());
+ (*category)->set_name(category_name_copy);
+ category_initializer_fn(*category);
+
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ return true;
+}
+
+// static
+const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
+ const uint8_t* category_state) {
+ const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
+ DCHECK(IsValidCategoryPtr(category));
+ return category;
+}
+
+// static
+bool CategoryRegistry::IsBuiltinCategory(const TraceCategory* category) {
+ DCHECK(IsValidCategoryPtr(category));
+ return category < &g_categories[kNumBuiltinCategories];
+}
+
+// static
+CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
+  // The |g_categories| array is append-only. We only have to guarantee not to
+  // return an index to a category that is still being initialized by
+  // GetOrCreateCategoryLocked().
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ return CategoryRegistry::Range(&g_categories[0],
+ &g_categories[category_index]);
+}
+
+} // namespace trace_event
+} // namespace base
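
For orientation, here is a minimal sketch of how a friend class such as TraceLog is expected to combine the lock-free fast path with the locked slow path added above. The lock, the initializer, and the wrapper function are illustrative stand-ins, not part of this change, and GetOrCreateCategoryLocked() is private to the registry's friends:

    #include "base/synchronization/lock.h"
    #include "base/trace_event/category_registry.h"
    #include "base/trace_event/trace_category.h"

    namespace {

    base::Lock g_category_lock;  // Hypothetical caller-side lock.

    void InitializeCategoryState(base::trace_event::TraceCategory* category) {
      // A real initializer (e.g. in TraceLog) would compute the enabled state.
    }

    }  // namespace

    base::trace_event::TraceCategory* GetOrCreateCategory(const char* name) {
      using base::trace_event::CategoryRegistry;
      // Fast path: lock-free lookup, safe because |g_categories| is append-only.
      base::trace_event::TraceCategory* category =
          CategoryRegistry::GetCategoryByName(name);
      if (category)
        return category;
      // Slow path: serialize concurrent creators, as GetOrCreateCategoryLocked()
      // requires the caller to do.
      base::AutoLock lock(g_category_lock);
      CategoryRegistry::GetOrCreateCategoryLocked(name, &InitializeCategoryState,
                                                  &category);
      return category;
    }
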
diff --git a/base/trace_event/category_registry.h b/base/trace_event/category_registry.h
new file mode 100644
index 0000000000..9c08efa3e1
--- /dev/null
+++ b/base/trace_event/category_registry.h
@@ -0,0 +1,93 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+struct TraceCategory;
+class TraceCategoryTest;
+class TraceLog;
+
+// Allows fast and thread-safe access to the state of all tracing categories.
+// All the methods in this class can be concurrently called on multiple threads,
+// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
+// The reason why this is a fully static class with global state is to allow
+// statically defining known categories as global linker-initialized structs,
+// without requiring static initializers.
+class BASE_EXPORT CategoryRegistry {
+ public:
+ // Allows for-each iterations over a slice of the categories array.
+ class Range {
+ public:
+ Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
+ DCHECK_LE(begin, end);
+ }
+ TraceCategory* begin() const { return begin_; }
+ TraceCategory* end() const { return end_; }
+
+ private:
+ TraceCategory* const begin_;
+ TraceCategory* const end_;
+ };
+
+ // Known categories.
+ static TraceCategory* const kCategoryExhausted;
+ static TraceCategory* const kCategoryMetadata;
+ static TraceCategory* const kCategoryAlreadyShutdown;
+
+ // Returns a category entry from the Category.state_ptr() pointer.
+ // TODO(primiano): trace macros should just keep a pointer to the entire
+ // TraceCategory, not just the enabled state pointer. That would remove the
+ // need for this function and make everything cleaner at no extra cost (as
+ // long as the |state_| is the first field of the struct, which can be
+ // guaranteed via static_assert, see TraceCategory ctor).
+ static const TraceCategory* GetCategoryByStatePtr(
+ const uint8_t* category_state);
+
+ // Returns a category from its name or nullptr if not found.
+  // The returned pointer is an indefinitely lived pointer to the
+  // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
+ // pointer and use it for checks in their fast-paths.
+ static TraceCategory* GetCategoryByName(const char* category_name);
+
+ static bool IsBuiltinCategory(const TraceCategory*);
+
+ private:
+ friend class TraceCategoryTest;
+ friend class TraceLog;
+ using CategoryInitializerFn = void (*)(TraceCategory*);
+
+  // Only for debugging/testing purposes; this is a no-op on release builds.
+ static void Initialize();
+
+ // Resets the state of all categories, to clear up the state between tests.
+ static void ResetForTesting();
+
+ // Used to get/create a category in the slow-path. If the category exists
+  // already, this has the same effect as GetCategoryByName and returns false.
+ // If not, a new category is created and the CategoryInitializerFn is invoked
+  // before returning true. The caller must guarantee serialization: either call
+ // this method from a single thread or hold a lock when calling this.
+ static bool GetOrCreateCategoryLocked(const char* category_name,
+ CategoryInitializerFn,
+ TraceCategory**);
+
+  // Allows iterating over the valid categories in a for-each loop.
+ // This includes builtin categories such as __metadata.
+ static Range GetAllCategories();
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
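
A small illustration of the for-each iteration that Range and GetAllCategories() enable, again from the point of view of a friend class such as TraceLog; the counting helper is invented for the example:

    #include "base/trace_event/category_registry.h"
    #include "base/trace_event/trace_category.h"

    size_t CountEnabledCategories() {
      size_t enabled = 0;
      // GetAllCategories() only spans fully initialized slots, including
      // builtin categories such as "__metadata".
      for (base::trace_event::TraceCategory& category :
           base::trace_event::CategoryRegistry::GetAllCategories()) {
        if (category.is_enabled())
          enabled++;
      }
      return enabled;
    }
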
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index 0a04d62710..bb6fa1b82b 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -223,49 +223,6 @@
flow_flags, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
- TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
- TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#endif
-
// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -297,20 +254,10 @@
#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
- TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
-
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -395,10 +342,15 @@
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
arg2_name, arg2_val)
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_MARK(category_group, name) \
@@ -406,8 +358,8 @@
TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_COPY)
// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
@@ -544,6 +496,12 @@
TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+
// ASYNC_STEP_* APIs should be only used by legacy code. New code should
// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
// event.
@@ -612,6 +570,13 @@
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -701,6 +666,13 @@
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -760,16 +732,19 @@
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with one associated argument. If the category is not enabled, then this
-// does nothing.
+// with none, one or two associated arguments. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
arg1_name, arg1_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
category_group, name, id, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with 2 associated arguments. If the category is not enabled, then this
-// does nothing.
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
@@ -944,48 +919,58 @@
#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
- issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
- "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+ issue_end_ts, TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts)
// Macros to track the life time and value of arbitrary client objects.
// See also TraceTrackableObject.
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
snapshot) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+ id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
- category_group, name, id, timestamp, snapshot) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
- TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "snapshot", snapshot)
#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
// Records entering and leaving trace event contexts. |category_group| and
// |name| specify the context category and type. |context| is a
// snapshotted context object id.
-#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
- TRACE_ID_DONT_MANGLE(context))
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
+
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_LINK_IDS(
+// "category", "name",
+// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+ INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
@@ -1052,11 +1037,13 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
@@ -1067,6 +1054,8 @@
#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
+#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
#define TRACE_EVENT_FLAG_SCOPE_MASK \
(static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
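
To illustrate the new two-argument timestamped async macros added in this file, a caller could back-date an async slice roughly as follows. This is a sketch only; the category, id, and argument values are invented:

    #include <stdint.h>

    #include "base/time/time.h"
    #include "base/trace_event/trace_event.h"

    void EmitBackdatedRequestSlice(base::TimeTicks start, base::TimeTicks end) {
      const uint64_t request_id = 0x1000;  // Illustrative id.
      TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2("net", "Request", request_id,
                                              start, "url",
                                              "https://example.com",
                                              "priority", 2);
      TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2("net", "Request", request_id, end,
                                            "status", 200, "bytes_read", 1024);
    }
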
diff --git a/base/trace_event/etw_manifest/etw_manifest.gyp b/base/trace_event/etw_manifest/etw_manifest.gyp
deleted file mode 100644
index b2f0eb8ea1..0000000000
--- a/base/trace_event/etw_manifest/etw_manifest.gyp
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'targets': [
- {
- # GN version: //base/trace_event/etw_manifest/BUILD.gn
- 'target_name': 'etw_manifest',
- 'type': 'none',
- 'toolsets': ['host', 'target'],
- 'hard_dependency': 1,
- 'conditions': [
- ['OS=="win"', {
- 'sources': [
- 'chrome_events_win.man',
- ],
- 'variables': {
- 'man_output_dir': '<(SHARED_INTERMEDIATE_DIR)/base/trace_event/etw_manifest',
- },
- 'rules': [{
- # Rule to run the message compiler.
- 'rule_name': 'message_compiler',
- 'extension': 'man',
- 'outputs': [
- '<(man_output_dir)/chrome_events_win.h',
- '<(man_output_dir)/chrome_events_win.rc',
- ],
- 'action': [
- 'mc.exe',
- '-h', '<(man_output_dir)',
- '-r', '<(man_output_dir)/.',
- '-um',
- '<(RULE_INPUT_PATH)',
- ],
- 'message': 'Running message compiler on <(RULE_INPUT_PATH)',
- }],
- }],
- ],
- }
- ]
-}
diff --git a/base/trace_event/event_name_filter.cc b/base/trace_event/event_name_filter.cc
new file mode 100644
index 0000000000..8d0058c147
--- /dev/null
+++ b/base/trace_event/event_name_filter.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+const char EventNameFilter::kName[] = "event_whitelist_predicate";
+
+EventNameFilter::EventNameFilter(
+ std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
+ : event_names_whitelist_(std::move(event_names_whitelist)) {}
+
+EventNameFilter::~EventNameFilter() {}
+
+bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ return event_names_whitelist_->count(trace_event.name()) != 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/event_name_filter.h b/base/trace_event/event_name_filter.h
new file mode 100644
index 0000000000..19333b3e03
--- /dev/null
+++ b/base/trace_event/event_name_filter.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// Filters trace events by checking the full name against a whitelist.
+// The current implementation is quite simple and dumb and just uses a
+// hashtable which requires char* to std::string conversion. It could be smarter
+// and use a bloom filter trie. However, today this is used too rarely to
+// justify that cost.
+class BASE_EXPORT EventNameFilter : public TraceEventFilter {
+ public:
+ using EventNamesWhitelist = std::unordered_set<std::string>;
+ static const char kName[];
+
+ EventNameFilter(std::unique_ptr<EventNamesWhitelist>);
+ ~EventNameFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent&) const override;
+
+ private:
+ std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
+
+ DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
diff --git a/base/trace_event/event_name_filter_unittest.cc b/base/trace_event/event_name_filter_unittest.cc
new file mode 100644
index 0000000000..0bc2a4dafc
--- /dev/null
+++ b/base/trace_event/event_name_filter_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+const TraceEvent& MakeTraceEvent(const char* name) {
+ static TraceEvent event;
+ event.Reset();
+ event.Initialize(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
+ 0, nullptr, nullptr, nullptr, nullptr, 0);
+ return event;
+}
+
+TEST(TraceEventNameFilterTest, Whitelist) {
+ auto empty_whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ auto filter = MakeUnique<EventNameFilter>(std::move(empty_whitelist));
+
+ // No events should be filtered if the whitelist is empty.
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+
+ auto whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ whitelist->insert("foo");
+ whitelist->insert("bar");
+ filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("fooz")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("afoo")));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("bar")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foobar")));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index 31f311a918..b47dc16edd 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -29,7 +29,6 @@ const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
reinterpret_cast<AllocationContextTracker*>(-1);
-const char kTracingOverhead[] = "tracing_overhead";
ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
@@ -108,17 +107,17 @@ void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
}
void AllocationContextTracker::PushPseudoStackFrame(
- const char* trace_event_name) {
+ AllocationContextTracker::PseudoStackFrame stack_frame) {
// Impose a limit on the height to verify that every push is popped, because
// in practice the pseudo stack never grows higher than ~20 frames.
if (pseudo_stack_.size() < kMaxStackDepth)
- pseudo_stack_.push_back(trace_event_name);
+ pseudo_stack_.push_back(stack_frame);
else
NOTREACHED();
}
void AllocationContextTracker::PopPseudoStackFrame(
- const char* trace_event_name) {
+ AllocationContextTracker::PseudoStackFrame stack_frame) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the frame was never pushed, so it is possible that pop is called
// on an empty stack.
@@ -128,8 +127,10 @@ void AllocationContextTracker::PopPseudoStackFrame(
// Assert that pushes and pops are nested correctly. This DCHECK can be
// hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
// without a corresponding TRACE_EVENT_BEGIN).
- DCHECK_EQ(trace_event_name, pseudo_stack_.back())
- << "Encountered an unmatched TRACE_EVENT_END";
+ DCHECK(stack_frame == pseudo_stack_.back())
+ << "Encountered an unmatched TRACE_EVENT_END: "
+ << stack_frame.trace_event_name
+ << " vs event in stack: " << pseudo_stack_.back().trace_event_name;
pseudo_stack_.pop_back();
}
@@ -155,21 +156,15 @@ void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
}
// static
-AllocationContext AllocationContextTracker::GetContextSnapshot() {
- AllocationContext ctx;
-
- if (ignore_scope_depth_) {
- ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
- ctx.type_name = kTracingOverhead;
- ctx.backtrace.frame_count = 1;
- return ctx;
- }
+bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
+ if (ignore_scope_depth_)
+ return false;
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto* backtrace = std::begin(ctx.backtrace.frames);
- auto* backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx->backtrace.frames);
+ auto* backtrace_end = std::end(ctx->backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
@@ -193,11 +188,12 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
}
case CaptureMode::PSEUDO_STACK:
{
- for (const char* event_name: pseudo_stack_) {
+ for (const PseudoStackFrame& stack_frame : pseudo_stack_) {
if (backtrace == backtrace_end) {
break;
}
- *backtrace++ = StackFrame::FromTraceEventName(event_name);
+ *backtrace++ =
+ StackFrame::FromTraceEventName(stack_frame.trace_event_name);
}
break;
}
@@ -222,24 +218,32 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
// Copy frames backwards
size_t backtrace_capacity = backtrace_end - backtrace;
- size_t top_frame_index = (backtrace_capacity >= frame_count) ?
- 0 :
- frame_count - backtrace_capacity;
- for (size_t i = frame_count; i > top_frame_index;) {
- const void* frame = frames[--i];
+ int32_t top_frame_index = (backtrace_capacity >= frame_count)
+ ? 0
+ : frame_count - backtrace_capacity;
+ for (int32_t i = frame_count - 1; i >= top_frame_index; --i) {
+ const void* frame = frames[i];
*backtrace++ = StackFrame::FromProgramCounter(frame);
}
break;
}
}
- ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+ ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
// TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
// (component name) in the heap profiler and not piggy back on the type name.
- ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
+ if (!task_contexts_.empty()) {
+ ctx->type_name = task_contexts_.back();
+ } else if (!pseudo_stack_.empty()) {
+    // If no task context is available, the category name of the most recent
+    // trace event is used instead.
+ ctx->type_name = pseudo_stack_.back().trace_event_category;
+ } else {
+ ctx->type_name = nullptr;
+ }
- return ctx;
+ return true;
}
} // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index 454200c474..4f2a8c9502 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -10,7 +10,6 @@
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/debug/stack_trace.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
@@ -30,6 +29,17 @@ class BASE_EXPORT AllocationContextTracker {
NATIVE_STACK // GetContextSnapshot() returns native (real) stack trace
};
+  // A stack frame constructed from trace events in the codebase.
+ struct BASE_EXPORT PseudoStackFrame {
+ const char* trace_event_category;
+ const char* trace_event_name;
+
+ bool operator==(const PseudoStackFrame& other) const {
+ return trace_event_category == other.trace_event_category &&
+ trace_event_name == other.trace_event_name;
+ }
+ };
+
// Globally sets capturing mode.
// TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
static void SetCaptureMode(CaptureMode mode);
@@ -60,8 +70,8 @@ class BASE_EXPORT AllocationContextTracker {
static void SetCurrentThreadName(const char* name);
// Starts and ends a new ignore scope between which the allocations are
- // ignored in the heap profiler. A dummy context that short circuits to
- // "tracing_overhead" is returned for these allocations.
+ // ignored by the heap profiler. GetContextSnapshot() returns false when
+ // allocations are ignored.
void begin_ignore_scope() { ignore_scope_depth_++; }
void end_ignore_scope() {
if (ignore_scope_depth_)
@@ -69,18 +79,19 @@ class BASE_EXPORT AllocationContextTracker {
}
// Pushes a frame onto the thread-local pseudo stack.
- void PushPseudoStackFrame(const char* trace_event_name);
+ void PushPseudoStackFrame(PseudoStackFrame stack_frame);
// Pops a frame from the thread-local pseudo stack.
- void PopPseudoStackFrame(const char* trace_event_name);
+ void PopPseudoStackFrame(PseudoStackFrame stack_frame);
// Push and pop current task's context. A stack is used to support nested
// tasks and the top of the stack will be used in allocation context.
void PushCurrentTaskContext(const char* context);
void PopCurrentTaskContext(const char* context);
- // Returns a snapshot of the current thread-local context.
- AllocationContext GetContextSnapshot();
+  // Fills a snapshot of the current thread-local context. Returns false and
+  // leaves the snapshot unfilled if allocations are being ignored.
+ bool GetContextSnapshot(AllocationContext* snapshot);
~AllocationContextTracker();
@@ -90,7 +101,7 @@ class BASE_EXPORT AllocationContextTracker {
static subtle::Atomic32 capture_mode_;
// The pseudo stack where frames are |TRACE_EVENT| names.
- std::vector<const char*> pseudo_stack_;
+ std::vector<PseudoStackFrame> pseudo_stack_;
// The thread name is used as the first entry in the pseudo stack.
const char* thread_name_;
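
Because GetContextSnapshot() now takes an out-parameter and returns a bool, call sites are expected to check the return value. A minimal sketch of the new calling pattern, with a hypothetical RecordAllocationForProfiler() hook:

    #include "base/trace_event/heap_profiler_allocation_context.h"
    #include "base/trace_event/heap_profiler_allocation_context_tracker.h"

    void RecordAllocationForProfiler(size_t size) {
      base::trace_event::AllocationContext context;
      auto* tracker = base::trace_event::AllocationContextTracker::
          GetInstanceForCurrentThread();
      // Returns false (leaving |context| untouched) while inside an ignore
      // scope, e.g. under HEAP_PROFILER_SCOPED_IGNORE.
      if (!tracker->GetContextSnapshot(&context))
        return;
      // ... insert |context| and |size| into the allocation register ...
    }
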
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 3064a6a711..577f50043d 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -11,6 +11,7 @@
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -26,13 +27,25 @@ const char kEclair[] = "Eclair";
const char kFroyo[] = "Froyo";
const char kGingerbread[] = "Gingerbread";
+const char kFilteringTraceConfig[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"excluded_categories\": [],"
+ " \"filter_args\": {},"
+ " \"filter_predicate\": \"heap_profiler_predicate\","
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
// Asserts that the fixed-size array |expected_backtrace| matches the backtrace
// in |AllocationContextTracker::GetContextSnapshot|.
template <size_t N>
void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
auto* actual = std::begin(ctx.backtrace.frames);
auto* actual_bottom = actual + ctx.backtrace.frame_count;
@@ -52,9 +65,9 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
void AssertBacktraceContainsOnlyThreadName() {
StackFrame t = StackFrame::FromThreadName(kThreadName);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(1u, ctx.backtrace.frame_count);
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -63,17 +76,19 @@ void AssertBacktraceContainsOnlyThreadName() {
class AllocationContextTrackerTest : public testing::Test {
public:
void SetUp() override {
- TraceConfig config("");
- TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+    // The filtering trace config below installs the heap profiler event
+    // filter, which captures the pseudo stack for each allocation.
+ TraceConfig config(kFilteringTraceConfig);
+ TraceLog::GetInstance()->SetEnabled(config, TraceLog::FILTERING_MODE);
AllocationContextTracker::SetCurrentThreadName(kThreadName);
}
void TearDown() override {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::DISABLED);
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
}
};
@@ -106,6 +121,12 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
AssertBacktraceEquals(frame_ce);
}
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
+ StackFrame frame_cc[] = {t, c, c};
+ AssertBacktraceEquals(frame_cc);
+ }
+
AssertBacktraceEquals(frame_c);
}
@@ -222,9 +243,9 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
{
TRACE_EVENT0("Testing", kGingerbread);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
// The pseudo stack relies on pointer equality, not deep string comparisons.
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -233,38 +254,54 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
}
{
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(t, ctx.backtrace.frames[0]);
ASSERT_EQ(c, ctx.backtrace.frames[1]);
ASSERT_EQ(f, ctx.backtrace.frames[11]);
}
}
-TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
+TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
const char kContext1[] = "context1";
const char kContext2[] = "context2";
{
// The context from the scoped task event should be used as type name.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
- AllocationContext ctx1 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
ASSERT_EQ(kContext1, ctx1.type_name);
// In case of nested events, the last event's context should be used.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
- AllocationContext ctx2 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
ASSERT_EQ(kContext2, ctx2.type_name);
}
+ {
+ // Type should be category name of the last seen trace event.
+ TRACE_EVENT0("Testing", kCupcake);
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
+ ASSERT_EQ("Testing", std::string(ctx1.type_name));
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
+ ASSERT_EQ(TRACE_DISABLED_BY_DEFAULT("Testing"),
+ std::string(ctx2.type_name));
+ }
+
// Type should be nullptr without task event.
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_FALSE(ctx.type_name);
}
@@ -272,13 +309,9 @@ TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
TRACE_EVENT0("Testing", kCupcake);
TRACE_EVENT0("Testing", kDonut);
HEAP_PROFILER_SCOPED_IGNORE;
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
- const StringPiece kTracingOverhead("tracing_overhead");
- ASSERT_EQ(kTracingOverhead,
- static_cast<const char*>(ctx.backtrace.frames[0].value));
- ASSERT_EQ(1u, ctx.backtrace.frame_count);
+ AllocationContext ctx;
+ ASSERT_FALSE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
}
} // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 2c2cd378bb..63d40611a6 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -60,12 +60,12 @@ size_t AllocationRegister::AddressHasher::operator () (
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-world data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
- // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
- // buckets. Microbenchmarks show that this simple scheme outperforms fancy
- // hashes like Murmur3 by 20 to 40 percent.
+  // |shift|, 15 yields good results for both 2^18 and 2^19 bucket sizes.
+ // Microbenchmarks show that this simple scheme outperforms fancy hashes like
+ // Murmur3 by 20 to 40 percent.
const uintptr_t key = reinterpret_cast<uintptr_t>(address);
const uintptr_t a = 131101;
- const uintptr_t shift = 14;
+ const uintptr_t shift = 15;
const uintptr_t h = (key * a) >> shift;
return h;
}
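
A standalone sketch of the multiplicative hashing scheme described above, using the constants from this change; folding the hash into a bucket index with a mask is an assumption made only for the illustration:

    #include <stdint.h>
    #include <stdio.h>

    // Mirrors AddressHasher: multiply by the first prime after 2^17 and drop
    // the low 15 bits.
    uintptr_t HashAddress(const void* address) {
      const uintptr_t key = reinterpret_cast<uintptr_t>(address);
      const uintptr_t a = 131101;
      const uintptr_t shift = 15;
      return (key * a) >> shift;
    }

    int main() {
      int probe = 0;
      uintptr_t h = HashAddress(&probe);
      // Illustrative reduction to one of 2^19 buckets.
      printf("bucket = %llu\n",
             static_cast<unsigned long long>(h & ((1u << 19) - 1)));
      return 0;
    }
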
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index 86e2721c56..d6a02faeae 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -16,6 +16,7 @@
#include "base/process/process_metrics.h"
#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "build/build_config.h"
namespace base {
namespace trace_event {
@@ -45,8 +46,7 @@ class FixedHashMap {
using KVPair = std::pair<const Key, Value>;
// For implementation simplicity API uses integer index instead
- // of iterators. Most operations (except FindValidIndex) on KVIndex
- // are O(1).
+ // of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
@@ -199,7 +199,9 @@ class FixedHashMap {
// the simplest solution is to just allocate a humongous chunk of address
// space.
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ CHECK_LT(next_unused_cell_, num_cells_ + 1)
+ << "Allocation Register hash table has too little capacity. Increase "
+ "the capacity to run heap profiler in large sessions.";
return &cells_[idx];
}
@@ -300,15 +302,25 @@ class BASE_EXPORT AllocationRegister {
private:
friend AllocationRegisterTest;
- // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
- // hashing and should be changed together with AddressHasher.
+// Expect a lower number of allocations on mobile platforms. Load factor
+// (capacity / bucket count) is kept less than 10 for optimal hashing. The
+// number of buckets should be changed together with AddressHasher.
+#if defined(OS_ANDROID) || defined(OS_IOS)
static const size_t kAllocationBuckets = 1 << 18;
static const size_t kAllocationCapacity = 1500000;
-
- // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
- // needing to tweak BacktraceHasher implementation.
- static const size_t kBacktraceBuckets = 1 << 15;
- static const size_t kBacktraceCapacity = kBacktraceBuckets;
+#else
+ static const size_t kAllocationBuckets = 1 << 19;
+ static const size_t kAllocationCapacity = 5000000;
+#endif
+
+ // 2^16 works well with BacktraceHasher. When increasing this number make
+  // sure BacktraceHasher still produces a low number of collisions.
+ static const size_t kBacktraceBuckets = 1 << 16;
+#if defined(OS_ANDROID)
+ static const size_t kBacktraceCapacity = 32000; // 22K was observed
+#else
+ static const size_t kBacktraceCapacity = 55000; // 45K was observed on Linux
+#endif
struct BacktraceHasher {
size_t operator () (const Backtrace& backtrace) const;
diff --git a/base/trace_event/heap_profiler_event_filter.cc b/base/trace_event/heap_profiler_event_filter.cc
new file mode 100644
index 0000000000..6c91c91b13
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.cc
@@ -0,0 +1,67 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_event_filter.h"
+
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+inline bool IsPseudoStackEnabled() {
+ return AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK;
+}
+
+inline AllocationContextTracker* GetThreadLocalTracker() {
+ return AllocationContextTracker::GetInstanceForCurrentThread();
+}
+
+} // namespace
+
+// static
+const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
+
+HeapProfilerEventFilter::HeapProfilerEventFilter() {}
+HeapProfilerEventFilter::~HeapProfilerEventFilter() {}
+
+bool HeapProfilerEventFilter::FilterTraceEvent(
+ const TraceEvent& trace_event) const {
+ if (!IsPseudoStackEnabled())
+ return true;
+
+ // TODO(primiano): Add support for events with copied name crbug.com/581079.
+ if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
+ return true;
+
+ const auto* category = CategoryRegistry::GetCategoryByStatePtr(
+ trace_event.category_group_enabled());
+ AllocationContextTracker::PseudoStackFrame frame = {category->name(),
+ trace_event.name()};
+ if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
+ trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
+ GetThreadLocalTracker()->PushPseudoStackFrame(frame);
+ } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
+ // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
+ GetThreadLocalTracker()->PopPseudoStackFrame(frame);
+ }
+  // Do not filter out any events and always return true. TraceLog adds the
+ // event only if it is enabled for recording.
+ return true;
+}
+
+void HeapProfilerEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {
+ if (IsPseudoStackEnabled())
+ GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/heap_profiler_event_filter.h b/base/trace_event/heap_profiler_event_filter.h
new file mode 100644
index 0000000000..47368a1b07
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.h
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// This filter unconditionally accepts all events and pushes/pops them from the
+// thread-local AllocationContextTracker instance as they are seen.
+// This is used to cheaply construct the heap profiler pseudo stack without
+// having to actually record all events.
+class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
+ public:
+ static const char kName[];
+
+ HeapProfilerEventFilter();
+ ~HeapProfilerEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name,
+ const char* event_name) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index 1bf06dbd97..8043fff995 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -314,8 +314,7 @@ std::unique_ptr<TracedValue> ExportHeapDump(
internal::HeapDumpWriter writer(
session_state.stack_frame_deduplicator(),
session_state.type_name_deduplicator(),
- session_state.memory_dump_config().heap_profiler_options
- .breakdown_threshold_bytes);
+ session_state.heap_profiler_breakdown_threshold_bytes());
return Serialize(writer.Summarize(metrics_by_context));
}
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index 49a235051c..fc5da0d1dd 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -11,6 +11,7 @@
#include <utility>
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -23,6 +24,10 @@ StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
StackFrameDeduplicator::FrameNode::~FrameNode() {}
+size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
+ return base::trace_event::EstimateMemoryUsage(children);
+}
+
StackFrameDeduplicator::StackFrameDeduplicator() {}
StackFrameDeduplicator::~StackFrameDeduplicator() {}
@@ -116,19 +121,10 @@ void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The sizes here are only estimates; they fail to take into account the
- // overhead of the tree nodes for the map, but as an estimate this should be
- // fine.
- size_t maps_size = roots_.size() * sizeof(std::pair<StackFrame, int>);
- size_t frames_allocated = frames_.capacity() * sizeof(FrameNode);
- size_t frames_resident = frames_.size() * sizeof(FrameNode);
-
- for (const FrameNode& node : frames_)
- maps_size += node.children.size() * sizeof(std::pair<StackFrame, int>);
-
+ size_t memory_usage =
+ EstimateMemoryUsage(frames_) + EstimateMemoryUsage(roots_);
overhead->Add("StackFrameDeduplicator",
- sizeof(StackFrameDeduplicator) + maps_size + frames_allocated,
- sizeof(StackFrameDeduplicator) + maps_size + frames_resident);
+ sizeof(StackFrameDeduplicator) + memory_usage);
}
} // namespace trace_event
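
The rewritten EstimateTraceMemoryOverhead() above replaces hand-rolled size arithmetic with the composable helpers from base/trace_event/memory_usage_estimator.h, which this patch adds to the includes. As a rough sketch of how a type opts into that composition, assuming the standard-container and std::string overloads provided by that header, a struct only needs an EstimateMemoryUsage() member, just as FrameNode now defines one:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

#include "base/trace_event/memory_usage_estimator.h"

// Illustrative type, not part of this patch.
struct SampleNode {
  std::string label;
  std::map<int, int> children;

  // Reports dynamic memory owned by this node, excluding sizeof(*this);
  // the container overloads of EstimateMemoryUsage() pick this up.
  size_t EstimateMemoryUsage() const {
    return base::trace_event::EstimateMemoryUsage(label) +
           base::trace_event::EstimateMemoryUsage(children);
  }
};

// Usage sketch: estimates the vector's own buffer plus each node's estimate.
// std::vector<SampleNode> nodes = ...;
// size_t bytes = base::trace_event::EstimateMemoryUsage(nodes);
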
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 4932534e1d..66d430f2ee 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -34,6 +34,8 @@ class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
FrameNode(const FrameNode& other);
~FrameNode();
+ size_t EstimateMemoryUsage() const;
+
StackFrame frame;
// The index of the parent stack frame in |frames_|, or -1 if there is no
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
index 055f86abf0..a6dab51ad2 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -10,7 +10,10 @@
#include <utility>
#include "base/json/string_escape.h"
+#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_memory_overhead.h"
namespace base {
@@ -18,16 +21,24 @@ namespace trace_event {
namespace {
-// Extract directory name if |type_name| was file name. Otherwise, return
-// |type_name|.
-StringPiece ExtractDirNameFromFileName(const char* type_name) {
+// If |type_name| is a file name, extract the directory name. Otherwise, if
+// |type_name| is a category name, disambiguate multiple categories and remove
+// the "disabled-by-default" prefix if present.
+StringPiece ExtractCategoryFromTypeName(const char* type_name) {
StringPiece result(type_name);
size_t last_seperator = result.find_last_of("\\/");
   // If |type_name| was not a file path, the separator will not be found, so
// the whole type name is returned.
- if (last_seperator == StringPiece::npos)
+ if (last_seperator == StringPiece::npos) {
+      // Use only the first category name if it contains ",".
+ size_t first_comma_position = result.find(',');
+ if (first_comma_position != StringPiece::npos)
+ result = result.substr(0, first_comma_position);
+ if (result.starts_with(TRACE_DISABLED_BY_DEFAULT("")))
+ result.remove_prefix(sizeof(TRACE_DISABLED_BY_DEFAULT("")) - 1);
return result;
+ }
// Remove the file name from the path.
result.remove_suffix(result.length() - last_seperator);
@@ -82,7 +93,7 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
// TODO(ssid): crbug.com/594803 the type name is misused for file name in
// some cases.
- StringPiece type_info = ExtractDirNameFromFileName(it->first);
+ StringPiece type_info = ExtractCategoryFromTypeName(it->first);
// |EscapeJSONString| appends, it does not overwrite |buffer|.
bool put_in_quotes = true;
@@ -95,12 +106,9 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The size here is only an estimate; it fails to take into account the size
- // of the tree nodes for the map, but as an estimate this should be fine.
- size_t map_size = type_ids_.size() * sizeof(std::pair<const char*, int>);
-
+ size_t memory_usage = EstimateMemoryUsage(type_ids_);
overhead->Add("TypeNameDeduplicator",
- sizeof(TypeNameDeduplicator) + map_size);
+ sizeof(TypeNameDeduplicator) + memory_usage);
}
} // namespace trace_event
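
As a worked example of the new extraction rules, here is a standalone sketch that mirrors ExtractCategoryFromTypeName(), using std::string for readability; the real code operates on base::StringPiece, and TRACE_DISABLED_BY_DEFAULT("") is assumed to expand to the "disabled-by-default-" prefix.

#include <string>

// Illustration only; mirrors the logic above, not part of this patch.
std::string ExtractCategory(std::string name) {
  size_t last_separator = name.find_last_of("\\/");
  if (last_separator != std::string::npos) {
    // "path/to/file.cc" -> "path/to": strip the file name from the path.
    return name.substr(0, last_separator);
  }
  // "gpu,benchmark" -> "gpu": keep only the first category.
  size_t comma = name.find(',');
  if (comma != std::string::npos)
    name = name.substr(0, comma);
  // "disabled-by-default-memory-infra" -> "memory-infra": drop the prefix.
  const std::string kPrefix = "disabled-by-default-";
  if (name.compare(0, kPrefix.size(), kPrefix) == 0)
    name = name.substr(kPrefix.size());
  return name;
}
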
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index c3d3258651..3565b8b95b 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -9,6 +9,7 @@
#include "base/allocator/allocator_extension.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
+#include "base/debug/profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_allocation_register.h"
@@ -22,26 +23,32 @@
#else
#include <malloc.h>
#endif
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
namespace base {
namespace trace_event {
-#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
namespace {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
using allocator::AllocatorDispatch;
-void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_function(next, size);
+ void* ptr = next->alloc_function(next, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+void* HookZeroInitAlloc(const AllocatorDispatch* self,
+ size_t n,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_zero_initialized_function(next, n, size);
+ void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
return ptr;
@@ -49,41 +56,127 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
void* HookllocAligned(const AllocatorDispatch* self,
size_t alignment,
- size_t size) {
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->alloc_aligned_function(next, alignment, size);
+ void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+void* HookRealloc(const AllocatorDispatch* self,
+ void* address,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
- void* ptr = next->realloc_function(next, address, size);
+ void* ptr = next->realloc_function(next, address, size, context);
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
if (size > 0) // realloc(size == 0) means free().
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
-void HookFree(const AllocatorDispatch* self, void* address) {
+void HookFree(const AllocatorDispatch* self, void* address, void* context) {
if (address)
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
const AllocatorDispatch* const next = self->next;
- next->free_function(next, address);
+ next->free_function(next, address, context);
+}
+
+size_t HookGetSizeEstimate(const AllocatorDispatch* self,
+ void* address,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ return next->get_size_estimate_function(next, address, context);
+}
+
+unsigned HookBatchMalloc(const AllocatorDispatch* self,
+ size_t size,
+ void** results,
+ unsigned num_requested,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ unsigned count =
+ next->batch_malloc_function(next, size, results, num_requested, context);
+ for (unsigned i = 0; i < count; ++i) {
+ MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
+ }
+ return count;
+}
+
+void HookBatchFree(const AllocatorDispatch* self,
+ void** to_be_freed,
+ unsigned num_to_be_freed,
+ void* context) {
+ const AllocatorDispatch* const next = self->next;
+ for (unsigned i = 0; i < num_to_be_freed; ++i) {
+ MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
+ }
+ next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
+}
+
+void HookFreeDefiniteSize(const AllocatorDispatch* self,
+ void* ptr,
+ size_t size,
+ void* context) {
+ if (ptr)
+ MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
+ const AllocatorDispatch* const next = self->next;
+ next->free_definite_size_function(next, ptr, size, context);
}
AllocatorDispatch g_allocator_hooks = {
- &HookAlloc, /* alloc_function */
- &HookZeroInitAlloc, /* alloc_zero_initialized_function */
- &HookllocAligned, /* alloc_aligned_function */
- &HookRealloc, /* realloc_function */
- &HookFree, /* free_function */
- nullptr, /* next */
+ &HookAlloc, /* alloc_function */
+ &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+ &HookllocAligned, /* alloc_aligned_function */
+ &HookRealloc, /* realloc_function */
+ &HookFree, /* free_function */
+ &HookGetSizeEstimate, /* get_size_estimate_function */
+ &HookBatchMalloc, /* batch_malloc_function */
+ &HookBatchFree, /* batch_free_function */
+ &HookFreeDefiniteSize, /* free_definite_size_function */
+ nullptr, /* next */
};
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+#if defined(OS_WIN)
+// A structure containing some information about a given heap.
+struct WinHeapInfo {
+ size_t committed_size;
+ size_t uncommitted_size;
+ size_t allocated_size;
+ size_t block_count;
+};
+
+// NOTE: crbug.com/665516
+// Unfortunately, there is no safe way to collect information from secondary
+// heaps, due to limitations and the racy nature of this part of the WinAPI.
+void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
+#if defined(SYZYASAN)
+ if (base::debug::IsBinaryInstrumented())
+ return;
+#endif
+
+ // Iterate through whichever heap our CRT is using.
+ HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
+ ::HeapLock(crt_heap);
+ PROCESS_HEAP_ENTRY heap_entry;
+ heap_entry.lpData = nullptr;
+ // Walk over all the entries in the main heap.
+ while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
+ if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
+ crt_heap_info->allocated_size += heap_entry.cbData;
+ crt_heap_info->block_count++;
+ } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
+ crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
+ crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
+ }
+ }
+ CHECK(::HeapUnlock(crt_heap) == TRUE);
+}
+#endif // defined(OS_WIN)
} // namespace
-#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
@@ -106,6 +199,7 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
size_t total_virtual_size = 0;
size_t resident_size = 0;
size_t allocated_objects_size = 0;
+ size_t allocated_objects_count = 0;
#if defined(USE_TCMALLOC)
bool res =
allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
@@ -117,18 +211,35 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
&allocated_objects_size);
DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
- malloc_statistics_t stats;
- memset(&stats, 0, sizeof(stats));
+ malloc_statistics_t stats = {0};
malloc_zone_statistics(nullptr, &stats);
total_virtual_size = stats.size_allocated;
allocated_objects_size = stats.size_in_use;
- // The resident size is approximated to the max size in use, which would count
- // the total size of all regions other than the free bytes at the end of each
- // region. In each allocation region the allocations are rounded off to a
- // fixed quantum, so the excess region will not be resident.
- // See crrev.com/1531463004 for detailed explanation.
- resident_size = stats.max_size_in_use;
+ // Resident size is approximated pretty well by stats.max_size_in_use.
+ // However, on macOS, freed blocks are both resident and reusable, which is
+ // semantically equivalent to deallocated. The implementation of libmalloc
+ // will also only hold a fixed number of freed regions before actually
+ // starting to deallocate them, so stats.max_size_in_use is also not
+ // representative of the peak size. As a result, stats.max_size_in_use is
+ // typically somewhere between actually resident [non-reusable] pages, and
+ // peak size. This is not very useful, so we just use stats.size_in_use for
+ // resident_size, even though it's an underestimate and fails to account for
+ // fragmentation. See
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
+ resident_size = stats.size_in_use;
+#elif defined(OS_WIN)
+ WinHeapInfo main_heap_info = {};
+ WinHeapMemoryDumpImpl(&main_heap_info);
+ total_virtual_size =
+ main_heap_info.committed_size + main_heap_info.uncommitted_size;
+ // Resident size is approximated with committed heap size. Note that it is
+  // possible to do this with better accuracy on Windows by intersecting the
+  // working set with the virtual memory ranges occupied by the heap. It's not
+ // clear that this is worth it, as it's fairly expensive to do.
+ resident_size = main_heap_info.committed_size;
+ allocated_objects_size = main_heap_info.allocated_size;
+ allocated_objects_count = main_heap_info.block_count;
#else
struct mallinfo info = mallinfo();
DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
@@ -138,6 +249,8 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
// |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
total_virtual_size = info.arena + info.hblkhd;
resident_size = info.uordblks;
+
+ // Total allocated space is given by |uordblks|.
allocated_objects_size = info.uordblks;
#endif
@@ -147,13 +260,17 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, resident_size);
- // Total allocated space is given by |uordblks|.
MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
allocated_objects_size);
+ if (allocated_objects_count != 0) {
+ inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects,
+ allocated_objects_count);
+ }
- if (resident_size - allocated_objects_size > 0) {
+ if (resident_size > allocated_objects_size) {
// Explicitly specify why is extra memory resident. In tcmalloc it accounts
// for free lists and caches. In mac and ios it accounts for the
// fragmentation and metadata.
@@ -233,7 +350,10 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
- AllocationContext context = tracker->GetContextSnapshot();
+
+ AllocationContext context;
+ if (!tracker->GetContextSnapshot(&context))
+ return;
AutoLock lock(allocation_register_lock_);
if (!allocation_register_)
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 4746cf5896..384033c9b8 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -15,7 +15,7 @@
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
-#if defined(OS_LINUX) || defined(OS_ANDROID) || \
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
#define MALLOC_MEMORY_TRACING_SUPPORTED
#endif
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 7d1023606b..c781f071bb 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -19,7 +19,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class ProcessMemoryDump;
class TracedValue;
@@ -70,11 +69,6 @@ class BASE_EXPORT MemoryAllocatorDump {
// Called at trace generation time to populate the TracedValue.
void AsValueInto(TracedValue* value) const;
- // Get the ProcessMemoryDump instance that owns this.
- ProcessMemoryDump* process_memory_dump() const {
- return process_memory_dump_;
- }
-
// Use enum Flags to set values.
void set_flags(int flags) { flags_ |= flags; }
void clear_flags(int flags) { flags_ &= ~flags; }
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index eed070a782..5a54a773c5 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -7,21 +7,26 @@
#include <algorithm>
#include <utility>
+#include "base/allocator/features.h"
#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
@@ -33,10 +38,6 @@
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif
-#if defined(OS_WIN)
-#include "base/trace_event/winheap_dump_provider_win.h"
-#endif
-
namespace base {
namespace trace_event {
@@ -49,6 +50,31 @@ const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;
+// The list of names of dump providers that are blacklisted from the strict
+// thread-affinity check on unregistration. These providers could potentially
+// cause crashes on build bots if they do not unregister on the right thread.
+// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
+// blacklist, crbug.com/643438.
+const char* const kStrictThreadCheckBlacklist[] = {
+ "ClientDiscardableSharedMemoryManager",
+ "ContextProviderCommandBuffer",
+ "DiscardableSharedMemoryManager",
+ "FontCaches",
+ "GpuMemoryBufferVideoFramePool",
+ "IndexedDBBackingStore",
+ "Sql",
+ "ThreadLocalEventBuffer",
+ "TraceLog",
+ "URLRequestContext",
+ "VpxVideoDecoder",
+ "cc::SoftwareImageDecodeCache",
+ "cc::StagingBufferPool",
+ "gpu::BufferManager",
+ "gpu::MappedMemoryManager",
+ "gpu::RenderbufferManager",
+ "BlacklistTestDumpProvider" // for testing
+};
+
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -110,8 +136,6 @@ const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
MallocDumpProvider::kAllocatedObjects;
-#elif defined(OS_WIN)
- WinHeapDumpProvider::kAllocatedObjects;
#else
nullptr;
#endif
@@ -142,6 +166,9 @@ MemoryDumpManager::MemoryDumpManager()
// At this point the command line may not be initialized but we try to
// enable the heap profiler to capture allocations as soon as possible.
EnableHeapProfilingIfNeeded();
+
+ strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
+ std::end(kStrictThreadCheckBlacklist));
}
MemoryDumpManager::~MemoryDumpManager() {
@@ -162,18 +189,20 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
if (profiling_mode == "") {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
- }
- else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
#if HAVE_TRACE_STACK_FRAME_POINTERS && \
(BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+ } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
// We need frame pointers for native tracing to work, and they are
// enabled in profiling and debug builds.
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::NATIVE_STACK);
-#else
- CHECK(false) << "'" << profiling_mode << "' mode for "
- << switches::kEnableHeapProfiling << " flag is not supported "
- << "for this platform / build type.";
+#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
+ // Enable heap tracking, which in turn enables capture of heap usage
+ // tracking in tracked_objects.cc.
+ if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+ base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
#endif
} else {
CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
@@ -206,14 +235,33 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
nullptr);
#endif
-#if defined(OS_WIN)
- RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
-#endif
+ TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
+
+ // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
+ // more allocations (crbug.com/625170).
+ if (AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
+ !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
+ // Create trace config with heap profiling filter.
+ TraceConfig::EventFilterConfig heap_profiler_filter_config(
+ HeapProfilerEventFilter::kName);
+ heap_profiler_filter_config.AddIncludedCategory("*");
+ heap_profiler_filter_config.AddIncludedCategory(
+ MemoryDumpManager::kTraceCategory);
+ TraceConfig::EventFilters filters;
+ filters.push_back(heap_profiler_filter_config);
+ TraceConfig filtering_trace_config;
+ filtering_trace_config.SetEventFilters(filters);
+
+ TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
+ TraceLog::FILTERING_MODE);
+ }
// If tracing was enabled before initializing MemoryDumpManager, we missed the
   // OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+  // IsEnabled() is called before adding the observer to avoid calling
+  // OnTraceLogEnabled() twice.
bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
TraceLog::GetInstance()->AddEnabledStateObserver(this);
if (is_tracing_already_enabled)
OnTraceLogEnabled();
@@ -262,6 +310,11 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
whitelisted_for_background_mode);
+ if (options.is_fast_polling_supported) {
+ DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
+ "polling must NOT be thread bound.";
+ }
+
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -269,6 +322,15 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
// path for RenderThreadImpl::Init().
if (already_registered)
return;
+
+    // The list of polling MDPs is populated in OnTraceLogEnabled(). This code
+    // deals with the case of an MDP capable of fast polling that is registered
+    // after OnTraceLogEnabled() has already run.
+ if (options.is_fast_polling_supported && dump_thread_) {
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
+ Unretained(this), mdpinfo));
+ }
}
if (heap_profiling_enabled_)
@@ -307,9 +369,18 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
// removed from |pending_dump_providers|.
+ // - When the provider is removed from |dump_providers_for_polling_|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
- } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
+ subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+    // If the dump provider's name is on |strict_thread_check_blacklist_|, the
+ // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
+ // fired even when tracing is not enabled (stricter).
+ // TODO(ssid): Remove this condition after removing all the dump providers
+ // in the blacklist and the buildbots are no longer flakily hitting the
+ // DCHECK, crbug.com/643438.
+
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
// - The MDP has specified a sequenced task runner affinity AND the
@@ -325,6 +396,13 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
+ if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
+ DCHECK(take_mdp_ownership_and_delete_async);
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
+ Unretained(this), *mdp_iter));
+ }
+
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -334,6 +412,28 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
+void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.insert(mdpinfo);
+
+ // Notify ready for polling when first polling supported provider is
+ // registered. This handles the case where OnTraceLogEnabled() did not notify
+ // ready since no polling supported mdp has yet been registered.
+ if (dump_providers_for_polling_.size() == 1)
+ dump_scheduler_->NotifyPollingSupported();
+}
+
+void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.erase(mdpinfo);
+ DCHECK(!dump_providers_for_polling_.empty())
+ << "All polling MDPs cannot be unregistered.";
+}
+
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
@@ -413,8 +513,10 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
// with disallowed modes. If |session_state_| is null then tracing is
// disabled.
CHECK(!session_state_ ||
- session_state_->memory_dump_config().allowed_dump_modes.count(
- args.level_of_detail));
+ session_state_->IsDumpModeAllowed(args.level_of_detail));
+
+ if (dump_scheduler_)
+ dump_scheduler_->NotifyDumpTriggered();
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -570,6 +672,16 @@ void MemoryDumpManager::InvokeOnMemoryDump(
TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
"dump_provider.name", mdpinfo->name);
+  // A stack-allocated copy of the dump provider name is useful for debugging
+  // crashes that occur when a dump is invoked after a |dump_provider| was
+  // unregistered in an unsafe way.
+ // TODO(ssid): Remove this after fixing crbug.com/643438.
+ char provider_name_for_debugging[16];
+ strncpy(provider_name_for_debugging, mdpinfo->name,
+ sizeof(provider_name_for_debugging) - 1);
+ provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
+ base::debug::Alias(provider_name_for_debugging);
+
// Pid of the target process being dumped. Often kNullProcessId (= current
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
@@ -587,6 +699,28 @@ void MemoryDumpManager::InvokeOnMemoryDump(
SetupNextMemoryDump(std::move(pmd_async_state));
}
+bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
+#if DCHECK_IS_ON()
+ {
+ AutoLock lock(lock_);
+ if (dump_thread_)
+ DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
+ }
+#endif
+ if (dump_providers_for_polling_.empty())
+ return false;
+
+ *memory_total = 0;
+ // Note that we call PollFastMemoryTotal() even if the dump provider is
+  // disabled (unregistered). This is to avoid taking the lock while polling.
+ for (const auto& mdpinfo : dump_providers_for_polling_) {
+ uint64_t value = 0;
+ mdpinfo->dump_provider->PollFastMemoryTotal(&value);
+ *memory_total += value;
+ }
+ return true;
+}
+
// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
@@ -663,11 +797,15 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- const TraceConfig trace_config =
+ const TraceConfig& trace_config =
TraceLog::GetInstance()->GetCurrentTraceConfig();
+ const TraceConfig::MemoryDumpConfig& memory_dump_config =
+ trace_config.memory_dump_config();
scoped_refptr<MemoryDumpSessionState> session_state =
new MemoryDumpSessionState;
- session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
+ session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
+ session_state->set_heap_profiler_breakdown_threshold_bytes(
+ memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
     // deduplicator will be in use. Add metadata events to write the frames
@@ -681,14 +819,26 @@ void MemoryDumpManager::OnTraceLogEnabled() {
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
- WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state, &MemoryDumpSessionState::type_name_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::type_name_deduplicator));
+ }
+
+ std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
+ new MemoryDumpScheduler(this, dump_thread->task_runner()));
+ DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+ trigger.min_time_between_dumps_ms);
}
{
@@ -699,48 +849,65 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
+ dump_scheduler_ = std::move(dump_scheduler);
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
+ dump_providers_for_polling_.clear();
+ for (const auto& mdpinfo : dump_providers_) {
+ if (mdpinfo->options.is_fast_polling_supported)
+ dump_providers_for_polling_.insert(mdpinfo);
}
+    // Notify that polling is supported only if some polling-capable provider
+    // was registered; otherwise RegisterPollingMDPOnDumpThread() will notify
+    // when the first polling MDP registers.
+ if (!dump_providers_for_polling_.empty())
+ dump_scheduler_->NotifyPollingSupported();
+
+ // Only coordinator process triggers periodic global memory dumps.
+ if (is_coordinator_)
+ dump_scheduler_->NotifyPeriodicTriggerSupported();
}
- // Enable periodic dumps if necessary.
- periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
// There might be a memory dump in progress while this happens. Therefore,
// ensure that the MDM state which depends on the tracing enabled / disabled
// state is always accessed by the dumping methods holding the |lock_|.
+ if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
+ return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
+ std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
+ scheduler = std::move(dump_scheduler_);
}
+ scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
- periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
+
+ // |dump_providers_for_polling_| must be cleared only after the dump thread is
+ // stopped (polling tasks are done).
+ {
+ AutoLock lock(lock_);
+ for (const auto& mdpinfo : dump_providers_for_polling_)
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+ dump_providers_for_polling_.clear();
+ }
}
bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
AutoLock lock(lock_);
if (!session_state_)
return false;
- return session_state_->memory_dump_config().allowed_dump_modes.count(
- dump_mode) != 0;
+ return session_state_->IsDumpModeAllowed(dump_mode);
}
uint64_t MemoryDumpManager::GetTracingProcessId() const {
@@ -806,78 +973,5 @@ ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
return iter->second.get();
}
-MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
-
-MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
- Stop();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
- if (triggers_list.empty())
- return;
-
- // At the moment the periodic support is limited to at most one periodic
- // trigger per dump mode. All intervals should be an integer multiple of the
- // smallest interval specified.
- periodic_dumps_count_ = 0;
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t light_dump_period_ms = 0;
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 3u);
- auto* mdm = MemoryDumpManager::GetInstance();
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK_NE(0u, config.periodic_interval_ms);
- switch (config.level_of_detail) {
- case MemoryDumpLevelOfDetail::BACKGROUND:
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
- break;
- case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, light_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
- light_dump_period_ms = config.periodic_interval_ms;
- break;
- case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, heavy_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
- heavy_dump_period_ms = config.periodic_interval_ms;
- break;
- }
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
- }
-
- DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
- light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
-
- timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
- base::Unretained(this)));
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
- if (IsRunning()) {
- timer_.Stop();
- }
-}
-
-bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
- return timer_.IsRunning();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_dumps_count_;
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
} // namespace trace_event
} // namespace base
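
The polling path added above (dump_providers_for_polling_, PollFastMemoryTotal(), and the MemoryDumpScheduler hand-off) is only exercised by providers that set is_fast_polling_supported in their registration options. A rough sketch of such a provider follows; the virtual names match the mocks in the unit test further below, but the exact MemoryDumpProvider signatures are assumed here rather than quoted from the header.

#include <atomic>
#include <cstdint>

#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

// Hypothetical provider, not part of this patch.
class PoolDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    // Full dump path; unchanged by this patch.
    return true;
  }

  void PollFastMemoryTotal(uint64_t* memory_total) override {
    // Cheap, lock-free estimate consumed on the dump thread by
    // MemoryDumpManager::PollFastMemoryTotal() for peak detection.
    *memory_total = approximate_bytes_.load(std::memory_order_relaxed);
  }

  void SuspendFastMemoryPolling() override {
    // Release any state kept only to make polling cheap.
  }

 private:
  std::atomic<uint64_t> approximate_bytes_{0};
};

Registration would set MemoryDumpProvider::Options::is_fast_polling_supported to true and, per the DCHECK added in RegisterDumpProviderInternal(), must not bind the provider to a task runner.
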
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 06b772c6e4..92cc2f401b 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -18,7 +18,6 @@
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
-#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
@@ -33,6 +32,7 @@ namespace trace_event {
class MemoryDumpManagerDelegate;
class MemoryDumpProvider;
class MemoryDumpSessionState;
+class MemoryDumpScheduler;
// This is the interface exposed to the rest of the codebase to deal with
// memory tracing. The main entry point for clients is represented by
@@ -94,7 +94,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// This method takes ownership of the dump provider and guarantees that:
// - The |mdp| will be deleted at some point in the near future.
// - Its deletion will not happen concurrently with the OnMemoryDump() call.
- // Note that OnMemoryDump() calls can still happen after this method returns.
+ // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
+ // after this method returns.
void UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp);
@@ -116,6 +117,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
+ // Enable heap profiling if kEnableHeapProfiling is specified.
+ void EnableHeapProfilingIfNeeded();
+
// Returns true if the dump mode is allowed for current tracing session.
bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
@@ -151,6 +155,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
+ friend class MemoryDumpScheduler;
// Descriptor used to hold information about registered MDPs.
// Some important considerations about lifetime of this object:
@@ -273,31 +278,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
- // Sets up periodic memory dump timers to start global dump requests based on
- // the dump triggers from trace config.
- class BASE_EXPORT PeriodicGlobalDumpTimer {
- public:
- PeriodicGlobalDumpTimer();
- ~PeriodicGlobalDumpTimer();
-
- void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
- triggers_list);
- void Stop();
-
- bool IsRunning();
-
- private:
- // Periodically called by the timer.
- void RequestPeriodicGlobalDump();
-
- RepeatingTimer timer_;
- uint32_t periodic_dumps_count_;
- uint32_t light_dump_rate_;
- uint32_t heavy_dump_rate_;
-
- DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
- };
-
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
@@ -308,9 +288,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
static void FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
- // Enable heap profiling if kEnableHeapProfiling is specified.
- void EnableHeapProfilingIfNeeded();
-
// Internal, used only by MemoryDumpManagerDelegate.
// Creates a memory dump for the current process and appends it to the trace.
// |callback| will be invoked asynchronously upon completion on the same
@@ -329,6 +306,14 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// runner.
void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+  // Records a quick estimate of the process's total memory usage in
+  // |memory_total|. This is used to track and detect peaks in memory usage
+  // without having to record all data from dump providers. The value is
+  // approximate, trading accuracy for speed, and is not consistent with the
+  // rest of the memory-infra metrics. Must be called on the dump thread.
+ // Returns true if |memory_total| was updated by polling at least 1 MDP.
+ bool PollFastMemoryTotal(uint64_t* memory_total);
+
   // Helper for the RegisterDumpProvider* functions.
void RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
@@ -340,13 +325,29 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
+  // Adds/removes a provider that supports polling to/from
+  // |dump_providers_for_polling_|.
+ void RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+ void UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+
   // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
+  // A copy of the mdpinfo list for providers that support polling. It must be
+  // accessed only on the dump thread, if a dump thread exists.
+ MemoryDumpProviderInfo::OrderedSet dump_providers_for_polling_;
+
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
+  // The list of names of dump providers that are blacklisted from the strict
+  // thread-affinity check on unregistration.
+ std::unordered_set<StringPiece, StringPieceHash>
+ strict_thread_check_blacklist_;
+
MemoryDumpManagerDelegate* delegate_; // Not owned.
// When true, this instance is in charge of coordinating periodic dumps.
@@ -360,8 +361,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// dump_providers_enabled_ list) when tracing is not enabled.
subtle::AtomicWord memory_tracing_enabled_;
- // For time-triggered periodic dumps.
- PeriodicGlobalDumpTimer periodic_dump_timer_;
+ // For triggering memory dumps.
+ std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index d14093cbcc..51d41943fb 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -16,13 +16,16 @@
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/sequenced_worker_pool_owner.h"
#include "base/test/test_io_thread.h"
#include "base/test/trace_event_analyzer.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
@@ -70,8 +73,10 @@ void RegisterDumpProvider(
mdm->set_dumper_registrations_ignored_for_testing(true);
}
-void RegisterDumpProvider(MemoryDumpProvider* mdp) {
- RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
+void RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
}
void RegisterDumpProviderWithSequencedTaskRunner(
@@ -94,6 +99,20 @@ void OnTraceDataCollected(Closure quit_closure,
quit_closure.Run();
}
+// Posts |task| to |task_runner| and blocks until it is executed.
+void PostTaskAndWait(const tracked_objects::Location& from_here,
+ SequencedTaskRunner* task_runner,
+ const base::Closure& task) {
+ base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(from_here, task);
+ task_runner->PostTask(
+ FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
+ // The SequencedTaskRunner guarantees that |event| will only be signaled after
+ // |task| is executed.
+ event.Wait();
+}
+
} // namespace
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
@@ -124,6 +143,8 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
MOCK_METHOD0(Destructor, void());
MOCK_METHOD2(OnMemoryDump,
bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
+ MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
+ MOCK_METHOD0(SuspendFastMemoryPolling, void());
MockMemoryDumpProvider() : enable_mock_destructor(false) {
ON_CALL(*this, OnMemoryDump(_, _))
@@ -135,6 +156,10 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
EXPECT_TRUE(pmd->session_state().get() != nullptr);
return true;
}));
+
+ ON_CALL(*this, PollFastMemoryTotal(_))
+ .WillByDefault(
+ Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
}
~MockMemoryDumpProvider() override {
if (enable_mock_destructor)
@@ -147,8 +172,7 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
class TestSequencedTaskRunner : public SequencedTaskRunner {
public:
TestSequencedTaskRunner()
- : worker_pool_(
- new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+ : worker_pool_(2 /* max_threads */, "Test Task Runner"),
enabled_(true),
num_of_post_tasks_(0) {}
@@ -166,19 +190,21 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
const Closure& task,
TimeDelta delay) override {
num_of_post_tasks_++;
- if (enabled_)
- return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+ if (enabled_) {
+ return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
+ task);
+ }
return false;
}
bool RunsTasksOnCurrentThread() const override {
- return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+ return worker_pool_.pool()->RunsTasksOnCurrentThread();
}
private:
~TestSequencedTaskRunner() override {}
- scoped_refptr<SequencedWorkerPool> worker_pool_;
+ SequencedWorkerPoolOwner worker_pool_;
const SequencedWorkerPool::SequenceToken token_;
bool enabled_;
unsigned num_of_post_tasks_;
@@ -215,6 +241,10 @@ class MemoryDumpManagerTest : public testing::Test {
task_runner->PostTask(FROM_HERE, closure);
}
+ void PollFastMemoryTotal(uint64_t* memory_total) {
+ mdm_->PollFastMemoryTotal(memory_total);
+ }
+
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
@@ -244,7 +274,7 @@ class MemoryDumpManagerTest : public testing::Test {
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
bool IsPeriodicDumpingEnabled() const {
- return mdm_->periodic_dump_timer_.IsRunning();
+ return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
}
int GetMaxConsecutiveFailuresCount() const {
@@ -268,7 +298,7 @@ class MemoryDumpManagerTest : public testing::Test {
TEST_F(MemoryDumpManagerTest, SingleDumper) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
// Check that the dumper is not called if the memory category is not enabled.
EnableTracingWithLegacyCategories("foobar-but-not-memory");
@@ -309,7 +339,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
@@ -320,7 +350,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
// Check that requesting dumps with low level of detail actually propagates to
// OnMemoryDump() call on dump providers.
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
@@ -335,8 +365,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const MemoryDumpSessionState* session_state =
@@ -372,7 +402,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
MockMemoryDumpProvider mdp2;
// Enable only mdp1.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -383,7 +413,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Invert: enable mdp1 and disable mdp2.
mdm_->UnregisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -393,7 +423,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
DisableTracing();
// Enable both mdp1 and mdp2.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -409,7 +439,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -431,7 +461,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
{
@@ -443,9 +473,9 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -567,8 +597,8 @@ TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
@@ -601,7 +631,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
@@ -611,7 +641,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
.WillOnce(Return(true))
.WillOnce(
Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
return true;
}))
.WillRepeatedly(Return(true));
@@ -687,13 +717,16 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
// unregister the other one.
for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
int other_idx = (mdps.front() == mdp);
- TestIOThread* other_thread = threads[other_idx].get();
+ // TestIOThread's task runner must be obtained from the main thread but can
+ // then be used from other threads.
+ scoped_refptr<SingleThreadTaskRunner> other_runner =
+ threads[other_idx]->task_runner();
MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
- auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
+ auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
- other_thread->PostTaskAndWait(
- FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
- base::Unretained(&*mdm_), other_mdp));
+ PostTaskAndWait(FROM_HERE, other_runner.get(),
+ base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
on_memory_dump_call_count++;
return true;
};
@@ -716,6 +749,75 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
+ mdp1->enable_mock_destructor = true;
+ mdp2->enable_mock_destructor = true;
+
+ EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp1, Destructor());
+ EXPECT_CALL(*mdp2, Destructor());
+
+ MemoryDumpProvider::Options options;
+ options.is_fast_polling_supported = true;
+ RegisterDumpProvider(mdp1.get(), nullptr, options);
+
+ RunLoop run_loop;
+ scoped_refptr<SingleThreadTaskRunner> test_task_runner =
+ ThreadTaskRunnerHandle::Get();
+ auto quit_closure = run_loop.QuitClosure();
+
+ const int kPollsToQuit = 10;
+ int call_count = 0;
+ MemoryDumpManager* mdm = mdm_.get();
+ const auto poll_function1 = [&call_count, &test_task_runner, quit_closure,
+ &mdp2, mdm, &options, kPollsToQuit,
+ this](uint64_t* total) -> void {
+ ++call_count;
+ if (call_count == 1)
+ RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName);
+ else if (call_count == 4)
+ mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
+ else if (call_count == kPollsToQuit)
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
+
+ // Record increase of 1 GiB of memory at each call.
+ *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024;
+ };
+ EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
+ .Times(testing::AtLeast(kPollsToQuit))
+ .WillRepeatedly(Invoke(poll_function1));
+
+  // Depending on the order of PostTask calls, mdp2 might be registered after
+  // all polls or in between polls.
+ EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
+ .Times(Between(0, kPollsToQuit - 1))
+ .WillRepeatedly(Return());
+
+ MemoryDumpScheduler::SetPollingIntervalForTesting(1);
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3));
+
+ int last_poll_to_request_dump = -2;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
+ .Times(testing::AtLeast(2))
+ .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count](
+ const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) -> void {
+ // Minimum number of polls between dumps must be 3 (polling interval is
+ // 1ms).
+ EXPECT_GE(call_count - last_poll_to_request_dump, 3);
+ last_poll_to_request_dump = call_count;
+ }));
+
+ run_loop.Run();
+ DisableTracing();
+ mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
+}
+
// If a thread (with a dump provider living on it) is torn down during a dump
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
@@ -738,9 +840,14 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
int other_idx = (mdps.front() == mdp);
TestIOThread* other_thread = threads[other_idx].get();
- auto on_dump = [other_thread, &on_memory_dump_call_count](
+ // TestIOThread isn't thread-safe and must be stopped on the |main_runner|.
+ scoped_refptr<SequencedTaskRunner> main_runner =
+ SequencedTaskRunnerHandle::Get();
+ auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
- other_thread->Stop();
+ PostTaskAndWait(
+ FROM_HERE, main_runner.get(),
+ base::Bind(&TestIOThread::Stop, base::Unretained(other_thread)));
on_memory_dump_call_count++;
return true;
};
@@ -768,7 +875,7 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -783,7 +890,7 @@ TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
// began, it will still late-join the party (real use case: startup tracing).
TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
// First check that a RequestGlobalDump() issued before the MemoryDumpManager
@@ -966,7 +1073,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
// Create both same-thread MDP and another MDP with dedicated thread
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
MockMemoryDumpProvider mdp2;
RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
@@ -1085,8 +1192,8 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
thread_ref = PlatformThread::CurrentRef();
TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
- thread_for_unregistration.PostTaskAndWait(
- FROM_HERE,
+ PostTaskAndWait(
+ FROM_HERE, thread_for_unregistration.task_runner().get(),
base::Bind(
&MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
base::Unretained(MemoryDumpManager::GetInstance()),
@@ -1116,7 +1223,7 @@ TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
InitializeMemoryDumpManager(false /* is_coordinator */);
SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
- RegisterDumpProvider(mdp1.get());
+ RegisterDumpProvider(mdp1.get(), nullptr);
std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
kWhitelistedMDPName);
@@ -1167,5 +1274,22 @@ TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestBlacklistedUnsafeUnregistration) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ RegisterDumpProvider(&mdp1, nullptr, kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Unregistering without UnregisterAndDeleteDumpProviderSoon() should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+
+ Thread thread("test thread");
+ thread.Start();
+ RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Unregistering on the wrong thread should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+ thread.Stop();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index 2c502861d8..76c2969e96 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -22,7 +22,8 @@ class BASE_EXPORT MemoryDumpProvider {
struct Options {
Options()
: target_pid(kNullProcessId),
- dumps_on_single_thread_task_runner(false) {}
+ dumps_on_single_thread_task_runner(false),
+ is_fast_polling_supported(false) {}
// If the dump provider generates dumps on behalf of another process,
// |target_pid| contains the pid of that process.
@@ -34,6 +35,11 @@ class BASE_EXPORT MemoryDumpProvider {
// a SingleThreadTaskRunner, which is usually the case. It is faster to run
// all providers that run on the same thread together without thread hops.
bool dumps_on_single_thread_task_runner;
+
+ // Set to true if the dump provider implementation supports high frequency
+ // polling. Only providers running without task runner affinity are
+ // supported.
+ bool is_fast_polling_supported;
};
virtual ~MemoryDumpProvider() {}
@@ -52,6 +58,18 @@ class BASE_EXPORT MemoryDumpProvider {
// collecting extensive allocation data, if supported.
virtual void OnHeapProfilingEnabled(bool) {}
+ // Quickly records the total memory usage in |memory_total|. This method is
+ // called only when the dump provider registration has
+ // |is_fast_polling_supported| set to true. It is used for high-frequency
+ // polling to detect peaks. See the comment on the
+ // |is_fast_polling_supported| option if you need to override this method.
+ virtual void PollFastMemoryTotal(uint64_t* /* memory_total */) {}
+
+ // Indicates that fast memory polling is not going to be used in the near
+ // future and the MDP can tear down any resource kept around for fast memory
+ // polling.
+ virtual void SuspendFastMemoryPolling() {}
+
protected:
MemoryDumpProvider() {}
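Not part of the patch, but a hedged sketch of how a provider could opt into the new fast-polling hooks. The class name and the atomic byte counter are illustrative assumptions; the point is that PollFastMemoryTotal() must stay cheap and lock-free, and that registration passes a null task runner because fast polling requires no task-runner affinity.

// Illustrative sketch only; not part of this patch.
#include <atomic>

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

class MyAllocatorDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  void Register() {
    base::trace_event::MemoryDumpProvider::Options options;
    options.is_fast_polling_supported = true;
    // nullptr task runner: fast-polling providers must not be thread-bound.
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, "MyAllocator", nullptr, options);
  }

  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    auto* dump = pmd->CreateAllocatorDump("my_allocator");
    dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                    base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                    allocated_bytes_.load());
    return true;
  }

  void PollFastMemoryTotal(uint64_t* memory_total) override {
    // No locks, no allocations: just read the counter.
    *memory_total = allocated_bytes_.load();
  }

  void SuspendFastMemoryPolling() override {
    // Nothing is cached purely for polling here; a real provider would release
    // any bookkeeping kept only to make PollFastMemoryTotal() fast.
  }

 private:
  std::atomic<uint64_t> allocated_bytes_{0};  // hypothetical counter
};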
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index e6c5b87b22..bf72bef5e4 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -12,19 +12,28 @@ namespace trace_event {
// static
const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
- case MemoryDumpType::TASK_BEGIN:
- return "task_begin";
- case MemoryDumpType::TASK_END:
- return "task_end";
case MemoryDumpType::PERIODIC_INTERVAL:
return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
return "explicitly_triggered";
+ case MemoryDumpType::PEAK_MEMORY_USAGE:
+ return "peak_memory_usage";
}
NOTREACHED();
return "unknown";
}
+MemoryDumpType StringToMemoryDumpType(const std::string& str) {
+ if (str == "periodic_interval")
+ return MemoryDumpType::PERIODIC_INTERVAL;
+ if (str == "explicitly_triggered")
+ return MemoryDumpType::EXPLICITLY_TRIGGERED;
+ if (str == "peak_memory_usage")
+ return MemoryDumpType::PEAK_MEMORY_USAGE;
+ NOTREACHED();
+ return MemoryDumpType::LAST;
+}
+
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
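An illustrative (not part of the patch) check of the intent behind the new function: StringToMemoryDumpType() is the inverse of MemoryDumpTypeToString() for every supported dump type.

// Illustration only.
#include "base/trace_event/memory_dump_request_args.h"

using base::trace_event::MemoryDumpType;

bool DumpTypeRoundTripsCleanly() {
  const MemoryDumpType kTypes[] = {MemoryDumpType::PERIODIC_INTERVAL,
                                   MemoryDumpType::EXPLICITLY_TRIGGERED,
                                   MemoryDumpType::PEAK_MEMORY_USAGE};
  for (MemoryDumpType type : kTypes) {
    if (base::trace_event::StringToMemoryDumpType(
            base::trace_event::MemoryDumpTypeToString(type)) != type) {
      return false;
    }
  }
  return true;
}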
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index f3ff9d8e3b..90a866fa7a 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -18,16 +18,19 @@ namespace base {
namespace trace_event {
// Captures the reason why a memory dump is being requested. This is to allow
-// selective enabling of dumps, filtering and post-processing.
+// selective enabling of dumps, filtering and post-processing. Important: this
+// must be kept consistent with
+// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
enum class MemoryDumpType {
- TASK_BEGIN, // Dumping memory at the beginning of a message-loop task.
- TASK_END, // Dumping memory at the ending of a message-loop task.
- PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
+ PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
- LAST = EXPLICITLY_TRIGGERED // For IPC macros.
+ PEAK_MEMORY_USAGE, // Dumping memory at detected peak total memory usage.
+ LAST = PEAK_MEMORY_USAGE // For IPC macros.
};
// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
+// Important: this must be kept consistent with
+// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
enum class MemoryDumpLevelOfDetail : uint32_t {
FIRST,
@@ -50,7 +53,8 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
};
// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()).
+// MemoryDumpManager::RequestGlobalMemoryDump()). Important: this must be kept
+// consistent with services/memory_infra/public/cpp/memory_infra_traits.cc.
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -72,6 +76,8 @@ using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
+
BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail);
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
new file mode 100644
index 0000000000..eaa8d63661
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -0,0 +1,304 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include "base/process/process_metrics.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+// Threshold on increase in memory from last dump beyond which a new dump must
+// be triggered.
+int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024; // 50MiB
+const uint32_t kMemoryTotalsPollingInterval = 25;
+uint32_t g_polling_interval_ms_for_testing = 0;
+} // namespace
+
+MemoryDumpScheduler::MemoryDumpScheduler(
+ MemoryDumpManager* mdm,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+ : mdm_(mdm), polling_state_(polling_task_runner) {}
+
+MemoryDumpScheduler::~MemoryDumpScheduler() {}
+
+void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ uint32_t min_time_between_dumps_ms) {
+ if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
+ DCHECK(!periodic_state_.is_configured);
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ DCHECK_NE(0u, min_time_between_dumps_ms);
+
+ polling_state_.level_of_detail = level_of_detail;
+ polling_state_.min_polls_between_dumps =
+ (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
+ polling_state_.polling_interval_ms;
+ polling_state_.current_state = PollingTriggerState::CONFIGURED;
+ } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
+ DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+ periodic_state_.is_configured = true;
+ DCHECK_NE(0u, min_time_between_dumps_ms);
+ switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
+ periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
+ periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
+ break;
+ }
+
+ periodic_state_.min_timer_period_ms = std::min(
+ periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
+ DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
+ periodic_state_.min_timer_period_ms);
+ DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
+ periodic_state_.min_timer_period_ms);
+ }
+}
+
+void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
+ if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
+ return;
+ periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
+ periodic_state_.min_timer_period_ms;
+ periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
+ periodic_state_.min_timer_period_ms;
+
+ periodic_state_.dump_count = 0;
+ periodic_state_.timer.Start(
+ FROM_HERE,
+ TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
+ Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
+}
+
+void MemoryDumpScheduler::NotifyPollingSupported() {
+ if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
+ return;
+
+ polling_state_.current_state = PollingTriggerState::ENABLED;
+ polling_state_.ResetTotals();
+
+ polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
+}
+
+void MemoryDumpScheduler::NotifyDumpTriggered() {
+ if (polling_state_.polling_task_runner &&
+ polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+ polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
+ return;
+ }
+ if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ return;
+
+ polling_state_.ResetTotals();
+}
+
+void MemoryDumpScheduler::DisableAllTriggers() {
+ if (periodic_state_.timer.IsRunning())
+ periodic_state_.timer.Stop();
+ DisablePolling();
+}
+
+void MemoryDumpScheduler::DisablePolling() {
+ if (polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+ if (polling_state_.polling_task_runner->PostTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
+ return;
+ }
+ polling_state_.current_state = PollingTriggerState::DISABLED;
+ polling_state_.polling_task_runner = nullptr;
+}
+
+// static
+void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
+ g_polling_interval_ms_for_testing = interval;
+}
+
+bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
+ return periodic_state_.timer.IsRunning();
+}
+
+void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (periodic_state_.light_dumps_rate > 0 &&
+ periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (periodic_state_.heavy_dumps_rate > 0 &&
+ periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_state_.dump_count;
+
+ mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
+void MemoryDumpScheduler::PollMemoryOnPollingThread() {
+ if (polling_state_.current_state != PollingTriggerState::ENABLED)
+ return;
+
+ uint64_t polled_memory = 0;
+ bool res = mdm_->PollFastMemoryTotal(&polled_memory);
+ DCHECK(res);
+ if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+ TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
+ polled_memory / 1024 / 1024);
+ }
+
+ if (ShouldTriggerDump(polled_memory)) {
+ TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
+ "Peak memory dump Triggered",
+ TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
+ polled_memory / 1024 / 1024);
+
+ mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
+ polling_state_.level_of_detail);
+ }
+
+ // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
+ TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
+}
+
+bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
+ // This function tries to detect peak memory usage as discussed in
+ // https://goo.gl/0kOU4A.
+
+ if (current_memory_total == 0)
+ return false;
+
+ bool should_dump = false;
+ ++polling_state_.num_polls_from_last_dump;
+ if (polling_state_.last_dump_memory_total == 0) {
+ // If it's the first sample, trigger a memory dump.
+ should_dump = true;
+ } else if (polling_state_.min_polls_between_dumps >
+ polling_state_.num_polls_from_last_dump) {
+ return false;
+ }
+
+ int64_t increase_from_last_dump =
+ current_memory_total - polling_state_.last_dump_memory_total;
+ should_dump |=
+ increase_from_last_dump > polling_state_.memory_increase_threshold;
+ should_dump |= IsCurrentSamplePeak(current_memory_total);
+ if (should_dump)
+ polling_state_.ResetTotals();
+ return should_dump;
+}
+
+bool MemoryDumpScheduler::IsCurrentSamplePeak(
+ uint64_t current_memory_total_bytes) {
+ uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
+ polling_state_.last_memory_totals_kb_index =
+ (polling_state_.last_memory_totals_kb_index + 1) %
+ PollingTriggerState::kMaxNumMemorySamples;
+ uint64_t mean = 0;
+ for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+ if (polling_state_.last_memory_totals_kb[i] == 0) {
+ // Not enough samples to detect peaks.
+ polling_state_
+ .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ current_memory_total_kb;
+ return false;
+ }
+ mean += polling_state_.last_memory_totals_kb[i];
+ }
+ mean = mean / PollingTriggerState::kMaxNumMemorySamples;
+ uint64_t variance = 0;
+ for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+ variance += (polling_state_.last_memory_totals_kb[i] - mean) *
+ (polling_state_.last_memory_totals_kb[i] - mean);
+ }
+ variance = variance / PollingTriggerState::kMaxNumMemorySamples;
+
+ polling_state_
+ .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+ current_memory_total_kb;
+
+ // If stddev is less than 0.2% of the mean, consider the process inactive.
+ bool is_stddev_low = variance < mean / 500 * mean / 500;
+ if (is_stddev_low)
+ return false;
+
+ // Under a normal distribution, a sample falls below (mean + 3.69 * stddev)
+ // with 99.99% probability, so a sample beyond that is treated as a peak.
+ return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
+ (3.69 * 3.69 * variance);
+}
+
+MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
+ : is_configured(false),
+ dump_count(0),
+ min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
+ light_dumps_rate(0),
+ heavy_dumps_rate(0),
+ light_dump_period_ms(0),
+ heavy_dump_period_ms(0) {}
+
+MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
+ DCHECK(!timer.IsRunning());
+}
+
+MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+ : current_state(DISABLED),
+ level_of_detail(MemoryDumpLevelOfDetail::FIRST),
+ polling_task_runner(polling_task_runner),
+ polling_interval_ms(g_polling_interval_ms_for_testing
+ ? g_polling_interval_ms_for_testing
+ : kMemoryTotalsPollingInterval),
+ min_polls_between_dumps(0),
+ num_polls_from_last_dump(-1),
+ last_dump_memory_total(0),
+ memory_increase_threshold(0),
+ last_memory_totals_kb_index(0) {}
+
+MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
+ DCHECK(!polling_task_runner);
+}
+
+void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
+ if (!memory_increase_threshold) {
+ memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+ // Set threshold to 1% of total system memory.
+ SystemMemoryInfoKB meminfo;
+ bool res = GetSystemMemoryInfo(&meminfo);
+ if (res)
+ memory_increase_threshold = (meminfo.total / 100) * 1024;
+#endif
+ }
+
+ // Update |last_dump_memory_total| from the recorded totals unless this is
+ // the first poll.
+ if (num_polls_from_last_dump >= 0 &&
+ last_memory_totals_kb[last_memory_totals_kb_index]) {
+ last_dump_memory_total =
+ last_memory_totals_kb[last_memory_totals_kb_index] * 1024;
+ }
+ num_polls_from_last_dump = 0;
+ for (uint32_t i = 0; i < kMaxNumMemorySamples; ++i)
+ last_memory_totals_kb[i] = 0;
+ last_memory_totals_kb_index = 0;
+}
+
+} // namespace trace_event
+} // namespace base
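To make the thresholds above concrete, a worked example with illustrative numbers: suppose the last 50 samples have mean = 500,000 kB and stddev = 2,000 kB. The inactivity cutoff is mean / 500 = 1,000 kB, so the stddev is not "low" and peak detection stays active. A sample is treated as a peak when its squared distance from the mean exceeds 3.69^2 * variance, i.e. roughly when it rises above mean + 3.69 * stddev = 507,380 kB. Independently of the statistics, a dump is also triggered once the polled total grows by more than |memory_increase_threshold| since the last dump (1% of system RAM where available, otherwise the 50 MiB default), provided at least |min_polls_between_dumps| polls have elapsed.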
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
new file mode 100644
index 0000000000..fd21fce834
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -0,0 +1,141 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
+#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+
+namespace trace_event {
+
+class MemoryDumpManager;
+
+// Schedules global dump requests based on the triggers added.
+class BASE_EXPORT MemoryDumpScheduler {
+ public:
+ MemoryDumpScheduler(
+ MemoryDumpManager* mdm_,
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ ~MemoryDumpScheduler();
+
+ // Adds triggers for scheduling global dumps. Periodic and peak triggers
+ // cannot be added together. At the moment periodic support is limited to
+ // at most one periodic trigger per dump mode, and peak triggers are limited
+ // to at most one. All intervals must be an integral multiple of the smallest
+ // interval specified.
+ void AddTrigger(MemoryDumpType trigger_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ uint32_t min_time_between_dumps_ms);
+
+ // Starts periodic dumps.
+ void NotifyPeriodicTriggerSupported();
+
+ // Starts polling memory total.
+ void NotifyPollingSupported();
+
+ // Resets the trigger state so that the minimum time between dumps is
+ // respected after a dump has been triggered.
+ void NotifyDumpTriggered();
+
+ // Disables all triggers.
+ void DisableAllTriggers();
+
+ private:
+ friend class MemoryDumpManagerTest;
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
+
+ // Helper struct to schedule periodic memory dumps.
+ struct PeriodicTriggerState {
+ PeriodicTriggerState();
+ ~PeriodicTriggerState();
+
+ bool is_configured;
+
+ RepeatingTimer timer;
+ uint32_t dump_count;
+ uint32_t min_timer_period_ms;
+ uint32_t light_dumps_rate;
+ uint32_t heavy_dumps_rate;
+
+ uint32_t light_dump_period_ms;
+ uint32_t heavy_dump_period_ms;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
+ };
+
+ struct PollingTriggerState {
+ enum State {
+ CONFIGURED, // Polling trigger was added.
+ ENABLED, // Polling is running.
+ DISABLED // Polling is disabled.
+ };
+
+ static const uint32_t kMaxNumMemorySamples = 50;
+
+ explicit PollingTriggerState(
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+ ~PollingTriggerState();
+
+ // Helper to clear the tracked memory totals and poll count from last dump.
+ void ResetTotals();
+
+ State current_state;
+ MemoryDumpLevelOfDetail level_of_detail;
+
+ scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
+ uint32_t polling_interval_ms;
+
+ // Minimum number of polls after the last dump before the next dump can be
+ // triggered.
+ int min_polls_between_dumps;
+ int num_polls_from_last_dump;
+
+ uint64_t last_dump_memory_total;
+ int64_t memory_increase_threshold;
+ uint64_t last_memory_totals_kb[kMaxNumMemorySamples];
+ uint32_t last_memory_totals_kb_index;
+
+ DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
+ };
+
+ // Helper to disable polling on the polling thread.
+ void DisablePolling();
+
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ // Polls memory usage and triggers a dump if a peak is detected.
+ void PollMemoryOnPollingThread();
+
+ // Returns true if a dump should be triggered for |current_memory_total|.
+ bool ShouldTriggerDump(uint64_t current_memory_total);
+
+ // Helper to detect peaks in memory usage.
+ bool IsCurrentSamplePeak(uint64_t current_memory_total);
+
+ // Must be set before enabling tracing.
+ static void SetPollingIntervalForTesting(uint32_t interval);
+
+ // True if periodic dumping is enabled.
+ bool IsPeriodicTimerRunningForTesting();
+
+ MemoryDumpManager* mdm_;
+
+ PeriodicTriggerState periodic_state_;
+ PollingTriggerState polling_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
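A hedged sketch of the expected wiring (the real call sites live in MemoryDumpManager; |mdm| and |polling_task_runner| are assumed to come from the manager when memory-infra tracing starts, and the interval values are illustrative):

// Illustrative wiring only; not part of this patch.
#include <memory>

#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_scheduler.h"

std::unique_ptr<base::trace_event::MemoryDumpScheduler> ConfigureScheduler(
    base::trace_event::MemoryDumpManager* mdm,
    scoped_refptr<base::SingleThreadTaskRunner> polling_task_runner) {
  auto scheduler = base::MakeUnique<base::trace_event::MemoryDumpScheduler>(
      mdm, polling_task_runner);

  // Periodic mode: light dumps every 250 ms, detailed dumps every 2000 ms.
  // 2000 is an integral multiple of 250, as AddTrigger() requires.
  scheduler->AddTrigger(base::trace_event::MemoryDumpType::PERIODIC_INTERVAL,
                        base::trace_event::MemoryDumpLevelOfDetail::LIGHT,
                        250);
  scheduler->AddTrigger(base::trace_event::MemoryDumpType::PERIODIC_INTERVAL,
                        base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
                        2000);
  scheduler->NotifyPeriodicTriggerSupported();

  // Peak mode (mutually exclusive with periodic triggers) would instead be:
  //   scheduler->AddTrigger(MemoryDumpType::PEAK_MEMORY_USAGE,
  //                         MemoryDumpLevelOfDetail::LIGHT,
  //                         2000 /* min_time_between_dumps_ms */);
  //   scheduler->NotifyPollingSupported();
  return scheduler;
}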
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index b3d9a8ccfc..d26b82a5b7 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -7,8 +7,8 @@
namespace base {
namespace trace_event {
-MemoryDumpSessionState::MemoryDumpSessionState() {}
-
+MemoryDumpSessionState::MemoryDumpSessionState()
+ : heap_profiler_breakdown_threshold_bytes_(0) {}
MemoryDumpSessionState::~MemoryDumpSessionState() {}
void MemoryDumpSessionState::SetStackFrameDeduplicator(
@@ -23,9 +23,14 @@ void MemoryDumpSessionState::SetTypeNameDeduplicator(
type_name_deduplicator_ = std::move(type_name_deduplicator);
}
-void MemoryDumpSessionState::SetMemoryDumpConfig(
- const TraceConfig::MemoryDumpConfig& config) {
- memory_dump_config_ = config;
+void MemoryDumpSessionState::SetAllowedDumpModes(
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes) {
+ allowed_dump_modes_ = allowed_dump_modes;
+}
+
+bool MemoryDumpSessionState::IsDumpModeAllowed(
+ MemoryDumpLevelOfDetail dump_mode) const {
+ return allowed_dump_modes_.count(dump_mode) != 0;
}
} // namespace trace_event
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index f199ec1a2f..46092cb483 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -6,11 +6,12 @@
#define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
#include <memory>
+#include <set>
#include "base/base_export.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/trace_config.h"
+#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
@@ -40,11 +41,18 @@ class BASE_EXPORT MemoryDumpSessionState
void SetTypeNameDeduplicator(
std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
- const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
- return memory_dump_config_;
+ void SetAllowedDumpModes(
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes);
+
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) const;
+
+ void set_heap_profiler_breakdown_threshold_bytes(uint32_t value) {
+ heap_profiler_breakdown_threshold_bytes_ = value;
}
- void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
+ uint32_t heap_profiler_breakdown_threshold_bytes() const {
+ return heap_profiler_breakdown_threshold_bytes_;
+ }
private:
friend class RefCountedThreadSafe<MemoryDumpSessionState>;
@@ -58,9 +66,9 @@ class BASE_EXPORT MemoryDumpSessionState
// trace is finalized.
std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
- // The memory dump config, copied at the time when the tracing session was
- // started.
- TraceConfig::MemoryDumpConfig memory_dump_config_;
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes_;
+
+ uint32_t heap_profiler_breakdown_threshold_bytes_;
};
} // namespace trace_event
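A short hedged sketch of how the new accessor is meant to be consulted before producing a dump (illustrative; the actual checks live in MemoryDumpManager):

// Illustrative only: skip dump modes the session was not configured for.
void MaybeDump(const scoped_refptr<base::trace_event::MemoryDumpSessionState>&
                   session_state,
               base::trace_event::MemoryDumpLevelOfDetail requested_mode) {
  if (!session_state->IsDumpModeAllowed(requested_mode)) {
    // e.g. a DETAILED dump requested while only BACKGROUND is allowed.
    return;
  }
  // ... proceed with the dump ...
}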
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index aed187fa1d..ae74322040 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -17,20 +17,26 @@ namespace {
// providers can be added here only if the background mode dump has very
// little performance and memory overhead.
const char* const kDumpProviderWhitelist[] = {
+ "android::ResourceManagerImpl",
"BlinkGC",
- "ChildDiscardableSharedMemoryManager",
+ "ClientDiscardableSharedMemoryManager",
"DOMStorage",
- "HostDiscardableSharedMemoryManager",
+ "DiscardableSharedMemoryManager",
"IndexedDBBackingStore",
"JavaHeap",
+ "LevelDB",
"LeveldbValueStore",
"Malloc",
+ "MemoryCache",
"PartitionAlloc",
"ProcessMemoryMetrics",
"Skia",
"Sql",
+ "URLRequestContext",
"V8Isolate",
"WinHeap",
+ "SyncDirectory",
+ "TabRestoreServiceHelper",
nullptr // End of list marker.
};
@@ -46,6 +52,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"java_heap",
"java_heap/allocated_objects",
"leveldb/index_db/0x?",
+ "leveldb/leveldb_proto/0x?",
"leveldb/value_store/Extensions.Database.Open.Settings/0x?",
"leveldb/value_store/Extensions.Database.Open.Rules/0x?",
"leveldb/value_store/Extensions.Database.Open.State/0x?",
@@ -55,14 +62,33 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"malloc",
"malloc/allocated_objects",
"malloc/metadata_fragmentation_caches",
+ "net/http_network_session_0x?",
+ "net/http_network_session_0x?/quic_stream_factory",
+ "net/http_network_session_0x?/socket_pool",
+ "net/http_network_session_0x?/spdy_session_pool",
+ "net/http_network_session_0x?/stream_factory",
+ "net/sdch_manager_0x?",
+ "net/ssl_session_cache",
+ "net/url_request_context_0x?",
+ "net/url_request_context_0x?/http_cache",
+ "net/url_request_context_0x?/http_network_session",
+ "net/url_request_context_0x?/sdch_manager",
+ "web_cache/Image_resources",
+ "web_cache/CSS stylesheet_resources",
+ "web_cache/Script_resources",
+ "web_cache/XSL stylesheet_resources",
+ "web_cache/Font_resources",
+ "web_cache/Other_resources",
"partition_alloc/allocated_objects",
"partition_alloc/partitions",
+ "partition_alloc/partitions/array_buffer",
"partition_alloc/partitions/buffer",
"partition_alloc/partitions/fast_malloc",
"partition_alloc/partitions/layout",
"skia/sk_glyph_cache",
"skia/sk_resource_cache",
"sqlite",
+ "ui/resource_manager_0x?",
"v8/isolate_0x?/heap_spaces",
"v8/isolate_0x?/heap_spaces/code_space",
"v8/isolate_0x?/heap_spaces/large_object_space",
@@ -74,6 +100,47 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"v8/isolate_0x?/zapped_for_debug",
"winheap",
"winheap/allocated_objects",
+ "sync/0x?/kernel",
+ "sync/0x?/store",
+ "sync/0x?/model_type/APP",
+ "sync/0x?/model_type/APP_LIST",
+ "sync/0x?/model_type/APP_NOTIFICATION",
+ "sync/0x?/model_type/APP_SETTING",
+ "sync/0x?/model_type/ARC_PACKAGE",
+ "sync/0x?/model_type/ARTICLE",
+ "sync/0x?/model_type/AUTOFILL",
+ "sync/0x?/model_type/AUTOFILL_PROFILE",
+ "sync/0x?/model_type/AUTOFILL_WALLET",
+ "sync/0x?/model_type/BOOKMARK",
+ "sync/0x?/model_type/DEVICE_INFO",
+ "sync/0x?/model_type/DICTIONARY",
+ "sync/0x?/model_type/EXPERIMENTS",
+ "sync/0x?/model_type/EXTENSION",
+ "sync/0x?/model_type/EXTENSION_SETTING",
+ "sync/0x?/model_type/FAVICON_IMAGE",
+ "sync/0x?/model_type/FAVICON_TRACKING",
+ "sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
+ "sync/0x?/model_type/MANAGED_USER",
+ "sync/0x?/model_type/MANAGED_USER_SETTING",
+ "sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
+ "sync/0x?/model_type/MANAGED_USER_WHITELIST",
+ "sync/0x?/model_type/NIGORI",
+ "sync/0x?/model_type/PASSWORD",
+ "sync/0x?/model_type/PREFERENCE",
+ "sync/0x?/model_type/PRINTER",
+ "sync/0x?/model_type/PRIORITY_PREFERENCE",
+ "sync/0x?/model_type/READING_LIST",
+ "sync/0x?/model_type/SEARCH_ENGINE",
+ "sync/0x?/model_type/SESSION",
+ "sync/0x?/model_type/SYNCED_NOTIFICATION",
+ "sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
+ "sync/0x?/model_type/THEME",
+ "sync/0x?/model_type/TYPED_URL",
+ "sync/0x?/model_type/WALLET_METADATA",
+ "sync/0x?/model_type/WIFI_CREDENTIAL",
+ "tab_restore/service_helper_0x?/entries",
+ "tab_restore/service_helper_0x?/entries/tab_0x?",
+ "tab_restore/service_helper_0x?/entries/window_0x?",
nullptr // End of list marker.
};
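For context, a hedged sketch of how these entries are typically consulted, assuming the lookup helper IsMemoryAllocatorDumpNameWhitelisted() declared in the corresponding header (memory_infra_background_whitelist.h); the dump names are illustrative:

// Illustration only. "0x?" entries stand in for hex addresses in dump names.
#include "base/trace_event/memory_infra_background_whitelist.h"

using base::trace_event::IsMemoryAllocatorDumpNameWhitelisted;

void CheckBackgroundDumpNames() {
  // Expected to be allowed in background mode.
  bool malloc_ok = IsMemoryAllocatorDumpNameWhitelisted("malloc/allocated_objects");
  bool v8_ok = IsMemoryAllocatorDumpNameWhitelisted("v8/isolate_0x1a2b3c/heap_spaces");
  // Names not in the list are expected to be rejected for background dumps.
  bool other_ok = IsMemoryAllocatorDumpNameWhitelisted("my_allocator/objects");
  (void)malloc_ok; (void)v8_ok; (void)other_ok;
}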
diff --git a/base/trace_event/memory_usage_estimator.cc b/base/trace_event/memory_usage_estimator.cc
new file mode 100644
index 0000000000..c769d5b6f1
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.cc
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+namespace base {
+namespace trace_event {
+
+template size_t EstimateMemoryUsage(const std::string&);
+template size_t EstimateMemoryUsage(const string16&);
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_usage_estimator.h b/base/trace_event/memory_usage_estimator.h
new file mode 100644
index 0000000000..db4ea6956c
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.h
@@ -0,0 +1,549 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <deque>
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stack>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/linked_list.h"
+#include "base/strings/string16.h"
+#include "base/template_util.h"
+
+// Composable memory usage estimators.
+//
+// This file defines a set of EstimateMemoryUsage(object) functions that return
+// approximate memory usage of their argument.
+//
+// The ultimate goal is to make memory usage estimation for a class simply a
+// matter of aggregating EstimateMemoryUsage() results over all fields.
+//
+// That is achieved via composability: if EstimateMemoryUsage() is defined
+// for T then EstimateMemoryUsage() is also defined for any combination of
+// containers holding T (e.g. std::map<int, std::vector<T>>).
+//
+// There are two ways of defining EstimateMemoryUsage() for a type:
+//
+// 1. As a global function 'size_t EstimateMemoryUsage(T)' in the
+//    base::trace_event namespace.
+//
+// 2. As a 'size_t T::EstimateMemoryUsage() const' method. In this case the
+//    EstimateMemoryUsage(T) function in the base::trace_event namespace is
+//    provided automatically.
+//
+// Here is an example implementation:
+//
+// size_t foo::bar::MyClass::EstimateMemoryUsage() const {
+// return base::trace_event::EstimateMemoryUsage(name_) +
+// base::trace_event::EstimateMemoryUsage(id_) +
+// base::trace_event::EstimateMemoryUsage(items_);
+// }
+//
+// The approach is simple: first call EstimateMemoryUsage() on all members,
+// then recursively fix compilation errors that are caused by types not
+// implementing EstimateMemoryUsage().
+
+namespace base {
+namespace trace_event {
+
+// Declarations
+
+// If T declares 'EstimateMemoryUsage() const' member function, then
+// global function EstimateMemoryUsage(T) is available, and just calls
+// the member function.
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage());
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array);
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]);
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length);
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length);
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
+
+// Containers
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list);
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack);
+
+// TODO(dskiba):
+// std::forward_list
+
+// Definitions
+
+namespace internal {
+
+// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
+// (This is the default version, which is false.)
+template <class T, class X = void>
+struct HasEMU : std::false_type {};
+
+// This HasEMU specialization is only picked up if there exists a function
+// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
+// achieve this don't work on MSVC.
+template <class T>
+struct HasEMU<
+ T,
+ typename std::enable_if<std::is_same<
+ size_t,
+ decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
+ : std::true_type {};
+
+// EMUCaller<T> does three things:
+// 1. Defines Call() method that calls EstimateMemoryUsage(T) if it's
+// available.
+// 2. If EstimateMemoryUsage(T) is not available, but T has trivial dtor
+// (i.e. it's POD, integer, pointer, enum, etc.) then it defines Call()
+// method that returns 0. This is useful for containers, which allocate
+// memory regardless of T (also for cases like std::map<int, MyClass>).
+// 3. Finally, if EstimateMemoryUsage(T) is not available, then it triggers
+//    a static_assert with a helpful message. That cuts the number of errors
+// considerably - if you just call EstimateMemoryUsage(T) but it's not
+// available for T, then compiler will helpfully list *all* possible
+// variants of it, with an explanation for each.
+template <class T, class X = void>
+struct EMUCaller {
+ // std::is_same<> below makes static_assert depend on T, in order to
+ // prevent it from asserting regardless of instantiation.
+ static_assert(std::is_same<T, std::false_type>::value,
+ "Neither global function 'size_t EstimateMemoryUsage(T)' "
+ "nor member function 'size_t T::EstimateMemoryUsage() const' "
+ "is defined for the type.");
+
+ static size_t Call(const T&) { return 0; }
+};
+
+template <class T>
+struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
+ static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
+};
+
+template <class T>
+struct EMUCaller<
+ T,
+ typename std::enable_if<!HasEMU<T>::value &&
+ is_trivially_destructible<T>::value>::type> {
+ static size_t Call(const T&) { return 0; }
+};
+
+// Returns reference to the underlying container of a container adapter.
+// Works for std::stack, std::queue and std::priority_queue.
+template <class A>
+const typename A::container_type& GetUnderlyingContainer(const A& adapter) {
+ struct ExposedAdapter : A {
+ using A::c;
+ };
+ return adapter.*&ExposedAdapter::c;
+}
+
+} // namespace internal
+
+// Proxy that deduces T and calls EMUCaller<T>.
+// To be used by EstimateMemoryUsage() implementations for containers.
+template <class T>
+size_t EstimateItemMemoryUsage(const T& value) {
+ return internal::EMUCaller<T>::Call(value);
+}
+
+template <class I>
+size_t EstimateIterableMemoryUsage(const I& iterable) {
+ size_t memory_usage = 0;
+ for (const auto& item : iterable) {
+ memory_usage += EstimateItemMemoryUsage(item);
+ }
+ return memory_usage;
+}
+
+// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage()) {
+ static_assert(
+ std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
+ "'T::EstimateMemoryUsage() const' must return size_t.");
+ return object.EstimateMemoryUsage();
+}
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
+ using string_type = std::basic_string<C, T, A>;
+ using value_type = typename string_type::value_type;
+ // C++11 doesn't leave much room for implementors - std::string can
+ // use short string optimization, but that's about it. We detect SSO
+ // by checking that c_str() points inside |string|.
+ const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
+ const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
+ if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
+ // SSO string
+ return 0;
+ }
+ return (string.capacity() + 1) * sizeof(value_type);
+}
+
+// Use explicit instantiations from the .cc file (reduces bloat).
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length) {
+ size_t memory_usage = sizeof(T) * array_length;
+ for (size_t i = 0; i != array_length; ++i) {
+ memory_usage += EstimateItemMemoryUsage(array[i]);
+ }
+ return memory_usage;
+}
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
+ return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
+}
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length) {
+ return EstimateMemoryUsage(array.get(), array_length);
+}
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
+ auto use_count = ptr.use_count();
+ if (use_count == 0) {
+ return 0;
+ }
+ // Model shared_ptr after libc++,
+ // see __shared_ptr_pointer from include/memory
+ struct SharedPointer {
+ void* vtbl;
+ long shared_owners;
+ long shared_weak_owners;
+ T* value;
+ };
+ // If an object of size S is shared N > S times, S / N would round down to 0;
+ // we prefer to (potentially) overestimate by rounding up rather than return 0.
+ return sizeof(SharedPointer) +
+ (EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
+}
+
+// std::pair
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
+ return EstimateItemMemoryUsage(pair.first) +
+ EstimateItemMemoryUsage(pair.second);
+}
+
+// std::vector
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
+ return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
+}
+
+// std::list
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list) {
+ using value_type = typename std::list<T, A>::value_type;
+ struct Node {
+ Node* prev;
+ Node* next;
+ value_type value;
+ };
+ return sizeof(Node) * list.size() +
+ EstimateIterableMemoryUsage(list);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list) {
+ size_t memory_usage = 0u;
+ for (base::LinkNode<T>* node = list.head(); node != list.end();
+ node = node->next()) {
+ // Since we increment by calling node = node->next() we know that node
+ // isn't nullptr.
+ memory_usage += EstimateMemoryUsage(*node->value()) + sizeof(T);
+ }
+ return memory_usage;
+}
+
+// Tree containers
+
+template <class V>
+size_t EstimateTreeMemoryUsage(size_t size) {
+ // Tree containers are modeled after libc++
+ // (__tree_node from include/__tree)
+ struct Node {
+ Node* left;
+ Node* right;
+ Node* parent;
+ bool is_black;
+ V value;
+ };
+ return sizeof(Node) * size;
+}
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
+ using value_type = typename std::set<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
+ using value_type = typename std::multiset<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
+ using value_type = typename std::map<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
+ using value_type = typename std::multimap<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+// HashMap containers
+
+namespace internal {
+
+// While the hashtable containers' model doesn't depend on the STL
+// implementation, one detail still crept in: bucket_count. It's used in size
+// estimation, but its value after inserting N items is not predictable.
+// This function is specialized by unittests to return a constant value, thus
+// excluding bucket_count from testing.
+template <class V>
+size_t HashMapBucketCountForTesting(size_t bucket_count) {
+ return bucket_count;
+}
+
+} // namespace internal
+
+template <class V>
+size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
+ // Hashtable containers are modeled after libc++
+ // (__hash_node from include/__hash_table)
+ struct Node {
+ void* next;
+ size_t hash;
+ V value;
+ };
+ using Bucket = void*;
+ bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
+ return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
+}
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
+ using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
+ using value_type =
+ typename std::unordered_multimap<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+// std::deque
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque) {
+// Since std::deque implementations are wildly different
+// (see crbug.com/674287), we can't have one "good enough"
+// way to estimate.
+
+// kBlockSize - minimum size of a block, in bytes
+// kMinBlockLength - number of elements in a block
+// if sizeof(T) > kBlockSize
+#if defined(_LIBCPP_VERSION)
+ size_t kBlockSize = 4096;
+ size_t kMinBlockLength = 16;
+#elif defined(__GLIBCXX__)
+ size_t kBlockSize = 512;
+ size_t kMinBlockLength = 1;
+#elif defined(_MSC_VER)
+ size_t kBlockSize = 16;
+ size_t kMinBlockLength = 1;
+#else
+ size_t kBlockSize = 0;
+ size_t kMinBlockLength = 1;
+#endif
+
+ size_t block_length =
+ (sizeof(T) > kBlockSize) ? kMinBlockLength : kBlockSize / sizeof(T);
+
+ size_t blocks = (deque.size() + block_length - 1) / block_length;
+
+#if defined(__GLIBCXX__)
+ // libstdc++: deque always has at least one block
+ if (!blocks)
+ blocks = 1;
+#endif
+
+#if defined(_LIBCPP_VERSION)
+ // libc++: deque keeps at most two blocks when it shrinks,
+ // so even if the size is zero, deque might be holding up
+ // to 4096 * 2 bytes. One way to know whether deque has
+ // ever allocated (and hence has 1 or 2 blocks) is to check
+ // iterator's pointer. Non-zero value means that deque has
+ // at least one block.
+ if (!blocks && deque.begin().operator->())
+ blocks = 1;
+#endif
+
+ return (blocks * block_length * sizeof(T)) +
+ EstimateIterableMemoryUsage(deque);
+}
+
+// Container adapters
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack) {
+ return EstimateMemoryUsage(internal::GetUnderlyingContainer(stack));
+}
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
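A rough usage sketch of the composability described in the header comment (the class and field names are illustrative, not part of the patch):

// Illustrative only: expose EstimateMemoryUsage() for a class, and the
// container overloads above compose it automatically.
#include <map>
#include <string>
#include <vector>

#include "base/trace_event/memory_usage_estimator.h"

struct CacheEntry {
  std::string key;
  std::vector<char> payload;

  size_t EstimateMemoryUsage() const {
    return base::trace_event::EstimateMemoryUsage(key) +
           base::trace_event::EstimateMemoryUsage(payload);
  }
};

// A nested container of CacheEntry now works out of the box; the int keys
// contribute 0 (trivially destructible) and each entry reports its own usage.
size_t EstimateCacheUsage(
    const std::map<int, std::vector<CacheEntry>>& cache) {
  return base::trace_event::EstimateMemoryUsage(cache);
}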
diff --git a/base/trace_event/memory_usage_estimator_unittest.cc b/base/trace_event/memory_usage_estimator_unittest.cc
new file mode 100644
index 0000000000..80237c0192
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+#include <stdlib.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(ARCH_CPU_64_BITS)
+#define EXPECT_EQ_32_64(_, e, a) EXPECT_EQ(e, a)
+#else
+#define EXPECT_EQ_32_64(e, _, a) EXPECT_EQ(e, a)
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Test class with predictable memory usage.
+class Data {
+ public:
+ explicit Data(size_t size = 17): size_(size) {
+ }
+
+ size_t size() const { return size_; }
+
+ size_t EstimateMemoryUsage() const {
+ return size_;
+ }
+
+ bool operator < (const Data& other) const {
+ return size_ < other.size_;
+ }
+ bool operator == (const Data& other) const {
+ return size_ == other.size_;
+ }
+
+ struct Hasher {
+ size_t operator () (const Data& data) const {
+ return data.size();
+ }
+ };
+
+ private:
+ size_t size_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This kills variance of bucket_count across STL implementations.
+template <>
+size_t HashMapBucketCountForTesting<Data>(size_t) {
+ return 10;
+}
+template <>
+size_t HashMapBucketCountForTesting<std::pair<const Data, short>>(size_t) {
+ return 10;
+}
+
+} // namespace internal
+
+TEST(EstimateMemoryUsageTest, String) {
+ std::string string(777, 'a');
+ EXPECT_EQ(string.capacity() + 1, EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, String16) {
+ string16 string(777, 'a');
+ EXPECT_EQ(sizeof(char16) * (string.capacity() + 1),
+ EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, Arrays) {
+ // std::array
+ {
+ std::array<Data, 10> array;
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // T[N]
+ {
+ Data array[10];
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // C array
+ {
+ struct Item {
+ char payload[10];
+ };
+ Item* array = new Item[7];
+ EXPECT_EQ(70u, EstimateMemoryUsage(array, 7));
+ delete[] array;
+ }
+}
+
+TEST(EstimateMemoryUsageTest, UniquePtr) {
+ // Empty
+ {
+ std::unique_ptr<Data> ptr;
+ EXPECT_EQ(0u, EstimateMemoryUsage(ptr));
+ }
+
+ // Not empty
+ {
+ std::unique_ptr<Data> ptr(new Data());
+ EXPECT_EQ_32_64(21u, 25u, EstimateMemoryUsage(ptr));
+ }
+
+ // With a pointer
+ {
+ std::unique_ptr<Data*> ptr(new Data*());
+ EXPECT_EQ(sizeof(void*), EstimateMemoryUsage(ptr));
+ }
+
+ // With an array
+ {
+ struct Item {
+ uint32_t payload[10];
+ };
+ std::unique_ptr<Item[]> ptr(new Item[7]);
+ EXPECT_EQ(280u, EstimateMemoryUsage(ptr, 7));
+ }
+}
+
+TEST(EstimateMemoryUsageTest, Vector) {
+ std::vector<Data> vector;
+ vector.reserve(1000);
+
+ // For an empty vector we should return memory usage of its buffer
+ size_t capacity = vector.capacity();
+ size_t expected_size = capacity * sizeof(Data);
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+
+ // If vector is not empty, its size should also include memory usages
+ // of all elements.
+ for (size_t i = 0; i != capacity / 2; ++i) {
+ vector.push_back(Data(i));
+ expected_size += EstimateMemoryUsage(vector.back());
+ }
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+}
+
+TEST(EstimateMemoryUsageTest, List) {
+ struct POD {
+ short data;
+ };
+ std::list<POD> list;
+ for (int i = 0; i != 1000; ++i) {
+ list.push_back(POD());
+ }
+ EXPECT_EQ_32_64(12000u, 24000u, EstimateMemoryUsage(list));
+}
+
+TEST(EstimateMemoryUsageTest, Set) {
+ std::set<std::pair<int, Data>> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert({i, Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, MultiSet) {
+ std::multiset<bool> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert((i & 1) != 0);
+ }
+ EXPECT_EQ_32_64(16000u, 32000u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, Map) {
+ std::map<Data, int> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), i});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, MultiMap) {
+ std::multimap<char, Data> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({static_cast<char>(i), Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedSet) {
+ std::unordered_set<Data, Data::Hasher> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(511540u, 523580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiSet) {
+ std::unordered_multiset<Data, Data::Hasher> set;
+ for (int i = 0; i != 500; ++i) {
+ set.insert(Data(i));
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(261540u, 273580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMap) {
+ std::unordered_map<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiMap) {
+ std::unordered_multimap<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, Deque) {
+ std::deque<Data> deque;
+
+ // Pick a large value so that platform-specific accounting
+ // for deque's blocks is small compared to usage of all items.
+ constexpr size_t kDataSize = 100000;
+ for (int i = 0; i != 1500; ++i) {
+ deque.push_back(Data(kDataSize));
+ }
+
+ // Compare against a reasonable minimum (i.e. no overhead).
+ size_t min_expected_usage = deque.size() * (sizeof(Data) + kDataSize);
+ EXPECT_LE(min_expected_usage, EstimateMemoryUsage(deque));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 826989237b..63d1340e42 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -18,7 +18,7 @@
#include "build/build_config.h"
#if defined(OS_IOS)
-#include <sys/sysctl.h>
+#include <mach/vm_page_size.h>
#endif
#if defined(OS_POSIX)
@@ -57,19 +57,13 @@ bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
// On iOS, getpagesize() returns the user page sizes, but for allocating
- // arrays for mincore(), kernel page sizes is needed. sysctlbyname() should
- // be used for this. Refer to crbug.com/542671 and Apple rdar://23651782
- int pagesize;
- size_t pagesize_len;
- int status = sysctlbyname("vm.pagesize", NULL, &pagesize_len, nullptr, 0);
- if (!status && pagesize_len == sizeof(pagesize)) {
- if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
- return pagesize;
- }
- LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
- // Falls back to getpagesize() although it may be wrong in certain cases.
-#endif // defined(OS_IOS)
+  // arrays for mincore(), the kernel page size is needed. Use vm_kernel_page_size
+ // as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
+ // Refer to http://crbug.com/542671 and Apple rdar://23651782
+ return vm_kernel_page_size;
+#else
return base::GetPageSize();
+#endif // defined(OS_IOS)
}
// static
@@ -164,14 +158,14 @@ ProcessMemoryDump::~ProcessMemoryDump() {}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name) {
return AddAllocatorDumpInternal(
- WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
+ MakeUnique<MemoryAllocatorDump>(absolute_name, this));
}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name,
const MemoryAllocatorDumpGuid& guid) {
return AddAllocatorDumpInternal(
- WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
+ MakeUnique<MemoryAllocatorDump>(absolute_name, this, guid));
}
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index d020c7d652..6f8d167273 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -31,7 +31,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class MemoryDumpSessionState;
class TracedValue;
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index d40f4302fe..e26e9fd28f 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -168,7 +168,8 @@ class TraceBufferVector : public TraceBuffer {
// have to add the metadata events and flush thread-local buffers even if
// the buffer is full.
*index = chunks_.size();
- chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
+    // Put nullptr in the slot of an in-flight chunk.
+ chunks_.push_back(nullptr);
++in_flight_chunk_count_;
// + 1 because zero chunk_seq is not allowed.
return std::unique_ptr<TraceBufferChunk>(
@@ -181,7 +182,7 @@ class TraceBufferVector : public TraceBuffer {
DCHECK_LT(index, chunks_.size());
DCHECK(!chunks_[index]);
--in_flight_chunk_count_;
- chunks_[index] = chunk.release();
+ chunks_[index] = std::move(chunk);
}
bool IsFull() const override { return chunks_.size() >= max_chunks_; }
@@ -198,7 +199,7 @@ class TraceBufferVector : public TraceBuffer {
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
return NULL;
return chunk->GetEventAt(handle.event_index);
@@ -207,7 +208,7 @@ class TraceBufferVector : public TraceBuffer {
const TraceBufferChunk* NextChunk() override {
while (current_iteration_index_ < chunks_.size()) {
// Skip in-flight chunks.
- const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+ const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
if (chunk)
return chunk;
}
@@ -223,7 +224,7 @@ class TraceBufferVector : public TraceBuffer {
overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
chunks_ptr_vector_resident_size);
for (size_t i = 0; i < chunks_.size(); ++i) {
- TraceBufferChunk* chunk = chunks_[i];
+ TraceBufferChunk* chunk = chunks_[i].get();
// Skip the in-flight (nullptr) chunks. They will be accounted by the
// per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
if (chunk)
@@ -235,7 +236,7 @@ class TraceBufferVector : public TraceBuffer {
size_t in_flight_chunk_count_;
size_t current_iteration_index_;
size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
+ std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
diff --git a/base/trace_event/trace_category.h b/base/trace_event/trace_category.h
new file mode 100644
index 0000000000..5a7915ac03
--- /dev/null
+++ b/base/trace_event/trace_category.h
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace trace_event {
+
+// Captures the state of an individual trace category. Nothing except tracing
+// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
+struct TraceCategory {
+ // The TRACE_EVENT macros should only use this value as a bool.
+ // These enum values are effectively a public API and third_party projects
+ // depend on their value. Hence, never remove or recycle existing bits, unless
+ // you are sure that all the third-party projects that depend on this have
+ // been updated.
+ enum StateFlags : uint8_t {
+ ENABLED_FOR_RECORDING = 1 << 0,
+
+ // Not used anymore.
+ DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
+ DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+
+ ENABLED_FOR_ETW_EXPORT = 1 << 3,
+ ENABLED_FOR_FILTERING = 1 << 4
+ };
+
+ static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
+ static_assert(
+ offsetof(TraceCategory, state_) == 0,
+ "|state_| must be the first field of the TraceCategory class.");
+ return reinterpret_cast<const TraceCategory*>(state_ptr);
+ }
+
+ bool is_valid() const { return name_ != nullptr; }
+ void set_name(const char* name) { name_ = name; }
+ const char* name() const {
+ DCHECK(is_valid());
+ return name_;
+ }
+
+ // TODO(primiano): This is an intermediate solution to deal with the fact that
+ // today TRACE_EVENT* macros cache the state ptr. They should just cache the
+  // full TraceCategory ptr, which is immutable, and use the helper functions
+  // here. That would remove the need for this awkward ptr getter completely.
+ const uint8_t* state_ptr() const {
+ return const_cast<const uint8_t*>(&state_);
+ }
+
+ uint8_t state() const {
+ return *const_cast<volatile const uint8_t*>(&state_);
+ }
+
+ bool is_enabled() const { return state() != 0; }
+
+ void set_state(uint8_t state) {
+ *const_cast<volatile uint8_t*>(&state_) = state;
+ }
+
+ void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
+ void set_state_flag(StateFlags flag) { set_state(state() | flag); }
+
+ uint32_t enabled_filters() const {
+ return *const_cast<volatile const uint32_t*>(&enabled_filters_);
+ }
+
+ bool is_filter_enabled(size_t index) const {
+ DCHECK(index < sizeof(enabled_filters_) * 8);
+ return (enabled_filters() & (1 << index)) != 0;
+ }
+
+ void set_enabled_filters(uint32_t enabled_filters) {
+ *const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
+ }
+
+ void reset_for_testing() {
+ set_state(0);
+ set_enabled_filters(0);
+ }
+
+ // These fields should not be accessed directly, not even by tracing code.
+  // The only reason these are not private is that private fields would make it
+  // impossible to have a global array of TraceCategory in category_registry.cc
+  // without creating initializers. See discussion on goo.gl/qhZN94 and
+ // crbug.com/{660967,660828}.
+
+ // The enabled state. TRACE_EVENT* macros will capture events if any of the
+ // flags here are set. Since TRACE_EVENTx macros are used in a lot of
+ // fast-paths, accesses to this field are non-barriered and racy by design.
+ // This field is mutated when starting/stopping tracing and we don't care
+ // about missing some events.
+ uint8_t state_;
+
+  // When ENABLED_FOR_FILTERING is set, this contains a bitmap of the
+  // corresponding enabled filters (see event_filters.h).
+ uint32_t enabled_filters_;
+
+  // TraceCategory group names are long-lived static strings.
+ const char* name_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CATEGORY_H_
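(Editorial aside, not part of the patch: a minimal sketch of how tracing internals are expected to consume this struct, assuming a cached state pointer as described in the TODO above. The function name and the particular flag checks below are illustrative, not an existing API.)

#include <stdint.h>

#include "base/trace_event/trace_category.h"

using base::trace_event::TraceCategory;

// Sketch: the fast path treats the cached state byte as a bool; only when it
// is non-zero do we inspect the individual flags.
bool ShouldEmitEvent(const uint8_t* cached_state_ptr) {
  const TraceCategory* category = TraceCategory::FromStatePtr(cached_state_ptr);
  if (!category->is_enabled())
    return false;  // Nothing enabled for this category.
  return (category->state() & (TraceCategory::ENABLED_FOR_RECORDING |
                               TraceCategory::ENABLED_FOR_FILTERING)) != 0;
}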
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index b343ea00bc..36de107bf8 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -30,13 +30,11 @@ const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
-const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";
const char kEnableArgumentFilter[] = "enable-argument-filter";
// String parameters that can be used to parse the trace config string.
const char kRecordModeParam[] = "record_mode";
-const char kEnableSamplingParam[] = "enable_sampling";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
const char kIncludedCategoriesParam[] = "included_categories";
@@ -50,24 +48,32 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
const char kMemoryDumpConfigParam[] = "memory_dump_config";
const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
-const char kPeriodicIntervalParam[] = "periodic_interval_ms";
-const char kModeParam[] = "mode";
+const char kTriggerModeParam[] = "mode";
+const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
+const char kTriggerTypeParam[] = "type";
+const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
const char kHeapProfilerOptions[] = "heap_profiler_options";
const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
+// String parameters used to parse category event filters.
+const char kEventFiltersParam[] = "event_filters";
+const char kFilterPredicateParam[] = "filter_predicate";
+const char kFilterArgsParam[] = "filter_args";
+
// Default configuration of memory dumps.
const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
- 2000, // periodic_interval_ms
- MemoryDumpLevelOfDetail::DETAILED};
+ 2000, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::DETAILED, MemoryDumpType::PERIODIC_INTERVAL};
const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
- 250, // periodic_interval_ms
- MemoryDumpLevelOfDetail::LIGHT};
+ 250, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::LIGHT, MemoryDumpType::PERIODIC_INTERVAL};
class ConvertableTraceConfigToTraceFormat
: public base::trace_event::ConvertableToTraceFormat {
public:
explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
: trace_config_(trace_config) {}
+
~ConvertableTraceConfigToTraceFormat() override {}
void AppendAsTraceFormat(std::string* out) const override {
@@ -115,6 +121,94 @@ void TraceConfig::MemoryDumpConfig::Clear() {
heap_profiler_options.Clear();
}
+void TraceConfig::MemoryDumpConfig::Merge(
+ const TraceConfig::MemoryDumpConfig& config) {
+ triggers.insert(triggers.end(), config.triggers.begin(),
+ config.triggers.end());
+ allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
+ config.allowed_dump_modes.end());
+ heap_profiler_options.breakdown_threshold_bytes =
+ std::min(heap_profiler_options.breakdown_threshold_bytes,
+ config.heap_profiler_options.breakdown_threshold_bytes);
+}
+
+TraceConfig::EventFilterConfig::EventFilterConfig(
+ const std::string& predicate_name)
+ : predicate_name_(predicate_name) {}
+
+TraceConfig::EventFilterConfig::~EventFilterConfig() {}
+
+TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
+ *this = tc;
+}
+
+TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
+ const TraceConfig::EventFilterConfig& rhs) {
+ if (this == &rhs)
+ return *this;
+
+ predicate_name_ = rhs.predicate_name_;
+ included_categories_ = rhs.included_categories_;
+ excluded_categories_ = rhs.excluded_categories_;
+ if (rhs.args_)
+ args_ = rhs.args_->CreateDeepCopy();
+
+ return *this;
+}
+
+void TraceConfig::EventFilterConfig::AddIncludedCategory(
+ const std::string& category) {
+ included_categories_.push_back(category);
+}
+
+void TraceConfig::EventFilterConfig::AddExcludedCategory(
+ const std::string& category) {
+ excluded_categories_.push_back(category);
+}
+
+void TraceConfig::EventFilterConfig::SetArgs(
+ std::unique_ptr<base::DictionaryValue> args) {
+ args_ = std::move(args);
+}
+
+bool TraceConfig::EventFilterConfig::GetArgAsSet(
+ const char* key,
+ std::unordered_set<std::string>* out_set) const {
+ const ListValue* list = nullptr;
+ if (!args_->GetList(key, &list))
+ return false;
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ std::string value;
+ if (list->GetString(i, &value))
+ out_set->insert(value);
+ }
+ return true;
+}
+
+bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
+ const char* category_group_name) const {
+ CStringTokenizer category_group_tokens(
+ category_group_name, category_group_name + strlen(category_group_name),
+ ",");
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+
+ for (const auto& excluded_category : excluded_categories_) {
+ if (base::MatchPattern(category_group_token, excluded_category)) {
+ return false;
+ }
+ }
+
+ for (const auto& included_category : included_categories_) {
+ if (base::MatchPattern(category_group_token, included_category)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
TraceConfig::TraceConfig() {
InitializeDefault();
}
@@ -159,14 +253,14 @@ TraceConfig::TraceConfig(StringPiece config_string) {
TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
- enable_sampling_(tc.enable_sampling_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
memory_dump_config_(tc.memory_dump_config_),
included_categories_(tc.included_categories_),
disabled_categories_(tc.disabled_categories_),
excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_) {}
+ synthetic_delays_(tc.synthetic_delays_),
+ event_filters_(tc.event_filters_) {}
TraceConfig::~TraceConfig() {
}
@@ -176,7 +270,6 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
return *this;
record_mode_ = rhs.record_mode_;
- enable_sampling_ = rhs.enable_sampling_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
@@ -184,6 +277,7 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
disabled_categories_ = rhs.disabled_categories_;
excluded_categories_ = rhs.excluded_categories_;
synthetic_delays_ = rhs.synthetic_delays_;
+ event_filters_ = rhs.event_filters_;
return *this;
}
@@ -200,7 +294,7 @@ std::string TraceConfig::ToString() const {
std::unique_ptr<ConvertableToTraceFormat>
TraceConfig::AsConvertableToTraceFormat() const {
- return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
+ return MakeUnique<ConvertableTraceConfigToTraceFormat>(*this);
}
std::string TraceConfig::ToCategoryFilterString() const {
@@ -271,7 +365,6 @@ bool TraceConfig::IsCategoryGroupEnabled(
void TraceConfig::Merge(const TraceConfig& config) {
if (record_mode_ != config.record_mode_
- || enable_sampling_ != config.enable_sampling_
|| enable_systrace_ != config.enable_systrace_
|| enable_argument_filter_ != config.enable_argument_filter_) {
DLOG(ERROR) << "Attempting to merge trace config with a different "
@@ -289,9 +382,7 @@ void TraceConfig::Merge(const TraceConfig& config) {
included_categories_.clear();
}
- memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
- config.memory_dump_config_.triggers.begin(),
- config.memory_dump_config_.triggers.end());
+ memory_dump_config_.Merge(config.memory_dump_config_);
disabled_categories_.insert(disabled_categories_.end(),
config.disabled_categories_.begin(),
@@ -302,11 +393,12 @@ void TraceConfig::Merge(const TraceConfig& config) {
synthetic_delays_.insert(synthetic_delays_.end(),
config.synthetic_delays_.begin(),
config.synthetic_delays_.end());
+ event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
+ config.event_filters().end());
}
void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
included_categories_.clear();
@@ -314,11 +406,11 @@ void TraceConfig::Clear() {
excluded_categories_.clear();
synthetic_delays_.clear();
memory_dump_config_.Clear();
+ event_filters_.clear();
}
void TraceConfig::InitializeDefault() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
}
@@ -339,7 +431,6 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
}
bool val;
- enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
@@ -352,6 +443,10 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
if (dict.GetList(kSyntheticDelaysParam, &category_list))
SetSyntheticDelaysFromList(*category_list);
+ const base::ListValue* category_event_filters = nullptr;
+ if (dict.GetList(kEventFiltersParam, &category_event_filters))
+ SetEventFiltersFromConfigList(*category_event_filters);
+
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
    // If dump triggers are not set, the client is using the legacy format with
    // just the category enabled. So, use the default periodic dump config.
@@ -406,7 +501,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
if (!trace_options_string.empty()) {
@@ -421,8 +515,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
record_mode_ = ECHO_TO_CONSOLE;
} else if (token == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
- } else if (token == kEnableSampling) {
- enable_sampling_ = true;
} else if (token == kEnableSystrace) {
enable_systrace_ = true;
} else if (token == kEnableArgumentFilter) {
@@ -516,17 +608,26 @@ void TraceConfig::SetMemoryDumpConfigFromConfigDict(
if (!trigger_list->GetDictionary(i, &trigger))
continue;
+ MemoryDumpConfig::Trigger dump_config;
int interval = 0;
- if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
- continue;
-
+ if (!trigger->GetInteger(kMinTimeBetweenDumps, &interval)) {
+ // If "min_time_between_dumps_ms" param was not given, then the trace
+ // config uses old format where only periodic dumps are supported.
+ trigger->GetInteger(kPeriodicIntervalLegacyParam, &interval);
+ dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
+ } else {
+ std::string trigger_type_str;
+ trigger->GetString(kTriggerTypeParam, &trigger_type_str);
+ dump_config.trigger_type = StringToMemoryDumpType(trigger_type_str);
+ }
DCHECK_GT(interval, 0);
- MemoryDumpConfig::Trigger dump_config;
- dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+ dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(interval);
+
std::string level_of_detail_str;
- trigger->GetString(kModeParam, &level_of_detail_str);
+ trigger->GetString(kTriggerModeParam, &level_of_detail_str);
dump_config.level_of_detail =
StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+
memory_dump_config_.triggers.push_back(dump_config);
}
}
@@ -555,6 +656,50 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
+void TraceConfig::SetEventFiltersFromConfigList(
+ const base::ListValue& category_event_filters) {
+ event_filters_.clear();
+
+ for (size_t event_filter_index = 0;
+ event_filter_index < category_event_filters.GetSize();
+ ++event_filter_index) {
+ const base::DictionaryValue* event_filter = nullptr;
+ if (!category_event_filters.GetDictionary(event_filter_index,
+ &event_filter))
+ continue;
+
+ std::string predicate_name;
+ CHECK(event_filter->GetString(kFilterPredicateParam, &predicate_name))
+ << "Invalid predicate name in category event filter.";
+
+ EventFilterConfig new_config(predicate_name);
+ const base::ListValue* included_list = nullptr;
+ CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
+ << "Missing included_categories in category event filter.";
+
+ for (size_t i = 0; i < included_list->GetSize(); ++i) {
+ std::string category;
+ if (included_list->GetString(i, &category))
+ new_config.AddIncludedCategory(category);
+ }
+
+ const base::ListValue* excluded_list = nullptr;
+ if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
+ for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
+ std::string category;
+ if (excluded_list->GetString(i, &category))
+ new_config.AddExcludedCategory(category);
+ }
+ }
+
+ const base::DictionaryValue* args_dict = nullptr;
+ if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+ new_config.SetArgs(args_dict->CreateDeepCopy());
+
+ event_filters_.push_back(new_config);
+ }
+}
+
std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto dict = MakeUnique<DictionaryValue>();
switch (record_mode_) {
@@ -574,7 +719,6 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
NOTREACHED();
}
- dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
@@ -586,6 +730,41 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+ if (!event_filters_.empty()) {
+ std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
+ for (const EventFilterConfig& filter : event_filters_) {
+ std::unique_ptr<base::DictionaryValue> filter_dict(
+ new base::DictionaryValue());
+ filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
+
+ std::unique_ptr<base::ListValue> included_categories_list(
+ new base::ListValue());
+ for (const std::string& included_category : filter.included_categories())
+ included_categories_list->AppendString(included_category);
+
+ filter_dict->Set(kIncludedCategoriesParam,
+ std::move(included_categories_list));
+
+ if (!filter.excluded_categories().empty()) {
+ std::unique_ptr<base::ListValue> excluded_categories_list(
+ new base::ListValue());
+ for (const std::string& excluded_category :
+ filter.excluded_categories())
+ excluded_categories_list->AppendString(excluded_category);
+
+ filter_dict->Set(kExcludedCategoriesParam,
+ std::move(excluded_categories_list));
+ }
+
+ if (filter.filter_args())
+ filter_dict->Set(kFilterArgsParam,
+ filter.filter_args()->CreateDeepCopy());
+
+ filter_list->Append(std::move(filter_dict));
+ }
+ dict->Set(kEventFiltersParam, std::move(filter_list));
+ }
+
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
auto allowed_modes = MakeUnique<ListValue>();
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
@@ -597,10 +776,14 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto triggers_list = MakeUnique<ListValue>();
for (const auto& config : memory_dump_config_.triggers) {
auto trigger_dict = MakeUnique<DictionaryValue>();
- trigger_dict->SetInteger(kPeriodicIntervalParam,
- static_cast<int>(config.periodic_interval_ms));
+ trigger_dict->SetString(kTriggerTypeParam,
+ MemoryDumpTypeToString(config.trigger_type));
+ trigger_dict->SetInteger(
+ kMinTimeBetweenDumps,
+ static_cast<int>(config.min_time_between_dumps_ms));
trigger_dict->SetString(
- kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
+ kTriggerModeParam,
+ MemoryDumpLevelOfDetailToString(config.level_of_detail));
triggers_list->Append(std::move(trigger_dict));
}
@@ -639,8 +822,6 @@ std::string TraceConfig::ToTraceOptionsString() const {
default:
NOTREACHED();
}
- if (enable_sampling_)
- ret = ret + "," + kEnableSampling;
if (enable_systrace_)
ret = ret + "," + kEnableSystrace;
if (enable_argument_filter_)
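(Editorial aside, not part of the patch: for readers of the parsing code above, a config string in the new format might look like the sketch below. The key names mirror the constants defined at the top of trace_config.cc; the category names and the whitelisted event are made up, and "event_whitelist_predicate" is the predicate name used in the unit tests.)

#include "base/trace_event/trace_config.h"

void ExampleNewFormatConfig() {
  // Sketch: one event filter plus one periodic memory dump trigger. The
  // memory_dump_config section only takes effect because the memory-infra
  // category is also listed in included_categories.
  const char kConfig[] =
      "{"
      "\"record_mode\":\"record-until-full\","
      "\"included_categories\":[\"toplevel\","
      "\"disabled-by-default-memory-infra\"],"
      "\"event_filters\":[{"
      "\"filter_predicate\":\"event_whitelist_predicate\","
      "\"included_categories\":[\"toplevel\"],"
      "\"filter_args\":{\"event_name_whitelist\":[\"MyEvent\"]}"
      "}],"
      "\"memory_dump_config\":{"
      "\"triggers\":[{"
      "\"type\":\"periodic_interval\","
      "\"mode\":\"light\","
      "\"min_time_between_dumps_ms\":250"
      "}]"
      "}"
      "}";
  base::trace_event::TraceConfig config(kConfig);
  // config.event_filters() now holds one EventFilterConfig;
  // config.memory_dump_config() holds one PERIODIC_INTERVAL trigger.
}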
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 91d6f1f3bd..717c261316 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -7,8 +7,10 @@
#include <stdint.h>
+#include <memory>
#include <set>
#include <string>
+#include <unordered_set>
#include <vector>
#include "base/base_export.h"
@@ -51,8 +53,9 @@ class BASE_EXPORT TraceConfig {
// Specifies the triggers in the memory dump config.
struct Trigger {
- uint32_t periodic_interval_ms;
+ uint32_t min_time_between_dumps_ms;
MemoryDumpLevelOfDetail level_of_detail;
+ MemoryDumpType trigger_type;
};
// Specifies the configuration options for the heap profiler.
@@ -71,6 +74,8 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+ void Merge(const MemoryDumpConfig& config);
+
// Set of memory dump modes allowed for the tracing session. The explicitly
// triggered dumps will be successful only if the dump mode is allowed in
// the config.
@@ -80,6 +85,39 @@ class BASE_EXPORT TraceConfig {
HeapProfiler heap_profiler_options;
};
+ class BASE_EXPORT EventFilterConfig {
+ public:
+ EventFilterConfig(const std::string& predicate_name);
+ EventFilterConfig(const EventFilterConfig& tc);
+
+ ~EventFilterConfig();
+
+ EventFilterConfig& operator=(const EventFilterConfig& rhs);
+
+ void AddIncludedCategory(const std::string& category);
+ void AddExcludedCategory(const std::string& category);
+ void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
+
+ bool IsCategoryGroupEnabled(const char* category_group_name) const;
+
+ const std::string& predicate_name() const { return predicate_name_; }
+ base::DictionaryValue* filter_args() const { return args_.get(); }
+ const StringList& included_categories() const {
+ return included_categories_;
+ }
+ const StringList& excluded_categories() const {
+ return excluded_categories_;
+ }
+
+ private:
+ std::string predicate_name_;
+ StringList included_categories_;
+ StringList excluded_categories_;
+ std::unique_ptr<base::DictionaryValue> args_;
+ };
+ typedef std::vector<EventFilterConfig> EventFilters;
+
TraceConfig();
// Create TraceConfig object from category filter and trace options strings.
@@ -93,22 +131,22 @@ class BASE_EXPORT TraceConfig {
//
// |trace_options_string| is a comma-delimited list of trace options.
// Possible options are: "record-until-full", "record-continuously",
- // "record-as-much-as-possible", "trace-to-console", "enable-sampling",
- // "enable-systrace" and "enable-argument-filter".
+ // "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
+ // "enable-argument-filter".
  // The first 4 options are trace recording modes and hence
  // mutually exclusive. If more than one trace recording mode appears in the
  // options_string, the last one takes precedence. If no trace
  // recording mode is specified, recording mode is RECORD_UNTIL_FULL.
//
// The trace option will first be reset to the default option
- // (record_mode set to RECORD_UNTIL_FULL, enable_sampling, enable_systrace,
- // and enable_argument_filter set to false) before options parsed from
+ // (record_mode set to RECORD_UNTIL_FULL, enable_systrace and
+ // enable_argument_filter set to false) before options parsed from
+  // |trace_options_string| are applied to it. If |trace_options_string| is
// invalid, the final state of trace options is undefined.
//
// Example: TraceConfig("test_MyTest*", "record-until-full");
// Example: TraceConfig("test_MyTest*,test_OtherStuff",
- // "record-continuously, enable-sampling");
+ // "record-continuously");
// Example: TraceConfig("-excluded_category1,-excluded_category2",
// "record-until-full, trace-to-console");
// would set ECHO_TO_CONSOLE as the recording mode.
@@ -138,7 +176,6 @@ class BASE_EXPORT TraceConfig {
// Example:
// {
// "record_mode": "record-continuously",
- // "enable_sampling": true,
// "enable_systrace": true,
// "enable_argument_filter": true,
// "included_categories": ["included",
@@ -174,12 +211,10 @@ class BASE_EXPORT TraceConfig {
const StringList& GetSyntheticDelayValues() const;
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
- bool IsSamplingEnabled() const { return enable_sampling_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
- void EnableSampling() { enable_sampling_ = true; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableArgumentFilter() { enable_argument_filter_ = true; }
@@ -196,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group) const;
+ bool IsCategoryGroupEnabled(const char* category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
@@ -210,6 +245,11 @@ class BASE_EXPORT TraceConfig {
return memory_dump_config_;
}
+ const EventFilters& event_filters() const { return event_filters_; }
+ void SetEventFilters(const EventFilters& filter_configs) {
+ event_filters_ = filter_configs;
+ }
+
private:
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -250,6 +290,7 @@ class BASE_EXPORT TraceConfig {
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
+ void SetEventFiltersFromConfigList(const base::ListValue& event_filters);
std::unique_ptr<DictionaryValue> ToDict() const;
std::string ToTraceOptionsString() const;
@@ -271,7 +312,6 @@ class BASE_EXPORT TraceConfig {
bool HasIncludedPatterns() const;
TraceRecordMode record_mode_;
- bool enable_sampling_ : 1;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
@@ -281,6 +321,7 @@ class BASE_EXPORT TraceConfig {
StringList disabled_categories_;
StringList excluded_categories_;
StringList synthetic_delays_;
+ EventFilters event_filters_;
};
} // namespace trace_event
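(Editorial aside, not part of the patch: the new EventFilterConfig can also be built programmatically via the setters declared above and installed with SetEventFilters(). A minimal sketch follows; the category names are made up and the predicate name is the one used by the unit tests.)

#include "base/trace_event/trace_config.h"

base::trace_event::TraceConfig MakeFilteredConfig() {
  using base::trace_event::TraceConfig;
  // Record the "input" and "toplevel" categories, but only pass "input"
  // events through the whitelist filter predicate.
  TraceConfig config("input,toplevel", "record-until-full");
  TraceConfig::EventFilterConfig filter("event_whitelist_predicate");
  filter.AddIncludedCategory("input");
  config.SetEventFilters({filter});
  return config;
}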
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 6b47f8dc55..744e8a8acc 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -13,87 +13,144 @@ namespace trace_event {
class TraceConfigMemoryTestUtil {
public:
+ static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
+ int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"light\","
+ "\"periodic_interval_ms\":%d"
+ "},"
+ "{"
+ "\"mode\":\"detailed\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ }
+
static std::string GetTraceConfig_PeriodicTriggers(int light_period,
int heavy_period) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"heap_profiler_options\":{"
- "\"breakdown_threshold_bytes\":2048"
- "},"
- "\"triggers\":["
- "{"
- "\"mode\":\"light\","
- "\"periodic_interval_ms\":%d"
- "},"
- "{"
- "\"mode\":\"detailed\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
}
static std::string GetTraceConfig_EmptyTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"triggers\":["
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_NoTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\"],"
- "\"triggers\":["
- "{"
- "\"mode\":\"background\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, period_ms);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"background\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, period_ms);
+ }
+
+ static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, heavy_period);
}
};
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 4b46b2fefd..74aa7bdc63 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
#include "base/macros.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_config.h"
@@ -19,38 +20,52 @@ namespace {
const char kDefaultTraceConfigString[] =
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}";
const char kCustomTraceConfigString[] =
- "{"
+ "{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
+ "\"event_filters\":["
+ "{"
+ "\"excluded_categories\":[\"unfiltered_cat\"],"
+ "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+ "\"filter_predicate\":\"event_whitelist_predicate\","
+ "\"included_categories\":[\"*\"]"
+ "}"
+ "],"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
- "\"included_categories\":[\"included\","
- "\"inc_pattern*\","
- "\"disabled-by-default-cc\","
- "\"disabled-by-default-memory-infra\"],"
+ "\"included_categories\":["
+ "\"included\","
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\","
+ "\"disabled-by-default-memory-infra\"],"
"\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"heap_profiler_options\":{"
- "\"breakdown_threshold_bytes\":10240"
- "},"
- "\"triggers\":["
- "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
- "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
- "]"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":10240"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":50,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":1000,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
+ "]"
"},"
"\"record_mode\":\"record-continuously\","
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
- "}";
+ "}";
void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
@@ -72,44 +87,31 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From trace options strings
TraceConfig config("", "record-until-full");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
config.ToTraceOptionsString().c_str());
- config = TraceConfig("", "record-until-full, enable-sampling");
- EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
- EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsArgumentFilterEnabled());
- EXPECT_STREQ("record-until-full,enable-sampling",
- config.ToTraceOptionsString().c_str());
-
config = TraceConfig("", "enable-systrace, record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously,enable-systrace",
@@ -117,7 +119,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
@@ -125,19 +126,17 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig(
"",
- "enable-systrace,trace-to-console,enable-sampling,enable-argument-filter");
+ "enable-systrace,trace-to-console,enable-argument-filter");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ(
- "trace-to-console,enable-sampling,enable-systrace,enable-argument-filter",
+ "trace-to-console,enable-systrace,enable-argument-filter",
config.ToTraceOptionsString().c_str());
config = TraceConfig(
"", "record-continuously, record-until-full, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
@@ -145,28 +144,24 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From TraceRecordMode
config = TraceConfig("", RECORD_UNTIL_FULL);
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", ECHO_TO_CONSOLE);
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
@@ -198,33 +193,30 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From both trace options and category filter strings
config = TraceConfig("", "");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
- "enable-systrace, trace-to-console, enable-sampling");
+ "enable-systrace, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From both trace options and category filter strings with spaces.
config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern* ",
- "enable-systrace, ,trace-to-console, enable-sampling ");
+ "enable-systrace, ,trace-to-console ");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From category filter string and TraceRecordMode
@@ -232,7 +224,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
@@ -242,7 +233,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
TraceConfig config("", "foo-bar-baz");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
@@ -250,7 +240,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
@@ -330,6 +319,7 @@ TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+ EXPECT_TRUE(tc.event_filters().empty());
// Enabling only the disabled-by-default-* category means the default ones
// are also enabled.
tc = TraceConfig("disabled-by-default-foo", "");
@@ -346,7 +336,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig tc(dict);
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -360,7 +349,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig default_tc(*default_dict);
EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
- EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
@@ -374,7 +362,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig custom_tc(*custom_dict);
EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
- EXPECT_TRUE(custom_tc.IsSamplingEnabled());
EXPECT_TRUE(custom_tc.IsSystraceEnabled());
EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,"
@@ -387,22 +374,28 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TEST(TraceConfigTest, TraceConfigFromValidString) {
// Using some non-empty config string.
const char config_string[] =
- "{"
+ "{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
+ "\"event_filters\":["
+ "{"
+ "\"excluded_categories\":[\"unfiltered_cat\"],"
+ "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+ "\"filter_predicate\":\"event_whitelist_predicate\","
+ "\"included_categories\":[\"*\"]"
+ "}"
+ "],"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"included_categories\":[\"included\","
- "\"inc_pattern*\","
- "\"disabled-by-default-cc\"],"
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\"],"
"\"record_mode\":\"record-continuously\","
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
- "}";
+ "}";
TraceConfig tc(config_string);
EXPECT_STREQ(config_string, tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
- EXPECT_TRUE(tc.IsSamplingEnabled());
EXPECT_TRUE(tc.IsSystraceEnabled());
EXPECT_TRUE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,disabled-by-default-cc,-excluded,"
@@ -434,6 +427,26 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ("test.Delay1;16", tc.GetSyntheticDelayValues()[0].c_str());
EXPECT_STREQ("test.Delay2;32", tc.GetSyntheticDelayValues()[1].c_str());
+ EXPECT_EQ(tc.event_filters().size(), 1u);
+ const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
+ EXPECT_STREQ("event_whitelist_predicate",
+ event_filter.predicate_name().c_str());
+ EXPECT_EQ(1u, event_filter.included_categories().size());
+ EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.excluded_categories().size());
+ EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
+ EXPECT_TRUE(event_filter.filter_args());
+
+ std::string json_out;
+ base::JSONWriter::Write(*event_filter.filter_args(), &json_out);
+ EXPECT_STREQ(json_out.c_str(),
+ "{\"event_name_whitelist\":[\"a snake\",\"a dog\"]}");
+ std::unordered_set<std::string> filter_values;
+ EXPECT_TRUE(event_filter.GetArgAsSet("event_name_whitelist", &filter_values));
+ EXPECT_EQ(2u, filter_values.size());
+ EXPECT_EQ(1u, filter_values.count("a snake"));
+ EXPECT_EQ(1u, filter_values.count("a dog"));
+
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
@@ -446,7 +459,6 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ(tc.ToString().c_str(),
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}");
@@ -458,7 +470,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
TraceConfig tc("");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -467,7 +478,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -476,7 +486,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -485,7 +494,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -495,7 +503,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
// initialize TraceConfig with best effort.
tc = TraceConfig("{}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -503,7 +510,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -511,7 +517,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
const char invalid_config_string[] =
"{"
- "\"enable_sampling\":\"true\","
"\"enable_systrace\":1,"
"\"excluded_categories\":[\"excluded\"],"
"\"included_categories\":\"not a list\","
@@ -522,7 +527,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"}";
tc = TraceConfig(invalid_config_string);
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("-excluded,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
@@ -547,7 +551,6 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
tc.Merge(tc2);
EXPECT_STREQ("{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
@@ -614,15 +617,11 @@ TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
TEST(TraceConfigTest, SetTraceOptionValues) {
TraceConfig tc;
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
- tc.EnableSampling();
- EXPECT_TRUE(tc.IsSamplingEnabled());
-
tc.EnableSystrace();
EXPECT_TRUE(tc.IsSystraceEnabled());
}
@@ -632,30 +631,47 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
TraceConfig tc1(tc_str1);
EXPECT_EQ(tc_str1, tc1.ToString());
+ TraceConfig tc2(
+ TraceConfigMemoryTestUtil::GetTraceConfig_LegacyPeriodicTriggers(200,
+ 2000));
+ EXPECT_EQ(tc_str1, tc2.ToString());
+
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u,
+ tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u,
+ tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
tc1.memory_dump_config_.triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
- std::string tc_str2 =
+ std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
1 /* period_ms */);
- TraceConfig tc2(tc_str2);
- EXPECT_EQ(tc_str2, tc2.ToString());
- EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ TraceConfig tc3(tc_str3);
+ EXPECT_EQ(tc_str3, tc3.ToString());
+ EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc2.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config_.triggers[0].level_of_detail);
+
+ std::string tc_str4 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
+ 1 /*heavy_period */);
+ TraceConfig tc4(tc_str4);
+ EXPECT_EQ(tc_str4, tc4.ToString());
+ ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+ tc4.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
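The exact JSON built by TraceConfigMemoryTestUtil lives in trace_config_memory_test_util.h and is not shown in this hunk. A rough sketch of the periodic-trigger config exercised above, assuming the field names simply mirror the accessors the test checks (min_time_between_dumps_ms, the dump mode and the trigger type), would be:

  // Hypothetical illustration only; the authoritative string comes from
  // TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000).
  const char kAssumedPeriodicMemoryInfraConfig[] = R"({
    "included_categories": ["disabled-by-default-memory-infra"],
    "memory_dump_config": {
      "heap_profiler_options": {"breakdown_threshold_bytes": 2048},
      "triggers": [
        {"min_time_between_dumps_ms": 200,  "mode": "light",    "type": "periodic_interval"},
        {"min_time_between_dumps_ms": 2000, "mode": "detailed", "type": "periodic_interval"}
      ]
    }
  })";
  base::trace_event::TraceConfig config(kAssumedPeriodicMemoryInfraConfig);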
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
deleted file mode 100644
index f915780de5..0000000000
--- a/base/trace_event/trace_event.gypi
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'trace_event_sources' : [
- 'trace_event/blame_context.cc',
- 'trace_event/blame_context.h',
- 'trace_event/common/trace_event_common.h',
- 'trace_event/heap_profiler.h',
- 'trace_event/heap_profiler_allocation_context.cc',
- 'trace_event/heap_profiler_allocation_context.h',
- 'trace_event/heap_profiler_allocation_context_tracker.cc',
- 'trace_event/heap_profiler_allocation_context_tracker.h',
- 'trace_event/heap_profiler_allocation_register.cc',
- 'trace_event/heap_profiler_allocation_register_posix.cc',
- 'trace_event/heap_profiler_allocation_register_win.cc',
- 'trace_event/heap_profiler_allocation_register.h',
- 'trace_event/heap_profiler_heap_dump_writer.cc',
- 'trace_event/heap_profiler_heap_dump_writer.h',
- 'trace_event/heap_profiler_stack_frame_deduplicator.cc',
- 'trace_event/heap_profiler_stack_frame_deduplicator.h',
- 'trace_event/heap_profiler_type_name_deduplicator.cc',
- 'trace_event/heap_profiler_type_name_deduplicator.h',
- 'trace_event/java_heap_dump_provider_android.cc',
- 'trace_event/java_heap_dump_provider_android.h',
- 'trace_event/memory_allocator_dump.cc',
- 'trace_event/memory_allocator_dump.h',
- 'trace_event/memory_allocator_dump_guid.cc',
- 'trace_event/memory_allocator_dump_guid.h',
- 'trace_event/memory_dump_manager.cc',
- 'trace_event/memory_dump_manager.h',
- 'trace_event/memory_dump_provider.h',
- 'trace_event/memory_dump_request_args.cc',
- 'trace_event/memory_dump_request_args.h',
- 'trace_event/memory_dump_session_state.cc',
- 'trace_event/memory_dump_session_state.h',
- 'trace_event/memory_infra_background_whitelist.cc',
- 'trace_event/memory_infra_background_whitelist.h',
- 'trace_event/process_memory_dump.cc',
- 'trace_event/process_memory_dump.h',
- 'trace_event/process_memory_maps.cc',
- 'trace_event/process_memory_maps.h',
- 'trace_event/process_memory_totals.cc',
- 'trace_event/process_memory_totals.h',
- 'trace_event/trace_buffer.cc',
- 'trace_event/trace_buffer.h',
- 'trace_event/trace_config.cc',
- 'trace_event/trace_config.h',
- 'trace_event/trace_event.h',
- 'trace_event/trace_event_android.cc',
- 'trace_event/trace_event_argument.cc',
- 'trace_event/trace_event_argument.h',
- 'trace_event/trace_event_etw_export_win.cc',
- 'trace_event/trace_event_etw_export_win.h',
- 'trace_event/trace_event_impl.cc',
- 'trace_event/trace_event_impl.h',
- 'trace_event/trace_event_memory_overhead.cc',
- 'trace_event/trace_event_memory_overhead.h',
- 'trace_event/trace_event_synthetic_delay.cc',
- 'trace_event/trace_event_synthetic_delay.h',
- 'trace_event/trace_event_system_stats_monitor.cc',
- 'trace_event/trace_event_system_stats_monitor.h',
- 'trace_event/trace_log.cc',
- 'trace_event/trace_log.h',
- 'trace_event/trace_log_constants.cc',
- 'trace_event/trace_sampling_thread.cc',
- 'trace_event/trace_sampling_thread.h',
- 'trace_event/tracing_agent.cc',
- 'trace_event/tracing_agent.h',
- 'trace_event/winheap_dump_provider_win.cc',
- 'trace_event/winheap_dump_provider_win.h',
- ],
- 'trace_event_test_sources' : [
- 'trace_event/blame_context_unittest.cc',
- 'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
- 'trace_event/heap_profiler_allocation_register_unittest.cc',
- 'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
- 'trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc',
- 'trace_event/heap_profiler_type_name_deduplicator_unittest.cc',
- 'trace_event/java_heap_dump_provider_android_unittest.cc',
- 'trace_event/memory_allocator_dump_unittest.cc',
- 'trace_event/memory_dump_manager_unittest.cc',
- 'trace_event/process_memory_dump_unittest.cc',
- 'trace_event/trace_config_memory_test_util.h',
- 'trace_event/trace_config_unittest.cc',
- 'trace_event/trace_event_argument_unittest.cc',
- 'trace_event/trace_event_synthetic_delay_unittest.cc',
- 'trace_event/trace_event_system_stats_monitor_unittest.cc',
- 'trace_event/trace_event_unittest.cc',
- 'trace_event/winheap_dump_provider_win_unittest.cc',
- ],
- 'conditions': [
- ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
- 'trace_event_sources': [
- 'trace_event/malloc_dump_provider.cc',
- 'trace_event/malloc_dump_provider.h',
- ],
- }],
- ['OS == "android"', {
- 'trace_event_test_sources' : [
- 'trace_event/trace_event_android_unittest.cc',
- ],
- }],
- ],
- },
-}
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index a075898269..51e6927cbd 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -19,6 +19,7 @@
#include "base/time/time.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_event_system_stats_monitor.h"
#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
@@ -28,55 +29,52 @@
#define TRACE_STR_COPY(str) \
trace_event_internal::TraceStringWithCopy(str)
-// By default, uint64_t ID argument values are not mangled with the Process ID
-// in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, uint64_t ID argument values are not mangled with the
+// Process ID in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID
+// mangling.
#define TRACE_ID_MANGLE(id) \
trace_event_internal::TraceID::ForceMangle(id)
-// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
-// macros. Use this macro to prevent Process ID mangling.
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, pointers are mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to prevent Process ID mangling.
#define TRACE_ID_DONT_MANGLE(id) \
trace_event_internal::TraceID::DontMangle(id)
// By default, trace IDs are eventually converted to a single 64-bit number. Use
-// this macro to add a scope string.
-#define TRACE_ID_WITH_SCOPE(scope, id) \
- trace_event_internal::TraceID::WithScope(scope, id)
-
-// Sets the current sample state to the given category and name (both must be
-// constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal:: \
- TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
-
-// Returns a current sampling state of the given bucket.
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
-
-// Creates a scope of a sampling state of the given bucket.
+// this macro to add a scope string. For example,
//
-// { // The sampling state is set within this scope.
-// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-// ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
- traceEventSamplingScope(category "\0" name);
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
+//
+// Also, it is possible to prepend the ID with another number, like the process
+// ID. This is useful in creating IDs that are unique among all processes. To do
+// that, pass two numbers after the scope string instead of one. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+ trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
+
+#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
+#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
#define TRACE_EVENT_API_CURRENT_THREAD_ID \
static_cast<int>(base::PlatformThread::CurrentId())
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
- (base::trace_event::TraceLog::ENABLED_FOR_RECORDING | \
- base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK | \
- base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT))
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED() \
+ UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
+ base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -204,13 +202,6 @@
// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
-// The thread buckets for the sampling profiler.
-TRACE_EVENT_API_CLASS_EXPORT extern \
- TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
-#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
- g_trace_state[thread_bucket]
-
////////////////////////////////////////////////////////////////////////////////
// Implementation detail: trace event macros create temporary variables
@@ -249,69 +240,69 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- base::trace_event::TraceEventHandle h = \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
- ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
- }
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+ ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
- category_group, name, bind_id, flow_flags, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flow_flags; \
- trace_event_internal::TraceID trace_event_bind_id(bind_id, \
- &trace_event_flags); \
- base::trace_event::TraceEventHandle h = \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_bind_id((bind_id)); \
+ unsigned int trace_event_flags = \
+ flow_flags | trace_event_bind_id.id_flags(); \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
- flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
- trace_event_internal::TraceID trace_event_trace_id( \
- id, &trace_event_flags); \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
@@ -319,12 +310,11 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, \
- base::TimeTicks::FromInternalValue(timestamp), \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
@@ -332,33 +322,50 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- phase, category_group, name, id, thread_id, timestamp, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
- trace_event_internal::TraceID trace_event_trace_id(id, \
- &trace_event_flags); \
- trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- thread_id, base::TimeTicks::FromInternalValue(timestamp), \
- trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
- trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ thread_id, timestamp, \
+ trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// The linked ID will not be mangled.
+#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID source_id((id1)); \
+ unsigned int source_flags = source_id.id_flags(); \
+ trace_event_internal::TraceID target_id((id2)); \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_LINK_IDS, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ source_id.scope(), source_id.raw_id(), source_flags, \
+ trace_event_internal::kNoId, "linked_id", \
+ target_id.AsConvertableToTraceFormat()); \
+ } \
} while (0)
// Implementation detail: internal macro to create static category and add
// metadata event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- TRACE_EVENT_API_ADD_METADATA_EVENT( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ TRACE_EVENT_API_ADD_METADATA_EVENT( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ ##__VA_ARGS__); \
+ } \
} while (0)
// Implementation detail: internal macro to enter and leave a
@@ -381,7 +388,7 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}; \
}; \
INTERNAL_TRACE_EVENT_UID(ScopedContext) \
- INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
// Implementation detail: internal macro to trace a task execution with the
// location where it was posted from.
@@ -403,19 +410,64 @@ const unsigned long long kNoId = 0;
// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
// are by default mangled with the Process ID so that they are unlikely to
// collide when the same pointer is used on different processes.
-class TraceID {
+class BASE_EXPORT TraceID {
public:
+ // Can be combined with WithScope.
+ class LocalId {
+ public:
+ explicit LocalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+ unsigned long long raw_id() const { return raw_id_; }
+ private:
+ unsigned long long raw_id_;
+ };
+
+ // Can be combined with WithScope.
+ class GlobalId {
+ public:
+ explicit GlobalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+ unsigned long long raw_id() const { return raw_id_; }
+ private:
+ unsigned long long raw_id_;
+ };
+
class WithScope {
public:
WithScope(const char* scope, unsigned long long raw_id)
: scope_(scope), raw_id_(raw_id) {}
+ WithScope(const char* scope, LocalId local_id)
+ : scope_(scope), raw_id_(local_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+ }
+ WithScope(const char* scope, GlobalId global_id)
+ : scope_(scope), raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
+ WithScope(const char* scope,
+ unsigned long long prefix,
+ unsigned long long raw_id)
+ : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+ WithScope(const char* scope, unsigned long long prefix, GlobalId global_id)
+ : scope_(scope),
+ has_prefix_(true),
+ prefix_(prefix),
+ raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
+ unsigned int id_flags() const { return id_flags_; }
+
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
+ unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
+  // DEPRECATED: consider using LocalId or GlobalId instead.
class DontMangle {
public:
explicit DontMangle(const void* raw_id)
@@ -436,15 +488,12 @@ class TraceID {
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
explicit DontMangle(signed char raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
- explicit DontMangle(WithScope scoped_id)
- : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
- const char* scope() const { return scope_; }
unsigned long long raw_id() const { return raw_id_; }
private:
- const char* scope_ = nullptr;
unsigned long long raw_id_;
};
+  // DEPRECATED: consider using LocalId or GlobalId instead.
class ForceMangle {
public:
explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
@@ -466,50 +515,58 @@ class TraceID {
private:
unsigned long long raw_id_;
};
- TraceID(const void* raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(
- reinterpret_cast<uintptr_t>(raw_id))) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
- : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
- TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
- }
- TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+
+ TraceID(const void* raw_id) : raw_id_(static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(raw_id))) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(ForceMangle raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(DontMangle raw_id) : raw_id_(raw_id.raw_id()) {}
+ TraceID(unsigned long long raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned long raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned int raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned short raw_id) : raw_id_(raw_id) {}
+ TraceID(unsigned char raw_id) : raw_id_(raw_id) {}
+ TraceID(long long raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(long raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(int raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(short raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(signed char raw_id)
+ : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+ TraceID(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
}
- TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
- (void)flags;
+ TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
- TraceID(long long raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(long raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(int raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(short raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(signed char raw_id, unsigned int* flags)
- : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
- TraceID(WithScope scoped_id, unsigned int* /*flags*/)
- : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+ TraceID(WithScope scoped_id)
+ : scope_(scoped_id.scope()),
+ has_prefix_(scoped_id.has_prefix()),
+ prefix_(scoped_id.prefix()),
+ raw_id_(scoped_id.raw_id()),
+ id_flags_(scoped_id.id_flags()) {}
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
+ unsigned int id_flags() const { return id_flags_; }
+
+ std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+ AsConvertableToTraceFormat() const;
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
+ unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
// Simple union to store various types as unsigned long long.
@@ -973,9 +1030,10 @@ class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
ScopedTracer() : p_data_(NULL) {}
~ScopedTracer() {
- if (p_data_ && *data_.category_group_enabled)
+ if (p_data_ && *data_.category_group_enabled) {
TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
data_.category_group_enabled, data_.name, data_.event_handle);
+ }
}
void Initialize(const unsigned char* category_group_enabled,
@@ -1023,37 +1081,6 @@ class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
trace_event_internal::ScopedTraceBinaryEfficient \
INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
-// TraceEventSamplingStateScope records the current sampling state
-// and sets a new sampling state. When the scope exists, it restores
-// the sampling state having recorded.
-template<size_t BucketNumber>
-class TraceEventSamplingStateScope {
- public:
- TraceEventSamplingStateScope(const char* category_and_name) {
- previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
- TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
- }
-
- ~TraceEventSamplingStateScope() {
- TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
- }
-
- static inline const char* Current() {
- return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
- g_trace_state[BucketNumber]));
- }
-
- static inline void Set(const char* category_and_name) {
- TRACE_EVENT_API_ATOMIC_STORE(
- g_trace_state[BucketNumber],
- reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
- const_cast<char*>(category_and_name)));
- }
-
- private:
- const char* previous_state_;
-};
-
} // namespace trace_event_internal
namespace base {
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index 336d964bff..db702b6231 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -244,36 +244,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
SetBooleanWithCopiedName(name, bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
SetIntegerWithCopiedName(name, int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
SetDoubleWithCopiedName(name, double_value);
} break;
- case base::Value::TYPE_STRING: {
- const StringValue* string_value;
+ case base::Value::Type::STRING: {
+ const Value* string_value;
value.GetAsString(&string_value);
SetStringWithCopiedName(name, string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionaryWithCopiedName(name);
@@ -284,7 +284,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
@@ -298,36 +298,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
void TracedValue::AppendBaseValue(const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
AppendBoolean(bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
AppendInteger(int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
AppendDouble(double_value);
} break;
- case base::Value::TYPE_STRING: {
- const StringValue* string_value;
+ case base::Value::Type::STRING: {
+ const Value* string_value;
value.GetAsString(&string_value);
AppendString(string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionary();
@@ -338,7 +338,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index 61395f4d55..aef8441c8e 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -97,9 +97,9 @@ TEST(TraceEventArgumentTest, LongStrings) {
}
TEST(TraceEventArgumentTest, PassBaseValue) {
- FundamentalValue int_value(42);
- FundamentalValue bool_value(true);
- FundamentalValue double_value(42.0f);
+ Value int_value(42);
+ Value bool_value(true);
+ Value double_value(42.0f);
auto dict_value = WrapUnique(new DictionaryValue);
dict_value->SetBoolean("bool", true);
@@ -131,10 +131,10 @@ TEST(TraceEventArgumentTest, PassBaseValue) {
}
TEST(TraceEventArgumentTest, PassTracedValue) {
- auto dict_value = WrapUnique(new TracedValue());
+ auto dict_value = MakeUnique<TracedValue>();
dict_value->SetInteger("a", 1);
- auto nested_dict_value = WrapUnique(new TracedValue());
+ auto nested_dict_value = MakeUnique<TracedValue>();
nested_dict_value->SetInteger("b", 2);
nested_dict_value->BeginArray("c");
nested_dict_value->AppendString("foo");
diff --git a/base/trace_event/trace_event_filter.cc b/base/trace_event/trace_event_filter.cc
new file mode 100644
index 0000000000..d50c5fe251
--- /dev/null
+++ b/base/trace_event/trace_event_filter.cc
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+TraceEventFilter::TraceEventFilter() {}
+TraceEventFilter::~TraceEventFilter() {}
+
+void TraceEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {
+ ALLOW_UNUSED_PARAM(category_name);
+ ALLOW_UNUSED_PARAM(event_name);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_event_filter.h b/base/trace_event/trace_event_filter.h
new file mode 100644
index 0000000000..48c6711432
--- /dev/null
+++ b/base/trace_event/trace_event_filter.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
+// enabled on a per-category basis, hence a single filter instance can serve
+// more than a TraceCategory. There are two use cases for filters:
+// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
+// possible by setting the ENABLED_FOR_FILTERING flag on a category w/o
+// ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
+// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
+// requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
+// on the category.
+// More importantly, filters must be thread-safe. The FilterTraceEvent and
+// EndEvent methods can be called concurrently as trace macros are hit on
+// different threads.
+class BASE_EXPORT TraceEventFilter {
+ public:
+ TraceEventFilter();
+ virtual ~TraceEventFilter();
+
+ // If the category is ENABLED_FOR_RECORDING, the event is added iff all the
+  // filters enabled for the category return true; returning false causes the
+  // event to be discarded.
+ virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
+
+ // Notifies the end of a duration event when the RAII macro goes out of scope.
+ virtual void EndEvent(const char* category_name,
+ const char* event_name) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
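A minimal concrete filter against this interface might look like the sketch below. It is illustrative only (the in-tree examples are EventNameFilter, HeapProfilerEventFilter and the TestEventFilter added in the next file) and assumes TraceEvent exposes a name() accessor:

  #include <cstring>

  #include "base/trace_event/trace_event_filter.h"
  #include "base/trace_event/trace_event_impl.h"

  class ExampleNameFilter : public base::trace_event::TraceEventFilter {
   public:
    bool FilterTraceEvent(
        const base::trace_event::TraceEvent& trace_event) const override {
      // Returning false discards the event when the category is also
      // ENABLED_FOR_RECORDING; with ENABLED_FOR_FILTERING alone the filter
      // merely observes ("snoops") the event.
      return std::strcmp(trace_event.name(), "InterestingEvent") == 0;
    }
  };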
diff --git a/base/trace_event/trace_event_filter_test_utils.cc b/base/trace_event/trace_event_filter_test_utils.cc
new file mode 100644
index 0000000000..06548b049a
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter_test_utils.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+TestEventFilter::HitsCounter* g_hits_counter;
+}  // namespace
+
+// static
+const char TestEventFilter::kName[] = "testing_predicate";
+bool TestEventFilter::filter_return_value_;
+
+// static
+std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
+ const std::string& predicate_name) {
+ std::unique_ptr<TraceEventFilter> res;
+ if (predicate_name == kName)
+ res.reset(new TestEventFilter());
+ return res;
+}
+
+TestEventFilter::TestEventFilter() {}
+TestEventFilter::~TestEventFilter() {}
+
+bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ if (g_hits_counter)
+ g_hits_counter->filter_trace_event_hit_count++;
+ return filter_return_value_;
+}
+
+void TestEventFilter::EndEvent(const char* category_name,
+ const char* name) const {
+ if (g_hits_counter)
+ g_hits_counter->end_event_hit_count++;
+}
+
+TestEventFilter::HitsCounter::HitsCounter() {
+ Reset();
+ DCHECK(!g_hits_counter);
+ g_hits_counter = this;
+}
+
+TestEventFilter::HitsCounter::~HitsCounter() {
+ DCHECK(g_hits_counter);
+ g_hits_counter = nullptr;
+}
+
+void TestEventFilter::HitsCounter::Reset() {
+ filter_trace_event_hit_count = 0;
+ end_event_hit_count = 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_event_filter_test_utils.h b/base/trace_event/trace_event_filter_test_utils.h
new file mode 100644
index 0000000000..419068b221
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TestEventFilter : public TraceEventFilter {
+ public:
+ struct HitsCounter {
+ HitsCounter();
+ ~HitsCounter();
+ void Reset();
+ size_t filter_trace_event_hit_count;
+ size_t end_event_hit_count;
+ };
+
+ static const char kName[];
+
+ // Factory method for TraceLog::SetFilterFactoryForTesting().
+ static std::unique_ptr<TraceEventFilter> Factory(
+ const std::string& predicate_name);
+
+ TestEventFilter();
+ ~TestEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name, const char* name) const override;
+
+ static void set_filter_return_value(bool value) {
+ filter_return_value_ = value;
+ }
+
+ private:
+ static bool filter_return_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
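A sketch of how these helpers are expected to be wired up in a test, assuming TraceLog::SetFilterFactoryForTesting() (referenced above) accepts this factory and that the enabling TraceConfig lists an event filter whose predicate name is TestEventFilter::kName; the JSON keys below are an assumption, not taken from this change:

  // Hypothetical usage inside a TraceEventTestFixture-style test body.
  TestEventFilter::HitsCounter hits;
  TestEventFilter::set_filter_return_value(true);
  TraceLog::GetInstance()->SetFilterFactoryForTesting(&TestEventFilter::Factory);

  TraceLog::GetInstance()->SetEnabled(
      TraceConfig(R"({"event_filters": [{
          "filter_predicate": "testing_predicate",
          "included_categories": ["filtered_cat"]}]})"),
      TraceLog::FILTERING_MODE);

  TRACE_EVENT_INSTANT0("filtered_cat", "event", TRACE_EVENT_SCOPE_THREAD);
  TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
  EXPECT_EQ(1u, hits.filter_trace_event_hit_count);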
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index f469f2f6bc..cb23eb474c 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -8,6 +8,7 @@
#include "base/format_macros.h"
#include "base/json/string_escape.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
@@ -15,6 +16,7 @@
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_log.h"
namespace base {
@@ -358,10 +360,33 @@ void TraceEvent::AppendAsJSON(
   // If id_ is set, print it out as a hex string so we don't lose any
// bits (it might be a 64-bit pointer).
- if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
+ unsigned int id_flags_ = flags_ & (TRACE_EVENT_FLAG_HAS_ID |
+ TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+ TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
+ if (id_flags_) {
if (scope_ != trace_event_internal::kGlobalScope)
StringAppendF(out, ",\"scope\":\"%s\"", scope_);
- StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
+
+ switch (id_flags_) {
+ case TRACE_EVENT_FLAG_HAS_ID:
+ StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"",
+ static_cast<uint64_t>(id_));
+ break;
+
+ case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
+ StringAppendF(out, ",\"id2\":{\"local\":\"0x%" PRIx64 "\"}",
+ static_cast<uint64_t>(id_));
+ break;
+
+ case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
+ StringAppendF(out, ",\"id2\":{\"global\":\"0x%" PRIx64 "\"}",
+ static_cast<uint64_t>(id_));
+ break;
+
+ default:
+ NOTREACHED() << "More than one of the ID flags are set";
+ break;
+ }
}
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
@@ -424,3 +449,42 @@ void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
} // namespace trace_event
} // namespace base
+
+namespace trace_event_internal {
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TraceID::AsConvertableToTraceFormat() const {
+ auto value = base::MakeUnique<base::trace_event::TracedValue>();
+
+ if (scope_ != kGlobalScope)
+ value->SetString("scope", scope_);
+
+ const char* id_field_name = "id";
+ if (id_flags_ == TRACE_EVENT_FLAG_HAS_GLOBAL_ID) {
+ id_field_name = "global";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ == TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
+ id_field_name = "local";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID) {
+ NOTREACHED() << "Unrecognized ID flag";
+ }
+
+ if (has_prefix_) {
+ value->SetString(id_field_name,
+ base::StringPrintf("0x%" PRIx64 "/0x%" PRIx64,
+ static_cast<uint64_t>(prefix_),
+ static_cast<uint64_t>(raw_id_)));
+ } else {
+ value->SetString(
+ id_field_name,
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ }
+
+ if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID)
+ value->EndDictionary();
+
+ return std::move(value);
+}
+
+} // namespace trace_event_internal
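In trace JSON output, the three ID flavors handled above therefore serialize as follows (the concrete values match the expectations added to trace_event_unittest.cc later in this change):

  // TRACE_EVENT_FLAG_HAS_ID (plain or scoped id):
  //   "id":"0x1000"                       (plus "scope":"..." if one was given)
  // TRACE_EVENT_FLAG_HAS_LOCAL_ID:
  //   "id2":{"local":"0x2000"}
  // TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
  //   "id2":{"global":"0x2000"}
  // With a prefix, e.g. TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000):
  //   "scope":"scope 1", ... "id":"0x2000/0x3000"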
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 4382217881..5eef702fb9 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -23,16 +23,11 @@
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "build/build_config.h"
namespace base {
-
-class WaitableEvent;
-class MessageLoop;
-
namespace trace_event {
typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
index 23579cbb22..8d56e1d80e 100644
--- a/base/trace_event/trace_event_memory_overhead.cc
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -69,27 +69,27 @@ void TraceEventMemoryOverhead::AddRefCountedString(
void TraceEventMemoryOverhead::AddValue(const Value& value) {
switch (value.GetType()) {
- case Value::TYPE_NULL:
- case Value::TYPE_BOOLEAN:
- case Value::TYPE_INTEGER:
- case Value::TYPE_DOUBLE:
+ case Value::Type::NONE:
+ case Value::Type::BOOLEAN:
+ case Value::Type::INTEGER:
+ case Value::Type::DOUBLE:
Add("FundamentalValue", sizeof(Value));
break;
- case Value::TYPE_STRING: {
- const StringValue* string_value = nullptr;
+ case Value::Type::STRING: {
+ const Value* string_value = nullptr;
value.GetAsString(&string_value);
- Add("StringValue", sizeof(StringValue));
+ Add("StringValue", sizeof(Value));
AddString(string_value->GetString());
} break;
- case Value::TYPE_BINARY: {
+ case Value::Type::BINARY: {
const BinaryValue* binary_value = nullptr;
value.GetAsBinary(&binary_value);
Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
} break;
- case Value::TYPE_DICTIONARY: {
+ case Value::Type::DICTIONARY: {
const DictionaryValue* dictionary_value = nullptr;
value.GetAsDictionary(&dictionary_value);
Add("DictionaryValue", sizeof(DictionaryValue));
@@ -100,7 +100,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
}
} break;
- case Value::TYPE_LIST: {
+ case Value::Type::LIST: {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
diff --git a/base/trace_event/trace_event_synthetic_delay.h b/base/trace_event/trace_event_synthetic_delay.h
index 59e2842f71..e86f9eee2c 100644
--- a/base/trace_event/trace_event_synthetic_delay.h
+++ b/base/trace_event/trace_event_synthetic_delay.h
@@ -62,9 +62,6 @@
trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End(); \
} while (false)
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
namespace trace_event {
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index ff8ec2de78..82a552aa4e 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -18,6 +18,7 @@
#include "base/json/json_writer.h"
#include "base/location.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/process/process_handle.h"
@@ -29,7 +30,12 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "base/trace_event/event_name_filter.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_filter.h"
+#include "base/trace_event/trace_event_filter_test_utils.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -67,9 +73,6 @@ class TraceEventTestFixture : public testing::Test {
WaitableEvent* flush_complete_event,
const scoped_refptr<base::RefCountedString>& events_str,
bool has_more_events);
- void OnWatchEventMatched() {
- ++event_watch_notification_;
- }
DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
DictionaryValue* FindNamePhase(const char* name, const char* phase);
DictionaryValue* FindNamePhaseKeyValue(const char* name,
@@ -91,7 +94,6 @@ class TraceEventTestFixture : public testing::Test {
}
void BeginSpecificTrace(const std::string& filter) {
- event_watch_notification_ = 0;
TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
TraceLog::RECORDING_MODE);
}
@@ -135,7 +137,8 @@ class TraceEventTestFixture : public testing::Test {
}
void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE |
+ TraceLog::FILTERING_MODE);
TraceLog::GetInstance()->Flush(
base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
base::Unretained(static_cast<TraceEventTestFixture*>(this)),
@@ -151,7 +154,6 @@ class TraceEventTestFixture : public testing::Test {
ASSERT_TRUE(tracelog);
ASSERT_FALSE(tracelog->IsEnabled());
trace_buffer_.SetOutputCallback(json_output_.GetCallback());
- event_watch_notification_ = 0;
num_flush_callbacks_ = 0;
}
void TearDown() override {
@@ -168,7 +170,6 @@ class TraceEventTestFixture : public testing::Test {
ListValue trace_parsed_;
TraceResultBuffer trace_buffer_;
TraceResultBuffer::SimpleOutput json_output_;
- int event_watch_notification_;
size_t num_flush_callbacks_;
private:
@@ -263,7 +264,7 @@ DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
for (size_t i = 0; i < trace_parsed_count; i++) {
Value* value = NULL;
trace_parsed_.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
DictionaryValue* dict = static_cast<DictionaryValue*>(value);
@@ -281,7 +282,7 @@ void TraceEventTestFixture::DropTracedMetadataRecords() {
for (size_t i = 0; i < old_trace_parsed_size; i++) {
Value* value = nullptr;
old_trace_parsed->Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
+ if (!value || value->GetType() != Value::Type::DICTIONARY) {
trace_parsed_.Append(value->CreateDeepCopy());
continue;
}
@@ -370,7 +371,7 @@ const DictionaryValue* FindTraceEntry(
match_after_this_item = NULL;
continue;
}
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -388,7 +389,7 @@ std::vector<const DictionaryValue*> FindTraceEntries(
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -460,9 +461,10 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
"b", 1415);
TRACE_COUNTER_WITH_TIMESTAMP1("all", "TRACE_COUNTER_WITH_TIMESTAMP1 call",
- 42, 31415);
+ TimeTicks::FromInternalValue(42), 31415);
TRACE_COUNTER_WITH_TIMESTAMP2("all", "TRACE_COUNTER_WITH_TIMESTAMP2 call",
- 42, "a", 30000, "b", 1415);
+ TimeTicks::FromInternalValue(42),
+ "a", 30000, "b", 1415);
TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
@@ -470,14 +472,14 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId, kThreadId, 12345);
+ kAsyncId, kThreadId, TimeTicks::FromInternalValue(12345));
TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId, kThreadId, 23456);
+ kAsyncId, kThreadId, TimeTicks::FromInternalValue(23456));
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId2, kThreadId, 34567);
+ kAsyncId2, kThreadId, TimeTicks::FromInternalValue(34567));
TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
kAsyncId2, "step_end1");
TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
@@ -485,7 +487,7 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
"TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
- kAsyncId2, kThreadId, 45678);
+ kAsyncId2, kThreadId, TimeTicks::FromInternalValue(45678));
TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
@@ -517,6 +519,24 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
context_id);
TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
context_id);
+
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS scoped call",
+ TRACE_ID_WITH_SCOPE("scope 1", 0x1000),
+ TRACE_ID_WITH_SCOPE("scope 2", 0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
+ TRACE_ID_LOCAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
+ TRACE_ID_GLOBAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a composite ID", 0x1000,
+ TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000));
+
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async global id", TRACE_ID_GLOBAL(0x3000));
+ TRACE_EVENT_ASYNC_BEGIN0("all", "async global id with scope string",
+ TRACE_ID_WITH_SCOPE("scope string",
+ TRACE_ID_GLOBAL(0x4000)));
} // Scope close causes TRACE_EVENT0 etc to send their END events.
if (task_complete_event)
@@ -957,6 +977,144 @@ void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ("0x20151021", id);
}
+
+ EXPECT_FIND_("TRACE_LINK_IDS simple call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS scoped call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ std::string scope1;
+ EXPECT_TRUE((item && item->GetString("scope", &scope1)));
+ EXPECT_EQ("scope 1", scope1);
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ std::string scope2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.scope", &scope2)));
+ EXPECT_EQ("scope 2", scope2);
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a local ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.local", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a global ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.global", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a composite ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE(item->HasKey("scope"));
+ std::string id1;
+ EXPECT_TRUE(item->GetString("id", &id1));
+ EXPECT_EQ("0x1000", id1);
+
+ std::string scope;
+ EXPECT_TRUE(item->GetString("args.linked_id.scope", &scope));
+ EXPECT_EQ("scope 1", scope);
+ std::string id2;
+ EXPECT_TRUE(item->GetString("args.linked_id.id", &id2));
+ EXPECT_EQ(id2, "0x2000/0x3000");
+ }
+
+ EXPECT_FIND_("async default process scope");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ("0x1000", id);
+ }
+
+ EXPECT_FIND_("async local id");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.local", &id)));
+ EXPECT_EQ("0x2000", id);
+ }
+
+ EXPECT_FIND_("async global id");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+ EXPECT_EQ("0x3000", id);
+ }
+
+ EXPECT_FIND_("async global id with scope string");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("S", ph);
+
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+ EXPECT_EQ("0x4000", id);
+ std::string scope;
+ EXPECT_TRUE((item && item->GetString("scope", &scope)));
+ EXPECT_EQ("scope string", scope);
+ }
}
void TraceManyInstantEvents(int thread_id, int num_events,
@@ -981,7 +1139,7 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
std::string name;
@@ -1430,59 +1588,6 @@ TEST_F(TraceEventTestFixture, Categories) {
}
-// Test EVENT_WATCH_NOTIFICATION
-TEST_F(TraceEventTestFixture, EventWatchNotification) {
- // Basic one occurrence.
- BeginTrace();
- TraceLog::WatchEventCallback callback =
- base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
- base::Unretained(this));
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 1);
-
- // Auto-reset after end trace.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- EndTraceAndFlush();
- BeginTrace();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Multiple occurrence.
- BeginTrace();
- int num_occurrences = 5;
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- for (int i = 0; i < num_occurrences; ++i)
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, num_occurrences);
-
- // Wrong category.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Wrong name.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Canceled.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TraceLog::GetInstance()->CancelWatchEvent();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-}
-
// Test ASYNC_BEGIN/END events
TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
BeginTrace();
@@ -2053,55 +2158,6 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
trace_log->SetDisabled();
}
-TEST_F(TraceEventTestFixture, TraceSampling) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
- EndTraceAndFlush();
-
- // Make sure we hit at least once.
- EXPECT_TRUE(FindNamePhase("Stuff", "P"));
- EXPECT_TRUE(FindNamePhase("Things", "P"));
-}
-
-TEST_F(TraceEventTestFixture, TraceSamplingScope) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
-
- EndTraceAndFlush();
-}
class MyData : public ConvertableToTraceFormat {
public:
@@ -2290,7 +2346,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_one", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(1, double_value);
@@ -2300,7 +2356,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_half", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(0.5, double_value);
@@ -2310,7 +2366,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(-0.5, double_value);
@@ -2480,233 +2536,6 @@ TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
EXPECT_EQ(args_string, "__stripped__");
}
-class TraceEventCallbackTest : public TraceEventTestFixture {
- public:
- void SetUp() override {
- TraceEventTestFixture::SetUp();
- ASSERT_EQ(NULL, s_instance);
- s_instance = this;
- }
- void TearDown() override {
- TraceLog::GetInstance()->SetDisabled();
- ASSERT_TRUE(s_instance);
- s_instance = NULL;
- TraceEventTestFixture::TearDown();
- }
-
- protected:
- // For TraceEventCallbackAndRecordingX tests.
- void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
- size_t expected_recorded_count) {
- // Callback events.
- EXPECT_EQ(expected_callback_count, collected_events_names_.size());
- for (size_t i = 0; i < collected_events_names_.size(); ++i) {
- EXPECT_EQ("callback", collected_events_categories_[i]);
- EXPECT_EQ("yes", collected_events_names_[i]);
- }
-
- // Recorded events.
- EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
- }
-
- void VerifyCollectedEvent(size_t i,
- unsigned phase,
- const std::string& category,
- const std::string& name) {
- EXPECT_EQ(phase, collected_events_phases_[i]);
- EXPECT_EQ(category, collected_events_categories_[i]);
- EXPECT_EQ(name, collected_events_names_[i]);
- }
-
- std::vector<std::string> collected_events_categories_;
- std::vector<std::string> collected_events_names_;
- std::vector<unsigned char> collected_events_phases_;
- std::vector<TimeTicks> collected_events_timestamps_;
-
- static TraceEventCallbackTest* s_instance;
- static void Callback(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags) {
- s_instance->collected_events_phases_.push_back(phase);
- s_instance->collected_events_categories_.push_back(
- TraceLog::GetCategoryGroupName(category_group_enabled));
- s_instance->collected_events_names_.push_back(name);
- s_instance->collected_events_timestamps_.push_back(timestamp);
- }
-};
-
-TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
-
-TEST_F(TraceEventCallbackTest, TraceEventCallback) {
- TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
- {
- TRACE_EVENT0("all", "duration");
- TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("all", "after callback removed",
- TRACE_EVENT_SCOPE_GLOBAL);
- ASSERT_EQ(5u, collected_events_names_.size());
- EXPECT_EQ("event1", collected_events_names_[0]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
- EXPECT_EQ("event2", collected_events_names_[1]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
- EXPECT_EQ("duration", collected_events_names_[2]);
- EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
- EXPECT_EQ("event3", collected_events_names_[3]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
- EXPECT_EQ("duration", collected_events_names_[4]);
- EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
- for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
- EXPECT_LE(collected_events_timestamps_[i - 1],
- collected_events_timestamps_[i]);
- }
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
- TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
- TraceLog::RECORDING_MODE);
- do {
- TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
- } while (!TraceLog::GetInstance()->BufferIsFull());
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- ASSERT_EQ(1u, collected_events_names_.size());
- EXPECT_EQ("a snake", collected_events_names_[0]);
-}
-
-// 1: Enable callback, enable recording, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-// 2: Enable callback, enable recording, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(3, 1);
-}
-
-// 3: Enable recording, enable callback, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(1, 3);
-}
-
-// 4: Enable recording, enable callback, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- {
- TRACE_EVENT0("callback", "duration1");
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
- TRACE_EVENT0("callback", "duration2");
- EndTraceAndFlush();
- TRACE_EVENT0("callback", "duration3");
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
-
- ASSERT_EQ(6u, collected_events_names_.size());
- VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
- VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
- VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
- VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
- VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
- VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
-}
-
TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
TraceLog* trace_log = TraceLog::GetInstance();
trace_log->SetEnabled(
@@ -2715,9 +2544,9 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
TraceBuffer::CreateTraceBufferVectorOfSize(100));
do {
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
} while (!trace_log->BufferIsFull());
EndTraceAndFlush();
@@ -2928,29 +2757,9 @@ TEST_F(TraceEventTestFixture, ConvertTraceConfigToInternalOptions) {
trace_log->GetInternalOptionsFromTraceConfig(
TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
- EXPECT_EQ(
- TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-until-full,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-continuously,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "trace-to-console,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig("*",
- "trace-to-console,enable-sampling,enable-systrace")));
+ EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+ trace_log->GetInternalOptionsFromTraceConfig(
+ TraceConfig("*", "trace-to-console,enable-systrace")));
}
void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
@@ -3109,9 +2918,9 @@ TEST_F(TraceEventTestFixture, TimeOffset) {
TRACE_EVENT0("all", "duration2");
}
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
- "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+ "all", "with_timestamp", 0, 0, TimeTicks::Now());
EndTraceAndFlush();
DropTracedMetadataRecords();
@@ -3173,6 +2982,213 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
+TEST_F(TraceEventTestFixture, TraceFilteringMode) {
+ const char config_json[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"testing_predicate\", "
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
+ // Run RECORDING_MODE within FILTERING_MODE:
+ TestEventFilter::HitsCounter filter_hits_counter;
+ TestEventFilter::set_filter_return_value(true);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+ // Only filtering mode is enabled with test filters.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ void* ptr = this;
+ TRACE_EVENT0("c0", "name0");
+ TRACE_EVENT_ASYNC_BEGIN0("c1", "name1", ptr);
+ TRACE_EVENT_INSTANT0("c0", "name0", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_ASYNC_END0("c1", "name1", ptr);
+ }
+
+ // Recording mode is now enabled while filtering mode is still on.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+ // Only recording mode is disabled; filtering mode continues to run.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+ // Filtering mode is disabled; no tracing mode should remain enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_FALSE(FindMatchingValue("cat", "c0"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name0"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_EQ(6u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(3u, filter_hits_counter.end_event_hit_count);
+ Clear();
+ filter_hits_counter.Reset();
+
+ // Run FILTERING_MODE within RECORDING_MODE:
+ // Only recording mode is enabled and all events must be recorded.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+
+ // Filtering mode is also enabled and all events must be filtered-out.
+ TestEventFilter::set_filter_return_value(false);
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c1", "name1");
+ }
+ // Only filtering mode is disabled; recording mode continues to run and
+ // records all events.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+ // Recording mode is disabled; no tracing mode should remain enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "c0"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name0"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_EQ(1u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+ Clear();
+}
+
+TEST_F(TraceEventTestFixture, EventFiltering) {
+ const char config_json[] =
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"testing_predicate\", "
+ " \"included_categories\": [\"filtered_cat\"]"
+ " }"
+ " "
+ " ]"
+ "}";
+
+ TestEventFilter::HitsCounter filter_hits_counter;
+ TestEventFilter::set_filter_return_value(true);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ ASSERT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("filtered_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a horse");
+
+ // This is scoped so we can test the end event being filtered.
+ { TRACE_EVENT0("filtered_cat", "another cat whoa"); }
+
+ EndTraceAndFlush();
+
+ EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+}
+
+TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
+ std::string config_json = StringPrintf(
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"%s\", "
+ " \"included_categories\": [\"*\"], "
+ " \"excluded_categories\": [\"unfiltered_cat\"], "
+ " \"filter_args\": {"
+ " \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
+ " }"
+ " }"
+ " "
+ " ]"
+ "}",
+ EventNameFilter::kName);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("filtered_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a cat");
+
+ EndTraceAndFlush();
+
+ EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+ EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+ EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+}
+
+TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
+ std::string config_json = StringPrintf(
+ "{"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"unfiltered_cat\"],"
+ " \"excluded_categories\": [\"excluded_cat\"],"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"%s\", "
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}",
+ HeapProfilerEventFilter::kName);
+
+ TraceConfig trace_config(config_json);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TRACE_EVENT0("filtered_cat", "a snake");
+ TRACE_EVENT0("excluded_cat", "a mushroom");
+ TRACE_EVENT0("unfiltered_cat", "a cat");
+
+ EndTraceAndFlush();
+
+ // The predicate should not change the behavior of the trace events.
+ EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+ EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+ EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+}
+
TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
BeginSpecificTrace("-*");
TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 12cebc6f65..10b090ae57 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -13,41 +13,43 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
+#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+// post_task.h pulls in a lot of code not needed on Arc++.
+#if 0
+#include "base/task_scheduler/post_task.h"
+#endif
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "base/threading/worker_pool.h"
#include "base/time/time.h"
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
-#include "base/trace_event/trace_sampling_thread.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif
-// The thread buckets for the sampling profiler.
-BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
namespace base {
namespace internal {
@@ -86,35 +88,13 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 200
-
-// Parallel arrays g_category_groups and g_category_group_enabled are separate
-// so that a pointer to a member of g_category_group_enabled can be easily
-// converted to an index into g_category_groups. This allows macros to deal
-// only with char enabled pointers from g_category_group_enabled, and we can
-// convert internally to determine the category name from the char enabled
-// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
- "tracing already shutdown",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
- "__metadata"};
-
-// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
-// Indexes here have to match the g_category_groups array indexes above.
-const int g_category_already_shutdown = 1;
-const int g_category_categories_exhausted = 2;
-const int g_category_metadata = 3;
-const int g_num_builtin_categories = 4;
-// Skip default categories.
-base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
-
-// The name of the current thread. This is used to decide if the current
-// thread name has changed. We combine all the seen thread names into the
-// output name for the thread.
-LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
- LAZY_INSTANCE_INITIALIZER;
+#define MAX_TRACE_EVENT_FILTERS 32
+
+// List of TraceEventFilter objects from the most recent tracing session.
+std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
+ static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
+ return *filters;
+}
ThreadTicks ThreadNow() {
return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
@@ -138,7 +118,7 @@ void InitializeMetadataEvent(TraceEvent* trace_event,
TimeTicks(),
ThreadTicks(),
TRACE_EVENT_PHASE_METADATA,
- &g_category_group_enabled[g_category_metadata],
+ CategoryRegistry::kCategoryMetadata->state_ptr(),
metadata_name,
trace_event_internal::kGlobalScope, // scope
trace_event_internal::kNoId, // id
@@ -174,11 +154,24 @@ void MakeHandle(uint32_t chunk_seq,
DCHECK(chunk_seq);
DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
+ DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
handle->chunk_seq = chunk_seq;
handle->chunk_index = static_cast<uint16_t>(chunk_index);
handle->event_index = static_cast<uint16_t>(event_index);
}
+template <typename Function>
+void ForEachCategoryFilter(const unsigned char* category_group_enabled,
+ Function filter_fn) {
+ const TraceCategory* category =
+ CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
+ uint32_t filter_bitmap = category->enabled_filters();
+ for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
+ if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
+ filter_fn(GetCategoryGroupFilters()[index].get());
+ }
+}
+
} // namespace
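
GetCategoryGroupFilters() is indexed by filter position, and each TraceCategory
stores a bitmap recording which of those filters apply to it. A minimal
standalone sketch of the bitmap walk done by ForEachCategoryFilter above
(illustrative only; RunFilter is a hypothetical stand-in for filter_fn):

  uint32_t bitmap = 0x5;  // Example: filters 0 and 2 enabled for this category.
  for (int index = 0; bitmap != 0; bitmap >>= 1, index++) {
    if (bitmap & 1)
      RunFilter(index);  // Stands in for filter_fn(GetCategoryGroupFilters()[index].get()).
  }
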
// A helper class that allows the lock to be acquired in the middle of the scope
@@ -352,33 +345,20 @@ TraceLog* TraceLog::GetInstance() {
}
TraceLog::TraceLog()
- : mode_(DISABLED),
+ : enabled_modes_(0),
num_traces_recorded_(0),
- event_callback_(0),
dispatching_to_observer_list_(false),
process_sort_index_(0),
process_id_hash_(0),
process_id_(0),
- watch_category_(0),
trace_options_(kInternalRecordUntilFull),
- sampling_thread_handle_(0),
trace_config_(TraceConfig()),
- event_callback_trace_config_(TraceConfig()),
thread_shared_chunk_index_(0),
generation_(0),
- use_worker_thread_(false) {
- // Trace is enabled or disabled on one thread while other threads are
- // accessing the enabled flag. We don't care whether edge-case events are
- // traced or not, so we allow races on the enabled flag to keep the trace
- // macros fast.
- // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
- // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
- // sizeof(g_category_group_enabled),
- // "trace_event category enabled");
- for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
- ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
- "trace_event category enabled");
- }
+ use_worker_thread_(false),
+ filter_factory_for_testing_(nullptr) {
+ CategoryRegistry::Initialize();
+
#if defined(OS_NACL) // NaCl shouldn't expose the process id.
SetProcessID(0);
#else
@@ -414,7 +394,9 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
}
}
-bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ ALLOW_UNUSED_PARAM(args);
// TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
// (crbug.com/499731).
TraceEventMemoryOverhead overhead;
@@ -436,61 +418,111 @@ const unsigned char* TraceLog::GetCategoryGroupEnabled(
const char* category_group) {
TraceLog* tracelog = GetInstance();
if (!tracelog) {
- DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
- return &g_category_group_enabled[g_category_already_shutdown];
+ DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
+ return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
+ }
+ TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
+ if (!category) {
+ // Slow path: in the case of a new category we have to repeat the check
+ // holding the lock, as multiple threads might have reached this point
+ // at the same time.
+ auto category_initializer = [](TraceCategory* category) {
+ TraceLog::GetInstance()->UpdateCategoryState(category);
+ };
+ AutoLock lock(tracelog->lock_);
+ CategoryRegistry::GetOrCreateCategoryLocked(
+ category_group, category_initializer, &category);
}
- return tracelog->GetCategoryGroupEnabledInternal(category_group);
+ DCHECK(category->state_ptr());
+ return category->state_ptr();
}
const char* TraceLog::GetCategoryGroupName(
const unsigned char* category_group_enabled) {
- // Calculate the index of the category group by finding
- // category_group_enabled in g_category_group_enabled array.
- uintptr_t category_begin =
- reinterpret_cast<uintptr_t>(g_category_group_enabled);
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
- DCHECK(category_ptr >= category_begin &&
- category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
- MAX_CATEGORY_GROUPS))
- << "out of bounds category pointer";
- uintptr_t category_index =
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
- return g_category_groups[category_index];
+ return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
+ ->name();
}
-void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
- unsigned char enabled_flag = 0;
- const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_RECORDING;
+void TraceLog::UpdateCategoryState(TraceCategory* category) {
+ lock_.AssertAcquired();
+ DCHECK(category->is_valid());
+ unsigned char state_flags = 0;
+ if (enabled_modes_ & RECORDING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
- if (event_callback_ &&
- event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (enabled_modes_ & RECORDING_MODE &&
+ category == CategoryRegistry::kCategoryMetadata) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
#if defined(OS_WIN)
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+ category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
}
#endif
- // TODO(primiano): this is a temporary workaround for catapult:#2341,
- // to guarantee that metadata events are always added even if the category
- // filter is "-*". See crbug.com/618054 for more details and long-term fix.
- if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
- enabled_flag |= ENABLED_FOR_RECORDING;
+ uint32_t enabled_filters_bitmap = 0;
+ int index = 0;
+ for (const auto& event_filter : enabled_event_filters_) {
+ if (event_filter.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
+ DCHECK(GetCategoryGroupFilters()[index]);
+ enabled_filters_bitmap |= 1 << index;
+ }
+ if (index++ >= MAX_TRACE_EVENT_FILTERS) {
+ NOTREACHED();
+ break;
+ }
+ }
+ category->set_enabled_filters(enabled_filters_bitmap);
+ category->set_state(state_flags);
+}
- g_category_group_enabled[category_index] = enabled_flag;
+void TraceLog::UpdateCategoryRegistry() {
+ lock_.AssertAcquired();
+ CreateFiltersForTraceConfig();
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+ UpdateCategoryState(&category);
+ }
}
-void TraceLog::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = 0; i < category_index; i++)
- UpdateCategoryGroupEnabledFlag(i);
+void TraceLog::CreateFiltersForTraceConfig() {
+ if (!(enabled_modes_ & FILTERING_MODE))
+ return;
+
+ // Filters were already added and tracing could be enabled. The filter list
+ // cannot be changed while trace events are using it.
+ if (GetCategoryGroupFilters().size())
+ return;
+
+ for (auto& filter_config : enabled_event_filters_) {
+ if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
+ NOTREACHED()
+ << "Too many trace event filters installed in the current session";
+ break;
+ }
+
+ std::unique_ptr<TraceEventFilter> new_filter;
+ const std::string& predicate_name = filter_config.predicate_name();
+ if (predicate_name == EventNameFilter::kName) {
+ auto whitelist = MakeUnique<std::unordered_set<std::string>>();
+ CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
+ new_filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ } else if (predicate_name == HeapProfilerEventFilter::kName) {
+ new_filter = MakeUnique<HeapProfilerEventFilter>();
+ } else {
+ if (filter_factory_for_testing_)
+ new_filter = filter_factory_for_testing_(predicate_name);
+ CHECK(new_filter) << "Unknown trace filter " << predicate_name;
+ }
+ GetCategoryGroupFilters().push_back(std::move(new_filter));
+ }
}
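
Predicate names other than the two built-ins are resolved through
filter_factory_for_testing_. A minimal sketch of a filter such a factory could
return (not a class in this change; signatures approximated from how filters
are invoked elsewhere in this file):

  class DropAllFilter : public TraceEventFilter {
   public:
    // Returning false from FilterTraceEvent drops the event from recording.
    bool FilterTraceEvent(const TraceEvent& event) const override {
      return false;
    }
    void EndEvent(const char* category_name, const char* name) const override {}
  };
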
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
@@ -522,67 +554,16 @@ void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
}
}
-const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
- const char* category_group) {
- DCHECK(!strchr(category_group, '"'))
- << "Category groups may not contain double quote";
- // The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
-
- // Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- unsigned char* category_group_enabled = NULL;
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold to lock when actually appending a new category, and
- // check the categories groups again.
- AutoLock lock(lock_);
- size_t category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // Create a new category group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS)
- << "must increase MAX_CATEGORY_GROUPS";
- if (category_index < MAX_CATEGORY_GROUPS) {
- // Don't hold on to the category_group pointer, so that we can create
- // category groups with strings not known at compile time (this is
- // required by SetWatchEvent).
- const char* new_group = strdup(category_group);
- ANNOTATE_LEAKING_OBJECT_PTR(new_group);
- g_category_groups[category_index] = new_group;
- DCHECK(!g_category_group_enabled[category_index]);
- // Note that if both included and excluded patterns in the
- // TraceConfig are empty, we exclude nothing,
- // thereby enabling this category group.
- UpdateCategoryGroupEnabledFlag(category_index);
- category_group_enabled = &g_category_group_enabled[category_index];
- // Update the max index now.
- base::subtle::Release_Store(&g_category_index, category_index + 1);
- } else {
- category_group_enabled =
- &g_category_group_enabled[g_category_categories_exhausted];
- }
- return category_group_enabled;
-}
-
void TraceLog::GetKnownCategoryGroups(
std::vector<std::string>* category_groups) {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = g_num_builtin_categories; i < category_index; i++)
- category_groups->push_back(g_category_groups[i]);
+ for (const auto& category : CategoryRegistry::GetAllCategories()) {
+ if (!CategoryRegistry::IsBuiltinCategory(&category))
+ category_groups->push_back(category.name());
+ }
}
-void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+void TraceLog::SetEnabled(const TraceConfig& trace_config,
+ uint8_t modes_to_enable) {
std::vector<EnabledStateObserver*> observer_list;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
{
@@ -596,28 +577,58 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
InternalTraceOptions old_options = trace_options();
- if (IsEnabled()) {
- if (new_options != old_options) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different "
- << "set of options.";
- }
-
- if (mode != mode_) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
- }
-
- trace_config_.Merge(trace_config);
- UpdateCategoryGroupEnabledFlags();
- return;
- }
-
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = mode;
+ // Clear all filters from the previous tracing session. The filters are not
+ // cleared at the end of tracing because threads that hit a trace event while
+ // tracing is being disabled could still be using them.
+ if (!enabled_modes_)
+ GetCategoryGroupFilters().clear();
+
+ // Update trace config for recording.
+ const bool already_recording = enabled_modes_ & RECORDING_MODE;
+ if (modes_to_enable & RECORDING_MODE) {
+ if (already_recording) {
+ // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
+ // enabled. See crbug.com/625170.
+ DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
+ "tracing with a different set "
+ "of options.";
+ trace_config_.Merge(trace_config);
+ } else {
+ trace_config_ = trace_config;
+ }
+ }
+
+ // Update event filters.
+ if (modes_to_enable & FILTERING_MODE) {
+ DCHECK(!trace_config.event_filters().empty())
+ << "Attempting to enable filtering without any filters";
+ DCHECK(enabled_event_filters_.empty()) << "Attempting to re-enable "
+ "filtering when filters are "
+ "already enabled.";
+
+ // Use the given event filters only if filtering was not already enabled.
+ if (enabled_event_filters_.empty())
+ enabled_event_filters_ = trace_config.event_filters();
+ }
+ // Keep |trace_config_| updated with only the enabled filters, in case anyone
+ // tries to read it using |GetCurrentTraceConfig| (even if the filter list is
+ // empty).
+ trace_config_.SetEventFilters(enabled_event_filters_);
+
+ enabled_modes_ |= modes_to_enable;
+ UpdateCategoryRegistry();
+
+ // Do not notify observers or create the trace buffer if we are only enabling
+ // filtering, or if recording was already enabled.
+ if (!(modes_to_enable & RECORDING_MODE) || already_recording)
+ return;
if (new_options != old_options) {
subtle::NoBarrier_Store(&trace_options_, new_options);
@@ -626,34 +637,16 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
num_traces_recorded_++;
- trace_config_ = TraceConfig(trace_config);
- UpdateCategoryGroupEnabledFlags();
+ UpdateCategoryRegistry();
UpdateSyntheticDelaysFromTraceConfig();
- if (new_options & kInternalEnableSampling) {
- sampling_thread_.reset(new TraceSamplingThread);
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[0], "bucket0",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[1], "bucket1",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[2], "bucket2",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- if (!PlatformThread::Create(0, sampling_thread_.get(),
- &sampling_thread_handle_)) {
- DCHECK(false) << "failed to create thread";
- }
- }
-
dispatching_to_observer_list_ = true;
observer_list = enabled_state_observer_list_;
observer_map = async_observers_;
}
// Notify observers outside the lock in case they trigger trace events.
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogEnabled();
+ for (EnabledStateObserver* observer : observer_list)
+ observer->OnTraceLogEnabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
@@ -676,10 +669,9 @@ void TraceLog::SetArgumentFilterPredicate(
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
const TraceConfig& config) {
- InternalTraceOptions ret =
- config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
- if (config.IsArgumentFilterEnabled())
- ret |= kInternalEnableArgumentFilter;
+ InternalTraceOptions ret = config.IsArgumentFilterEnabled()
+ ? kInternalEnableArgumentFilter
+ : kInternalNone;
switch (config.GetTraceRecordMode()) {
case RECORD_UNTIL_FULL:
return ret | kInternalRecordUntilFull;
@@ -701,37 +693,44 @@ TraceConfig TraceLog::GetCurrentTraceConfig() const {
void TraceLog::SetDisabled() {
AutoLock lock(lock_);
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
+}
+
+void TraceLog::SetDisabled(uint8_t modes_to_disable) {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked(modes_to_disable);
}
-void TraceLog::SetDisabledWhileLocked() {
+void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
lock_.AssertAcquired();
- if (!IsEnabled())
+ if (!(enabled_modes_ & modes_to_disable))
return;
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = DISABLED;
+ bool is_recording_mode_disabled =
+ (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
+ enabled_modes_ &= ~modes_to_disable;
- if (sampling_thread_.get()) {
- // Stop the sampling thread.
- sampling_thread_->Stop();
- lock_.Release();
- PlatformThread::Join(sampling_thread_handle_);
- lock_.Acquire();
- sampling_thread_handle_ = PlatformThreadHandle();
- sampling_thread_.reset();
- }
+ if (modes_to_disable & FILTERING_MODE)
+ enabled_event_filters_.clear();
+
+ if (modes_to_disable & RECORDING_MODE)
+ trace_config_.Clear();
+
+ UpdateCategoryRegistry();
+
+ // Add metadata events and notify observers only if recording mode was just
+ // disabled.
+ if (!is_recording_mode_disabled)
+ return;
- trace_config_.Clear();
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- UpdateCategoryGroupEnabledFlags();
AddMetadataEventsWhileLocked();
// Remove metadata events so they will not get added to a subsequent trace.
@@ -747,8 +746,8 @@ void TraceLog::SetDisabledWhileLocked() {
// Dispatch to observers outside the lock in case the observer triggers a
// trace event.
AutoUnlock unlock(lock_);
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogDisabled();
+ for (EnabledStateObserver* observer : observer_list)
+ observer->OnTraceLogDisabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
@@ -831,25 +830,10 @@ void TraceLog::CheckIfBufferIsFullWhileLocked() {
if (buffer_limit_reached_timestamp_.is_null()) {
buffer_limit_reached_timestamp_ = OffsetNow();
}
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
}
}
-void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb) {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_,
- reinterpret_cast<subtle::AtomicWord>(cb));
- event_callback_trace_config_ = trace_config;
- UpdateCategoryGroupEnabledFlags();
-}
-
-void TraceLog::SetEventCallbackDisabled() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_, 0);
- UpdateCategoryGroupEnabledFlags();
-}
-
// Flush() works as the following:
// 1. Flush() is called in thread A whose task runner is saved in
// flush_task_runner_;
@@ -886,7 +870,7 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
return;
}
- int generation = this->generation();
+ int gen = generation();
// Copy of thread_message_loops_ to be used without locking.
std::vector<scoped_refptr<SingleThreadTaskRunner>>
thread_message_loop_task_runners;
@@ -904,29 +888,24 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
std::move(thread_shared_chunk_));
}
- if (thread_message_loops_.size()) {
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- thread_message_loop_task_runners.push_back((*it)->task_runner());
- }
- }
+ for (MessageLoop* loop : thread_message_loops_)
+ thread_message_loop_task_runners.push_back(loop->task_runner());
}
- if (thread_message_loop_task_runners.size()) {
- for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
- thread_message_loop_task_runners[i]->PostTask(
+ if (!thread_message_loop_task_runners.empty()) {
+ for (auto& task_runner : thread_message_loop_task_runners) {
+ task_runner->PostTask(
FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
- generation, discard_events));
+ gen, discard_events));
}
flush_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
+ FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), gen,
discard_events),
TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
return;
}
- FinishFlush(generation, discard_events);
+ FinishFlush(gen, discard_events);
}
// Usually it runs on a different thread.
@@ -990,13 +969,21 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
return;
}
- if (use_worker_thread_ &&
- WorkerPool::PostTask(
- FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
- Passed(&previous_logged_events),
- flush_output_callback, argument_filter_predicate),
- true)) {
+ if (use_worker_thread_) {
+#if 0
+ base::PostTaskWithTraits(
+ FROM_HERE, base::TaskTraits()
+ .MayBlock()
+ .WithPriority(base::TaskPriority::BACKGROUND)
+ .WithShutdownBehavior(
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+ Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+ Passed(&previous_logged_events), flush_output_callback,
+ argument_filter_predicate));
return;
+#else
+ NOTREACHED();
+#endif
}
ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
@@ -1019,7 +1006,7 @@ void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
AutoLock lock(lock_);
if (!CheckGeneration(generation) || !flush_task_runner_ ||
- thread_message_loops_.size())
+ !thread_message_loops_.empty())
return;
flush_task_runner_->PostTask(
@@ -1223,10 +1210,13 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
ThreadTicks thread_now = ThreadNow();
- // |thread_local_event_buffer_| can be null if the current thread doesn't have
- // a message loop or the message loop is blocked.
- InitializeThreadLocalEventBufferIfSupported();
- auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+ ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
+ if (*category_group_enabled & RECORDING_MODE) {
+ // |thread_local_event_buffer_| can be null if the current thread doesn't
+ // have a message loop or the message loop is blocked.
+ InitializeThreadLocalEventBufferIfSupported();
+ thread_local_event_buffer = thread_local_event_buffer_.Get();
+ }
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
@@ -1237,9 +1227,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// call (if any), but don't bother if the new name is empty. Note this will
// not detect a thread name change within the same char* buffer address: we
// favor common case performance over corner case correctness.
- if (new_name != g_current_thread_name.Get().Get() && new_name &&
- *new_name) {
- g_current_thread_name.Get().Set(new_name);
+ static auto* current_thread_name = new ThreadLocalPointer<const char>();
+ if (new_name != current_thread_name->Get() && new_name && *new_name) {
+ current_thread_name->Set(new_name);
AutoLock thread_info_lock(thread_info_lock_);
@@ -1257,7 +1247,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
bool found = std::find(existing_names.begin(), existing_names.end(),
new_name) != existing_names.end();
if (!found) {
- if (existing_names.size())
+ if (!existing_names.empty())
existing_name->second.push_back(',');
existing_name->second.append(new_name);
}
@@ -1268,14 +1258,37 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
#if defined(OS_WIN)
// This is done sooner rather than later, to avoid creating the event and
// acquiring the lock, which is not needed for ETW as it's already threadsafe.
- if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
num_args, arg_names, arg_types, arg_values,
convertable_values);
#endif // OS_WIN
std::string console_message;
- if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+ std::unique_ptr<TraceEvent> filtered_trace_event;
+ bool disabled_by_filters = false;
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
+ std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
+ new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+ phase, category_group_enabled, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, convertable_values, flags);
+
+ disabled_by_filters = true;
+ ForEachCategoryFilter(
+ category_group_enabled, [&new_trace_event, &disabled_by_filters](
+ TraceEventFilter* trace_event_filter) {
+ if (trace_event_filter->FilterTraceEvent(*new_trace_event))
+ disabled_by_filters = false;
+ });
+ if (!disabled_by_filters)
+ filtered_trace_event = std::move(new_trace_event);
+ }
+
+ // If enabled for recording, the event should be added only if one of the
+ // filters accepted it, or if the category is not enabled for filtering.
+ if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
+ !disabled_by_filters) {
OptionalAutoLock lock(&lock_);
TraceEvent* trace_event = NULL;
@@ -1287,21 +1300,14 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
}
if (trace_event) {
- trace_event->Initialize(thread_id,
- offset_event_timestamp,
- thread_now,
- phase,
- category_group_enabled,
- name,
- scope,
- id,
- bind_id,
- num_args,
- arg_names,
- arg_types,
- arg_values,
- convertable_values,
- flags);
+ if (filtered_trace_event) {
+ trace_event->MoveFrom(std::move(filtered_trace_event));
+ } else {
+ trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+ phase, category_group_enabled, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, convertable_values, flags);
+ }
#if defined(OS_ANDROID)
trace_event->SendToATrace();
@@ -1315,53 +1321,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
}
}
- if (console_message.size())
+ if (!console_message.empty())
LOG(ERROR) << console_message;
- if (reinterpret_cast<const unsigned char*>(
- subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
- bool event_name_matches;
- WatchEventCallback watch_event_callback_copy;
- {
- AutoLock lock(lock_);
- event_name_matches = watch_event_name_ == name;
- watch_event_callback_copy = watch_event_callback_;
- }
- if (event_name_matches) {
- if (!watch_event_callback_copy.is_null())
- watch_event_callback_copy.Run();
- }
- }
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- offset_event_timestamp,
- phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
- category_group_enabled, name, scope, id, num_args, arg_names,
- arg_types, arg_values, flags);
- }
- }
-
- // TODO(primiano): Add support for events with copied name crbug.com/581078
- if (!(flags & TRACE_EVENT_FLAG_COPY)) {
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- if (phase == TRACE_EVENT_PHASE_BEGIN ||
- phase == TRACE_EVENT_PHASE_COMPLETE) {
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PushPseudoStackFrame(name);
- } else if (phase == TRACE_EVENT_PHASE_END) {
- // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
- // is in |TraceLog::UpdateTraceEventDuration|.
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame(name);
- }
- }
- }
-
return handle;
}
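
Editorial aside, not part of the patch: the filtering block in AddTraceEventWithThreadIdAndTimestamp above is easier to follow with a concrete filter in mind. The sketch below shows a hypothetical TraceEventFilter subclass; the FilterTraceEvent()/EndEvent() overrides mirror the calls made through ForEachCategoryFilter() above, but the exact base-class signatures live in trace_event_filter.h and may differ (e.g. const-ness), and PrefixEventFilter itself is invented for illustration.

// Hypothetical filter that keeps only events whose name starts with a prefix.
class PrefixEventFilter : public TraceEventFilter {
 public:
  explicit PrefixEventFilter(const std::string& prefix) : prefix_(prefix) {}

  // Returning true keeps the event; in AddTraceEventWithThreadIdAndTimestamp
  // this resets |disabled_by_filters| back to false.
  bool FilterTraceEvent(const TraceEvent& trace_event) const override {
    return std::string(trace_event.name())
               .compare(0, prefix_.size(), prefix_) == 0;
  }

  // Invoked from EndFilteredEvent() when a filtered (complete) event ends.
  void EndEvent(const char* category_name, const char* event_name) const override {}

 private:
  const std::string prefix_;
};
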
@@ -1419,9 +1381,9 @@ std::string TraceLog::EventToConsoleMessage(unsigned char phase,
thread_colors_[thread_name]);
size_t depth = 0;
- if (thread_event_start_times_.find(thread_id) !=
- thread_event_start_times_.end())
- depth = thread_event_start_times_[thread_id].size();
+ auto it = thread_event_start_times_.find(thread_id);
+ if (it != thread_event_start_times_.end())
+ depth = it->second.size();
for (size_t i = 0; i < depth; ++i)
log << "| ";
@@ -1439,6 +1401,18 @@ std::string TraceLog::EventToConsoleMessage(unsigned char phase,
return log.str();
}
+void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle) {
+ ALLOW_UNUSED_PARAM(handle);
+ const char* category_name = GetCategoryGroupName(category_group_enabled);
+ ForEachCategoryFilter(
+ category_group_enabled,
+ [name, category_name](TraceEventFilter* trace_event_filter) {
+ trace_event_filter->EndEvent(category_name, name);
+ });
+}
+
void TraceLog::UpdateTraceEventDuration(
const unsigned char* category_group_enabled,
const char* name,
@@ -1460,17 +1434,29 @@ void TraceLog::UpdateTraceEventDuration(
#if defined(OS_WIN)
// Generate an ETW event that marks the end of a complete event.
- if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddCompleteEndEvent(name);
#endif // OS_WIN
std::string console_message;
- if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
OptionalAutoLock lock(&lock_);
TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
if (trace_event) {
DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+ // TEMP(oysteine) to debug crbug.com/638744
+ if (trace_event->duration().ToInternalValue() != -1) {
+ DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
+ << ", chunk_index " << handle.chunk_index << ", event_index "
+ << handle.event_index;
+
+ std::string serialized_event;
+ trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
+ DVLOG(1) << "TraceEvent: " << serialized_event;
+ lock_.AssertAcquired();
+ }
+
trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
trace_event->SendToATrace();
@@ -1481,47 +1467,13 @@ void TraceLog::UpdateTraceEventDuration(
console_message =
EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
}
-
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame(name);
- }
}
- if (console_message.size())
+ if (!console_message.empty())
LOG(ERROR) << console_message;
- if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
- }
- }
-}
-
-void TraceLog::SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback) {
- const unsigned char* category =
- GetCategoryGroupEnabled(category_name.c_str());
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_,
- reinterpret_cast<subtle::AtomicWord>(category));
- watch_event_name_ = event_name;
- watch_event_callback_ = callback;
-}
-
-void TraceLog::CancelWatchEvent() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- watch_event_callback_.Reset();
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
+ EndFilteredEvent(category_group_enabled, name, handle);
}
uint64_t TraceLog::MangleEventId(uint64_t id) {
@@ -1551,42 +1503,37 @@ void TraceLog::AddMetadataEventsWhileLocked() {
"sort_index", process_sort_index_);
}
- if (process_name_.size()) {
+ if (!process_name_.empty()) {
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
current_thread_id, "process_name", "name",
process_name_);
}
- if (process_labels_.size() > 0) {
+ if (!process_labels_.empty()) {
std::vector<std::string> labels;
- for (base::hash_map<int, std::string>::iterator it =
- process_labels_.begin();
- it != process_labels_.end(); it++) {
- labels.push_back(it->second);
- }
+ for (const auto& it : process_labels_)
+ labels.push_back(it.second);
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
current_thread_id, "process_labels", "labels",
base::JoinString(labels, ","));
}
// Thread sort indices.
- for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
- it != thread_sort_indices_.end(); it++) {
- if (it->second == 0)
+ for (const auto& it : thread_sort_indices_) {
+ if (it.second == 0)
continue;
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first, "thread_sort_index", "sort_index",
- it->second);
+ it.first, "thread_sort_index", "sort_index",
+ it.second);
}
// Thread names.
AutoLock thread_info_lock(thread_info_lock_);
- for (hash_map<int, std::string>::iterator it = thread_names_.begin();
- it != thread_names_.end(); it++) {
- if (it->second.empty())
+ for (const auto& it : thread_names_) {
+ if (it.second.empty())
continue;
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first, "thread_name", "name", it->second);
+ it.first, "thread_name", "name", it.second);
}
// If buffer is full, add a metadata record to report this.
@@ -1598,14 +1545,9 @@ void TraceLog::AddMetadataEventsWhileLocked() {
}
}
-void TraceLog::WaitSamplingEventForTesting() {
- if (!sampling_thread_)
- return;
- sampling_thread_->WaitSamplingEventForTesting();
-}
-
void TraceLog::DeleteForTesting() {
internal::DeleteTraceLogForTesting::Delete();
+ CategoryRegistry::ResetForTesting();
}
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
@@ -1617,6 +1559,10 @@ TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
if (!handle.chunk_seq)
return NULL;
+ DCHECK(handle.chunk_seq);
+ DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+ DCHECK(handle.event_index < TraceBufferChunk::kTraceBufferChunkSize);
+
if (thread_local_event_buffer_.Get()) {
TraceEvent* trace_event =
thread_local_event_buffer_.Get()->GetEventByHandle(handle);
@@ -1643,10 +1589,10 @@ void TraceLog::SetProcessID(int process_id) {
process_id_ = process_id;
// Create a FNV hash from the process ID for XORing.
// See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
- unsigned long long offset_basis = 14695981039346656037ull;
- unsigned long long fnv_prime = 1099511628211ull;
- unsigned long long pid = static_cast<unsigned long long>(process_id_);
- process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
+ const unsigned long long kOffsetBasis = 14695981039346656037ull;
+ const unsigned long long kFnvPrime = 1099511628211ull;
+ const unsigned long long pid = static_cast<unsigned long long>(process_id_);
+ process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
}
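
For reference, the two constants above are the standard 64-bit FNV offset basis and prime. The standalone snippet below (illustrative only, not part of the patch) reproduces the same mixing for a sample pid; that the result is later XORed into event ids is an assumption based on MangleEventId() below.

// Standalone illustration of the FNV-1-style mixing used by SetProcessID().
// Unsigned overflow wraps, which is exactly what the hash relies on.
#include <cstdio>

int main() {
  const unsigned long long kOffsetBasis = 14695981039346656037ull;
  const unsigned long long kFnvPrime = 1099511628211ull;
  const unsigned long long pid = 1234;  // sample process id
  const unsigned long long process_id_hash = (kOffsetBasis ^ pid) * kFnvPrime;
  std::printf("process_id_hash = %llu\n", process_id_hash);
  return 0;
}
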
void TraceLog::SetProcessSortIndex(int sort_index) {
@@ -1654,7 +1600,7 @@ void TraceLog::SetProcessSortIndex(int sort_index) {
process_sort_index_ = sort_index;
}
-void TraceLog::SetProcessName(const std::string& process_name) {
+void TraceLog::SetProcessName(const char* process_name) {
AutoLock lock(lock_);
process_name_ = process_name;
}
@@ -1670,12 +1616,7 @@ void TraceLog::UpdateProcessLabel(int label_id,
void TraceLog::RemoveProcessLabel(int label_id) {
AutoLock lock(lock_);
- base::hash_map<int, std::string>::iterator it =
- process_labels_.find(label_id);
- if (it == process_labels_.end())
- return;
-
- process_labels_.erase(it);
+ process_labels_.erase(label_id);
}
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
@@ -1693,42 +1634,39 @@ size_t TraceLog::GetObserverCountForTest() const {
void TraceLog::SetCurrentThreadBlocksMessageLoop() {
thread_blocks_message_loop_.Set(true);
- if (thread_local_event_buffer_.Get()) {
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
- }
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
}
TraceBuffer* TraceLog::CreateTraceBuffer() {
HEAP_PROFILER_SCOPED_IGNORE;
InternalTraceOptions options = trace_options();
- if (options & kInternalRecordContinuously)
+ if (options & kInternalRecordContinuously) {
return TraceBuffer::CreateTraceBufferRingBuffer(
kTraceEventRingBufferChunks);
- else if (options & kInternalEchoToConsole)
+ }
+ if (options & kInternalEchoToConsole) {
return TraceBuffer::CreateTraceBufferRingBuffer(
kEchoToConsoleTraceEventBufferChunks);
- else if (options & kInternalRecordAsMuchAsPossible)
+ }
+ if (options & kInternalRecordAsMuchAsPossible) {
return TraceBuffer::CreateTraceBufferVectorOfSize(
kTraceEventVectorBigBufferChunks);
+ }
return TraceBuffer::CreateTraceBufferVectorOfSize(
kTraceEventVectorBufferChunks);
}
#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
// Go through each category and set/clear the ETW bit depending on whether the
// category is enabled.
- for (size_t i = 0; i < category_index; i++) {
- const char* category_group = g_category_groups[i];
- DCHECK(category_group);
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
+ category.name())) {
+ category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
} else {
- g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
+ category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
}
}
}
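
Tying the .cc changes to the header changes below: a test could route filter creation through the SetFilterFactoryForTesting() hook declared in trace_log.h. The sketch below is hypothetical; the predicate name, the event_filters JSON keys, and the reuse of the PrefixEventFilter sketched earlier are assumptions, not part of the patch.

// Hypothetical test wiring, not part of the patch.
std::unique_ptr<TraceEventFilter> MakeTestFilter(const std::string& predicate_name) {
  if (predicate_name == "prefix_predicate")  // made-up predicate name
    return std::unique_ptr<TraceEventFilter>(new PrefixEventFilter("My"));
  return nullptr;  // assumption: unknown predicates produce no filter
}

void EnableFilteringOnlyForTest() {
  TraceLog* trace_log = TraceLog::GetInstance();
  trace_log->SetFilterFactoryForTesting(&MakeTestFilter);
  // JSON keys below follow the trace-config event filter format as an
  // assumption; adjust to the actual TraceConfig schema.
  TraceConfig config(
      "{\"event_filters\": [{\"filter_predicate\": \"prefix_predicate\","
      " \"included_categories\": [\"toplevel\"]}]}");
  trace_log->SetEnabled(config, TraceLog::FILTERING_MODE);
}
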
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index e4407e81bd..88b6e588e4 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -26,15 +26,17 @@ namespace base {
template <typename Type>
struct DefaultSingletonTraits;
+class MessageLoop;
class RefCountedString;
namespace trace_event {
+struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
+class TraceEventFilter;
class TraceEventMemoryOverhead;
-class TraceSamplingThread;
struct BASE_EXPORT TraceLogStatus {
TraceLogStatus();
@@ -45,22 +47,14 @@ struct BASE_EXPORT TraceLogStatus {
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
public:
- enum Mode {
- DISABLED = 0,
- RECORDING_MODE
- };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
- enum CategoryGroupEnabledFlags {
- // Category group enabled for the recording mode.
- ENABLED_FOR_RECORDING = 1 << 0,
- // Category group enabled by SetEventCallbackEnabled().
- ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
- // Category group enabled to export events to ETW.
- ENABLED_FOR_ETW_EXPORT = 1 << 3
+ // Argument passed to TraceLog::SetEnabled.
+ enum Mode : uint8_t {
+ // Enables normal tracing (recording trace events in the trace buffer).
+ RECORDING_MODE = 1 << 0,
+
+ // Trace events are enabled just for filtering, not for recording. Only the
+ // event-filters config of the |trace_config| argument is used.
+ FILTERING_MODE = 1 << 1
};
static TraceLog* GetInstance();
@@ -76,16 +70,30 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// if the current thread supports that (has a message loop).
void InitializeThreadLocalEventBufferIfSupported();
- // Enables normal tracing (recording trace events in the trace buffer).
- // See TraceConfig comments for details on how to control what categories
- // will be traced. If tracing has already been enabled, |category_filter| will
- // be merged into the current category filter.
- void SetEnabled(const TraceConfig& trace_config, Mode mode);
-
- // Disables normal tracing for all categories.
+ // See TraceConfig comments for details on how to control which categories
+ // will be traced. SetDisabled must be called distinctly for each mode that
+ // was enabled. If tracing has already been enabled for recording, the
+ // category filter (enabled and disabled categories) will be merged into the
+ // current category filter. Enabling RECORDING_MODE does not enable filters;
+ // trace event filters are used only if FILTERING_MODE is set in
+ // |modes_to_enable|. Unlike RECORDING_MODE, FILTERING_MODE does not support
+ // upgrading, i.e. filters can only be enabled if not previously enabled.
+ void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
+
+ // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
+ // Mode as argument.
+
+ // Disables tracing for all categories, but only for the specified
+ // |modes_to_disable|. The no-argument overload defaults to RECORDING_MODE.
void SetDisabled();
+ void SetDisabled(uint8_t modes_to_disable);
- bool IsEnabled() { return mode_ != DISABLED; }
+ // Returns true if TraceLog is enabled in recording mode.
+ // Note: returns false if only FILTERING_MODE is enabled.
+ bool IsEnabled() { return enabled_modes_ & RECORDING_MODE; }
+
+ // Returns a bitmap of enabled modes from TraceLog::Mode.
+ uint8_t enabled_modes() { return enabled_modes_; }
// The number of times we have begun recording traces. If tracing is off,
// returns -1. If tracing is on, then it returns the number of times we have
@@ -148,31 +156,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// objects.
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
- // Not using base::Callback because of its limited by 7 parameters.
- // Also, using primitive type allows directly passing callback from WebCore.
- // WARNING: It is possible for the previously set callback to be called
- // after a call to SetEventCallbackEnabled() that replaces or a call to
- // SetEventCallbackDisabled() that disables the callback.
- // This callback may be invoked on any thread.
- // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
- // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
- // interface simple.
- typedef void (*EventCallback)(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags);
-
- // Enable tracing for EventCallback.
- void SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb);
- void SetEventCallbackDisabled();
void SetArgumentFilterPredicate(
const ArgumentFilterPredicate& argument_filter_predicate);
@@ -286,14 +269,9 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
const char* name,
TraceEventHandle handle);
- // For every matching event, the callback will be called.
- typedef base::Callback<void()> WatchEventCallback;
- void SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback);
- // Cancel the watch event. If tracing is enabled, this may race with the
- // watch event notification firing.
- void CancelWatchEvent();
+ void EndFilteredEvent(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle);
int process_id() const { return process_id_; }
@@ -301,7 +279,12 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// Exposed for unittesting:
- void WaitSamplingEventForTesting();
+ // Testing factory for TraceEventFilter.
+ typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
+ const std::string& /* predicate_name */);
+ void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
+ filter_factory_for_testing_ = factory;
+ }
// Allows deleting our singleton instance.
static void DeleteForTesting();
@@ -316,8 +299,9 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// on their sort index, ascending, then by their name, and then tid.
void SetProcessSortIndex(int sort_index);
- // Sets the name of the process.
- void SetProcessName(const std::string& process_name);
+ // Sets the name of the process. |process_name| should be a string literal
+ // since it is a whitelisted argument for background field trials.
+ void SetProcessName(const char* process_name);
// Processes can have labels in addition to their names. Use labels, for
// instance, to list out the web page titles that a process is handling.
@@ -371,12 +355,14 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
ProcessMemoryDump* pmd) override;
// Enable/disable each category group based on the current mode_,
- // category_filter_, event_callback_ and event_callback_category_filter_.
- // Enable the category group in the enabled mode if category_filter_ matches
- // the category group, or event_callback_ is not null and
- // event_callback_category_filter_ matches the category group.
- void UpdateCategoryGroupEnabledFlags();
- void UpdateCategoryGroupEnabledFlag(size_t category_index);
+ // category_filter_ and enabled_event_filters_.
+ // A category group is enabled for recording if category_filter_ matches it,
+ // and enabled for filtering if any filter in enabled_event_filters_
+ // enables it.
+ void UpdateCategoryRegistry();
+ void UpdateCategoryState(TraceCategory* category);
+
+ void CreateFiltersForTraceConfig();
// Configure synthetic delays based on the values set in the current
// trace config.
@@ -391,7 +377,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceLog();
~TraceLog() override;
- const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
void AddMetadataEventsWhileLocked();
InternalTraceOptions trace_options() const {
@@ -409,7 +394,7 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
bool check_buffer_is_full);
void CheckIfBufferIsFullWhileLocked();
- void SetDisabledWhileLocked();
+ void SetDisabledWhileLocked(uint8_t modes);
TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
OptionalAutoLock* lock);
@@ -448,7 +433,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
static const InternalTraceOptions kInternalRecordUntilFull;
static const InternalTraceOptions kInternalRecordContinuously;
static const InternalTraceOptions kInternalEchoToConsole;
- static const InternalTraceOptions kInternalEnableSampling;
static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
static const InternalTraceOptions kInternalEnableArgumentFilter;
@@ -458,11 +442,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// This lock protects accesses to thread_names_, thread_event_start_times_
// and thread_colors_.
Lock thread_info_lock_;
- Mode mode_;
+ uint8_t enabled_modes_; // See TraceLog::Mode.
int num_traces_recorded_;
std::unique_ptr<TraceBuffer> logged_events_;
std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
- subtle::AtomicWord /* EventCallback */ event_callback_;
bool dispatching_to_observer_list_;
std::vector<EnabledStateObserver*> enabled_state_observer_list_;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
@@ -487,19 +470,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TimeDelta time_offset_;
- // Allow tests to wake up when certain events occur.
- WatchEventCallback watch_event_callback_;
- subtle::AtomicWord /* const unsigned char* */ watch_category_;
- std::string watch_event_name_;
-
subtle::AtomicWord /* Options */ trace_options_;
- // Sampling thread handles.
- std::unique_ptr<TraceSamplingThread> sampling_thread_;
- PlatformThreadHandle sampling_thread_handle_;
-
TraceConfig trace_config_;
- TraceConfig event_callback_trace_config_;
+ TraceConfig::EventFilters enabled_event_filters_;
ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
ThreadLocalBoolean thread_blocks_message_loop_;
@@ -522,6 +496,8 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
subtle::AtomicWord generation_;
bool use_worker_thread_;
+ FilterFactoryForTesting filter_factory_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
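
A brief usage sketch of the mode bitmap introduced above (editorial, not part of the patch); |config| is assumed to be a valid TraceConfig built elsewhere.

// Recording and filtering are independent bits and are torn down separately.
TraceLog* trace_log = TraceLog::GetInstance();
trace_log->SetEnabled(config,
                      TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
DCHECK(trace_log->IsEnabled());  // True: the RECORDING_MODE bit is set.
DCHECK(trace_log->enabled_modes() & TraceLog::FILTERING_MODE);

trace_log->SetDisabled(TraceLog::FILTERING_MODE);  // Recording continues.
DCHECK(trace_log->IsEnabled());

trace_log->SetDisabled(TraceLog::RECORDING_MODE);  // Fully disabled now.
DCHECK(!trace_log->IsEnabled());
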
diff --git a/base/trace_event/trace_log_constants.cc b/base/trace_event/trace_log_constants.cc
index cd2ff0dad3..65dca2e4d6 100644
--- a/base/trace_event/trace_log_constants.cc
+++ b/base/trace_event/trace_log_constants.cc
@@ -14,8 +14,7 @@ const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordUntilFull = 1 << 0;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordContinuously = 1 << 1;
-const TraceLog::InternalTraceOptions
- TraceLog::kInternalEnableSampling = 1 << 2;
+// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
const TraceLog::InternalTraceOptions
TraceLog::kInternalEchoToConsole = 1 << 3;
const TraceLog::InternalTraceOptions
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
deleted file mode 100644
index 5a0d2f8a02..0000000000
--- a/base/trace_event/trace_sampling_thread.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_impl.h"
-#include "base/trace_event/trace_log.h"
-#include "base/trace_event/trace_sampling_thread.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData {
- public:
- TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback);
- ~TraceBucketData();
-
- TRACE_EVENT_API_ATOMIC_WORD* bucket;
- const char* bucket_name;
- TraceSampleCallback callback;
-};
-
-TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false),
- waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
-
-TraceSamplingThread::~TraceSamplingThread() {}
-
-void TraceSamplingThread::ThreadMain() {
- PlatformThread::SetName("Sampling Thread");
- thread_running_ = true;
- const int kSamplingFrequencyMicroseconds = 1000;
- while (!cancellation_flag_.IsSet()) {
- PlatformThread::Sleep(
- TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
- GetSamples();
- waitable_event_for_testing_.Signal();
- }
-}
-
-// static
-void TraceSamplingThread::DefaultSamplingCallback(
- TraceBucketData* bucket_data) {
- TRACE_EVENT_API_ATOMIC_WORD category_and_name =
- TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
- if (!category_and_name)
- return;
- const char* const combined =
- reinterpret_cast<const char* const>(category_and_name);
- const char* category_group;
- const char* name;
- ExtractCategoryAndName(combined, &category_group, &name);
- TRACE_EVENT_API_ADD_TRACE_EVENT(
- TRACE_EVENT_PHASE_SAMPLE,
- TraceLog::GetCategoryGroupEnabled(category_group), name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- NULL, NULL, NULL, NULL, 0);
-}
-
-void TraceSamplingThread::GetSamples() {
- for (size_t i = 0; i < sample_buckets_.size(); ++i) {
- TraceBucketData* bucket_data = &sample_buckets_[i];
- bucket_data->callback.Run(bucket_data);
- }
-}
-
-void TraceSamplingThread::RegisterSampleBucket(
- TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback) {
- // Access to sample_buckets_ doesn't cause races with the sampling thread
- // that uses the sample_buckets_, because it is guaranteed that
- // RegisterSampleBucket is called before the sampling thread is created.
- DCHECK(!thread_running_);
- sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
-}
-
-// static
-void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name) {
- *category = combined;
- *name = &combined[strlen(combined) + 1];
-}
-
-void TraceSamplingThread::Stop() {
- cancellation_flag_.Set();
-}
-
-void TraceSamplingThread::WaitSamplingEventForTesting() {
- waitable_event_for_testing_.Wait();
-}
-
-TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback)
- : bucket(bucket), bucket_name(name), callback(callback) {}
-
-TraceBucketData::~TraceBucketData() {}
-
-} // namespace trace_event
-} // namespace base
diff --git a/base/trace_event/trace_sampling_thread.h b/base/trace_event/trace_sampling_thread.h
deleted file mode 100644
index f976a80e07..0000000000
--- a/base/trace_event/trace_sampling_thread.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-
-#include "base/synchronization/cancellation_flag.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/trace_event/trace_event.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData;
-typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
-
-// This object must be created on the IO thread.
-class TraceSamplingThread : public PlatformThread::Delegate {
- public:
- TraceSamplingThread();
- ~TraceSamplingThread() override;
-
- // Implementation of PlatformThread::Delegate:
- void ThreadMain() override;
-
- static void DefaultSamplingCallback(TraceBucketData* bucket_data);
-
- void Stop();
- void WaitSamplingEventForTesting();
-
- private:
- friend class TraceLog;
-
- void GetSamples();
- // Not thread-safe. Once the ThreadMain has been called, this can no longer
- // be called.
- void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback);
- // Splits a combined "category\0name" into the two component parts.
- static void ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name);
- std::vector<TraceBucketData> sample_buckets_;
- bool thread_running_;
- CancellationFlag cancellation_flag_;
- WaitableEvent waitable_event_for_testing_;
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_