Diffstat (limited to 'base/trace_event')
 base/trace_event/heap_profiler_allocation_register.cc |  80
 base/trace_event/heap_profiler_allocation_register.h  |  79
 base/trace_event/memory_dump_manager.cc               |  32
 base/trace_event/memory_dump_manager.h                |  67
 base/trace_event/memory_dump_manager_unittest.cc      |  27
 base/trace_event/memory_dump_provider_info.cc         |  43
 base/trace_event/memory_dump_provider_info.h          | 108
 base/trace_event/memory_dump_scheduler.cc             |   4
 base/trace_event/trace_config.cc                      |   4
 base/trace_event/trace_config.h                       |   4
 base/trace_event/trace_config_category_filter.cc      |  35
 base/trace_event/trace_config_category_filter.h       |   4
 base/trace_event/trace_event_synthetic_delay.cc       |   4
 base/trace_event/trace_event_unittest.cc               |   1
 14 files changed, 300 insertions(+), 192 deletions(-)
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 63d40611a6..b9f440adb6 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -5,6 +5,7 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <algorithm>
+#include <limits>
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -12,9 +13,9 @@ namespace base {
namespace trace_event {
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register, AllocationIndex index)
- : register_(alloc_register),
- index_(index) {}
+ const AllocationRegister& alloc_register,
+ AllocationIndex index)
+ : register_(alloc_register), index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
index_ = register_.allocations_.Next(index_ + 1);
@@ -25,12 +26,12 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-AllocationRegister::Allocation
-AllocationRegister::ConstIterator::operator*() const {
+AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
+ const {
return register_.GetAllocation(index_);
}
-size_t AllocationRegister::BacktraceHasher::operator () (
+size_t AllocationRegister::BacktraceHasher::operator()(
const Backtrace& backtrace) const {
const size_t kSampleLength = 10;
@@ -42,7 +43,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
}
size_t tail_start = backtrace.frame_count -
- std::min(backtrace.frame_count - head_end, kSampleLength);
+ std::min(backtrace.frame_count - head_end, kSampleLength);
for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
@@ -55,7 +56,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
return (total_value * 131101) >> 14;
}
-size_t AllocationRegister::AddressHasher::operator () (
+size_t AllocationRegister::AddressHasher::operator()(
const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-world data (addresses
@@ -75,34 +76,48 @@ AllocationRegister::AllocationRegister()
AllocationRegister::AllocationRegister(size_t allocation_capacity,
size_t backtrace_capacity)
- : allocations_(allocation_capacity),
- backtraces_(backtrace_capacity) {}
-
-AllocationRegister::~AllocationRegister() {
+ : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
+ Backtrace sentinel = {};
+ sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
+ sentinel.frame_count = 1;
+
+ // Rationale for max / 2: in theory we could just start the sentinel with a
+ // refcount == 0. However, using max / 2 allows short circuiting of the
+ // conditional in RemoveBacktrace(), keeping the sentinel logic out of the
+ // fast path. From a functional viewpoint, the sentinel is safe even if its
+ // refcount wraps, because RemoveBacktrace() never erases the sentinel index.
+ BacktraceMap::KVPair::second_type sentinel_refcount =
+ std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
+ auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
+ DCHECK(index_and_flag.second);
+ DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
-void AllocationRegister::Insert(const void* address,
+AllocationRegister::~AllocationRegister() {}
+
+bool AllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
DCHECK(address != nullptr);
if (size == 0) {
- return;
+ return false;
}
- AllocationInfo info = {
- size,
- context.type_name,
- InsertBacktrace(context.backtrace)
- };
+ AllocationInfo info = {size, context.type_name,
+ InsertBacktrace(context.backtrace)};
// Try to insert the allocation.
auto index_and_flag = allocations_.Insert(address, info);
- if (!index_and_flag.second) {
+ if (!index_and_flag.second &&
+ index_and_flag.first != AllocationMap::kInvalidKVIndex) {
// |address| is already there - overwrite the allocation info.
auto& old_info = allocations_.Get(index_and_flag.first).second;
RemoveBacktrace(old_info.backtrace_index);
old_info = info;
+ return true;
}
+
+ return index_and_flag.second;
}
void AllocationRegister::Remove(const void* address) {
@@ -140,15 +155,17 @@ AllocationRegister::ConstIterator AllocationRegister::end() const {
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
size_t allocated = sizeof(AllocationRegister);
- size_t resident = sizeof(AllocationRegister)
- + allocations_.EstimateUsedMemory()
- + backtraces_.EstimateUsedMemory();
+ size_t resident = sizeof(AllocationRegister) +
+ allocations_.EstimateUsedMemory() +
+ backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
const Backtrace& backtrace) {
auto index = backtraces_.Insert(backtrace, 0).first;
+ if (index == BacktraceMap::kInvalidKVIndex)
+ return kOutOfStorageBacktraceIndex;
auto& backtrace_and_count = backtraces_.Get(index);
backtrace_and_count.second++;
return index;
@@ -156,7 +173,8 @@ AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
auto& backtrace_and_count = backtraces_.Get(index);
- if (--backtrace_and_count.second == 0) {
+ if (--backtrace_and_count.second == 0 &&
+ index != kOutOfStorageBacktraceIndex) {
// Backtrace is not referenced anymore - remove it.
backtraces_.Remove(index);
}
@@ -165,15 +183,11 @@ void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
AllocationRegister::Allocation AllocationRegister::GetAllocation(
AllocationMap::KVIndex index) const {
const auto& address_and_info = allocations_.Get(index);
- const auto& backtrace_and_count = backtraces_.Get(
- address_and_info.second.backtrace_index);
- return {
- address_and_info.first,
- address_and_info.second.size,
- AllocationContext(
- backtrace_and_count.first,
- address_and_info.second.type_name)
- };
+ const auto& backtrace_and_count =
+ backtraces_.Get(address_and_info.second.backtrace_index);
+ return {address_and_info.first, address_and_info.second.size,
+ AllocationContext(backtrace_and_count.first,
+ address_and_info.second.type_name)};
}
} // namespace trace_event
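
The allocation-table side of this change is easiest to see from the caller's perspective: Insert() now reports whether the sample was actually recorded instead of CHECK-crashing when the fixed-size table fills up, while backtrace-table exhaustion is absorbed internally by the "[out of heap profiler mem]" sentinel. A minimal sketch, assuming a hypothetical RecordAlloc() hook (the real call sites live in the heap profiler shim, not in this file):

    #include "base/trace_event/heap_profiler_allocation_context.h"
    #include "base/trace_event/heap_profiler_allocation_register.h"

    using base::trace_event::AllocationContext;
    using base::trace_event::AllocationRegister;

    // Hypothetical hook; illustrates the new bool contract of Insert().
    void RecordAlloc(AllocationRegister* reg,
                     const void* address,
                     size_t size,
                     const AllocationContext& context) {
      if (!reg->Insert(address, size, context)) {
        // Table full: the sample is silently dropped. Dumps will undercount,
        // but large sessions no longer crash the process.
      }
    }
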
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeae..ac9872f001 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
// For implementation simplicity API uses integer index instead
// of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
- static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+ enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
// Capacity controls how many items this hash map can hold, and largely
// affects memory footprint.
- FixedHashMap(size_t capacity)
- : num_cells_(capacity),
- cells_(static_cast<Cell*>(
- AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
- buckets_(static_cast<Bucket*>(
- AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
- free_list_(nullptr),
- next_unused_cell_(0) {}
+ explicit FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ num_inserts_dropped_(0),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
~FixedHashMap() {
FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
}
+ // Returns {kInvalidKVIndex, false} if the table is full.
std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
Cell** p_cell = Lookup(key);
Cell* cell = *p_cell;
@@ -74,7 +76,15 @@ class FixedHashMap {
}
// Get a free cell and link it.
- *p_cell = cell = GetFreeCell();
+ cell = GetFreeCell();
+ if (!cell) {
+ if (num_inserts_dropped_ <
+ std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+ ++num_inserts_dropped_;
+ }
+ return {kInvalidKVIndex, false};
+ }
+ *p_cell = cell;
cell->p_prev = p_cell;
cell->next = nullptr;
@@ -137,6 +147,8 @@ class FixedHashMap {
bits::Align(sizeof(Bucket) * NumBuckets, page_size);
}
+ size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
private:
friend base::trace_event::AllocationRegisterTest;
@@ -175,7 +187,8 @@ class FixedHashMap {
}
// Returns a cell that is not being used to store an entry (either by
- // recycling from the free list or by taking a fresh cell).
+ // recycling from the free list or by taking a fresh cell). May return
+ // nullptr if the hash table has run out of memory.
Cell* GetFreeCell() {
// First try to re-use a cell from the free list.
if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
return cell;
}
- // Otherwise pick the next cell that has not been touched before.
- size_t idx = next_unused_cell_;
- next_unused_cell_++;
-
// If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside
- // of the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
-
- CHECK_LT(next_unused_cell_, num_cells_ + 1)
- << "Allocation Register hash table has too little capacity. Increase "
- "the capacity to run heap profiler in large sessions.";
-
- return &cells_[idx];
+ // was reserved for |cells_|), return nullptr.
+ if (next_unused_cell_ >= num_cells_) {
+ return nullptr;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ return &cells_[next_unused_cell_++];
}
// Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +220,9 @@ class FixedHashMap {
// Number of cells.
size_t const num_cells_;
+ // Number of calls to Insert() that were lost because the hashtable was full.
+ size_t num_inserts_dropped_;
+
// The array of cells. This array is backed by mmapped memory. Lower indices
// are accessed first, higher indices are accessed only when the |free_list_|
// is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +252,8 @@ class TraceEventMemoryOverhead;
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
class BASE_EXPORT AllocationRegister {
public:
// Details about an allocation.
@@ -282,7 +288,10 @@ class BASE_EXPORT AllocationRegister {
// Inserts allocation details into the table. If the address was present
// already, its details are updated. |address| must not be null.
- void Insert(const void* address,
+ //
+ // Returns true if an insert occurred. Inserts may fail because the table
+ // is full.
+ bool Insert(const void* address,
size_t size,
const AllocationContext& context);
@@ -359,6 +368,14 @@ class BASE_EXPORT AllocationRegister {
AllocationMap allocations_;
BacktraceMap backtraces_;
+ // Sentinel used when the |backtraces_| table is full.
+ //
+ // This is a slight abstraction leak to allow for constant propagation. It
+ // knows that the sentinel will be the first item inserted into the table
+ // and that the first index returned will be 0. The constructor DCHECKs
+ // this assumption.
+ enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
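
For the hash map itself, the important behavioral change is that Insert() now has three distinct outcomes rather than two. A sketch of how a caller is expected to distinguish them, modeled on AllocationRegister::Insert() above (InsertOrUpdate is a made-up helper, not part of this change):

    // Sketch only: |Map| stands for any FixedHashMap instantiation.
    template <typename Map, typename Key, typename Value>
    bool InsertOrUpdate(Map* map, const Key& key, const Value& value) {
      auto index_and_flag = map->Insert(key, value);
      if (index_and_flag.second)
        return true;  // Fresh insert at index_and_flag.first.
      if (index_and_flag.first != Map::kInvalidKVIndex) {
        map->Get(index_and_flag.first).second = value;  // Key existed: update.
        return true;
      }
      // {kInvalidKVIndex, false}: the table is full, nothing was stored and
      // num_inserts_dropped() has been incremented.
      return false;
    }
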
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index a74b95634d..6ed1ca8fff 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -426,7 +426,7 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
}
void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
AutoLock lock(lock_);
dump_providers_for_polling_.insert(mdpinfo);
@@ -438,7 +438,7 @@ void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
}
void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
mdpinfo->dump_provider->SuspendFastMemoryPolling();
AutoLock lock(lock_);
@@ -956,34 +956,6 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
return session_state_->IsDumpModeAllowed(dump_mode);
}
-MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
- MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode)
- : dump_provider(dump_provider),
- name(name),
- task_runner(std::move(task_runner)),
- options(options),
- consecutive_failures(0),
- disabled(false),
- whitelisted_for_background_mode(whitelisted_for_background_mode) {}
-
-MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
-
-bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
- if (!a || !b)
- return a.get() < b.get();
- // Ensure that unbound providers (task_runner == nullptr) always run last.
- // Rationale: some unbound dump providers are known to be slow, keep them last
- // to avoid skewing timings of the other dump providers.
- return std::tie(a->task_runner, a->dump_provider) >
- std::tie(b->task_runner, b->dump_provider);
-}
-
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index ebee048691..e7f5194850 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -9,7 +9,7 @@
#include <map>
#include <memory>
-#include <set>
+#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
@@ -19,6 +19,7 @@
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_provider_info.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
@@ -170,70 +171,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend class MemoryDumpScheduler;
friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
- // Descriptor used to hold information about registered MDPs.
- // Some important considerations about lifetime of this object:
- // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
- // the |dump_providers_| collection (% unregistration while dumping).
- // - Upon each dump they (actually their scoped_refptr-s) are copied into
- // the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
- // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
- // inside ProcessMemoryDumpAsyncState is removed.
- // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
- // - If UnregisterDumpProvider() is called while a dump is in progress, the
- // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
- // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
- // - The non-const fields of MemoryDumpProviderInfo are safe to access only
- // on tasks running in the |task_runner|, unless the thread has been
- // destroyed.
- struct MemoryDumpProviderInfo
- : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- // Define a total order based on the |task_runner| affinity, so that MDPs
- // belonging to the same SequencedTaskRunner are adjacent in the set.
- struct Comparator {
- bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpProviderInfo>& b) const;
- };
- using OrderedSet =
- std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
-
- MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode);
-
- MemoryDumpProvider* const dump_provider;
-
- // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
- // nullptr in all other cases.
- std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
-
- // Human readable name, for debugging and testing. Not necessarily unique.
- const char* const name;
-
- // The task runner affinity. Can be nullptr, in which case the dump provider
- // will be invoked on |dump_thread_|.
- const scoped_refptr<SequencedTaskRunner> task_runner;
-
- // The |options| arg passed to RegisterDumpProvider().
- const MemoryDumpProvider::Options options;
-
- // For fail-safe logic (auto-disable failing MDPs).
- int consecutive_failures;
-
- // Flagged either by the auto-disable logic or during unregistration.
- bool disabled;
-
- // True if the dump provider is whitelisted for background mode.
- const bool whitelisted_for_background_mode;
-
- private:
- friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
- ~MemoryDumpProviderInfo();
-
- DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
- };
-
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfil an asynchronous CreateProcessDump()
// request. At any time exactly one task runner owns a
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index e037fd4982..e126edd397 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -32,6 +32,7 @@
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -104,7 +105,7 @@ void OnTraceDataCollected(Closure quit_closure,
// Posts |task| to |task_runner| and blocks until it is executed.
void PostTaskAndWait(const tracked_objects::Location& from_here,
SequencedTaskRunner* task_runner,
- base::Closure task) {
+ base::OnceClosure task) {
base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner->PostTask(from_here, std::move(task));
@@ -115,8 +116,6 @@ void PostTaskAndWait(const tracked_objects::Location& from_here,
event.Wait();
}
-} // namespace
-
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
@@ -183,14 +182,14 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override {
NOTREACHED();
return false;
}
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override {
num_of_post_tasks_++;
if (enabled_) {
@@ -213,6 +212,8 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned num_of_post_tasks_;
};
+} // namespace
+
class MemoryDumpManagerTest : public testing::Test {
public:
MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -439,7 +440,13 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
-TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+// Flaky on iOS, see crbug.com/706874
+#if defined(OS_IOS)
+#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
+#else
+#define MAYBE_RegistrationConsistency RegistrationConsistency
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
@@ -1013,7 +1020,13 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
-TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
+// Flaky on iOS, see crbug.com/706961
+#if defined(OS_IOS)
+#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
+#else
+#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
base::WaitableEvent tracing_disabled_event(
WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
diff --git a/base/trace_event/memory_dump_provider_info.cc b/base/trace_event/memory_dump_provider_info.cc
new file mode 100644
index 0000000000..6bb711018b
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_provider_info.h"
+
+#include <tuple>
+
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+ MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
+ : dump_provider(dump_provider),
+ options(options),
+ name(name),
+ task_runner(std::move(task_runner)),
+ whitelisted_for_background_mode(whitelisted_for_background_mode),
+ consecutive_failures(0),
+ disabled(false) {}
+
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpProviderInfo::Comparator::operator()(
+ const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const {
+ if (!a || !b)
+ return a.get() < b.get();
+ // Ensure that unbound providers (task_runner == nullptr) always run last.
+ // Rationale: some unbound dump providers are known to be slow, keep them last
+ // to avoid skewing timings of the other dump providers.
+ return std::tie(a->task_runner, a->dump_provider) >
+ std::tie(b->task_runner, b->dump_provider);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/memory_dump_provider_info.h b/base/trace_event/memory_dump_provider_info.h
new file mode 100644
index 0000000000..ca63a987b2
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Wraps a MemoryDumpProvider (MDP), which is registered via
+// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
+// required to deal with it (which task runner it should be invoked onto,
+// whether it has been disabled, etc.)
+// More importantly, having a refptr to this object guarantees that a MDP that
+// is not thread-bound (hence which can only be unregistered via
+// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
+// refptr is held.
+//
+// Lifetime:
+// At any time, there is at most one instance of this class for each instance
+// of a given MemoryDumpProvider, but there might be several scoped_refptrs
+// holding onto it. Specifically:
+// - In nominal conditions, there is a refptr for each registered MDP in the
+// MDM's |dump_providers_| list.
+// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
+// by MDM::UnregisterDumpProvider().
+// - However, when MDM starts a dump, the list of refptrs is copied into the
+// ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
+// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
+// dump is in progress, the extra refptr of the handle is destroyed in
+// MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
+// inside ProcessMemoryDumpAsyncState is erase()-d.
+// - The PeakDetector can keep extra refptrs when enabled.
+struct BASE_EXPORT MemoryDumpProviderInfo
+ : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ public:
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
+ struct Comparator {
+ bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+ };
+ using OrderedSet =
+ std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+ MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
+
+ // It is safe to access the const fields below from any thread as they are
+ // never mutated.
+
+ MemoryDumpProvider* const dump_provider;
+
+ // The |options| arg passed to MDM::RegisterDumpProvider().
+ const MemoryDumpProvider::Options options;
+
+ // Human readable name, not unique (distinct MDP instances might have the same
+ // name). Used for debugging, testing and whitelisting for BACKGROUND mode.
+ const char* const name;
+
+ // The task runner on which the MDP::OnMemoryDump call should be posted.
+ // Can be nullptr, in which case the MDP will be invoked on a background
+ // thread handled by MDM.
+ const scoped_refptr<SequencedTaskRunner> task_runner;
+
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
+ // These fields below, instead, are not thread safe and can be mutated only:
+ // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
+ // - By the MDM's background thread (or in any other way that guarantees
+ // sequencing) for non-thread-bound MDPs.
+
+ // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+ // nullptr in all other cases.
+ std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+ // For fail-safe logic (auto-disable failing MDPs).
+ int consecutive_failures;
+
+ // Flagged either by the auto-disable logic or during unregistration.
+ bool disabled;
+
+ private:
+ friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+ ~MemoryDumpProviderInfo();
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
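
The Comparator is carried over verbatim, so the ordering guarantee is unchanged: the set is sorted by descending (task_runner, dump_provider) pointer tuples, which pushes unbound providers (null task_runner) to the end. A standalone sketch of that property (toy types, not Chromium code):

    #include <cstdio>
    #include <set>
    #include <tuple>

    struct Info {
      const void* task_runner;    // nullptr models an unbound provider.
      const void* dump_provider;
    };

    struct Comparator {
      bool operator()(const Info* a, const Info* b) const {
        // Same total order as MemoryDumpProviderInfo::Comparator.
        return std::tie(a->task_runner, a->dump_provider) >
               std::tie(b->task_runner, b->dump_provider);
      }
    };

    int main() {
      int runner = 0, mdp1 = 0, mdp2 = 0, mdp3 = 0;
      Info bound1{&runner, &mdp1}, bound2{&runner, &mdp2};
      Info unbound{nullptr, &mdp3};
      std::set<const Info*, Comparator> providers{&unbound, &bound1, &bound2};
      for (const Info* info : providers)
        std::printf("%s\n", info->task_runner ? "bound" : "unbound");
      // Prints two "bound" lines, then "unbound": slow unbound providers run
      // last and do not skew the timings of the others.
    }
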
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index 66ea6c9f1a..150feb8e79 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -171,9 +171,11 @@ void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
}
void MemoryDumpScheduler::PollMemoryOnPollingThread() {
- if (polling_state_->current_state != PollingTriggerState::ENABLED)
+ if (!polling_state_)
return;
+ DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);
+
uint64_t polled_memory = 0;
bool res = mdm_->PollFastMemoryTotal(&polled_memory);
DCHECK(res);
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 3df09992b1..7ee9a4a101 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -186,7 +186,7 @@ bool TraceConfig::EventFilterConfig::GetArgAsSet(
}
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
@@ -277,7 +277,7 @@ std::string TraceConfig::ToCategoryFilterString() const {
}
bool TraceConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
return category_filter_.IsCategoryGroupEnabled(category_group_name);
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 29edc9a8ec..13b2f5f0ee 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -103,7 +103,7 @@ class BASE_EXPORT TraceConfig {
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
base::DictionaryValue* filter_args() const { return args_.get(); }
@@ -231,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
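
The const char* to StringPiece migration is source-compatible for existing callers, since base::StringPiece converts implicitly from both string literals and std::string; the gain is that callers can now pass non-NUL-terminated slices without building a temporary string. A hedged illustration (IsEnabled() and its body are stand-ins, not the real TraceConfig API):

    #include <string>
    #include "base/strings/string_piece.h"

    // Stand-in for TraceConfig::IsCategoryGroupEnabled(const StringPiece&).
    bool IsEnabled(const base::StringPiece& category_group_name) {
      return !category_group_name.empty();  // Illustrative body only.
    }

    void Callers() {
      std::string dynamic_name = "cat1,cat2";
      IsEnabled("gpu");         // String literal: implicit conversion, as before.
      IsEnabled(dynamic_name);  // std::string: implicit conversion, no copy.
      IsEnabled(base::StringPiece(dynamic_name).substr(0, 4));  // New: a slice.
    }
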
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
index dc30e0ea99..234db18c5c 100644
--- a/base/trace_event/trace_config_category_filter.cc
+++ b/base/trace_event/trace_config_category_filter.cc
@@ -45,9 +45,9 @@ TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
void TraceConfigCategoryFilter::InitializeFromString(
const StringPiece& category_filter_string) {
- std::vector<std::string> split =
- SplitString(category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- for (const std::string& category : split) {
+ std::vector<StringPiece> split = SplitStringPiece(
+ category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const StringPiece& category : split) {
// Ignore empty categories.
if (category.empty())
continue;
@@ -55,23 +55,22 @@ void TraceConfigCategoryFilter::InitializeFromString(
if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
CompareCase::SENSITIVE) &&
category.back() == ')') {
- std::string synthetic_category = category.substr(
+ StringPiece synthetic_category = category.substr(
strlen(kSyntheticDelayCategoryFilterPrefix),
category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
size_t name_length = synthetic_category.find(';');
if (name_length != std::string::npos && name_length > 0 &&
name_length != synthetic_category.size() - 1) {
- synthetic_delays_.push_back(synthetic_category);
+ synthetic_delays_.push_back(synthetic_category.as_string());
}
} else if (category.front() == '-') {
// Excluded categories start with '-'.
// Remove '-' from category string.
- excluded_categories_.push_back(category.substr(1));
- } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
+ excluded_categories_.push_back(category.substr(1).as_string());
+ } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
+ disabled_categories_.push_back(category.as_string());
} else {
- included_categories_.push_back(category);
+ included_categories_.push_back(category.as_string());
}
}
}
@@ -88,17 +87,17 @@ void TraceConfigCategoryFilter::InitializeFromConfigDict(
}
bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
bool had_enabled_by_default = false;
- DCHECK(category_group_name);
- std::string category_group_name_str = category_group_name;
- StringTokenizer category_group_tokens(category_group_name_str, ",");
+ DCHECK(!category_group_name.empty());
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
+ StringPiece category_group_token = category_group_tokens.token_piece();
// Don't allow empty tokens, nor tokens with leading or trailing space.
DCHECK(IsCategoryNameAllowed(category_group_token))
<< "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str()))
+ if (IsCategoryEnabled(category_group_token))
return true;
if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
@@ -109,7 +108,7 @@ bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
category_group_tokens.Reset();
bool category_group_disabled = false;
while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
+ StringPiece category_group_token = category_group_tokens.token_piece();
for (const std::string& category : excluded_categories_) {
if (MatchPattern(category_group_token, category)) {
// Current token of category_group_name is present in excluded_list.
@@ -140,7 +139,7 @@ bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
}
bool TraceConfigCategoryFilter::IsCategoryEnabled(
- const char* category_name) const {
+ const StringPiece& category_name) const {
// Check the disabled- filters and the disabled-* wildcard first so that a
// "*" filter does not include the disabled.
for (const std::string& category : disabled_categories_) {
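
The parsing change above trades SplitString() plus std::string tokens for SplitStringPiece(), so category tokens alias the input buffer and are only copied (via as_string()) when they must outlive the parse. A minimal sketch with a made-up filter string:

    #include "base/strings/string_piece.h"
    #include "base/strings/string_split.h"

    void ParseFilterSketch() {
      base::StringPiece filter("gpu,-benchmark,disabled-by-default-memory-infra");
      // Each piece aliases |filter|; no per-category std::string is allocated.
      for (base::StringPiece category : base::SplitStringPiece(
               filter, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
        if (category.starts_with("-")) {
          // Excluded category; category.substr(1) is still just a view.
        }
        // Copy with category.as_string() only when storing it long-term,
        // exactly as InitializeFromString() does above.
      }
    }
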
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
index df8c3a5b2a..0d7dba0374 100644
--- a/base/trace_event/trace_config_category_filter.h
+++ b/base/trace_event/trace_config_category_filter.h
@@ -40,13 +40,13 @@ class BASE_EXPORT TraceConfigCategoryFilter {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Returns true if the category is enabled according to this trace config.
// This tells whether a category is enabled from the TraceConfig's
// perspective. Please refer to IsCategoryGroupEnabled() to determine if a
// category is enabled from the tracing runtime's perspective.
- bool IsCategoryEnabled(const char* category_name) const;
+ bool IsCategoryEnabled(const StringPiece& category_name) const;
void ToDict(DictionaryValue* dict) const;
diff --git a/base/trace_event/trace_event_synthetic_delay.cc b/base/trace_event/trace_event_synthetic_delay.cc
index b6ce2845c4..cfae7435e9 100644
--- a/base/trace_event/trace_event_synthetic_delay.cc
+++ b/base/trace_event/trace_event_synthetic_delay.cc
@@ -4,6 +4,7 @@
#include "base/macros.h"
#include "base/memory/singleton.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
namespace {
@@ -80,6 +81,7 @@ void TraceEventSyntheticDelay::Begin() {
// calculation is done with a lock held, it will always be correct. The only
// downside of this is that we may fail to apply some delays when the target
// duration changes.
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue())
return;
@@ -94,6 +96,7 @@ void TraceEventSyntheticDelay::Begin() {
void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
// See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue()) {
*out_end_time = TimeTicks();
return;
@@ -108,6 +111,7 @@ void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
void TraceEventSyntheticDelay::End() {
// See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue())
return;
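
ANNOTATE_BENIGN_RACE comes from base/third_party/dynamic_annotations and tells race detectors (TSan) that an unsynchronized read of a field is intentional. A sketch of the pattern being applied here (the class below is illustrative, not the real TraceEventSyntheticDelay):

    #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
    #include "base/time/time.h"

    class DelaySketch {
     public:
      void MaybeDelay() {
        // A stale read of |target_duration_| only means a delay is
        // occasionally skipped; it can never corrupt state, so the race is
        // declared benign instead of being guarded by the lock.
        ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
        if (!target_duration_.ToInternalValue())
          return;
        // ... take the lock and apply the delay, as Begin() does ...
      }

     private:
      base::TimeDelta target_duration_;
    };
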
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 85e1e16312..7a30e4ee57 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -2158,7 +2158,6 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
trace_log->SetDisabled();
}
-
class MyData : public ConvertableToTraceFormat {
public:
MyData() {}