Diffstat (limited to 'base/trace_event/memory_dump_manager.cc')
 base/trace_event/memory_dump_manager.cc | 314 ++++++++++++++++++++----------
 1 file changed, 204 insertions(+), 110 deletions(-)
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index eed070a782..5a54a773c5 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -7,21 +7,26 @@
#include <algorithm>
#include <utility>
+#include "base/allocator/features.h"
#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
@@ -33,10 +38,6 @@
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif
-#if defined(OS_WIN)
-#include "base/trace_event/winheap_dump_provider_win.h"
-#endif
-
namespace base {
namespace trace_event {
@@ -49,6 +50,31 @@ const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;
+// The list of names of dump providers that are blacklisted from the strict
+// thread affinity check on unregistration. These providers could potentially
+// cause crashes on build bots if they do not unregister on the right thread.
+// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
+// blacklist, crbug.com/643438.
+const char* const kStrictThreadCheckBlacklist[] = {
+ "ClientDiscardableSharedMemoryManager",
+ "ContextProviderCommandBuffer",
+ "DiscardableSharedMemoryManager",
+ "FontCaches",
+ "GpuMemoryBufferVideoFramePool",
+ "IndexedDBBackingStore",
+ "Sql",
+ "ThreadLocalEventBuffer",
+ "TraceLog",
+ "URLRequestContext",
+ "VpxVideoDecoder",
+ "cc::SoftwareImageDecodeCache",
+ "cc::StagingBufferPool",
+ "gpu::BufferManager",
+ "gpu::MappedMemoryManager",
+ "gpu::RenderbufferManager",
+ "BlacklistTestDumpProvider" // for testing
+};
+
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -110,8 +136,6 @@ const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
MallocDumpProvider::kAllocatedObjects;
-#elif defined(OS_WIN)
- WinHeapDumpProvider::kAllocatedObjects;
#else
nullptr;
#endif
@@ -142,6 +166,9 @@ MemoryDumpManager::MemoryDumpManager()
// At this point the command line may not be initialized but we try to
// enable the heap profiler to capture allocations as soon as possible.
EnableHeapProfilingIfNeeded();
+
+ strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
+ std::end(kStrictThreadCheckBlacklist));
}
MemoryDumpManager::~MemoryDumpManager() {
@@ -162,18 +189,20 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
if (profiling_mode == "") {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
- }
- else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
#if HAVE_TRACE_STACK_FRAME_POINTERS && \
(BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+ } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
// We need frame pointers for native tracing to work, and they are
// enabled in profiling and debug builds.
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::NATIVE_STACK);
-#else
- CHECK(false) << "'" << profiling_mode << "' mode for "
- << switches::kEnableHeapProfiling << " flag is not supported "
- << "for this platform / build type.";
+#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
+ // Enable heap tracking, which in turn enables capture of heap usage
+ // tracking in tracked_objects.cc.
+ if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+ base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
#endif
} else {
CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
@@ -206,14 +235,33 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
nullptr);
#endif
-#if defined(OS_WIN)
- RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
-#endif
+ TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
+
+ // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
+ // more allocations (crbug.com/625170).
+ if (AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
+ !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
+ // Create trace config with heap profiling filter.
+ TraceConfig::EventFilterConfig heap_profiler_filter_config(
+ HeapProfilerEventFilter::kName);
+ heap_profiler_filter_config.AddIncludedCategory("*");
+ heap_profiler_filter_config.AddIncludedCategory(
+ MemoryDumpManager::kTraceCategory);
+ TraceConfig::EventFilters filters;
+ filters.push_back(heap_profiler_filter_config);
+ TraceConfig filtering_trace_config;
+ filtering_trace_config.SetEventFilters(filters);
+
+ TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
+ TraceLog::FILTERING_MODE);
+ }
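The explicit AddIncludedCategory(kTraceCategory) alongside "*" above is presumably needed because the "*" pattern does not match disabled-by-default-* categories, and the memory-infra category is one of them.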
// If tracing was enabled before initializing MemoryDumpManager, we missed the
// OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ // IsEnabled is called before adding the observer to avoid calling
+ // OnTraceLogEnabled() twice.
bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
TraceLog::GetInstance()->AddEnabledStateObserver(this);
if (is_tracing_already_enabled)
OnTraceLogEnabled();
@@ -262,6 +310,11 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
whitelisted_for_background_mode);
+ if (options.is_fast_polling_supported) {
+ DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
+ "polling must NOT be thread bound.";
+ }
+
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -269,6 +322,15 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
// path for RenderThreadImpl::Init().
if (already_registered)
return;
+
+ // The list of polling MDPs is populated in OnTraceLogEnabled(). This code
+ // deals with the case of an MDP capable of fast polling that is registered
+ // after OnTraceLogEnabled() has already been called.
+ if (options.is_fast_polling_supported && dump_thread_) {
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
+ Unretained(this), mdpinfo));
+ }
}
if (heap_profiling_enabled_)
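For illustration, registering a provider that opts into fast polling would look roughly like the sketch below; MyPollingProvider and my_polling_provider are hypothetical, and per the DCHECK above the task runner must be null:

    // Sketch (names hypothetical): fast-polling providers must not be thread
    // bound, so no task runner is passed.
    base::trace_event::MemoryDumpProvider::Options options;
    options.is_fast_polling_supported = true;
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        my_polling_provider, "MyPollingProvider", nullptr, options);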
@@ -307,9 +369,18 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
// removed from |pending_dump_providers|.
+ // - When the provider is removed from |dump_providers_for_polling_|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
- } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
+ subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ // If the dump provider's name is on |strict_thread_check_blacklist_|, the
+ // DCHECK fires only when tracing is enabled. Otherwise the DCHECK fires
+ // even when tracing is not enabled (stricter).
+ // TODO(ssid): Remove this condition after removing all the dump providers
+ // in the blacklist and the buildbots are no longer flakily hitting the
+ // DCHECK, crbug.com/643438.
+
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
// - The MDP has specified a sequenced task runner affinity AND the
@@ -325,6 +396,13 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
+ if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
+ DCHECK(take_mdp_ownership_and_delete_async);
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
+ Unretained(this), *mdp_iter));
+ }
+
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -334,6 +412,28 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
+void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.insert(mdpinfo);
+
+ // Notify ready for polling when the first polling-supported provider is
+ // registered. This handles the case where OnTraceLogEnabled() did not notify
+ // ready because no polling-supported MDP had been registered yet.
+ if (dump_providers_for_polling_.size() == 1)
+ dump_scheduler_->NotifyPollingSupported();
+}
+
+void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.erase(mdpinfo);
+ DCHECK(!dump_providers_for_polling_.empty())
+ << "Not all polling MDPs can be unregistered.";
+}
+
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
@@ -413,8 +513,10 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
// with disallowed modes. If |session_state_| is null then tracing is
// disabled.
CHECK(!session_state_ ||
- session_state_->memory_dump_config().allowed_dump_modes.count(
- args.level_of_detail));
+ session_state_->IsDumpModeAllowed(args.level_of_detail));
+
+ if (dump_scheduler_)
+ dump_scheduler_->NotifyDumpTriggered();
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -570,6 +672,16 @@ void MemoryDumpManager::InvokeOnMemoryDump(
TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
"dump_provider.name", mdpinfo->name);
+ // A stack-allocated copy of the dump provider name is useful for debugging
+ // crashes that happen when a dump is invoked after a |dump_provider| has
+ // been unregistered in an unsafe way; base::debug::Alias() below keeps the
+ // copy from being optimized out so it is visible in crash dumps.
+ // TODO(ssid): Remove this after fixing crbug.com/643438.
+ char provider_name_for_debugging[16];
+ strncpy(provider_name_for_debugging, mdpinfo->name,
+ sizeof(provider_name_for_debugging) - 1);
+ provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
+ base::debug::Alias(provider_name_for_debugging);
+
// Pid of the target process being dumped. Often kNullProcessId (= current
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
@@ -587,6 +699,28 @@ void MemoryDumpManager::InvokeOnMemoryDump(
SetupNextMemoryDump(std::move(pmd_async_state));
}
+bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
+#if DCHECK_IS_ON()
+ {
+ AutoLock lock(lock_);
+ if (dump_thread_)
+ DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
+ }
+#endif
+ if (dump_providers_for_polling_.empty())
+ return false;
+
+ *memory_total = 0;
+ // Note that we call PollFastMemoryTotal() even if the dump provider is
+ // disabled (unregistered). This is to avoid taking the lock while polling.
+ for (const auto& mdpinfo : dump_providers_for_polling_) {
+ uint64_t value = 0;
+ mdpinfo->dump_provider->PollFastMemoryTotal(&value);
+ *memory_total += value;
+ }
+ return true;
+}
+
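On the provider side, a minimal sketch of the polling contract implied here; the class and its counter are hypothetical, only the overridden signatures come from MemoryDumpProvider:

    #include <atomic>
    #include <cstdint>

    #include "base/trace_event/memory_dump_provider.h"
    #include "base/trace_event/process_memory_dump.h"

    // Hypothetical provider: PollFastMemoryTotal() must stay cheap and
    // lock-free, since MemoryDumpManager calls it without holding |lock_|.
    class MyPollingProvider : public base::trace_event::MemoryDumpProvider {
     public:
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        return true;  // Detailed dump path elided in this sketch.
      }

      void PollFastMemoryTotal(uint64_t* memory_total) override {
        *memory_total = cached_usage_bytes_.load(std::memory_order_relaxed);
      }

      void SuspendFastMemoryPolling() override {
        // Release any caches kept only to make polling fast.
      }

     private:
      std::atomic<uint64_t> cached_usage_bytes_{0};
    };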
// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
@@ -663,11 +797,15 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- const TraceConfig trace_config =
+ const TraceConfig& trace_config =
TraceLog::GetInstance()->GetCurrentTraceConfig();
+ const TraceConfig::MemoryDumpConfig& memory_dump_config =
+ trace_config.memory_dump_config();
scoped_refptr<MemoryDumpSessionState> session_state =
new MemoryDumpSessionState;
- session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
+ session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
+ session_state->set_heap_profiler_breakdown_threshold_bytes(
+ memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
// deduplicator will be in use. Add metadata events to write the frames
@@ -681,14 +819,26 @@ void MemoryDumpManager::OnTraceLogEnabled() {
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
- WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state, &MemoryDumpSessionState::type_name_deduplicator)));
+ MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
+ session_state, &MemoryDumpSessionState::type_name_deduplicator));
+ }
+
+ std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
+ new MemoryDumpScheduler(this, dump_thread->task_runner()));
+ DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+ trigger.min_time_between_dumps_ms);
}
{
@@ -699,48 +849,65 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
+ dump_scheduler_ = std::move(dump_scheduler);
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
+ dump_providers_for_polling_.clear();
+ for (const auto& mdpinfo : dump_providers_) {
+ if (mdpinfo->options.is_fast_polling_supported)
+ dump_providers_for_polling_.insert(mdpinfo);
}
+ // Notify that polling is supported only if some polling-supported provider
+ // was registered; otherwise RegisterPollingMDPOnDumpThread() will notify
+ // when the first polling MDP registers.
+ if (!dump_providers_for_polling_.empty())
+ dump_scheduler_->NotifyPollingSupported();
+
+ // Only coordinator process triggers periodic global memory dumps.
+ if (is_coordinator_)
+ dump_scheduler_->NotifyPeriodicTriggerSupported();
}
- // Enable periodic dumps if necessary.
- periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
// There might be a memory dump in progress while this happens. Therefore,
// ensure that the MDM state which depends on the tracing enabled / disabled
// state is always accessed by the dumping methods holding the |lock_|.
+ if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
+ return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
+ std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
+ scheduler = std::move(dump_scheduler_);
}
+ scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
- periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
+
+ // |dump_providers_for_polling_| must be cleared only after the dump thread is
+ // stopped (polling tasks are done).
+ {
+ AutoLock lock(lock_);
+ for (const auto& mdpinfo : dump_providers_for_polling_)
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+ dump_providers_for_polling_.clear();
+ }
}
bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
AutoLock lock(lock_);
if (!session_state_)
return false;
- return session_state_->memory_dump_config().allowed_dump_modes.count(
- dump_mode) != 0;
+ return session_state_->IsDumpModeAllowed(dump_mode);
}
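Given the expression it replaces, MemoryDumpSessionState::IsDumpModeAllowed() is presumably a thin wrapper over the set stored by SetAllowedDumpModes() in OnTraceLogEnabled() (a sketch, not the actual implementation):

    // Presumed shape of the new helper on MemoryDumpSessionState.
    bool MemoryDumpSessionState::IsDumpModeAllowed(
        MemoryDumpLevelOfDetail dump_mode) const {
      return allowed_dump_modes_.count(dump_mode) != 0;
    }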
uint64_t MemoryDumpManager::GetTracingProcessId() const {
@@ -806,78 +973,5 @@ ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
return iter->second.get();
}
-MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
-
-MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
- Stop();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
- if (triggers_list.empty())
- return;
-
- // At the moment the periodic support is limited to at most one periodic
- // trigger per dump mode. All intervals should be an integer multiple of the
- // smallest interval specified.
- periodic_dumps_count_ = 0;
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t light_dump_period_ms = 0;
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 3u);
- auto* mdm = MemoryDumpManager::GetInstance();
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK_NE(0u, config.periodic_interval_ms);
- switch (config.level_of_detail) {
- case MemoryDumpLevelOfDetail::BACKGROUND:
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
- break;
- case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, light_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
- light_dump_period_ms = config.periodic_interval_ms;
- break;
- case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, heavy_dump_period_ms);
- DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
- heavy_dump_period_ms = config.periodic_interval_ms;
- break;
- }
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
- }
-
- DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
- light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
-
- timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
- base::Unretained(this)));
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
- if (IsRunning()) {
- timer_.Stop();
- }
-}
-
-bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
- return timer_.IsRunning();
-}
-
-void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
- level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_dumps_count_;
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
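As a worked example of the rate arithmetic removed above: with a LIGHT trigger every 250 ms and a DETAILED trigger every 2000 ms, min_timer_period_ms is 250, so light_dump_rate_ = 250 / 250 = 1 and heavy_dump_rate_ = 2000 / 250 = 8. The timer fired every 250 ms, every tick requested at least a LIGHT dump, and every 8th tick was upgraded to DETAILED. The MemoryDumpScheduler wired up in OnTraceLogEnabled() now owns this scheduling through AddTrigger().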
} // namespace trace_event
} // namespace base