author     Hidehiko Abe <hidehiko@google.com>   2018-02-21 01:04:53 +0900
committer  Hidehiko Abe <hidehiko@google.com>   2018-02-21 01:15:15 +0900
commit     f810b5921dde57180956b9eadf39a3a2b8cb5855 (patch)
tree       b5545667cc754e2b0745fb59dd891d65d30afaeb
parent     4e42e67fa291bd27b2ffb00be57a4ca9a5000526 (diff)
download   libchrome-f810b5921dde57180956b9eadf39a3a2b8cb5855.tar.gz
Re-uprev to r462023.
Previous uprevs didn't include several CLs. This re-uprevs to r462023.

cf) Missing CLs I found:
https://codereview.chromium.org/2556563002
https://codereview.chromium.org/2754483002
https://codereview.chromium.org/2171833002
https://codereview.chromium.org/2778183003
https://codereview.chromium.org/2500473002
https://codereview.chromium.org/2173523002
https://codereview.chromium.org/2666423002
https://codereview.chromium.org/2723423002
https://codereview.chromium.org/2789463002
https://codereview.chromium.org/2723083004
https://codereview.chromium.org/2637843002
https://codereview.chromium.org/2785943004
https://codereview.chromium.org/2657603004
https://codereview.chromium.org/2774363003
https://codereview.chromium.org/2776853002
https://codereview.chromium.org/2736053003
https://codereview.chromium.org/2779413002
https://codereview.chromium.org/2782503002
https://codereview.chromium.org/2782083003
https://codereview.chromium.org/2399213005
https://codereview.chromium.org/2787383002
https://codereview.chromium.org/2790523004
https://codereview.chromium.org/2787533002
https://codereview.chromium.org/2780983003
https://codereview.chromium.org/2790403003
https://codereview.chromium.org/2747673002
https://codereview.chromium.org/2778173003
https://codereview.chromium.org/2788613004
https://codereview.chromium.org/2781983003
https://codereview.chromium.org/2774223003

Bug: 73270448
Test: Built and ran libchrome_test locally. Run treehugger.
Change-Id: I5e76096d4fcf660571275cce5f4a980a8bb574fe
-rw-r--r--  Android.bp | 1
-rw-r--r--  base/allocator/allocator_shim.cc | 11
-rw-r--r--  base/at_exit.cc | 4
-rw-r--r--  base/bind_unittest.cc | 38
-rw-r--r--  base/callback_internal.cc | 21
-rw-r--r--  base/callback_internal.h | 39
-rw-r--r--  base/command_line.cc | 15
-rw-r--r--  base/command_line_unittest.cc | 31
-rw-r--r--  base/critical_closure.h | 13
-rw-r--r--  base/debug/activity_tracker.cc | 181
-rw-r--r--  base/debug/activity_tracker.h | 75
-rw-r--r--  base/debug/activity_tracker_unittest.cc | 12
-rw-r--r--  base/files/file.h | 10
-rw-r--r--  base/files/file_path.cc | 8
-rw-r--r--  base/files/file_path.h | 2
-rw-r--r--  base/files/file_path_unittest.cc | 2
-rw-r--r--  base/memory/ref_counted.cc | 31
-rw-r--r--  base/memory/ref_counted.h | 211
-rw-r--r--  base/memory/ref_counted_unittest.cc | 38
-rw-r--r--  base/message_loop/incoming_task_queue.cc | 3
-rw-r--r--  base/message_loop/incoming_task_queue.h | 2
-rw-r--r--  base/message_loop/message_loop_task_runner.cc | 4
-rw-r--r--  base/message_loop/message_loop_task_runner.h | 4
-rw-r--r--  base/message_loop/message_loop_task_runner_unittest.cc | 11
-rw-r--r--  base/message_loop/message_loop_unittest.cc | 31
-rw-r--r--  base/message_loop/message_pump_glib_unittest.cc | 1
-rw-r--r--  base/message_loop/message_pump_libevent.cc | 1
-rw-r--r--  base/metrics/histogram_macros.h | 14
-rw-r--r--  base/metrics/histogram_macros_internal.h | 61
-rw-r--r--  base/metrics/histogram_macros_unittest.cc | 31
-rw-r--r--  base/metrics/sparse_histogram.cc | 8
-rw-r--r--  base/native_library.h | 10
-rw-r--r--  base/post_task_and_reply_with_result_internal.h | 9
-rw-r--r--  base/power_monitor/power_monitor_device_source.h | 1
-rw-r--r--  base/process/launch.h | 5
-rw-r--r--  base/process/launch_posix.cc | 8
-rw-r--r--  base/process/process_metrics.h | 23
-rw-r--r--  base/process/process_metrics_unittest.cc | 40
-rw-r--r--  base/sequence_checker_impl.cc | 4
-rw-r--r--  base/sequenced_task_runner.cc | 2
-rw-r--r--  base/sequenced_task_runner.h | 4
-rw-r--r--  base/task_runner.cc | 10
-rw-r--r--  base/task_runner.h | 8
-rw-r--r--  base/task_scheduler/sequence.cc | 2
-rw-r--r--  base/task_scheduler/sequence.h | 2
-rw-r--r--  base/task_scheduler/sequence_unittest.cc | 11
-rw-r--r--  base/task_scheduler/task.cc | 2
-rw-r--r--  base/task_scheduler/task.h | 2
-rw-r--r--  base/template_util.h | 61
-rw-r--r--  base/template_util_unittest.cc | 33
-rw-r--r--  base/test/test_mock_time_task_runner.cc | 8
-rw-r--r--  base/test/test_mock_time_task_runner.h | 4
-rw-r--r--  base/test/test_pending_task.cc | 2
-rw-r--r--  base/test/test_pending_task.h | 2
-rw-r--r--  base/test/test_simple_task_runner.cc | 4
-rw-r--r--  base/test/test_simple_task_runner.h | 4
-rw-r--r--  base/threading/post_task_and_reply_impl.cc | 26
-rw-r--r--  base/threading/post_task_and_reply_impl.h | 6
-rw-r--r--  base/threading/sequenced_worker_pool.cc | 75
-rw-r--r--  base/threading/sequenced_worker_pool.h | 17
-rw-r--r--  base/threading/thread.cc | 2
-rw-r--r--  base/threading/thread_unittest.cc | 4
-rw-r--r--  base/threading/worker_pool.cc | 14
-rw-r--r--  base/threading/worker_pool.h | 6
-rw-r--r--  base/threading/worker_pool_posix.cc | 10
-rw-r--r--  base/threading/worker_pool_posix.h | 2
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.cc | 80
-rw-r--r--  base/trace_event/heap_profiler_allocation_register.h | 79
-rw-r--r--  base/trace_event/memory_dump_manager.cc | 32
-rw-r--r--  base/trace_event/memory_dump_manager.h | 67
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc | 27
-rw-r--r--  base/trace_event/memory_dump_provider_info.cc | 43
-rw-r--r--  base/trace_event/memory_dump_provider_info.h | 108
-rw-r--r--  base/trace_event/memory_dump_scheduler.cc | 4
-rw-r--r--  base/trace_event/trace_config.cc | 4
-rw-r--r--  base/trace_event/trace_config.h | 4
-rw-r--r--  base/trace_event/trace_config_category_filter.cc | 35
-rw-r--r--  base/trace_event/trace_config_category_filter.h | 4
-rw-r--r--  base/trace_event/trace_event_synthetic_delay.cc | 4
-rw-r--r--  base/trace_event/trace_event_unittest.cc | 1
-rw-r--r--  base/values.cc | 91
-rw-r--r--  base/values.h | 14
-rw-r--r--  base/values_unittest.cc | 76
83 files changed, 1286 insertions(+), 724 deletions(-)
diff --git a/Android.bp b/Android.bp
index eaa5cc5849..c208ed4e9a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -249,6 +249,7 @@ libchromeCommonSrc = [
"base/trace_event/memory_allocator_dump.cc",
"base/trace_event/memory_allocator_dump_guid.cc",
"base/trace_event/memory_dump_manager.cc",
+ "base/trace_event/memory_dump_provider_info.cc",
"base/trace_event/memory_dump_request_args.cc",
"base/trace_event/memory_dump_scheduler.cc",
"base/trace_event/memory_dump_session_state.cc",
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index f511b53088..0caeb73490 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -131,16 +131,7 @@ void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
}
}
- // This function does not guarantee to be thread-safe w.r.t. concurrent
- // insertions, but still has to guarantee that all the threads always
- // see a consistent chain, hence the MemoryBarrier() below.
- // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
- // we don't really want this to be a release-store with a corresponding
- // acquire-load during malloc().
- subtle::MemoryBarrier();
-
- subtle::NoBarrier_Store(&g_chain_head,
- reinterpret_cast<subtle::AtomicWord>(dispatch));
+ CHECK(false); // Too many retries, this shouldn't happen.
}
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
diff --git a/base/at_exit.cc b/base/at_exit.cc
index 5dcc83cb2f..e0025ea0d3 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -81,6 +81,10 @@ void AtExitManager::ProcessCallbacksNow() {
g_top_manager->processing_callbacks_ = true;
}
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // It's safe since all other threads should be terminated at this point.
+ ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
+
while (!tasks.empty()) {
base::Closure task = tasks.top();
task.Run();
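Illustration (not part of this commit): a minimal sketch of the guard pattern the at_exit.cc change relies on. ScopedAllowCrossThreadRefCountAccess suppresses the new sequence check on non-thread-safe RefCounted objects for code that can guarantee single-threaded access by other means; the helper name below is hypothetical.

    #include <stack>

    #include "base/callback.h"
    #include "base/memory/ref_counted.h"

    // Hypothetical helper: assumes every other thread has already been joined,
    // so dropping references last touched on those threads cannot race.
    void RunFinalCleanup(std::stack<base::Closure>* tasks) {
      base::ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
      while (!tasks->empty()) {
        tasks->top().Run();  // May release the last reference held by the closure.
        tasks->pop();
      }
    }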
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index a9ca9d2538..0de9294894 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -1309,65 +1309,59 @@ TEST_F(BindTest, OnceCallback) {
static_assert(std::is_constructible<
RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copyable.");
- static_assert(is_assignable<
- RepeatingClosure, const RepeatingClosure&>::value,
+ static_assert(
+ std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copy-assignable.");
// Move constructor and assignment of RepeatingCallback.
static_assert(std::is_constructible<
RepeatingClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be movable.");
- static_assert(is_assignable<
- RepeatingClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be move-assignable");
+ static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be move-assignable");
// Conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, const OnceClosure&>::value,
+ static_assert(
+ !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
// Destructive conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, OnceClosure&&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, OnceClosure&&>::value,
- "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
// Copy constructor and assignment of OnceCallback.
static_assert(!std::is_constructible<
OnceClosure, const OnceClosure&>::value,
"OnceClosure should not be copyable.");
- static_assert(!is_assignable<
- OnceClosure, const OnceClosure&>::value,
- "OnceClosure should not be copy-assignable");
+ static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+ "OnceClosure should not be copy-assignable");
// Move constructor and assignment of OnceCallback.
static_assert(std::is_constructible<
OnceClosure, OnceClosure&&>::value,
"OnceClosure should be movable.");
- static_assert(is_assignable<
- OnceClosure, OnceClosure&&>::value,
- "OnceClosure should be move-assignable.");
+ static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+ "OnceClosure should be move-assignable.");
// Conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, const RepeatingClosure&>::value,
- "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
// Destructive conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be covretible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be covretible to OnceClosure.");
OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
std::move(cb).Run();
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 4330e9cce5..a760f0664c 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -17,6 +17,10 @@ bool ReturnFalse(const BindStateBase*) {
} // namespace
+void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
+ bind_state->destructor_(bind_state);
+}
+
BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*))
: BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
@@ -26,19 +30,9 @@ BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*))
: polymorphic_invoke_(polymorphic_invoke),
- ref_count_(0),
destructor_(destructor),
is_cancelled_(is_cancelled) {}
-void BindStateBase::AddRef() const {
- AtomicRefCountInc(&ref_count_);
-}
-
-void BindStateBase::Release() const {
- if (!AtomicRefCountDec(&ref_count_))
- destructor_(this);
-}
-
CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
CallbackBase<CopyMode::MoveOnly>&
@@ -80,10 +74,9 @@ bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
return bind_state_ == other.bind_state_;
}
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(
- BindStateBase* bind_state)
- : bind_state_(bind_state) {
- DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(BindStateBase* bind_state)
+ : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
+ DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
}
CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
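Illustration (not part of this commit): the RefCountedThreadSafe-with-custom-traits pattern that BindStateBase now uses, reduced to a hypothetical class. The traits' Destruct() stands in for BindStateBase's destructor_ indirection, and the first reference has to be adopted because of REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE().

    #include "base/memory/ref_counted.h"

    class Buffer;  // Hypothetical example class.

    struct BufferRefCountTraits {
      static void Destruct(const Buffer* buffer);
    };

    class Buffer : public base::RefCountedThreadSafe<Buffer, BufferRefCountTraits> {
     public:
      REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();  // Ref count starts at one.
      Buffer() = default;

     private:
      friend struct BufferRefCountTraits;
      ~Buffer() = default;
    };

    void BufferRefCountTraits::Destruct(const Buffer* buffer) {
      delete buffer;  // BindStateBase instead forwards to its stored destructor_.
    }

    scoped_refptr<Buffer> MakeBuffer() {
      // AdoptRef() takes over the initial reference instead of incrementing it,
      // mirroring the CallbackBase constructor above.
      return base::AdoptRef(new Buffer);
    }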
diff --git a/base/callback_internal.h b/base/callback_internal.h
index d6dcfeb3c0..29b07c23bd 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -8,17 +8,29 @@
#ifndef BASE_CALLBACK_INTERNAL_H_
#define BASE_CALLBACK_INTERNAL_H_
-#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
namespace base {
+
+struct FakeBindState;
+
namespace internal {
+
template <CopyMode copy_mode>
class CallbackBase;
+class BindStateBase;
+
+template <typename Functor, typename... BoundArgs>
+struct BindState;
+
+struct BindStateBaseRefCountTraits {
+ static void Destruct(const BindStateBase*);
+};
+
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
// behaves as an existential type that is used by a corresponding
@@ -30,38 +42,43 @@ class CallbackBase;
// Creating a vtable for every BindState template instantiation results in a lot
// of bloat. Its only task is to call the destructor which can be done with a
// function pointer.
-class BASE_EXPORT BindStateBase {
+class BASE_EXPORT BindStateBase
+ : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
using InvokeFuncStorage = void(*)();
- protected:
+ private:
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*));
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*));
+
~BindStateBase() = default;
- private:
- friend class scoped_refptr<BindStateBase>;
+ friend struct BindStateBaseRefCountTraits;
+ friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
+
template <CopyMode copy_mode>
friend class CallbackBase;
+ // Whitelist subclasses that access the destructor of BindStateBase.
+ template <typename Functor, typename... BoundArgs>
+ friend struct BindState;
+ friend struct ::base::FakeBindState;
+
bool IsCancelled() const {
return is_cancelled_(this);
}
- void AddRef() const;
- void Release() const;
-
// In C++, it is safe to cast function pointers to function pointers of
// another type. It is not okay to use void*. We create a InvokeFuncStorage
// that that can store our function pointer, and then cast it back to
// the original type on usage.
InvokeFuncStorage polymorphic_invoke_;
- mutable AtomicRefCount ref_count_;
-
// Pointer to a function that will properly destroy |this|.
void (*destructor_)(const BindStateBase*);
bool (*is_cancelled_)(const BindStateBase*);
@@ -86,7 +103,7 @@ class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
// Returns true if Callback is null (doesn't refer to anything).
- bool is_null() const { return bind_state_.get() == NULL; }
+ bool is_null() const { return !bind_state_; }
explicit operator bool() const { return !is_null(); }
// Returns true if the callback invocation will be nop due to an cancellation.
diff --git a/base/command_line.cc b/base/command_line.cc
index 99ea2b0003..873da81348 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
@@ -411,11 +412,15 @@ void CommandLine::AppendArguments(const CommandLine& other,
void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
if (wrapper.empty())
return;
- // The wrapper may have embedded arguments (like "gdb --args"). In this case,
- // we don't pretend to do anything fancy, we just split on spaces.
- StringVector wrapper_argv = SplitString(
- wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL);
+ // Split the wrapper command based on whitespace (with quoting).
+ using CommandLineTokenizer =
+ StringTokenizerT<StringType, StringType::const_iterator>;
+ CommandLineTokenizer tokenizer(wrapper, FILE_PATH_LITERAL(" "));
+ tokenizer.set_quote_chars(FILE_PATH_LITERAL("'\""));
+ std::vector<StringType> wrapper_argv;
+ while (tokenizer.GetNext())
+ wrapper_argv.emplace_back(tokenizer.token());
+
// Prepend the wrapper and update the switches/arguments |begin_args_|.
argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
begin_args_ += wrapper_argv.size();
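Illustration (not part of this commit): the quote-aware split that PrependWrapper now performs, shown as a standalone sketch; the helper name is hypothetical.

    #include <string>
    #include <vector>

    #include "base/strings/string_tokenizer.h"

    // Splits on spaces but keeps quoted regions intact, so
    // "wrapper --foo='hello world'" yields {"wrapper", "--foo='hello world'"}.
    std::vector<std::string> SplitWrapper(const std::string& wrapper) {
      base::StringTokenizer tokenizer(wrapper, " ");
      tokenizer.set_quote_chars("'\"");
      std::vector<std::string> argv;
      while (tokenizer.GetNext())
        argv.push_back(tokenizer.token());
      return argv;
    }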
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index bcfc6c59c9..79c9aecc2a 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -406,4 +406,35 @@ TEST(CommandLineTest, Copy) {
EXPECT_TRUE(assigned.HasSwitch(pair.first));
}
+TEST(CommandLineTest, PrependSimpleWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(FILE_PATH_LITERAL("wrapper --foo --bar"));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar"), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
+TEST(CommandLineTest, PrependComplexWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(
+ FILE_PATH_LITERAL("wrapper --foo='hello world' --bar=\"let's go\""));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo='hello world'"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar=\"let's go\""), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
} // namespace base
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 35ce2b5c46..94c618dfbb 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -29,13 +29,13 @@ bool IsMultiTaskingSupported();
// |ios::ScopedCriticalAction|.
class CriticalClosure {
public:
- explicit CriticalClosure(Closure closure);
+ explicit CriticalClosure(OnceClosure closure);
~CriticalClosure();
void Run();
private:
ios::ScopedCriticalAction critical_action_;
- Closure closure_;
+ OnceClosure closure_;
DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
};
@@ -57,13 +57,14 @@ class CriticalClosure {
// background running time, |MakeCriticalClosure| should be applied on them
// before posting.
#if defined(OS_IOS)
-inline Closure MakeCriticalClosure(Closure closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
DCHECK(internal::IsMultiTaskingSupported());
- return base::Bind(&internal::CriticalClosure::Run,
- Owned(new internal::CriticalClosure(std::move(closure))));
+ return base::BindOnce(
+ &internal::CriticalClosure::Run,
+ Owned(new internal::CriticalClosure(std::move(closure))));
}
#else // defined(OS_IOS)
-inline Closure MakeCriticalClosure(Closure closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
// No-op for platforms where the application does not need to acquire
// background time for closures to finish when it goes into the background.
return closure;
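Illustration (not part of this commit): a hypothetical call site adapted to the new OnceClosure-based signature. On iOS the wrapper acquires background running time for the task; on other platforms it is a pass-through.

    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/critical_closure.h"

    void SaveStateToDisk();  // Hypothetical work that must finish in the background.

    base::OnceClosure MakeCriticalSaveClosure() {
      // The result can be posted to a TaskRunner like any other OnceClosure.
      return base::MakeCriticalClosure(base::BindOnce(&SaveStateToDisk));
    }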
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index 6b492f0e15..5081c1c9d2 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -67,7 +67,7 @@ union ThreadRef {
#endif
};
-// Get the next non-zero identifier. It is only unique within a process.
+// Gets the next non-zero identifier. It is only unique within a process.
uint32_t GetNextDataId() {
uint32_t id;
while ((id = g_next_id.GetNext()) == 0)
@@ -75,6 +75,16 @@ uint32_t GetNextDataId() {
return id;
}
+// Gets the current process-id, either from the GlobalActivityTracker if it
+// exists (where the PID can be defined for testing) or from the system if
+// there isn't such.
+int64_t GetProcessId() {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ if (global)
+ return global->process_id();
+ return GetCurrentProcId();
+}
+
// Finds and reuses a specific allocation or creates a new one.
PersistentMemoryAllocator::Reference AllocateFrom(
PersistentMemoryAllocator* allocator,
@@ -114,15 +124,15 @@ Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
OwningProcess::OwningProcess() {}
OwningProcess::~OwningProcess() {}
-void OwningProcess::Release_Initialize() {
+void OwningProcess::Release_Initialize(int64_t pid) {
uint32_t old_id = data_id.load(std::memory_order_acquire);
DCHECK_EQ(0U, old_id);
- process_id = GetCurrentProcId();
+ process_id = pid != 0 ? pid : GetProcessId();
create_stamp = Time::Now().ToInternalValue();
data_id.store(GetNextDataId(), std::memory_order_release);
}
-void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
+void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
DCHECK_NE(0U, data_id);
process_id = pid;
create_stamp = stamp;
@@ -130,14 +140,14 @@ void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
// static
bool OwningProcess::GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp) {
const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
uint32_t id = info->data_id.load(std::memory_order_acquire);
if (id == 0)
return false;
- *out_id = static_cast<ProcessId>(info->process_id);
+ *out_id = info->process_id;
*out_stamp = info->create_stamp;
return id == info->data_id.load(std::memory_order_seq_cst);
}
@@ -322,12 +332,15 @@ ActivityUserData::MemoryHeader::~MemoryHeader() {}
ActivityUserData::FieldHeader::FieldHeader() {}
ActivityUserData::FieldHeader::~FieldHeader() {}
-ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {}
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
-ActivityUserData::ActivityUserData(void* memory, size_t size)
+ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
: memory_(reinterpret_cast<char*>(memory)),
available_(RoundDownToAlignment(size, kMemoryAlignment)),
- header_(reinterpret_cast<MemoryHeader*>(memory)) {
+ header_(reinterpret_cast<MemoryHeader*>(memory)),
+ orig_data_id(0),
+ orig_process_id(0),
+ orig_create_stamp(0) {
// It's possible that no user data is being stored.
if (!memory_)
return;
@@ -335,10 +348,16 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
DCHECK_LT(sizeof(MemoryHeader), available_);
if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
- header_->owner.Release_Initialize();
+ header_->owner.Release_Initialize(pid);
memory_ += sizeof(MemoryHeader);
available_ -= sizeof(MemoryHeader);
+ // Make a copy of identifying information for later comparison.
+ *const_cast<uint32_t*>(&orig_data_id) =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
+ *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
+
// If there is already data present, load that. This allows the same class
// to be used for analysis through snapshots.
ImportExistingData();
@@ -354,18 +373,18 @@ bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
// class that is adding records.
ImportExistingData();
+ // Add all the values to the snapshot.
for (const auto& entry : values_) {
TypedValue value;
+ const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
value.type_ = entry.second.type;
- DCHECK_GE(entry.second.extent,
- entry.second.size_ptr->load(std::memory_order_relaxed));
+ DCHECK_GE(entry.second.extent, size);
switch (entry.second.type) {
case RAW_VALUE:
case STRING_VALUE:
value.long_value_ =
- std::string(reinterpret_cast<char*>(entry.second.memory),
- entry.second.size_ptr->load(std::memory_order_relaxed));
+ std::string(reinterpret_cast<char*>(entry.second.memory), size);
break;
case RAW_VALUE_REFERENCE:
case STRING_VALUE_REFERENCE: {
@@ -391,6 +410,16 @@ bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
DCHECK(inserted.second); // True if inserted, false if existed.
}
+ // Another import attempt will validate that the underlying memory has not
+ // been reused for another purpose. Entries added since the first import
+ // will be ignored here but will be returned if another snapshot is created.
+ ImportExistingData();
+ if (!memory_) {
+ output_snapshot->clear();
+ return false;
+ }
+
+ // Successful snapshot.
return true;
}
@@ -400,7 +429,7 @@ const void* ActivityUserData::GetBaseAddress() const {
return header_;
}
-void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
+void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
int64_t stamp) {
if (!header_)
return;
@@ -409,7 +438,7 @@ void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
// static
bool ActivityUserData::GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp) {
const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
@@ -524,6 +553,10 @@ void ActivityUserData::SetReference(StringPiece name,
}
void ActivityUserData::ImportExistingData() const {
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
while (available_ > sizeof(FieldHeader)) {
FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
ValueType type =
@@ -555,6 +588,14 @@ void ActivityUserData::ImportExistingData() const {
memory_ += header->record_size;
available_ -= header->record_size;
}
+
+ // Check if memory has been completely reused.
+ if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
+ header_->owner.process_id != orig_process_id ||
+ header_->owner.create_stamp != orig_create_stamp) {
+ memory_ = nullptr;
+ values_.clear();
+ }
}
// This information is kept for every thread that is tracked. It is filled
@@ -895,6 +936,7 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
// structure are valid (at least at the current moment in time).
const uint32_t starting_id =
header_->owner.data_id.load(std::memory_order_acquire);
+ const int64_t starting_create_stamp = header_->owner.create_stamp;
const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
@@ -932,6 +974,7 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
// Get the general thread information.
output_snapshot->thread_name =
std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
+ output_snapshot->create_stamp = header_->owner.create_stamp;
output_snapshot->thread_id = header_->thread_ref.as_id;
output_snapshot->process_id = header_->owner.process_id;
@@ -944,6 +987,7 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
// If the data ID has changed then the tracker has exited and the memory
// reused by a new one. Try again.
if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
+ output_snapshot->create_stamp != starting_create_stamp ||
output_snapshot->process_id != starting_process_id ||
output_snapshot->thread_id != starting_thread_id) {
continue;
@@ -981,14 +1025,14 @@ const void* ThreadActivityTracker::GetBaseAddress() {
return header_;
}
-void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
+void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
int64_t stamp) {
header_->owner.SetOwningProcessIdForTesting(pid, stamp);
}
// static
bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp) {
const Header* header = reinterpret_cast<const Header*>(memory);
return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
@@ -1179,8 +1223,9 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
}
GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
- size_t size)
- : ActivityUserData(memory, size) {}
+ size_t size,
+ int64_t pid)
+ : ActivityUserData(memory, size, pid) {}
GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
@@ -1210,10 +1255,11 @@ GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
void GlobalActivityTracker::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
// There's no need to do anything with the result. It is self-managing.
GlobalActivityTracker* global_tracker =
- new GlobalActivityTracker(std::move(allocator), stack_depth);
+ new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
// Create a tracker for this thread since it is known.
global_tracker->CreateTrackerForCurrentThread();
}
@@ -1239,7 +1285,7 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
DCHECK(success);
CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
std::move(mapped_file), size, id, name, false),
- stack_depth);
+ stack_depth, 0);
}
#endif // !defined(OS_NACL)
@@ -1247,11 +1293,37 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
CreateWithAllocator(
- MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
+ MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth,
+ process_id);
}
+// static
+void GlobalActivityTracker::SetForTesting(
+ std::unique_ptr<GlobalActivityTracker> tracker) {
+ CHECK(!subtle::NoBarrier_Load(&g_tracker_));
+ subtle::Release_Store(&g_tracker_,
+ reinterpret_cast<uintptr_t>(tracker.release()));
+}
+
+// static
+std::unique_ptr<GlobalActivityTracker>
+GlobalActivityTracker::ReleaseForTesting() {
+ GlobalActivityTracker* tracker = Get();
+ if (!tracker)
+ return nullptr;
+
+ // Thread trackers assume that the global tracker is present for some
+ // operations so ensure that there aren't any.
+ tracker->ReleaseTrackerForCurrentThreadForTesting();
+ DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
+
+ subtle::Release_Store(&g_tracker_, 0);
+ return WrapUnique(tracker);
+};
+
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
DCHECK(!this_thread_tracker_.Get());
@@ -1308,8 +1380,10 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
ThreadActivityTracker* tracker =
reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
- if (tracker)
+ if (tracker) {
+ this_thread_tracker_.Set(nullptr);
delete tracker;
+ }
}
void GlobalActivityTracker::SetBackgroundTaskRunner(
@@ -1327,21 +1401,23 @@ void GlobalActivityTracker::SetProcessExitCallback(
void GlobalActivityTracker::RecordProcessLaunch(
ProcessId process_id,
const FilePath::StringType& cmd) {
- DCHECK_NE(GetCurrentProcId(), process_id);
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
base::AutoLock lock(global_tracker_lock_);
- if (base::ContainsKey(known_processes_, process_id)) {
+ if (base::ContainsKey(known_processes_, pid)) {
// TODO(bcwhite): Measure this in UMA.
NOTREACHED() << "Process #" << process_id
<< " was previously recorded as \"launched\""
<< " with no corresponding exit.";
- known_processes_.erase(process_id);
+ known_processes_.erase(pid);
}
#if defined(OS_WIN)
- known_processes_.insert(std::make_pair(process_id, UTF16ToUTF8(cmd)));
+ known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
#else
- known_processes_.insert(std::make_pair(process_id, cmd));
+ known_processes_.insert(std::make_pair(pid, cmd));
#endif
}
@@ -1349,25 +1425,27 @@ void GlobalActivityTracker::RecordProcessLaunch(
ProcessId process_id,
const FilePath::StringType& exe,
const FilePath::StringType& args) {
+ const int64_t pid = process_id;
if (exe.find(FILE_PATH_LITERAL(" "))) {
- RecordProcessLaunch(process_id,
- FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
- FILE_PATH_LITERAL("\" ") + args);
+ RecordProcessLaunch(pid, FilePath::StringType(FILE_PATH_LITERAL("\"")) +
+ exe + FILE_PATH_LITERAL("\" ") + args);
} else {
- RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
+ RecordProcessLaunch(pid, exe + FILE_PATH_LITERAL(' ') + args);
}
}
void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
int exit_code) {
- DCHECK_NE(GetCurrentProcId(), process_id);
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
scoped_refptr<TaskRunner> task_runner;
std::string command_line;
{
base::AutoLock lock(global_tracker_lock_);
task_runner = background_task_runner_;
- auto found = known_processes_.find(process_id);
+ auto found = known_processes_.find(pid);
if (found != known_processes_.end()) {
command_line = std::move(found->second);
known_processes_.erase(found);
@@ -1385,20 +1463,19 @@ void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
task_runner->PostTask(
FROM_HERE,
- Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
- process_id, now_stamp, exit_code, Passed(&command_line)));
+ Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this), pid,
+ now_stamp, exit_code, Passed(&command_line)));
return;
}
- CleanupAfterProcess(process_id, now_stamp, exit_code,
- std::move(command_line));
+ CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
}
void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
process_data().SetInt(kProcessPhaseDataKey, phase);
}
-void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
+void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
int64_t exit_stamp,
int exit_code,
std::string&& command_line) {
@@ -1422,7 +1499,7 @@ void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
const void* memory = allocator_->GetAsArray<char>(
ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
- ProcessId found_id;
+ int64_t found_id;
int64_t create_stamp;
if (ActivityUserData::GetOwningProcessId(memory, &found_id,
&create_stamp)) {
@@ -1459,7 +1536,7 @@ void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
case ModuleInfoRecord::kPersistentTypeId: {
const void* memory = allocator_->GetAsArray<char>(
ref, type, PersistentMemoryAllocator::kSizeAny);
- ProcessId found_id;
+ int64_t found_id;
int64_t create_stamp;
// By convention, the OwningProcess structure is always the first
@@ -1527,9 +1604,11 @@ void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
GlobalActivityTracker::GlobalActivityTracker(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth)
+ int stack_depth,
+ int64_t process_id)
: allocator_(std::move(allocator)),
stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
+ process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
this_thread_tracker_(&OnTLSDestroy),
thread_tracker_count_(0),
thread_tracker_allocator_(allocator_.get(),
@@ -1551,16 +1630,16 @@ GlobalActivityTracker::GlobalActivityTracker(
kTypeIdProcessDataRecord),
kTypeIdProcessDataRecord,
kProcessDataSize),
- kProcessDataSize),
+ kProcessDataSize,
+ process_id_),
global_data_(
allocator_->GetAsArray<char>(
allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
kTypeIdGlobalDataRecord,
kGlobalDataSize),
- kGlobalDataSize) {
- // Ensure the passed memory is valid and empty (iterator finds nothing).
- uint32_t type;
- DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
+ kGlobalDataSize,
+ process_id_) {
+ DCHECK_NE(0, process_id_);
// Ensure that there is no other global object and then make this one such.
DCHECK(!g_tracker_);
@@ -1583,7 +1662,7 @@ GlobalActivityTracker::GlobalActivityTracker(
}
GlobalActivityTracker::~GlobalActivityTracker() {
- DCHECK_EQ(Get(), this);
+ DCHECK(Get() == nullptr || Get() == this);
DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
subtle::Release_Store(&g_tracker_, 0);
}
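Illustration (not part of this commit): how the new testing hooks compose, using assumed values. A tracker created with an explicit PID can be detached via ReleaseForTesting() and re-attached via SetForTesting(), letting a test pose as another process.

    #include <memory>
    #include <utility>

    #include "base/debug/activity_tracker.h"

    void SimulateForeignTrackerData() {
      constexpr int64_t kFakePid = 1234;       // Assumed test value.
      constexpr size_t kMemorySize = 1 << 20;  // Assumed test value.
      base::debug::GlobalActivityTracker::CreateWithLocalMemory(
          kMemorySize, 0 /* id */, "" /* name */, 3 /* stack_depth */, kFakePid);

      // All thread trackers must be released before detaching the global one.
      std::unique_ptr<base::debug::GlobalActivityTracker> tracker =
          base::debug::GlobalActivityTracker::ReleaseForTesting();

      // ... inspect or rewrite the backing memory as if another process owned it ...

      base::debug::GlobalActivityTracker::SetForTesting(std::move(tracker));
    }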
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
index e6eb197881..c8cf1e972e 100644
--- a/base/debug/activity_tracker.h
+++ b/base/debug/activity_tracker.h
@@ -67,16 +67,16 @@ struct OwningProcess {
// Initializes structure with the current process id and the current time.
// These can uniquely identify a process. A unique non-zero data_id will be
// set making it possible to tell using atomic reads if the data has changed.
- void Release_Initialize();
+ void Release_Initialize(int64_t pid = 0);
// Explicitly sets the process ID.
- void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
// Gets the associated process ID, in native form, and the creation timestamp
// from memory without loading the entire structure for analysis. This will
// return false if no valid process ID is available.
static bool GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp);
// SHA1(base::debug::OwningProcess): Increment this if structure changes!
@@ -393,7 +393,7 @@ class BASE_EXPORT ActivityUserData {
private:
friend class ActivityUserData;
- ValueType type_;
+ ValueType type_ = END_OF_VALUES;
uint64_t short_value_; // Used to hold copy of numbers, etc.
std::string long_value_; // Used to hold copy of raw/string data.
StringPiece ref_value_; // Used to hold reference to external data.
@@ -404,7 +404,7 @@ class BASE_EXPORT ActivityUserData {
// Initialize the object either as a "sink" that just accepts and discards
// data or an active one that writes to a given (zeroed) memory block.
ActivityUserData();
- ActivityUserData(void* memory, size_t size);
+ ActivityUserData(void* memory, size_t size, int64_t pid = 0);
virtual ~ActivityUserData();
// Gets the unique ID number for this user data. If this changes then the
@@ -459,22 +459,22 @@ class BASE_EXPORT ActivityUserData {
// Creates a snapshot of the key/value pairs contained within. The returned
// data will be fixed, independent of whatever changes afterward. There is
- // protection against concurrent modification of the values but no protection
- // against a complete overwrite of the contents; the caller must ensure that
- // the memory segment is not going to be re-initialized while this runs.
+ // some protection against concurrent modification. This will return false
+ // if the data is invalid or if a complete overwrite of the contents is
+ // detected.
bool CreateSnapshot(Snapshot* output_snapshot) const;
// Gets the base memory address used for storing data.
const void* GetBaseAddress() const;
// Explicitly sets the process ID.
- void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
// Gets the associated process ID, in native form, and the creation timestamp
// from tracker memory without loading the entire structure for analysis. This
// will return false if no valid process ID is available.
static bool GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp);
protected:
@@ -533,7 +533,10 @@ class BASE_EXPORT ActivityUserData {
size_t size);
// Loads any data already in the memory segment. This allows for accessing
- // records created previously.
+ // records created previously. If this detects that the underlying data has
+ // gone away (cleared by another thread/process), it will invalidate all the
+ // data in this object and turn it into simple "sink" with no values to
+ // return.
void ImportExistingData() const;
// A map of all the values within the memory block, keyed by name for quick
@@ -550,6 +553,12 @@ class BASE_EXPORT ActivityUserData {
// A pointer to the memory header for this instance.
MemoryHeader* const header_;
+ // These hold values used when initially creating the object. They are
+ // compared against current header values to check for outside changes.
+ const uint32_t orig_data_id;
+ const int64_t orig_process_id;
+ const int64_t orig_create_stamp;
+
DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
};
@@ -584,6 +593,9 @@ class BASE_EXPORT ThreadActivityTracker {
// truncated due to internal length limitations.
std::string thread_name;
+ // The timestamp at which this process was created.
+ int64_t create_stamp;
+
// The process and thread IDs. These values have no meaning other than
// they uniquely identify a running process and a running thread within
// that process. Thread-IDs can be re-used across different processes
@@ -704,13 +716,13 @@ class BASE_EXPORT ThreadActivityTracker {
const void* GetBaseAddress();
// Explicitly sets the process ID.
- void SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp);
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
// Gets the associated process ID, in native form, and the creation timestamp
// from tracker memory without loading the entire structure for analysis. This
// will return false if no valid process ID is available.
static bool GetOwningProcessId(const void* memory,
- ProcessId* out_id,
+ int64_t* out_id,
int64_t* out_stamp);
// Calculates the memory size required for a given stack depth, including
@@ -857,9 +869,12 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking will begin immediately upon return.
+ // The |process_id| can be zero to get it from the OS but is taken for testing
+ // purposes.
static void CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
#if !defined(OS_NACL)
// Like above but internally creates an allocator around a disk file with
@@ -874,11 +889,13 @@ class BASE_EXPORT GlobalActivityTracker {
#endif // !defined(OS_NACL)
// Like above but internally creates an allocator using local heap memory of
- // the specified size. This is used primarily for unit tests.
+ // the specified size. This is used primarily for unit tests. The |process_id|
+ // can be zero to get it from the OS but is taken for testing purposes.
static void CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Gets the global activity-tracker or null if none exists.
static GlobalActivityTracker* Get() {
@@ -886,6 +903,15 @@ class BASE_EXPORT GlobalActivityTracker {
subtle::Acquire_Load(&g_tracker_));
}
+ // Sets the global activity-tracker for testing purposes.
+ static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
+
+ // This access to the persistent allocator is only for testing; it extracts
+ // the global tracker completely. All tracked threads must exit before
+ // calling this. Tracking for the current thread will be automatically
+ // stopped.
+ static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
+
// Convenience method for determining if a global tracker is active.
static bool IsEnabled() { return Get() != nullptr; }
@@ -998,6 +1024,10 @@ class BASE_EXPORT GlobalActivityTracker {
code);
}
+ // Gets the process ID used for tracking. This is typically the same as what
+ // the OS thinks is the current process but can be overridden for testing.
+ int64_t process_id() { return process_id_; };
+
// Accesses the process data record for storing arbitrary key/value pairs.
// Updates to this are thread-safe.
ActivityUserData& process_data() { return process_data_; }
@@ -1024,7 +1054,7 @@ class BASE_EXPORT GlobalActivityTracker {
// thread.
class ThreadSafeUserData : public ActivityUserData {
public:
- ThreadSafeUserData(void* memory, size_t size);
+ ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
~ThreadSafeUserData() override;
private:
@@ -1108,8 +1138,11 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking has already started upon return.
+ // The |process_id| can be zero to get it from the OS but is taken for testing
+ // purposes.
GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Returns the memory used by an activity-tracker managed by this class.
// It is called during the destruction of a ManagedActivityTracker object.
@@ -1124,7 +1157,7 @@ class BASE_EXPORT GlobalActivityTracker {
static void OnTLSDestroy(void* value);
// Does process-exit work. This can be run on any thread.
- void CleanupAfterProcess(ProcessId process_id,
+ void CleanupAfterProcess(int64_t process_id,
int64_t exit_stamp,
int exit_code,
std::string&& command_line);
@@ -1137,6 +1170,10 @@ class BASE_EXPORT GlobalActivityTracker {
// provide the stack-depth requested during construction.
const size_t stack_memory_size_;
+ // The process-id of the current process. This is kept as a member variable,
+ // defined during initialization, for testing purposes.
+ const int64_t process_id_;
+
// The activity tracker for the currently executing thread.
base::ThreadLocalStorage::Slot this_thread_tracker_;
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
index 116c13d623..c7efa580e8 100644
--- a/base/debug/activity_tracker_unittest.cc
+++ b/base/debug/activity_tracker_unittest.cc
@@ -204,7 +204,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
}
TEST_F(ActivityTrackerTest, ScopedTaskTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
ThreadActivityTracker* tracker =
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
@@ -251,7 +251,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
}
TEST_F(ActivityTrackerTest, ExceptionTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker* global = GlobalActivityTracker::Get();
ThreadActivityTracker* tracker =
@@ -301,7 +301,7 @@ TEST_F(ActivityTrackerTest, CreateWithFileTest) {
// GlobalActivityTracker tests below.
TEST_F(ActivityTrackerTest, BasicTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker* global = GlobalActivityTracker::Get();
// Ensure the data repositories have backing store, indicated by non-zero ID.
@@ -364,7 +364,7 @@ class SimpleActivityThread : public SimpleThread {
};
TEST_F(ActivityTrackerTest, ThreadDeathTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
const size_t starting_active = GetGlobalActiveTrackerCount();
const size_t starting_inactive = GetGlobalInactiveTrackerCount();
@@ -401,7 +401,7 @@ TEST_F(ActivityTrackerTest, ProcessDeathTest) {
// testing interfaces to simulate data created by other processes.
const ProcessId other_process_id = GetCurrentProcId() + 1;
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker* global = GlobalActivityTracker::Get();
ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
@@ -441,7 +441,7 @@ TEST_F(ActivityTrackerTest, ProcessDeathTest) {
memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
// Change the objects to appear to be owned by another process.
- ProcessId owning_id;
+ int64_t owning_id;
int64_t stamp;
ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
global->process_data().GetBaseAddress(), &owning_id, &stamp));
diff --git a/base/files/file.h b/base/files/file.h
index 94a9d5cf49..0155c7c259 100644
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -255,6 +255,16 @@ class BASE_EXPORT File {
// Instructs the filesystem to flush the file to disk. (POSIX: fsync, Windows:
// FlushFileBuffers).
+ // Calling Flush() does not guarantee file integrity and thus is not a valid
+ // substitute for file integrity checks and recovery codepaths for malformed
+ // files. It can also be *really* slow, so avoid blocking on Flush(),
+ // especially please don't block shutdown on Flush().
+ // Latency percentiles of Flush() across all platforms as of July 2016:
+ // 50 % > 5 ms
+ // 10 % > 58 ms
+ // 1 % > 357 ms
+ // 0.1 % > 1.8 seconds
+ // 0.01 % > 7.6 seconds
bool Flush();
// Updates the file times.
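Illustration (not part of this commit): per the new comment, Flush() can take seconds at the tail, so this sketch pushes it onto a background TaskRunner rather than blocking a latency-sensitive thread. It assumes the OnceClosure-taking PostTask from this uprev and an injected |background_runner|; the helper names are hypothetical.

    #include <utility>

    #include "base/bind.h"
    #include "base/files/file.h"
    #include "base/location.h"
    #include "base/task_runner.h"

    namespace {
    void DoFlush(base::File file) {
      file.Flush();  // Slow tail latencies stay off the calling thread.
    }
    }  // namespace

    void FlushInBackground(base::File file,
                           scoped_refptr<base::TaskRunner> background_runner) {
      background_runner->PostTask(FROM_HERE,
                                  base::BindOnce(&DoFlush, std::move(file)));
    }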
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 8f7fcc2c58..5b1eb29dd6 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -174,7 +174,7 @@ FilePath::FilePath() {
FilePath::FilePath(const FilePath& that) : path_(that.path_) {
}
-FilePath::FilePath(FilePath&& that) = default;
+FilePath::FilePath(FilePath&& that) noexcept = default;
FilePath::FilePath(StringPieceType path) {
path.CopyToString(&path_);
@@ -563,6 +563,12 @@ FilePath FilePath::StripTrailingSeparators() const {
}
bool FilePath::ReferencesParent() const {
+ if (path_.find(kParentDirectory) == StringType::npos) {
+ // GetComponents is quite expensive, so avoid calling it in the majority
+ // of cases where there isn't a kParentDirectory anywhere in the path.
+ return false;
+ }
+
std::vector<StringType> components;
GetComponents(&components);
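Illustration (not part of this commit): a typical caller that benefits from the fast path added above; the helper is hypothetical.

    #include "base/files/file_path.h"

    // Rejects absolute paths and any path containing a ".." component; the
    // common case now costs a single substring search inside ReferencesParent().
    bool IsSafeRelativePath(const base::FilePath& path) {
      return !path.IsAbsolute() && !path.ReferencesParent();
    }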
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 02846f6892..0be0ad0b10 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -184,7 +184,7 @@ class BASE_EXPORT FilePath {
// Constructs FilePath with the contents of |that|, which is left in valid but
// unspecified state.
- FilePath(FilePath&& that);
+ FilePath(FilePath&& that) noexcept;
// Replaces the contents with those of |that|, which is left in valid but
// unspecified state.
FilePath& operator=(FilePath&& that);
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index a091e62dd1..9f339527f5 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -1306,7 +1306,7 @@ TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
FPL("\xf0\x28\x8c\xbc"),
FPL("\xf0\x28\x8c\x28"),
};
- for (const auto& invalid_input : cases) {
+ for (auto* invalid_input : cases) {
FilePath::StringType observed = FilePath::GetHFSDecomposedForm(
invalid_input);
EXPECT_TRUE(observed.empty());
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index 46bbd7ad85..039f255b15 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -3,9 +3,17 @@
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+
#include "base/threading/thread_collision_warner.h"
namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+AtomicRefCount g_cross_thread_ref_count_access_allow_count = 0;
+#endif
+
+} // namespace
namespace subtle {
@@ -13,8 +21,6 @@ bool RefCountedThreadSafeBase::HasOneRef() const {
return AtomicRefCountIsOne(&ref_count_);
}
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
-
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
@@ -25,6 +31,10 @@ RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
void RefCountedThreadSafeBase::AddRef() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeShared.";
#endif
AtomicRefCountInc(&ref_count_);
}
@@ -43,6 +53,23 @@ bool RefCountedThreadSafeBase::Release() const {
return false;
}
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+ return sequence_checker_.CalledOnValidSequence() ||
+ !AtomicRefCountIsZero(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace subtle
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountInc(&g_cross_thread_ref_count_access_allow_count);
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountDec(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace base
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index ff46e6d6e5..be493f632f 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -16,24 +16,40 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
+template <class T>
+class scoped_refptr;
+
namespace base {
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
namespace subtle {
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
class BASE_EXPORT RefCountedBase {
public:
bool HasOneRef() const { return ref_count_ == 1; }
protected:
- RefCountedBase()
- : ref_count_(0)
+ explicit RefCountedBase(StartRefCountFromZeroTag) {
#if DCHECK_IS_ON()
- , in_dtor_(false)
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+ sequence_checker_.DetachFromSequence();
#endif
- {
}
~RefCountedBase() {
@@ -42,7 +58,6 @@ class BASE_EXPORT RefCountedBase {
#endif
}
-
void AddRef() const {
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
@@ -50,32 +65,62 @@ class BASE_EXPORT RefCountedBase {
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeShared.";
+ if (ref_count_ >= 1) {
+ DCHECK(CalledOnValidSequence());
+ }
#endif
+
++ref_count_;
}
// Returns true if the object should self-delete.
bool Release() const {
+ --ref_count_;
+
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
-#endif
- if (--ref_count_ == 0) {
-#if DCHECK_IS_ON()
+ if (ref_count_ == 0)
in_dtor_ = true;
+
+ if (ref_count_ >= 1)
+ DCHECK(CalledOnValidSequence());
+ if (ref_count_ == 1)
+ sequence_checker_.DetachFromSequence();
#endif
- return true;
- }
- return false;
+
+ return ref_count_ == 0;
}
private:
- mutable size_t ref_count_;
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
#if DCHECK_IS_ON()
- mutable bool in_dtor_;
+ bool CalledOnValidSequence() const;
+#endif
+
+ mutable size_t ref_count_ = 0;
+
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+ mutable SequenceChecker sequence_checker_;
#endif
DFAKE_MUTEX(add_release_);
@@ -88,7 +133,13 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool HasOneRef() const;
protected:
- RefCountedThreadSafeBase();
+ explicit RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit RefCountedThreadSafeBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+#endif
+ }
+
~RefCountedThreadSafeBase();
void AddRef() const;
@@ -97,8 +148,19 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool Release() const;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
mutable AtomicRefCount ref_count_ = 0;
#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
@@ -107,6 +169,27 @@ class BASE_EXPORT RefCountedThreadSafeBase {
} // namespace subtle
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this (callsites handling thread-safety through
+// locks should use RefCountedThreadSafe, since the overhead of its atomics is
+// negligible compared to the locks anyway, and callsites doing explicit
+// sequencing should properly std::move() the ref to avoid hitting this check).
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+ ScopedAllowCrossThreadRefCountAccess();
+ ~ScopedAllowCrossThreadRefCountAccess();
+#else
+ ScopedAllowCrossThreadRefCountAccess() {}
+ ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
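For illustration only (not part of this change; |LegacyFoo| and DoSomething() are hypothetical), a pre-existing callsite that still touches the ref count away from the owning sequence would wrap the access in the scoper like this:

// Sketch under the assumption of a legacy type |LegacyFoo| whose cross-thread
// access is synchronized by other means. Requires base/memory/ref_counted.h.
void TouchFromOtherThread(LegacyFoo* foo) {
  // While this scoper is alive, the sequence DCHECK in RefCountedBase is
  // suppressed, so taking an extra reference off-sequence does not fire it.
  base::ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count;
  scoped_refptr<LegacyFoo> extra_ref(foo);
  extra_ref->DoSomething();
  // |extra_ref| releases before the scoper is destroyed, so the matching
  // Release() call is covered as well.
}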
+
//
// A base class for reference counted classes. Otherwise, known as a cheap
// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
@@ -121,10 +204,45 @@ class BASE_EXPORT RefCountedThreadSafeBase {
//
// You should always make your destructor non-public, to avoid any code deleting
// the object accidentally while there are references to it.
+//
+//
+// The ref count manipulation of RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() to
+// the ref counted class to opt in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeShared(). base::MakeShared()
+// can be used to create both types of ref counted object.
+//
+// The motivations for a start-from-one ref count are:
+// - A start-from-one ref count doesn't need the ref count increment for the
+// first reference.
+// - It can detect an invalid acquisition of an object that is being deleted
+// and has a zero ref count. That tends to happen with a custom deleter that
+// delays the deletion.
+// TODO(tzik): Implement invalid acquisition detection.
+// - Behavior parity with Blink's WTF::RefCounted, whose count starts from one.
+// A start-from-one ref count is also a step towards merging WTF::RefCounted
+// into base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ static constexpr ::base::subtle::StartRefCountFromOneTag \
+ kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
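For illustration, a minimal sketch of opting a class in; it mirrors the InitialRefCountIsOne test class added in ref_counted_unittest.cc below, and |Foo| itself is hypothetical:

class Foo : public base::RefCounted<Foo> {
 public:
  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();

  Foo() = default;

 private:
  friend class base::RefCounted<Foo>;
  ~Foo() = default;
};
// make_scoped_refptr(new Foo) now hits the needs_adopt_ref_ DCHECK; the first
// reference must come from base::AdoptRef() or base::MakeShared().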
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
- RefCounted() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -140,7 +258,7 @@ class RefCounted : public subtle::RefCountedBase {
~RefCounted() = default;
private:
- DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+ DISALLOW_COPY_AND_ASSIGN(RefCounted);
};
// Forward declaration.
@@ -171,10 +289,17 @@ struct DefaultRefCountedThreadSafeTraits {
// private:
// friend class base::RefCountedThreadSafe<MyFoo>;
// ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- RefCountedThreadSafe() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ explicit RefCountedThreadSafe()
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedThreadSafeBase::AddRef();
@@ -214,6 +339,43 @@ class RefCountedData
~RefCountedData() = default;
};
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = typename std::decay<decltype(T::kRefCountPreference)>::type;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only for the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeShared(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
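A minimal usage sketch for the helpers above, assuming |Foo| is a start-from-one type as sketched earlier and |Bar| is a plain start-from-zero base::RefCounted subclass (both hypothetical):

scoped_refptr<Foo> foo = base::MakeShared<Foo>();   // adopts the initial ref
scoped_refptr<Foo> foo2 = base::AdoptRef(new Foo);  // explicit adoption
scoped_refptr<Bar> bar = base::MakeShared<Bar>();   // start-from-zero: AddRef()
// base::AdoptRef(new Bar) would not compile; the static_assert above rejects
// types whose reference count starts from zero.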
} // namespace base
//
@@ -354,14 +516,10 @@ class scoped_refptr {
return *this;
}
- void swap(T** pp) {
- T* p = ptr_;
- ptr_ = *pp;
- *pp = p;
- }
-
void swap(scoped_refptr<T>& r) {
- swap(&r.ptr_);
+ T* tmp = ptr_;
+ ptr_ = r.ptr_;
+ r.ptr_ = tmp;
}
explicit operator bool() const { return ptr_ != nullptr; }
@@ -385,6 +543,11 @@ class scoped_refptr {
T* ptr_ = nullptr;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
// Friend required for move constructors that set r.ptr_ to null.
template <typename U>
friend class scoped_refptr;
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 65c15d26ab..515f4227ea 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/test/gtest_util.h"
#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -122,6 +123,16 @@ scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
return self_assign;
}
+class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ InitialRefCountIsOne() {}
+
+ private:
+ friend class base::RefCounted<InitialRefCountIsOne>;
+ ~InitialRefCountIsOne() {}
+};
} // end namespace
@@ -528,3 +539,30 @@ TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
scoped_refptr<Other> other2(other);
EXPECT_EQ(other2, Overloaded(std::move(other)));
}
+
+TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_TRUE(obj->HasOneRef());
+ obj = nullptr;
+
+ scoped_refptr<InitialRefCountIsOne> obj2 =
+ base::AdoptRef(new InitialRefCountIsOne);
+ EXPECT_TRUE(obj2->HasOneRef());
+ obj2 = nullptr;
+
+ scoped_refptr<Other> obj3 = base::MakeShared<Other>();
+ EXPECT_TRUE(obj3->HasOneRef());
+ obj3 = nullptr;
+}
+
+TEST(RefCountedDeathTest, TestAdoptRef) {
+ EXPECT_DCHECK_DEATH(make_scoped_refptr(new InitialRefCountIsOne));
+
+ InitialRefCountIsOne* ptr = nullptr;
+ EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
+
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
+}
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index f0df650adc..316b5ec645 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -60,9 +60,10 @@ IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
bool IncomingTaskQueue::AddToIncomingQueue(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay,
bool nestable) {
+ DCHECK(task);
DLOG_IF(WARNING,
delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
<< "Requesting super-long task delay period of " << delay.InSeconds()
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index a912dc2ee1..17bea07674 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -36,7 +36,7 @@ class BASE_EXPORT IncomingTaskQueue
// returns false. In all cases, the ownership of |task| is transferred to the
// called method.
bool AddToIncomingQueue(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay,
bool nestable);
diff --git a/base/message_loop/message_loop_task_runner.cc b/base/message_loop/message_loop_task_runner.cc
index ddfdeb2b65..aece087b76 100644
--- a/base/message_loop/message_loop_task_runner.cc
+++ b/base/message_loop/message_loop_task_runner.cc
@@ -26,7 +26,7 @@ void MessageLoopTaskRunner::BindToCurrentThread() {
bool MessageLoopTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
@@ -35,7 +35,7 @@ bool MessageLoopTaskRunner::PostDelayedTask(
bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
diff --git a/base/message_loop/message_loop_task_runner.h b/base/message_loop/message_loop_task_runner.h
index 11ee8a6bf7..99a96a711e 100644
--- a/base/message_loop/message_loop_task_runner.h
+++ b/base/message_loop/message_loop_task_runner.h
@@ -32,10 +32,10 @@ class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 54551daadd..d403c70700 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -127,7 +127,7 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
RunLoop().Run();
EXPECT_EQ(task_thread_.message_loop(), task_run_on);
- EXPECT_EQ(current_loop_.get(), task_deleted_on);
+ EXPECT_EQ(task_thread_.message_loop(), task_deleted_on);
EXPECT_EQ(current_loop_.get(), reply_run_on);
EXPECT_EQ(current_loop_.get(), reply_deleted_on);
EXPECT_LT(task_delete_order, reply_delete_order);
@@ -200,7 +200,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
EXPECT_LT(task_delete_order, reply_delete_order);
}
-TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+TEST_F(MessageLoopTaskRunnerTest,
+ PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
// Annotate the scope as having memory leaks to suppress heapchecker reports.
ANNOTATE_SCOPED_MEMORY_LEAK;
MessageLoop* task_run_on = NULL;
@@ -237,11 +238,13 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
MessageLoop* task_loop = task_thread_.message_loop();
task_thread_.Stop();
+ // Even though the reply task runner is already gone, the original task
+ // should already have been deleted. However, the reply, which hasn't
+ // executed yet, should leak to avoid thread-safety issues.
EXPECT_EQ(task_loop, task_run_on);
- ASSERT_FALSE(task_deleted_on);
+ EXPECT_EQ(task_loop, task_deleted_on);
EXPECT_FALSE(reply_run_on);
ASSERT_FALSE(reply_deleted_on);
- EXPECT_EQ(task_delete_order, reply_delete_order);
// The PostTaskAndReplyRelay is leaked here. Even if we had a reference to
// it, we cannot just delete it because PostTaskAndReplyRelay's destructor
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 14fe1ee391..9d771d5ecb 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -12,6 +12,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_test.h"
@@ -93,16 +94,19 @@ void AbortMessagePump() {
static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
}
-void RunTest_AbortDontRunMoreTasks(bool delayed) {
- MessageLoop loop(MessageLoop::TYPE_JAVA);
-
+void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
- java_thread.reset(new android::JavaHandlerThreadForTesting(
- "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
- &test_done_event));
+ std::unique_ptr<android::JavaHandlerThread> java_thread;
+ if (init_java_first) {
+ java_thread =
+ android::JavaHandlerThreadForTesting::CreateJavaFirst(&test_done_event);
+ } else {
+ java_thread = android::JavaHandlerThreadForTesting::Create(
+ "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
+ &test_done_event);
+ }
java_thread->Start();
if (delayed) {
@@ -121,10 +125,19 @@ void RunTest_AbortDontRunMoreTasks(bool delayed) {
}
TEST(MessageLoopTest, JavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(false);
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(true);
+ constexpr bool delayed = true;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+TEST(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = true;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
#endif // defined(OS_ANDROID)
diff --git a/base/message_loop/message_pump_glib_unittest.cc b/base/message_loop/message_pump_glib_unittest.cc
index a89ccb9365..607d3c93d6 100644
--- a/base/message_loop/message_pump_glib_unittest.cc
+++ b/base/message_loop/message_pump_glib_unittest.cc
@@ -13,6 +13,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
+#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
index 1cbde8ac18..48cb98a330 100644
--- a/base/message_loop/message_pump_libevent.cc
+++ b/base/message_loop/message_pump_libevent.cc
@@ -13,7 +13,6 @@
#include "base/compiler_specific.h"
#include "base/files/file_util.h"
#include "base/logging.h"
-#include "base/observer_list.h"
#include "base/posix/eintr_wrapper.h"
#include "base/third_party/libevent/event.h"
#include "base/time/time.h"
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 78473761dd..d39972a8a1 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -41,10 +41,9 @@
// delete and reused. The value in |sample| must be strictly less than
// |enum_max|.
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
- INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
- name, sample, enum_max, \
- base::HistogramBase::kUmaTargetedHistogramFlag)
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Histogram for boolean values.
@@ -68,14 +67,15 @@
// Sample usage:
// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
- UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Used for capturing basic percentages. This will be 100 buckets of size 1.
// Sample usage:
// UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
- UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+ UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
//------------------------------------------------------------------------------
// Count histograms. These are used for collecting numeric data. Note that we
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index 53e4f11b75..c107a4729d 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -5,6 +5,11 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
@@ -96,17 +101,42 @@
base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
// This is a helper macro used by other macros and shouldn't be used directly.
-// For an enumeration with N items, recording values in the range [0, N - 1],
-// this macro creates a linear histogram with N + 1 buckets:
-// [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
+// The bucketing scheme is linear with a bucket size of 1. For N items,
+// recording values in the range [0, N - 1] creates a linear histogram with N +
+// 1 buckets:
+// [0, 1), [1, 2), ..., [N - 1, N)
+// and an overflow bucket [N, infinity).
+//
// Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely append new entries to the
-// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
-// overflow bucket, and so the maximal value (N - 1) would be emitted to this
-// overflow bucket. But, if an additional enumerated value were later added, the
-// bucket label for the value (N - 1) would change to [N - 1, N), which would
-// result in different versions of Chrome using different bucket labels for
-// identical data.
+// This allows future versions of Chrome to safely increase the boundary size.
+// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
+// and so the maximal value (N - 1) would be emitted to this overflow bucket.
+// But, if an additional value were later added, the bucket label for
+// the value (N - 1) would change to [N - 1, N), which would result in different
+// versions of Chrome using different bucket labels for identical data.
+#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary, \
+ flag) \
+ do { \
+ static_assert(!std::is_enum<decltype(sample)>::value, \
+ "|sample| should not be an enum type!"); \
+ static_assert(!std::is_enum<decltype(boundary)>::value, \
+ "|boundary| should not be an enum type!"); \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), \
+ base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+ flag)); \
+ } while (0)
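As a worked illustration of the bucketing described above (the enum is hypothetical), three valid values with a sentinel boundary of 3 produce the buckets [0, 1), [1, 2), [2, 3) plus the overflow bucket [3, infinity), since the macro ends up calling LinearHistogram::FactoryGet(name, 1, 3, 4, flag):

enum FrobResult {
  FROB_OK = 0,
  FROB_RETRIED = 1,
  FROB_FAILED = 2,
  FROB_RESULT_MAX = 3,  // Never emitted; reserved so new values can be added.
};
// Records into a 4-bucket linear histogram: [0,1) [1,2) [2,3) [3,inf).
UMA_HISTOGRAM_ENUMERATION("Example.FrobResult", FROB_OK, FROB_RESULT_MAX);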
+
+// Similar to the previous macro but intended for enumerations. This delegates
+// the work to the previous macro, but supports scoped enumerations as well by
+// forcing an explicit cast to the HistogramBase::Sample integral type.
+//
+// Note the range checks verify two separate issues:
+// - that the declared enum max isn't out of range of HistogramBase::Sample
+// - that the declared enum max is > 0
+//
+// TODO(dcheng): This should assert that the passed in types are actually enum
+// types.
#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
do { \
static_assert( \
@@ -115,9 +145,14 @@
std::is_same<std::remove_const<decltype(sample)>::type, \
std::remove_const<decltype(boundary)>::type>::value, \
"|sample| and |boundary| shouldn't be of different enums"); \
- STATIC_HISTOGRAM_POINTER_BLOCK( \
- name, Add(sample), base::LinearHistogram::FactoryGet( \
- name, 1, boundary, boundary + 1, flag)); \
+ static_assert( \
+ static_cast<uintmax_t>(boundary) < \
+ static_cast<uintmax_t>( \
+ std::numeric_limits<base::HistogramBase::Sample>::max()), \
+ "|boundary| is out of range of HistogramBase::Sample"); \
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, static_cast<base::HistogramBase::Sample>(sample), \
+ static_cast<base::HistogramBase::Sample>(boundary), flag); \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
diff --git a/base/metrics/histogram_macros_unittest.cc b/base/metrics/histogram_macros_unittest.cc
index c5991619a0..33a9c6e5b2 100644
--- a/base/metrics/histogram_macros_unittest.cc
+++ b/base/metrics/histogram_macros_unittest.cc
@@ -15,4 +15,35 @@ TEST(ScopedHistogramTimer, TwoTimersOneScope) {
SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
}
+// Compile tests for UMA_HISTOGRAM_ENUMERATION with the three different types it
+// accepts:
+// - integral types
+// - unscoped enums
+// - scoped enums
+TEST(HistogramMacro, IntegralPseudoEnumeration) {
+ UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
+}
+
+TEST(HistogramMacro, UnscopedEnumeration) {
+ enum TestEnum : char {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.UnscopedEnumeration", SECOND_VALUE,
+ MAX_ENTRIES);
+}
+
+TEST(HistogramMacro, ScopedEnumeration) {
+ enum class TestEnum {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::SECOND_VALUE,
+ TestEnum::MAX_ENTRIES);
+}
+
} // namespace base
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index cca76bfee7..415d7f9430 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -213,13 +213,13 @@ HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
return SparseHistogram::FactoryGet(histogram_name, flags);
}
-void SparseHistogram::GetParameters(DictionaryValue* /*params*/) const {
+void SparseHistogram::GetParameters(DictionaryValue* params) const {
// TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
}
-void SparseHistogram::GetCountAndBucketData(Count* /*count*/,
- int64_t* /*sum*/,
- ListValue* /*buckets*/) const {
+void SparseHistogram::GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const {
// TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
}
diff --git a/base/native_library.h b/base/native_library.h
index 02eae1d508..e2b9ca7e6d 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -91,16 +91,6 @@ BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
const NativeLibraryOptions& options,
NativeLibraryLoadError* error);
-#if defined(OS_WIN)
-// Loads a native library from disk. Release it with UnloadNativeLibrary when
-// you're done.
-// This function retrieves the LoadLibrary function exported from kernel32.dll
-// and calls it instead of directly calling the LoadLibrary function via the
-// import table.
-BASE_EXPORT NativeLibrary LoadNativeLibraryDynamically(
- const FilePath& library_path);
-#endif // OS_WIN
-
// Unloads a native library.
BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
index 1456129324..6f50de8b86 100644
--- a/base/post_task_and_reply_with_result_internal.h
+++ b/base/post_task_and_reply_with_result_internal.h
@@ -16,16 +16,15 @@ namespace internal {
// Adapts a function that produces a result via a return value to
// one that returns via an output parameter.
template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
- ReturnType* result) {
- *result = func.Run();
+void ReturnAsParamAdapter(OnceCallback<ReturnType()> func, ReturnType* result) {
+ *result = std::move(func).Run();
}
// Adapts a T* result to a callback that expects a T.
template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+void ReplyAdapter(OnceCallback<void(ReplyArgType)> callback,
TaskReturnType* result) {
- callback.Run(std::move(*result));
+ std::move(callback).Run(std::move(*result));
}
} // namespace internal
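These adapters are the plumbing behind base::PostTaskAndReplyWithResult(), declared in base/task_runner_util.h in the Chromium tree; that header is not part of this diff, so treat the following as an assumption-laden sketch with hypothetical functions:

int ComputeValue();        // Runs on |worker_runner|; its return value is
                           // stored via ReturnAsParamAdapter.
void UseValue(int value);  // Runs back on the posting sequence via
                           // ReplyAdapter, receiving the stored result.

void Example(base::TaskRunner* worker_runner) {
  base::PostTaskAndReplyWithResult(worker_runner, FROM_HERE,
                                   base::Bind(&ComputeValue),
                                   base::Bind(&UseValue));
}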
diff --git a/base/power_monitor/power_monitor_device_source.h b/base/power_monitor/power_monitor_device_source.h
index 4ecd2d981b..1e2c885fa4 100644
--- a/base/power_monitor/power_monitor_device_source.h
+++ b/base/power_monitor/power_monitor_device_source.h
@@ -13,7 +13,6 @@
#if defined(OS_WIN)
#include <windows.h>
-
#endif // !OS_WIN
#if defined(OS_IOS)
diff --git a/base/process/launch.h b/base/process/launch.h
index be8f6e73b9..99a7280cb3 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -262,6 +262,11 @@ BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
std::string* output);
+// Like the above POSIX-specific version of GetAppOutput, but also includes
+// stderr.
+BASE_EXPORT bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output);
+
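A usage sketch for the new overload (the command line is illustrative); per the POSIX implementation added in launch_posix.cc, stdout and stderr are captured together and the function returns true only for a clean exit:

void LogListing() {
  std::vector<std::string> argv = {"ls", "-l", "/nonexistent"};
  std::string output;
  bool ok = base::GetAppOutputAndError(argv, &output);
  // |ok| is false for a non-zero exit code, but |output| still holds the
  // combined stdout/stderr text the command produced.
}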
// A version of |GetAppOutput()| which also returns the exit code of the
// executed command. Returns true if the application runs and exits cleanly. If
// this is the case the exit code of the application is available in
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 19effa2ce5..1c4df40665 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -668,6 +668,14 @@ bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
return result && exit_code == EXIT_SUCCESS;
}
+bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output) {
+ int exit_code;
+ bool result =
+ GetAppOutputInternal(argv, nullptr, true, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
+}
+
bool GetAppOutputWithExitCode(const CommandLine& cl,
std::string* output,
int* exit_code) {
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 33cb64e244..1562e7b156 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -27,6 +27,10 @@
#include "base/process/port_provider_mac.h"
#endif
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
namespace base {
#if defined(OS_WIN)
@@ -63,8 +67,12 @@ struct IoCounters {
// shareable: 0
// swapped Pages swapped out to zram.
//
-// On OS X: TODO(thakis): Revise.
-// priv: Memory.
+// On macOS:
+// priv: Resident size (RSS) including shared memory. Warning: This
+// does not include compressed size and does not always
+// accurately account for shared memory due to things like
+// copy-on-write. TODO(erikchen): Revamp this with something
+// more accurate.
// shared: 0
// shareable: 0
//
@@ -154,10 +162,13 @@ class BASE_EXPORT ProcessMetrics {
// system call.
bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
WorkingSetKBytes* ws_usage) const;
- // Returns private, shared, and total resident bytes.
+ // Returns private, shared, and total resident bytes. |locked_bytes| refers to
+ // bytes that must stay resident. |locked_bytes| only counts bytes locked by
+ // this task, not bytes locked by the kernel.
bool GetMemoryBytes(size_t* private_bytes,
size_t* shared_bytes,
- size_t* resident_bytes) const;
+ size_t* resident_bytes,
+ size_t* locked_bytes) const;
#endif
// Returns the CPU usage in percent since the last time this method or
@@ -213,7 +224,11 @@ class BASE_EXPORT ProcessMetrics {
int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
#endif
+#if defined(OS_WIN)
+ win::ScopedHandle process_;
+#else
ProcessHandle process_;
+#endif
int processor_count_;
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 21ad8ceea9..288cde9fc6 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -24,6 +24,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
namespace base {
namespace debug {
@@ -53,6 +57,42 @@ class SystemMetricsTest : public testing::Test {
/////////////////////////////////////////////////////////////////////////////
+#if defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+TEST_F(SystemMetricsTest, LockedBytes) {
+ ProcessHandle handle = GetCurrentProcessHandle();
+ std::unique_ptr<ProcessMetrics> metrics(
+ ProcessMetrics::CreateProcessMetrics(handle, nullptr));
+
+ size_t initial_locked_bytes;
+ bool result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &initial_locked_bytes);
+ ASSERT_TRUE(result);
+
+ size_t size = 8 * 1024 * 1024;
+ std::unique_ptr<char[]> memory(new char[size]);
+ int r = mlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ size_t new_locked_bytes;
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+
+ // There should be around |size| more locked bytes, but multi-threading might
+ // cause noise.
+ EXPECT_LT(initial_locked_bytes + size / 2, new_locked_bytes);
+ EXPECT_GT(initial_locked_bytes + size * 1.5, new_locked_bytes);
+
+ r = munlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+ EXPECT_EQ(initial_locked_bytes, new_locked_bytes);
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST_F(SystemMetricsTest, IsValidDiskName) {
std::string invalid_input1 = "";
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
index df2a8cb24f..6a9b5b2d0f 100644
--- a/base/sequence_checker_impl.cc
+++ b/base/sequence_checker_impl.cc
@@ -26,7 +26,7 @@ class SequenceCheckerImpl::Core {
~Core() = default;
- bool CalledOnValidThread() const {
+ bool CalledOnValidSequence() const {
if (sequence_token_.IsValid())
return sequence_token_ == SequenceToken::GetForCurrentThread();
@@ -58,7 +58,7 @@ bool SequenceCheckerImpl::CalledOnValidSequence() const {
AutoLock auto_lock(lock_);
if (!core_)
core_ = MakeUnique<Core>();
- return core_->CalledOnValidThread();
+ return core_->CalledOnValidSequence();
}
void SequenceCheckerImpl::DetachFromSequence() {
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index fa19ae50c1..4c367cb927 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -12,7 +12,7 @@ namespace base {
bool SequencedTaskRunner::PostNonNestableTask(
const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
return PostNonNestableDelayedTask(from_here, std::move(task),
base::TimeDelta());
}
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index b92bd997e1..b29153927e 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -110,11 +110,11 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
// below.
bool PostNonNestableTask(const tracked_objects::Location& from_here,
- Closure task);
+ OnceClosure task);
virtual bool PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
// Submits a non-nestable task to delete the given object. Returns
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 8502510eb0..c3e0574a1b 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -23,7 +23,7 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- Closure task) override;
+ OnceClosure task) override;
// Non-owning.
TaskRunner* destination_;
@@ -36,20 +36,20 @@ PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
bool PostTaskAndReplyTaskRunner::PostTask(
const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
return destination_->PostTask(from_here, std::move(task));
}
} // namespace
bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
}
bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
diff --git a/base/task_runner.h b/base/task_runner.h
index d6a387109a..0421d564e6 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -61,7 +61,7 @@ class BASE_EXPORT TaskRunner
// will not be run.
//
// Equivalent to PostDelayedTask(from_here, task, 0).
- bool PostTask(const tracked_objects::Location& from_here, Closure task);
+ bool PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Like PostTask, but tries to run the posted task only after
// |delay_ms| has passed.
@@ -69,7 +69,7 @@ class BASE_EXPORT TaskRunner
// It is valid for an implementation to ignore |delay_ms|; that is,
// to have PostDelayedTask behave the same as PostTask.
virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
// Returns true if the current thread is a thread on which a task
@@ -122,8 +122,8 @@ class BASE_EXPORT TaskRunner
// and the reply will cancel itself safely because it is bound to a
// WeakPtr<>.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
protected:
friend struct TaskRunnerTraits;
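A sketch of the call pattern with the new OnceClosure signatures (|DoBlockingWork| and |OnWorkDone| are illustrative; the result of base::Bind() converts implicitly to OnceClosure). The calling sequence must have a SequencedTaskRunnerHandle so the reply can be posted back:

void DoBlockingWork();  // Runs, and is now also destroyed, on |task_runner|.
void OnWorkDone();      // Runs back on the posting sequence.

void Example(base::TaskRunner* task_runner) {
  task_runner->PostTaskAndReply(FROM_HERE,
                                base::Bind(&DoBlockingWork),
                                base::Bind(&OnWorkDone));
}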
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index 601b5402d0..9867c1dfd2 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -15,6 +15,8 @@ namespace internal {
Sequence::Sequence() = default;
bool Sequence::PushTask(std::unique_ptr<Task> task) {
+ DCHECK(task);
+ DCHECK(task->task);
DCHECK(task->sequenced_time.is_null());
task->sequenced_time = base::TimeTicks::Now();
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
index ed1d0ac401..408d99f9c6 100644
--- a/base/task_scheduler/sequence.h
+++ b/base/task_scheduler/sequence.h
@@ -82,7 +82,7 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
// Queue of tasks to execute.
std::queue<std::unique_ptr<Task>> queue_;
- // Number of tasks contained in the sequence for each priority.
+ // Number of tasks contained in the Sequence for each priority.
size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
{};
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index c45d8a87d0..7093b1e94d 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/test/gtest_util.h"
@@ -24,27 +25,27 @@ class TaskSchedulerSequenceTest : public testing::Test {
TaskSchedulerSequenceTest()
: task_a_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_b_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
TimeDelta())),
task_c_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_d_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_e_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_a_(task_a_owned_.get()),
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index 44aaa6016d..fc513e3e9f 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -12,7 +12,7 @@ namespace base {
namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
- Closure task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay)
: PendingTask(
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index 1f3b775a23..43095f2ae7 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -28,7 +28,7 @@ struct BASE_EXPORT Task : public PendingTask {
// behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
// automatically adjusted to SKIP_ON_SHUTDOWN.
Task(const tracked_objects::Location& posted_from,
- Closure task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay);
~Task();
diff --git a/base/template_util.h b/base/template_util.h
index 42552107cf..10154dbbeb 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -51,46 +51,8 @@ template <class T> struct is_non_const_reference : std::false_type {};
template <class T> struct is_non_const_reference<T&> : std::true_type {};
template <class T> struct is_non_const_reference<const T&> : std::false_type {};
-// is_assignable
-
namespace internal {
-template <typename First, typename Second>
-struct SelectSecond {
- using type = Second;
-};
-
-struct Any {
- Any(...);
-};
-
-// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
-// value is a true_type.
-template <class Lvalue, class Rvalue>
-typename internal::SelectSecond<
- decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
- std::true_type>::type
-IsAssignableTest(Lvalue&&, Rvalue&&);
-
-// False case: Otherwise the return value is a false_type.
-template <class Rvalue>
-std::false_type IsAssignableTest(internal::Any, Rvalue&&);
-
-// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
-// determine the type of IsAssignableImpl.
-template <class Lvalue,
- class Rvalue,
- bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
-struct IsAssignableImpl
- : public std::common_type<decltype(
- internal::IsAssignableTest(std::declval<Lvalue>(),
- std::declval<Rvalue>()))>::type {};
-
-// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
-// is false_type.
-template <class Lvalue, class Rvalue>
-struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
-
// Uses expression SFINAE to detect whether using operator<< would work.
template <typename T, typename = void>
struct SupportsOstreamOperator : std::false_type {};
@@ -102,29 +64,6 @@ struct SupportsOstreamOperator<T,
} // namespace internal
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class Lvalue, class Rvalue>
-struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
-
-// is_copy_assignable is true if a T const& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_copy_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- typename std::add_lvalue_reference<
- typename std::add_const<T>::type>::type> {};
-
-// is_move_assignable is true if a T&& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_move_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- const typename std::add_rvalue_reference<T>::type> {
-};
-
// underlying_type produces the integer type backing an enum type.
// TODO(crbug.com/554293): Remove this when all platforms have this in the std
// namespace.
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index 921596474b..e34a25b042 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -30,39 +30,6 @@ static_assert(!is_non_const_reference<const int&>::value,
"IsNonConstReference");
static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
-class AssignParent {};
-class AssignChild : AssignParent {};
-
-// is_assignable<Type1, Type2>
-static_assert(!is_assignable<int, int>::value, "IsAssignable"); // 1 = 1;
-static_assert(!is_assignable<int, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int>::value, "IsAssignable");
-static_assert(is_assignable<int&, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int&>::value, "IsAssignable");
-static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
-static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
-static_assert(!is_assignable<AssignParent&, AssignChild>::value,
- "IsAssignable");
-static_assert(!is_assignable<AssignChild&, AssignParent>::value,
- "IsAssignable");
-
-struct AssignCopy {};
-struct AssignNoCopy {
- AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
- AssignNoCopy& operator=(const AssignNoCopy&) = delete;
-};
-struct AssignNoMove {
- AssignNoMove& operator=(AssignNoMove&&) = delete;
- AssignNoMove& operator=(const AssignNoMove&) = delete;
-};
-
-static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
-static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
-
-static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
-static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
-static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
-
// A few standard types that definitely support printing.
static_assert(internal::SupportsOstreamOperator<int>::value,
"ints should be printable");
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
index a431189231..a236acffa1 100644
--- a/base/test/test_mock_time_task_runner.cc
+++ b/base/test/test_mock_time_task_runner.cc
@@ -81,7 +81,7 @@ struct TestMockTimeTaskRunner::TestOrderedPendingTask
: public base::TestPendingTask {
TestOrderedPendingTask();
TestOrderedPendingTask(const tracked_objects::Location& location,
- Closure task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
@@ -106,7 +106,7 @@ TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
const tracked_objects::Location& location,
- Closure task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
size_t ordinal,
@@ -240,7 +240,7 @@ bool TestMockTimeTaskRunner::RunsTasksOnCurrentThread() const {
bool TestMockTimeTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock scoped_lock(tasks_lock_);
tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_,
@@ -251,7 +251,7 @@ bool TestMockTimeTaskRunner::PostDelayedTask(
bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
return PostDelayedTask(from_here, std::move(task), delay);
}
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
index 5c61a36f69..2f892f52cc 100644
--- a/base/test/test_mock_time_task_runner.h
+++ b/base/test/test_mock_time_task_runner.h
@@ -141,10 +141,10 @@ class TestMockTimeTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner:
bool RunsTasksOnCurrentThread() const override;
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
protected:
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 63bdfffacd..3f71a9988f 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -12,7 +12,7 @@ namespace base {
TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
TestPendingTask::TestPendingTask(const tracked_objects::Location& location,
- Closure task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
TestNestability nestability)
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 4497ba18ca..52ca592f25 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -23,7 +23,7 @@ struct TestPendingTask {
TestPendingTask();
TestPendingTask(TestPendingTask&& other);
TestPendingTask(const tracked_objects::Location& location,
- Closure task,
+ OnceClosure task,
TimeTicks post_time,
TimeDelta delay,
TestNestability nestability);
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index df0334097b..4280a0de62 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -18,7 +18,7 @@ TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
bool TestSimpleTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
@@ -29,7 +29,7 @@ bool TestSimpleTaskRunner::PostDelayedTask(
bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock auto_lock(lock_);
pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index ac609f1f7f..f46e065e47 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -44,10 +44,10 @@ class TestSimpleTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation.
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index d16f8bd225..cddb8981ad 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -29,8 +29,8 @@ namespace {
class PostTaskAndReplyRelay {
public:
PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply)
+ OnceClosure task,
+ OnceClosure reply)
: sequence_checker_(),
from_here_(from_here),
origin_task_runner_(SequencedTaskRunnerHandle::Get()),
@@ -39,12 +39,10 @@ class PostTaskAndReplyRelay {
~PostTaskAndReplyRelay() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- task_.Reset();
- reply_.Reset();
}
void RunTaskAndPostReply() {
- task_.Run();
+ std::move(task_).Run();
origin_task_runner_->PostTask(
from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
base::Unretained(this)));
@@ -54,12 +52,12 @@ class PostTaskAndReplyRelay {
void RunReplyAndSelfDestruct() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- // Force |task_| to be released before |reply_| is to ensure that no one
- // accidentally depends on |task_| keeping one of its arguments alive while
- // |reply_| is executing.
- task_.Reset();
+ // Ensure |task_| has already been released before |reply_| runs, so that
+ // no one accidentally depends on |task_| keeping one of its arguments
+ // alive while |reply_| is executing.
+ DCHECK(!task_);
- reply_.Run();
+ std::move(reply_).Run();
// Cue mission impossible theme.
delete this;
@@ -68,8 +66,8 @@ class PostTaskAndReplyRelay {
const SequenceChecker sequence_checker_;
const tracked_objects::Location from_here_;
const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
- Closure reply_;
- Closure task_;
+ OnceClosure reply_;
+ OnceClosure task_;
};
} // namespace
@@ -78,8 +76,8 @@ namespace internal {
bool PostTaskAndReplyImpl::PostTaskAndReply(
const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
DCHECK(!task.is_null()) << from_here.ToString();
DCHECK(!reply.is_null()) << from_here.ToString();
PostTaskAndReplyRelay* relay =
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index a02c32ec8c..00aee6d0ed 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -29,12 +29,12 @@ class BASE_EXPORT PostTaskAndReplyImpl {
// SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
// to be deleted on the sequence or thread that called this.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
private:
virtual bool PostTask(const tracked_objects::Location& from_here,
- Closure task) = 0;
+ OnceClosure task) = 0;
};
} // namespace internal
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index ecf6e2c8e0..1d8a67c2e0 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -97,12 +97,15 @@ struct SequencedTask : public TrackingInfo {
~SequencedTask() {}
+ SequencedTask(SequencedTask&&) = default;
+ SequencedTask& operator=(SequencedTask&&) = default;
+
int sequence_token_id;
int trace_id;
int64_t sequence_task_number;
SequencedWorkerPool::WorkerShutdown shutdown_behavior;
tracked_objects::Location posted_from;
- Closure task;
+ OnceClosure task;
// Non-delayed tasks and delayed tasks are managed together by time-to-run
// order. We calculate the time by adding the posted time and the given delay.
@@ -144,7 +147,7 @@ class SequencedWorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -168,7 +171,7 @@ SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostWorkerTaskWithShutdownBehavior(from_here, std::move(task),
@@ -198,13 +201,13 @@ class SequencedWorkerPool::PoolSequencedTaskRunner
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
// SequencedTaskRunner implementation
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
private:
@@ -233,7 +236,7 @@ SequencedWorkerPool::PoolSequencedTaskRunner::
bool SequencedWorkerPool::PoolSequencedTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
@@ -250,7 +253,7 @@ bool SequencedWorkerPool::PoolSequencedTaskRunner::
bool SequencedWorkerPool::PoolSequencedTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
// There's no way to run nested tasks, so simply forward to
// PostDelayedTask.
@@ -353,7 +356,7 @@ class SequencedWorkerPool::Inner {
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay);
bool RunsTasksOnCurrentThread() const;
@@ -398,8 +401,7 @@ class SequencedWorkerPool::Inner {
// Returns true if the task may run at some point in the future and false if
// it will definitely not run.
// Coalesce upon resolution of http://crbug.com/622400.
- bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
- const TimeDelta& delay);
+ bool PostTaskToTaskScheduler(SequencedTask sequenced, const TimeDelta& delay);
// Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
// and |traits|.
@@ -697,8 +699,10 @@ bool SequencedWorkerPool::Inner::PostTask(
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
+ DCHECK(task);
+
// TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
// revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
// waterfall. https://crbug.com/622400
@@ -758,20 +762,22 @@ bool SequencedWorkerPool::Inner::PostTask(
// See on top of the file why we don't compile this on Arc++.
#if 0
if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
- if (!PostTaskToTaskScheduler(sequenced, delay))
+ if (!PostTaskToTaskScheduler(std::move(sequenced), delay))
return false;
} else {
#endif
- pending_tasks_.insert(sequenced);
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior =
+ sequenced.shutdown_behavior;
+ pending_tasks_.insert(std::move(sequenced));
- if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
+ if (shutdown_behavior == BLOCK_SHUTDOWN)
blocking_shutdown_pending_task_count_++;
create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
- }
#if 0
- }
+ }
#endif
+ }
// Use != REDIRECTED_TO_TASK_SCHEDULER instead of == USE_WORKER_POOL to ensure
// correct behavior if a task is posted to a SequencedWorkerPool before
@@ -803,7 +809,7 @@ bool SequencedWorkerPool::Inner::PostTask(
}
bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
- const SequencedTask& sequenced,
+ SequencedTask sequenced,
const TimeDelta& delay) {
#if 1
NOTREACHED();
@@ -837,7 +843,8 @@ bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
.WithPriority(task_priority_)
.WithShutdownBehavior(task_shutdown_behavior);
return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
- ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
+ ->PostDelayedTask(sequenced.posted_from, std::move(sequenced.task),
+ delay);
#endif
}
@@ -1263,7 +1270,11 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
// refcounted, so we just need to keep a copy of them alive until the lock
// is exited. The calling code can just clear() the vector they passed to
// us once the lock is exited to make this happen.
- delete_these_outside_lock->push_back(*i);
+ //
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i++);
continue;
}
@@ -1274,14 +1285,18 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
status = GET_WORK_WAIT;
if (cleanup_state_ == CLEANUP_RUNNING) {
// Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
- delete_these_outside_lock->push_back(*i);
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i);
}
break;
}
- // Found a runnable task.
- *task = *i;
+ // Found a runnable task. The const_cast is safe here since the object is
+ // erased from |pending_tasks_| soon after the move.
+ *task = std::move(const_cast<SequencedTask&>(*i));
pending_tasks_.erase(i);
if (task->shutdown_behavior == BLOCK_SHUTDOWN) {
blocking_shutdown_pending_task_count_--;
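
The GetWork() hunks above move SequencedTask objects out of the std::set through a const_cast right before erasing them. A standalone sketch (plain C++, not Chromium code) of why this is acceptable: the moved-from members are not part of the ordering key, and the element is erased immediately, so the set's invariants are never observed in a broken state.

    #include <functional>
    #include <set>
    #include <utility>
    #include <vector>

    struct Task {
      int sequence_id = 0;          // The only field the comparator looks at.
      std::function<void()> body;   // Moved out; never used for ordering.
      bool operator<(const Task& other) const {
        return sequence_id < other.sequence_id;
      }
    };

    std::vector<Task> DrainAll(std::set<Task>* pending) {
      std::vector<Task> drained;
      for (auto it = pending->begin(); it != pending->end();) {
        // set iterators are const; cast away constness only to move, then erase.
        drained.push_back(std::move(const_cast<Task&>(*it)));
        it = pending->erase(it);
      }
      return drained;
    }
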
@@ -1558,14 +1573,14 @@ SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
bool SequencedWorkerPool::PostWorkerTask(
const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedWorkerTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
@@ -1575,7 +1590,7 @@ bool SequencedWorkerPool::PostDelayedWorkerTask(
bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
std::move(task), TimeDelta());
@@ -1584,7 +1599,7 @@ bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
bool SequencedWorkerPool::PostSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
std::move(task), TimeDelta());
}
@@ -1592,7 +1607,7 @@ bool SequencedWorkerPool::PostSequencedWorkerTask(
bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
@@ -1603,7 +1618,7 @@ bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
const std::string& token_name,
const tracked_objects::Location& from_here,
- Closure task) {
+ OnceClosure task) {
DCHECK(!token_name.empty());
return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
from_here, std::move(task), TimeDelta());
@@ -1612,7 +1627,7 @@ bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
std::move(task), TimeDelta());
@@ -1620,7 +1635,7 @@ bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
bool SequencedWorkerPool::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
return PostDelayedWorkerTask(from_here, std::move(task), delay);
}
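
With the Closure to OnceClosure migration above, callers now hand the pool a move-only task. A hypothetical caller sketch, assuming base::BindOnce() is available at this revision (it is not part of this patch):

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/location.h"
    #include "base/threading/sequenced_worker_pool.h"

    void DoBackgroundWork(int id);

    void PostExample(base::SequencedWorkerPool* pool) {
      base::OnceClosure task = base::BindOnce(&DoBackgroundWork, 42);
      // OnceClosure is move-only; it must be std::move()d into the pool, and it
      // can be run at most once.
      pool->PostWorkerTask(FROM_HERE, std::move(task));
    }
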
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index 8cdeb0b5db..e577e1be11 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -275,7 +275,8 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
//
// Returns true if the task was posted successfully. This may fail during
// shutdown regardless of the specified ShutdownBehavior.
- bool PostWorkerTask(const tracked_objects::Location& from_here, Closure task);
+ bool PostWorkerTask(const tracked_objects::Location& from_here,
+ OnceClosure task);
// Same as PostWorkerTask but allows a delay to be specified (although doing
// so changes the shutdown behavior). The task will be run after the given
@@ -287,13 +288,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// task will be guaranteed to run to completion before shutdown
// (BLOCK_SHUTDOWN semantics).
bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostWorkerTask but allows specification of the shutdown behavior.
bool PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// Like PostWorkerTask above, but provides sequencing semantics. This means
@@ -309,13 +310,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// shutdown regardless of the specified ShutdownBehavior.
bool PostSequencedWorkerTask(SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task);
+ OnceClosure task);
// Like PostSequencedWorkerTask above, but allows you to specify a named
// token, which saves an extra call to GetNamedSequenceToken.
bool PostNamedSequencedWorkerTask(const std::string& token_name,
const tracked_objects::Location& from_here,
- Closure task);
+ OnceClosure task);
// Same as PostSequencedWorkerTask but allows a delay to be specified
// (although doing so changes the shutdown behavior). The task will be run
@@ -329,7 +330,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostSequencedWorkerTask but allows specification of the shutdown
@@ -337,12 +338,12 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// TaskRunner implementation. Forwards to PostDelayedWorkerTask().
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index c30320f0dc..0aeed2a9e4 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_restrictions.h"
@@ -290,6 +291,7 @@ void Thread::ThreadMain() {
// Complete the initialization of our Thread object.
PlatformThread::SetName(name_.c_str());
+ ANNOTATE_THREAD_NAME(name_.c_str()); // Tell the thread name to the race detector.
// Lazily initialize the |message_loop| so that it can run on this thread.
DCHECK(message_loop_);
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index af8347432b..0cb964e8f7 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -32,6 +32,8 @@ typedef PlatformTest ThreadTest;
namespace {
void ToggleValue(bool* value) {
+ ANNOTATE_BENIGN_RACE(value, "Test-only data race on boolean "
+ "in base/thread_unittest");
*value = !*value;
}
@@ -39,6 +41,8 @@ class SleepInsideInitThread : public Thread {
public:
SleepInsideInitThread() : Thread("none") {
init_called_ = false;
+ ANNOTATE_BENIGN_RACE(
+ this, "Benign test-only data race on vptr - http://crbug.com/98219");
}
~SleepInsideInitThread() override { Stop(); }
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index bc313ce25b..26ff10f1f5 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -27,7 +27,7 @@ class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- Closure task) override {
+ OnceClosure task) override {
return WorkerPool::PostTask(from_here, std::move(task), task_is_slow_);
}
@@ -45,7 +45,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -56,7 +56,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// zero because non-zero delays are not supported.
bool PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay);
const bool tasks_are_slow_;
@@ -73,7 +73,7 @@ WorkerPoolTaskRunner::~WorkerPoolTaskRunner() {
bool WorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) {
return PostDelayedTaskAssertZeroDelay(from_here, std::move(task), delay);
}
@@ -84,7 +84,7 @@ bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
<< "WorkerPoolTaskRunner does not support non-zero delays";
@@ -102,8 +102,8 @@ struct TaskRunnerHolder {
} // namespace
bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow) {
// Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
// do about them because WorkerPool doesn't have a flushing API.
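
The same migration applies to WorkerPool::PostTaskAndReply(): |task| runs on a worker thread and |reply| is posted back to the calling sequence once |task| completes. A hypothetical usage sketch, again assuming base::BindOnce() is available:

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/threading/worker_pool.h"

    void ComputeInBackground();
    void OnComputeDone();

    void Example() {
      base::WorkerPool::PostTaskAndReply(FROM_HERE,
                                         base::BindOnce(&ComputeInBackground),
                                         base::BindOnce(&OnComputeDone),
                                         /*task_is_slow=*/true);
    }
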
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index d97dbd6a69..d1c666d2f9 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -32,15 +32,15 @@ class BASE_EXPORT WorkerPool {
// false if |task| could not be posted to a worker thread. Regardless of
// return value, ownership of |task| is transferred to the worker pool.
static bool PostTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
bool task_is_slow);
// Just like TaskRunner::PostTaskAndReply, except the destination
// for |task| is a worker thread and you can specify |task_is_slow| just
// like you can for PostTask above.
static bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow);
// Return true if the current thread is one that this WorkerPool runs tasks
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index e0efa463f7..851480adda 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -49,7 +49,7 @@ class WorkerPoolImpl {
~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
- base::Closure task,
+ base::OnceClosure task,
bool task_is_slow);
private:
@@ -61,7 +61,7 @@ WorkerPoolImpl::WorkerPoolImpl()
kIdleSecondsBeforeExit)) {}
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
- base::Closure task,
+ base::OnceClosure task,
bool task_is_slow) {
pool_->PostTask(from_here, std::move(task));
}
@@ -114,7 +114,7 @@ void WorkerThread::ThreadMain() {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- base::Closure task,
+ base::OnceClosure task,
bool task_is_slow) {
g_lazy_worker_pool.Pointer()->PostTask(from_here, std::move(task),
task_is_slow);
@@ -140,12 +140,14 @@ PosixDynamicThreadPool::~PosixDynamicThreadPool() {
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
- base::Closure task) {
+ base::OnceClosure task) {
PendingTask pending_task(from_here, std::move(task));
AddTask(&pending_task);
}
void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
+ DCHECK(pending_task);
+ DCHECK(pending_task->task);
AutoLock locked(lock_);
pending_tasks_.push(std::move(*pending_task));
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index cfa50c21dd..0b10adf8f3 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -51,7 +51,7 @@ class BASE_EXPORT PosixDynamicThreadPool
int idle_seconds_before_exit);
// Adds |task| to the thread pool.
- void PostTask(const tracked_objects::Location& from_here, Closure task);
+ void PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Worker thread method to wait for up to |idle_seconds_before_exit| for more
// work from the thread pool. Returns NULL if no work is available.
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 63d40611a6..b9f440adb6 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -5,6 +5,7 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <algorithm>
+#include <limits>
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -12,9 +13,9 @@ namespace base {
namespace trace_event {
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register, AllocationIndex index)
- : register_(alloc_register),
- index_(index) {}
+ const AllocationRegister& alloc_register,
+ AllocationIndex index)
+ : register_(alloc_register), index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
index_ = register_.allocations_.Next(index_ + 1);
@@ -25,12 +26,12 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-AllocationRegister::Allocation
-AllocationRegister::ConstIterator::operator*() const {
+AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
+ const {
return register_.GetAllocation(index_);
}
-size_t AllocationRegister::BacktraceHasher::operator () (
+size_t AllocationRegister::BacktraceHasher::operator()(
const Backtrace& backtrace) const {
const size_t kSampleLength = 10;
@@ -42,7 +43,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
}
size_t tail_start = backtrace.frame_count -
- std::min(backtrace.frame_count - head_end, kSampleLength);
+ std::min(backtrace.frame_count - head_end, kSampleLength);
for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
@@ -55,7 +56,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
return (total_value * 131101) >> 14;
}
-size_t AllocationRegister::AddressHasher::operator () (
+size_t AllocationRegister::AddressHasher::operator()(
const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
@@ -75,34 +76,48 @@ AllocationRegister::AllocationRegister()
AllocationRegister::AllocationRegister(size_t allocation_capacity,
size_t backtrace_capacity)
- : allocations_(allocation_capacity),
- backtraces_(backtrace_capacity) {}
-
-AllocationRegister::~AllocationRegister() {
+ : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
+ Backtrace sentinel = {};
+ sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
+ sentinel.frame_count = 1;
+
+ // Rationale for max / 2: in theory we could just start the sentinel with a
+ // refcount == 0. However, using max / 2 allows short circuiting of the
+ // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast
+ // path. From a functional viewpoint, the sentinel is safe even if we wrap
+ // over the refcount.
+ BacktraceMap::KVPair::second_type sentinel_refcount =
+ std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
+ auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
+ DCHECK(index_and_flag.second);
+ DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
-void AllocationRegister::Insert(const void* address,
+AllocationRegister::~AllocationRegister() {}
+
+bool AllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
DCHECK(address != nullptr);
if (size == 0) {
- return;
+ return false;
}
- AllocationInfo info = {
- size,
- context.type_name,
- InsertBacktrace(context.backtrace)
- };
+ AllocationInfo info = {size, context.type_name,
+ InsertBacktrace(context.backtrace)};
// Try to insert the allocation.
auto index_and_flag = allocations_.Insert(address, info);
- if (!index_and_flag.second) {
+ if (!index_and_flag.second &&
+ index_and_flag.first != AllocationMap::kInvalidKVIndex) {
// |address| is already there - overwrite the allocation info.
auto& old_info = allocations_.Get(index_and_flag.first).second;
RemoveBacktrace(old_info.backtrace_index);
old_info = info;
+ return true;
}
+
+ return index_and_flag.second;
}
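
Because Insert() now reports whether the allocation was actually recorded, a caller can account for drops instead of relying on the old guard-page CHECK. A hypothetical call-site sketch (the dropped-allocation counter is illustrative only, not part of this patch):

    #include <cstddef>

    #include "base/trace_event/heap_profiler_allocation_context.h"
    #include "base/trace_event/heap_profiler_allocation_register.h"

    void RecordAlloc(base::trace_event::AllocationRegister* reg,
                     const void* address,
                     size_t size,
                     const base::trace_event::AllocationContext& context,
                     size_t* dropped_allocations) {
      if (!reg->Insert(address, size, context))
        ++*dropped_allocations;  // Register was full (or size == 0); keep going.
    }
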
void AllocationRegister::Remove(const void* address) {
@@ -140,15 +155,17 @@ AllocationRegister::ConstIterator AllocationRegister::end() const {
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
size_t allocated = sizeof(AllocationRegister);
- size_t resident = sizeof(AllocationRegister)
- + allocations_.EstimateUsedMemory()
- + backtraces_.EstimateUsedMemory();
+ size_t resident = sizeof(AllocationRegister) +
+ allocations_.EstimateUsedMemory() +
+ backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
const Backtrace& backtrace) {
auto index = backtraces_.Insert(backtrace, 0).first;
+ if (index == BacktraceMap::kInvalidKVIndex)
+ return kOutOfStorageBacktraceIndex;
auto& backtrace_and_count = backtraces_.Get(index);
backtrace_and_count.second++;
return index;
@@ -156,7 +173,8 @@ AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
auto& backtrace_and_count = backtraces_.Get(index);
- if (--backtrace_and_count.second == 0) {
+ if (--backtrace_and_count.second == 0 &&
+ index != kOutOfStorageBacktraceIndex) {
// Backtrace is not referenced anymore - remove it.
backtraces_.Remove(index);
}
@@ -165,15 +183,11 @@ void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
AllocationRegister::Allocation AllocationRegister::GetAllocation(
AllocationMap::KVIndex index) const {
const auto& address_and_info = allocations_.Get(index);
- const auto& backtrace_and_count = backtraces_.Get(
- address_and_info.second.backtrace_index);
- return {
- address_and_info.first,
- address_and_info.second.size,
- AllocationContext(
- backtrace_and_count.first,
- address_and_info.second.type_name)
- };
+ const auto& backtrace_and_count =
+ backtraces_.Get(address_and_info.second.backtrace_index);
+ return {address_and_info.first, address_and_info.second.size,
+ AllocationContext(backtrace_and_count.first,
+ address_and_info.second.type_name)};
}
} // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeae..ac9872f001 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
// For implementation simplicity API uses integer index instead
// of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
- static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+ enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
// Capacity controls how many items this hash map can hold, and largely
// affects memory footprint.
- FixedHashMap(size_t capacity)
- : num_cells_(capacity),
- cells_(static_cast<Cell*>(
- AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
- buckets_(static_cast<Bucket*>(
- AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
- free_list_(nullptr),
- next_unused_cell_(0) {}
+ explicit FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ num_inserts_dropped_(0),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
~FixedHashMap() {
FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
}
+ // Returns {kInvalidKVIndex, false} if the table is full.
std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
Cell** p_cell = Lookup(key);
Cell* cell = *p_cell;
@@ -74,7 +76,15 @@ class FixedHashMap {
}
// Get a free cell and link it.
- *p_cell = cell = GetFreeCell();
+ cell = GetFreeCell();
+ if (!cell) {
+ if (num_inserts_dropped_ <
+ std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+ ++num_inserts_dropped_;
+ }
+ return {kInvalidKVIndex, false};
+ }
+ *p_cell = cell;
cell->p_prev = p_cell;
cell->next = nullptr;
@@ -137,6 +147,8 @@ class FixedHashMap {
bits::Align(sizeof(Bucket) * NumBuckets, page_size);
}
+ size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
private:
friend base::trace_event::AllocationRegisterTest;
@@ -175,7 +187,8 @@ class FixedHashMap {
}
// Returns a cell that is not being used to store an entry (either by
- // recycling from the free list or by taking a fresh cell).
+ // recycling from the free list or by taking a fresh cell). May return
+ // nullptr if the hash table has run out of memory.
Cell* GetFreeCell() {
// First try to re-use a cell from the free list.
if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
return cell;
}
- // Otherwise pick the next cell that has not been touched before.
- size_t idx = next_unused_cell_;
- next_unused_cell_++;
-
// If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside
- // of the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
-
- CHECK_LT(next_unused_cell_, num_cells_ + 1)
- << "Allocation Register hash table has too little capacity. Increase "
- "the capacity to run heap profiler in large sessions.";
-
- return &cells_[idx];
+ // was reserved for |cells_|), return nullptr.
+ if (next_unused_cell_ >= num_cells_) {
+ return nullptr;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ return &cells_[next_unused_cell_++];
}
// Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +220,9 @@ class FixedHashMap {
// Number of cells.
size_t const num_cells_;
+ // Number of calls to Insert() that were lost because the hashtable was full.
+ size_t num_inserts_dropped_;
+
// The array of cells. This array is backed by mmapped memory. Lower indices
// are accessed first, higher indices are accessed only when the |free_list_|
// is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +252,8 @@ class TraceEventMemoryOverhead;
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
class BASE_EXPORT AllocationRegister {
public:
// Details about an allocation.
@@ -282,7 +288,10 @@ class BASE_EXPORT AllocationRegister {
// Inserts allocation details into the table. If the address was present
// already, its details are updated. |address| must not be null.
- void Insert(const void* address,
+ //
+ // Returns true if an insert occurred. Inserts may fail because the table
+ // is full.
+ bool Insert(const void* address,
size_t size,
const AllocationContext& context);
@@ -359,6 +368,14 @@ class BASE_EXPORT AllocationRegister {
AllocationMap allocations_;
BacktraceMap backtraces_;
+ // Sentinel used when the |backtraces_| table is full.
+ //
+ // This is a slightly leaky abstraction to allow for constant propagation. It
+ // knows that the sentinel will be the first item inserted into the table
+ // and that the first index returned will be 0. The constructor DCHECKs
+ // this assumption.
+ enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
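
A standalone sketch (plain C++, not part of this patch) of the usual motivation for turning kInvalidKVIndex from a static const member into an enumerator: an enumerator is a pure compile-time constant and never needs an out-of-line definition, while a static const data member does as soon as something odr-uses it, for example by binding a reference to it.

    #include <cstddef>

    struct WithStaticConst {
      static const std::size_t kInvalid = static_cast<std::size_t>(-1);
      // Pre-C++17, passing kInvalid by const reference (EXPECT_EQ, std::min, ...)
      // odr-uses it and requires a definition in exactly one .cc file:
      //   const std::size_t WithStaticConst::kInvalid;
    };

    struct WithEnum {
      enum : std::size_t { kInvalid = static_cast<std::size_t>(-1) };
      // Always a prvalue constant; no out-of-line definition ever needed.
    };
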
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index a74b95634d..6ed1ca8fff 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -426,7 +426,7 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
}
void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
AutoLock lock(lock_);
dump_providers_for_polling_.insert(mdpinfo);
@@ -438,7 +438,7 @@ void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
}
void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
mdpinfo->dump_provider->SuspendFastMemoryPolling();
AutoLock lock(lock_);
@@ -956,34 +956,6 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
return session_state_->IsDumpModeAllowed(dump_mode);
}
-MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
- MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode)
- : dump_provider(dump_provider),
- name(name),
- task_runner(std::move(task_runner)),
- options(options),
- consecutive_failures(0),
- disabled(false),
- whitelisted_for_background_mode(whitelisted_for_background_mode) {}
-
-MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
-
-bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
- if (!a || !b)
- return a.get() < b.get();
- // Ensure that unbound providers (task_runner == nullptr) always run last.
- // Rationale: some unbound dump providers are known to be slow, keep them last
- // to avoid skewing timings of the other dump providers.
- return std::tie(a->task_runner, a->dump_provider) >
- std::tie(b->task_runner, b->dump_provider);
-}
-
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index ebee048691..e7f5194850 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -9,7 +9,7 @@
#include <map>
#include <memory>
-#include <set>
+#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
@@ -19,6 +19,7 @@
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_provider_info.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
@@ -170,70 +171,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend class MemoryDumpScheduler;
friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
- // Descriptor used to hold information about registered MDPs.
- // Some important considerations about lifetime of this object:
- // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
- // the |dump_providers_| collection (% unregistration while dumping).
- // - Upon each dump they (actually their scoped_refptr-s) are copied into
- // the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
- // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
- // inside ProcessMemoryDumpAsyncState is removed.
- // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
- // - If UnregisterDumpProvider() is called while a dump is in progress, the
- // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
- // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
- // - The non-const fields of MemoryDumpProviderInfo are safe to access only
- // on tasks running in the |task_runner|, unless the thread has been
- // destroyed.
- struct MemoryDumpProviderInfo
- : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- // Define a total order based on the |task_runner| affinity, so that MDPs
- // belonging to the same SequencedTaskRunner are adjacent in the set.
- struct Comparator {
- bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpProviderInfo>& b) const;
- };
- using OrderedSet =
- std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
-
- MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode);
-
- MemoryDumpProvider* const dump_provider;
-
- // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
- // nullptr in all other cases.
- std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
-
- // Human readable name, for debugging and testing. Not necessarily unique.
- const char* const name;
-
- // The task runner affinity. Can be nullptr, in which case the dump provider
- // will be invoked on |dump_thread_|.
- const scoped_refptr<SequencedTaskRunner> task_runner;
-
- // The |options| arg passed to RegisterDumpProvider().
- const MemoryDumpProvider::Options options;
-
- // For fail-safe logic (auto-disable failing MDPs).
- int consecutive_failures;
-
- // Flagged either by the auto-disable logic or during unregistration.
- bool disabled;
-
- // True if the dump provider is whitelisted for background mode.
- const bool whitelisted_for_background_mode;
-
- private:
- friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
- ~MemoryDumpProviderInfo();
-
- DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
- };
-
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfil an asynchronous CreateProcessDump()
// request. At any time exactly one task runner owns a
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index e037fd4982..e126edd397 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -32,6 +32,7 @@
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -104,7 +105,7 @@ void OnTraceDataCollected(Closure quit_closure,
// Posts |task| to |task_runner| and blocks until it is executed.
void PostTaskAndWait(const tracked_objects::Location& from_here,
SequencedTaskRunner* task_runner,
- base::Closure task) {
+ base::OnceClosure task) {
base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner->PostTask(from_here, std::move(task));
@@ -115,8 +116,6 @@ void PostTaskAndWait(const tracked_objects::Location& from_here,
event.Wait();
}
-} // namespace
-
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
@@ -183,14 +182,14 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override {
NOTREACHED();
return false;
}
bool PostDelayedTask(const tracked_objects::Location& from_here,
- Closure task,
+ OnceClosure task,
TimeDelta delay) override {
num_of_post_tasks_++;
if (enabled_) {
@@ -213,6 +212,8 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned num_of_post_tasks_;
};
+} // namespace
+
class MemoryDumpManagerTest : public testing::Test {
public:
MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -439,7 +440,13 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
-TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+// Flaky on iOS, see crbug.com/706874
+#if defined(OS_IOS)
+#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
+#else
+#define MAYBE_RegistrationConsistency RegistrationConsistency
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
@@ -1013,7 +1020,13 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
-TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
+// Flaky on iOS, see crbug.com/706961
+#if defined(OS_IOS)
+#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
+#else
+#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
base::WaitableEvent tracing_disabled_event(
WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
diff --git a/base/trace_event/memory_dump_provider_info.cc b/base/trace_event/memory_dump_provider_info.cc
new file mode 100644
index 0000000000..6bb711018b
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_provider_info.h"
+
+#include <tuple>
+
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+ MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
+ : dump_provider(dump_provider),
+ options(options),
+ name(name),
+ task_runner(std::move(task_runner)),
+ whitelisted_for_background_mode(whitelisted_for_background_mode),
+ consecutive_failures(0),
+ disabled(false) {}
+
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpProviderInfo::Comparator::operator()(
+ const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const {
+ if (!a || !b)
+ return a.get() < b.get();
+ // Ensure that unbound providers (task_runner == nullptr) always run last.
+ // Rationale: some unbound dump providers are known to be slow, keep them last
+ // to avoid skewing timings of the other dump providers.
+ return std::tie(a->task_runner, a->dump_provider) >
+ std::tie(b->task_runner, b->dump_provider);
+}
+
+} // namespace trace_event
+} // namespace base
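
The Comparator above orders providers with std::tie and operator>, which is what pushes unbound providers (null |task_runner|) to the end of the iteration order. A standalone sketch (plain C++, not Chromium code) of the same idea with simple value keys:

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <string>
    #include <tuple>

    struct Provider {
      std::uintptr_t runner;  // 0 stands in for "no task runner".
      std::string name;
    };

    struct Comparator {
      bool operator()(const Provider& a, const Provider& b) const {
        // operator> reverses the usual order, so the smallest key (0 == unbound)
        // ends up last when iterating the set.
        return std::tie(a.runner, a.name) > std::tie(b.runner, b.name);
      }
    };

    int main() {
      std::set<Provider, Comparator> providers = {
          {0x2, "bound_b"}, {0, "unbound"}, {0x1, "bound_a"}};
      for (const Provider& p : providers)
        std::cout << p.name << "\n";  // Prints bound_b, bound_a, unbound.
    }
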
diff --git a/base/trace_event/memory_dump_provider_info.h b/base/trace_event/memory_dump_provider_info.h
new file mode 100644
index 0000000000..ca63a987b2
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Wraps a MemoryDumpProvider (MDP), which is registered via
+// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
+// required to deal with it (which task runner it should be invoked onto,
+// whether it has been disabled, etc.)
+// More importantly, having a refptr to this object guarantees that an MDP that
+// is not thread-bound (hence which can only be unregistered via
+// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
+// refptr is held.
+//
+// Lifetime:
+// At any time, there is at most one instance of this class for each instance
+// of a given MemoryDumpProvider, but there might be several scoped_refptr
+// holding onto each of them. Specifically:
+// - In nominal conditions, there is a refptr for each registered MDP in the
+// MDM's |dump_providers_| list.
+// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
+// by MDM::UnregisterDumpProvider().
+// - However, when MDM starts a dump, the list of refptrs is copied into the
+// ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
+// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
+//   dump is in progress, the extra refptr of the handle is destroyed in
+// MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
+// inside ProcessMemoryDumpAsyncState is erase()-d.
+// - The PeakDetector can keep extra refptrs when enabled.
+struct BASE_EXPORT MemoryDumpProviderInfo
+ : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ public:
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
+ struct Comparator {
+ bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+ };
+ using OrderedSet =
+ std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+ MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
+
+ // It is safe to access the const fields below from any thread as they are
+ // never mutated.
+
+ MemoryDumpProvider* const dump_provider;
+
+ // The |options| arg passed to MDM::RegisterDumpProvider().
+ const MemoryDumpProvider::Options options;
+
+ // Human readable name, not unique (distinct MDP instances might have the same
+ // name). Used for debugging, testing and whitelisting for BACKGROUND mode.
+ const char* const name;
+
+ // The task runner onto which the MDP::OnMemoryDump call should be posted.
+ // Can be nullptr, in which case the MDP will be invoked on a background
+ // thread handled by MDM.
+ const scoped_refptr<SequencedTaskRunner> task_runner;
+
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
+ // These fields below, instead, are not thread safe and can be mutated only:
+ // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
+ // - By the MDM's background thread (or in any other way that guarantees
+ // sequencing) for non-thread-bound MDPs.
+
+ // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+ // nullptr in all other cases.
+ std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+ // For fail-safe logic (auto-disable failing MDPs).
+ int consecutive_failures;
+
+ // Flagged either by the auto-disable logic or during unregistration.
+ bool disabled;
+
+ private:
+ friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+ ~MemoryDumpProviderInfo();
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
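
A hypothetical sketch of the lifetime guarantee described in the header comment above: any code holding a scoped_refptr copy keeps the MemoryDumpProviderInfo alive even after the MDM has dropped its own reference (the function and the logging are illustrative only):

    #include "base/logging.h"
    #include "base/memory/ref_counted.h"
    #include "base/trace_event/memory_dump_provider_info.h"

    void LogProviderName(
        scoped_refptr<base::trace_event::MemoryDumpProviderInfo> mdpinfo) {
      // The refptr parameter holds a reference for the duration of the call, so
      // a concurrent UnregisterDumpProvider() cannot destroy |mdpinfo| under us.
      VLOG(1) << "Dump provider: " << mdpinfo->name;
    }
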
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index 66ea6c9f1a..150feb8e79 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -171,9 +171,11 @@ void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
}
void MemoryDumpScheduler::PollMemoryOnPollingThread() {
- if (polling_state_->current_state != PollingTriggerState::ENABLED)
+ if (!polling_state_)
return;
+ DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);
+
uint64_t polled_memory = 0;
bool res = mdm_->PollFastMemoryTotal(&polled_memory);
DCHECK(res);
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 3df09992b1..7ee9a4a101 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -186,7 +186,7 @@ bool TraceConfig::EventFilterConfig::GetArgAsSet(
}
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
@@ -277,7 +277,7 @@ std::string TraceConfig::ToCategoryFilterString() const {
}
bool TraceConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
return category_filter_.IsCategoryGroupEnabled(category_group_name);
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 29edc9a8ec..13b2f5f0ee 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -103,7 +103,7 @@ class BASE_EXPORT TraceConfig {
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
base::DictionaryValue* filter_args() const { return args_.get(); }
@@ -231,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
index dc30e0ea99..234db18c5c 100644
--- a/base/trace_event/trace_config_category_filter.cc
+++ b/base/trace_event/trace_config_category_filter.cc
@@ -45,9 +45,9 @@ TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
void TraceConfigCategoryFilter::InitializeFromString(
const StringPiece& category_filter_string) {
- std::vector<std::string> split =
- SplitString(category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- for (const std::string& category : split) {
+ std::vector<StringPiece> split = SplitStringPiece(
+ category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const StringPiece& category : split) {
// Ignore empty categories.
if (category.empty())
continue;
@@ -55,23 +55,22 @@ void TraceConfigCategoryFilter::InitializeFromString(
if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
CompareCase::SENSITIVE) &&
category.back() == ')') {
- std::string synthetic_category = category.substr(
+ StringPiece synthetic_category = category.substr(
strlen(kSyntheticDelayCategoryFilterPrefix),
category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
size_t name_length = synthetic_category.find(';');
if (name_length != std::string::npos && name_length > 0 &&
name_length != synthetic_category.size() - 1) {
- synthetic_delays_.push_back(synthetic_category);
+ synthetic_delays_.push_back(synthetic_category.as_string());
}
} else if (category.front() == '-') {
// Excluded categories start with '-'.
// Remove '-' from category string.
- excluded_categories_.push_back(category.substr(1));
- } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
+ excluded_categories_.push_back(category.substr(1).as_string());
+ } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
+ disabled_categories_.push_back(category.as_string());
} else {
- included_categories_.push_back(category);
+ included_categories_.push_back(category.as_string());
}
}
}
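
The InitializeFromString() hunk above switches to SplitStringPiece() so that parsing a category filter no longer copies every token into a std::string; the pieces view the original buffer and are materialized with as_string() only where a stored copy is required. A hypothetical standalone sketch of that pattern:

    #include <vector>

    #include "base/strings/string_piece.h"
    #include "base/strings/string_split.h"

    void Example() {
      base::StringPiece filter = "cat1, -cat2 ,disabled-by-default-cat3";
      std::vector<base::StringPiece> categories = base::SplitStringPiece(
          filter, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
      for (const base::StringPiece& category : categories) {
        if (category.empty())
          continue;  // Same "ignore empty categories" rule as the code above.
        // Each piece points into |filter|; call .as_string() only when the copy
        // must outlive the original buffer.
      }
    }
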
@@ -88,17 +87,17 @@ void TraceConfigCategoryFilter::InitializeFromConfigDict(
}
bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
bool had_enabled_by_default = false;
- DCHECK(category_group_name);
- std::string category_group_name_str = category_group_name;
- StringTokenizer category_group_tokens(category_group_name_str, ",");
+ DCHECK(!category_group_name.empty());
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
+ StringPiece category_group_token = category_group_tokens.token_piece();
// Don't allow empty tokens, nor tokens with leading or trailing space.
DCHECK(IsCategoryNameAllowed(category_group_token))
<< "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str()))
+ if (IsCategoryEnabled(category_group_token))
return true;
if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
@@ -109,7 +108,7 @@ bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
category_group_tokens.Reset();
bool category_group_disabled = false;
while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
+ StringPiece category_group_token = category_group_tokens.token_piece();
for (const std::string& category : excluded_categories_) {
if (MatchPattern(category_group_token, category)) {
// Current token of category_group_name is present in excluded_list.
@@ -140,7 +139,7 @@ bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
}
bool TraceConfigCategoryFilter::IsCategoryEnabled(
- const char* category_name) const {
+ const StringPiece& category_name) const {
// Check the disabled- filters and the disabled-* wildcard first so that a
// "*" filter does not include the disabled.
for (const std::string& category : disabled_categories_) {
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
index df8c3a5b2a..0d7dba0374 100644
--- a/base/trace_event/trace_config_category_filter.h
+++ b/base/trace_event/trace_config_category_filter.h
@@ -40,13 +40,13 @@ class BASE_EXPORT TraceConfigCategoryFilter {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Returns true if the category is enabled according to this trace config.
// This tells whether a category is enabled from the TraceConfig's
// perspective. Please refer to IsCategoryGroupEnabled() to determine if a
// category is enabled from the tracing runtime's perspective.
- bool IsCategoryEnabled(const char* category_name) const;
+ bool IsCategoryEnabled(const StringPiece& category_name) const;
void ToDict(DictionaryValue* dict) const;
diff --git a/base/trace_event/trace_event_synthetic_delay.cc b/base/trace_event/trace_event_synthetic_delay.cc
index b6ce2845c4..cfae7435e9 100644
--- a/base/trace_event/trace_event_synthetic_delay.cc
+++ b/base/trace_event/trace_event_synthetic_delay.cc
@@ -4,6 +4,7 @@
#include "base/macros.h"
#include "base/memory/singleton.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
namespace {
@@ -80,6 +81,7 @@ void TraceEventSyntheticDelay::Begin() {
// calculation is done with a lock held, it will always be correct. The only
// downside of this is that we may fail to apply some delays when the target
// duration changes.
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue())
return;
@@ -94,6 +96,7 @@ void TraceEventSyntheticDelay::Begin() {
void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
// See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue()) {
*out_end_time = TimeTicks();
return;
@@ -108,6 +111,7 @@ void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
void TraceEventSyntheticDelay::End() {
// See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue())
return;
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 85e1e16312..7a30e4ee57 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -2158,7 +2158,6 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
trace_log->SetDisabled();
}
-
class MyData : public ConvertableToTraceFormat {
public:
MyData() {}
diff --git a/base/values.cc b/base/values.cc
index fa90a57dbb..b5e44e68dd 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -69,7 +69,7 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
static_cast<const DictionaryValue&>(node));
default:
- return node.CreateDeepCopy();
+ return MakeUnique<Value>(node);
}
}
@@ -95,7 +95,7 @@ Value::Value(Value&& that) noexcept {
InternalMoveConstructFrom(std::move(that));
}
-Value::Value() : type_(Type::NONE) {}
+Value::Value() noexcept : type_(Type::NONE) {}
Value::Value(Type type) : type_(type) {
// Initialize with the default value.
@@ -149,7 +149,7 @@ Value::Value(const std::string& in_string) : type_(Type::STRING) {
DCHECK(IsStringUTF8(*string_value_));
}
-Value::Value(std::string&& in_string) : type_(Type::STRING) {
+Value::Value(std::string&& in_string) noexcept : type_(Type::STRING) {
string_value_.Init(std::move(in_string));
DCHECK(IsStringUTF8(*string_value_));
}
@@ -168,7 +168,7 @@ Value::Value(const std::vector<char>& in_blob) : type_(Type::BINARY) {
binary_value_.Init(in_blob);
}
-Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
+Value::Value(std::vector<char>&& in_blob) noexcept : type_(Type::BINARY) {
binary_value_.Init(std::move(in_blob));
}
@@ -184,7 +184,7 @@ Value& Value::operator=(const Value& that) {
return *this;
}
-Value& Value::operator=(Value&& that) {
+Value& Value::operator=(Value&& that) noexcept {
DCHECK(this != &that) << "attempt to self move assign.";
InternalCleanup();
InternalMoveConstructFrom(std::move(that));
@@ -341,55 +341,11 @@ bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
}
Value* Value::DeepCopy() const {
- // This method should only be getting called for null Values--all subclasses
- // need to provide their own implementation;.
- switch (type()) {
- case Type::NONE:
- return CreateNullValue().release();
-
- case Type::BOOLEAN:
- return new Value(bool_value_);
- case Type::INTEGER:
- return new Value(int_value_);
- case Type::DOUBLE:
- return new Value(double_value_);
- case Type::STRING:
- return new Value(*string_value_);
- // For now, make BinaryValues for backward-compatibility. Convert to
- // Value when that code is deleted.
- case Type::BINARY:
- return new Value(*binary_value_);
-
- // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
- // are completely inlined.
- case Type::DICTIONARY: {
- DictionaryValue* result = new DictionaryValue;
-
- for (const auto& current_entry : **dict_ptr_) {
- result->SetWithoutPathExpansion(current_entry.first,
- current_entry.second->CreateDeepCopy());
- }
-
- return result;
- }
-
- case Type::LIST: {
- ListValue* result = new ListValue;
-
- for (const auto& entry : *list_)
- result->Append(entry->CreateDeepCopy());
-
- return result;
- }
-
- default:
- NOTREACHED();
- return nullptr;
- }
+ return new Value(*this);
}
std::unique_ptr<Value> Value::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<Value>(*this);
}
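
With Value now copyable, the deprecated DeepCopy()/CreateDeepCopy() calls reduce to the copy constructor, which is exactly what the hunks above do internally. A hypothetical migration sketch for call sites:

    #include <memory>

    #include "base/memory/ptr_util.h"
    #include "base/values.h"

    void CopyExamples(const base::Value& original) {
      base::Value stack_copy(original);  // Copy constructor: full deep copy.
      std::unique_ptr<base::Value> heap_copy =
          base::MakeUnique<base::Value>(original);  // Replaces CreateDeepCopy().
      // Both copies are independent of |original|.
    }
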
bool operator==(const Value& lhs, const Value& rhs) {
@@ -542,14 +498,23 @@ void Value::InternalCopyConstructFrom(const Value& that) {
binary_value_.Init(*that.binary_value_);
return;
// DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // unique_ptrs. This is why the explicit copy of every element is necessary
+ // here.
// TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
// can be copied directly.
case Type::DICTIONARY:
- dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
+ dict_ptr_.Init(MakeUnique<DictStorage>());
+ for (const auto& it : **that.dict_ptr_) {
+ (*dict_ptr_)
+ ->emplace_hint((*dict_ptr_)->end(), it.first,
+ MakeUnique<Value>(*it.second));
+ }
return;
case Type::LIST:
- list_.Init(std::move(*that.CreateDeepCopy()->list_));
+ list_.Init();
+ list_->reserve(that.list_->size());
+ for (const auto& it : *that.list_)
+ list_->push_back(MakeUnique<Value>(*it));
return;
}
}
@@ -600,14 +565,15 @@ void Value::InternalCopyAssignFromSameType(const Value& that) {
*binary_value_ = *that.binary_value_;
return;
// DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
+ // unique_ptrs. This is why the explicit call to the copy constructor is
+ // necessary here.
// TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
// can be copied directly.
case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
+ *dict_ptr_ = std::move(*Value(that).dict_ptr_);
return;
case Type::LIST:
- *list_ = std::move(*that.CreateDeepCopy()->list_);
+ *list_ = std::move(*Value(that).list_);
return;
}
}
@@ -1073,8 +1039,7 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
}
}
// All other cases: Make a copy and hook it up.
- SetWithoutPathExpansion(it.key(),
- base::WrapUnique(merge_value->DeepCopy()));
+ SetWithoutPathExpansion(it.key(), MakeUnique<Value>(*merge_value));
}
}
@@ -1091,11 +1056,11 @@ DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
- return static_cast<DictionaryValue*>(Value::DeepCopy());
+ return new DictionaryValue(*this);
}
std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<DictionaryValue>(*this);
}
///////////////////// ListValue ////////////////////
@@ -1358,11 +1323,11 @@ void ListValue::Swap(ListValue* other) {
}
ListValue* ListValue::DeepCopy() const {
- return static_cast<ListValue*>(Value::DeepCopy());
+ return new ListValue(*this);
}
std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<ListValue>(*this);
}
ValueSerializer::~ValueSerializer() {
diff --git a/base/values.h b/base/values.h
index 95d5d1c2ba..925152dbee 100644
--- a/base/values.h
+++ b/base/values.h
@@ -75,7 +75,7 @@ class BASE_EXPORT Value {
Value(const Value& that);
Value(Value&& that) noexcept;
- Value(); // A null value.
+ Value() noexcept; // A null value.
explicit Value(Type type);
explicit Value(bool in_bool);
explicit Value(int in_int);
@@ -89,16 +89,16 @@ class BASE_EXPORT Value {
// arguments.
explicit Value(const char* in_string);
explicit Value(const std::string& in_string);
- explicit Value(std::string&& in_string);
+ explicit Value(std::string&& in_string) noexcept;
explicit Value(const char16* in_string);
explicit Value(const string16& in_string);
explicit Value(StringPiece in_string);
explicit Value(const std::vector<char>& in_blob);
- explicit Value(std::vector<char>&& in_blob);
+ explicit Value(std::vector<char>&& in_blob) noexcept;
Value& operator=(const Value& that);
- Value& operator=(Value&& that);
+ Value& operator=(Value&& that) noexcept;
~Value();
@@ -157,6 +157,8 @@ class BASE_EXPORT Value {
// to the copy. The caller gets ownership of the copy, of course.
// Subclasses return their own type directly in their overrides;
// this works because C++ supports covariant return types.
+ // DEPRECATED, use Value's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
Value* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<Value> CreateDeepCopy() const;
@@ -364,6 +366,8 @@ class BASE_EXPORT DictionaryValue : public Value {
DictStorage::const_iterator it_;
};
+ // DEPRECATED, use DictionaryValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
DictionaryValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
@@ -480,6 +484,8 @@ class BASE_EXPORT ListValue : public Value {
const_iterator begin() const { return list_->begin(); }
const_iterator end() const { return list_->end(); }
+ // DEPRECATED, use ListValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
ListValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
std::unique_ptr<ListValue> CreateDeepCopy() const;
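
The noexcept annotations added to values.h above are what the new TestNothrow static_asserts below verify. One practical consequence worth spelling out: std::vector reallocations go through std::move_if_noexcept, so existing elements are moved rather than copied only when the element's move constructor is noexcept. A minimal standalone sketch of that effect (illustrative only, not part of this patch):

    #include <type_traits>
    #include <vector>

    #include "base/values.h"

    static_assert(std::is_nothrow_move_constructible<base::Value>::value,
                  "vector growth can move base::Value elements instead of copying");

    void GrowVector() {
      std::vector<base::Value> values;
      values.emplace_back("first");   // emplace_back calls the explicit Value(const char*) ctor
      values.emplace_back("second");  // any reallocation moves the existing elements, no copies
    }
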
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 9a7eb2f270..6c1f017095 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -8,6 +8,8 @@
#include <limits>
#include <memory>
+#include <string>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -18,6 +20,20 @@
namespace base {
+TEST(ValuesTest, TestNothrow) {
+ static_assert(std::is_nothrow_move_constructible<Value>::value,
+ "IsNothrowMoveConstructible");
+ static_assert(std::is_nothrow_default_constructible<Value>::value,
+ "IsNothrowDefaultConstructible");
+ static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
+ "IsNothrowMoveConstructibleFromString");
+ static_assert(
+ std::is_nothrow_constructible<Value, std::vector<char>&&>::value,
+ "IsNothrowMoveConstructibleFromBlob");
+ static_assert(std::is_nothrow_move_assignable<Value>::value,
+ "IsNothrowMoveAssignable");
+}
+
// Group of tests for the value constructors.
TEST(ValuesTest, ConstructBool) {
Value true_value(true);
@@ -679,7 +695,7 @@ TEST(ValuesTest, DeepCopy) {
scoped_nested_dictionary->SetString("key", "value");
original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
- std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+ auto copy_dict = MakeUnique<DictionaryValue>(original_dict);
ASSERT_TRUE(copy_dict.get());
ASSERT_NE(copy_dict.get(), &original_dict);
@@ -802,14 +818,14 @@ TEST(ValuesTest, Equals) {
dv.SetString("d2", ASCIIToUTF16("http://google.com"));
dv.Set("e", Value::CreateNullValue());
- std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
+ auto copy = MakeUnique<DictionaryValue>(dv);
EXPECT_EQ(dv, *copy);
std::unique_ptr<ListValue> list(new ListValue);
ListValue* original_list = list.get();
list->Append(Value::CreateNullValue());
list->Append(WrapUnique(new DictionaryValue));
- std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
+ auto list_copy = MakeUnique<Value>(*list);
dv.Set("f", std::move(list));
EXPECT_NE(dv, *copy);
@@ -820,7 +836,7 @@ TEST(ValuesTest, Equals) {
EXPECT_NE(dv, *copy);
// Check if Equals detects differences in only the keys.
- copy = dv.CreateDeepCopy();
+ copy = MakeUnique<DictionaryValue>(dv);
EXPECT_EQ(dv, *copy);
copy->Remove("a", NULL);
copy->SetBoolean("aa", false);
@@ -1005,15 +1021,15 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
scoped_list->Append(std::move(scoped_list_element_1));
original_dict.Set("list", std::move(scoped_list));
- std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
- std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
- std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
- std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
- std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
- std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
- std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
- std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
- std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
+ auto copy_dict = MakeUnique<Value>(original_dict);
+ auto copy_null = MakeUnique<Value>(*original_null);
+ auto copy_bool = MakeUnique<Value>(*original_bool);
+ auto copy_int = MakeUnique<Value>(*original_int);
+ auto copy_double = MakeUnique<Value>(*original_double);
+ auto copy_string = MakeUnique<Value>(*original_string);
+ auto copy_string16 = MakeUnique<Value>(*original_string16);
+ auto copy_binary = MakeUnique<Value>(*original_binary);
+ auto copy_list = MakeUnique<Value>(*original_list);
EXPECT_EQ(original_dict, *copy_dict);
EXPECT_EQ(*original_null, *copy_null);
@@ -1188,7 +1204,7 @@ TEST(ValuesTest, DictionaryIterator) {
}
Value value1("value1");
- dict.Set("key1", value1.CreateDeepCopy());
+ dict.Set("key1", MakeUnique<Value>(value1));
bool seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
EXPECT_FALSE(seen1);
@@ -1199,7 +1215,7 @@ TEST(ValuesTest, DictionaryIterator) {
EXPECT_TRUE(seen1);
Value value2("value2");
- dict.Set("key2", value2.CreateDeepCopy());
+ dict.Set("key2", MakeUnique<Value>(value2));
bool seen2 = seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
if (it.key() == "key1") {
@@ -1232,21 +1248,21 @@ TEST(ValuesTest, GetWithNullOutValue) {
DictionaryValue dict_value;
ListValue list_value;
- main_dict.Set("bool", bool_value.CreateDeepCopy());
- main_dict.Set("int", int_value.CreateDeepCopy());
- main_dict.Set("double", double_value.CreateDeepCopy());
- main_dict.Set("string", string_value.CreateDeepCopy());
- main_dict.Set("binary", binary_value.CreateDeepCopy());
- main_dict.Set("dict", dict_value.CreateDeepCopy());
- main_dict.Set("list", list_value.CreateDeepCopy());
-
- main_list.Append(bool_value.CreateDeepCopy());
- main_list.Append(int_value.CreateDeepCopy());
- main_list.Append(double_value.CreateDeepCopy());
- main_list.Append(string_value.CreateDeepCopy());
- main_list.Append(binary_value.CreateDeepCopy());
- main_list.Append(dict_value.CreateDeepCopy());
- main_list.Append(list_value.CreateDeepCopy());
+ main_dict.Set("bool", MakeUnique<Value>(bool_value));
+ main_dict.Set("int", MakeUnique<Value>(int_value));
+ main_dict.Set("double", MakeUnique<Value>(double_value));
+ main_dict.Set("string", MakeUnique<Value>(string_value));
+ main_dict.Set("binary", MakeUnique<Value>(binary_value));
+ main_dict.Set("dict", MakeUnique<Value>(dict_value));
+ main_dict.Set("list", MakeUnique<Value>(list_value));
+
+ main_list.Append(MakeUnique<Value>(bool_value));
+ main_list.Append(MakeUnique<Value>(int_value));
+ main_list.Append(MakeUnique<Value>(double_value));
+ main_list.Append(MakeUnique<Value>(string_value));
+ main_list.Append(MakeUnique<Value>(binary_value));
+ main_list.Append(MakeUnique<Value>(dict_value));
+ main_list.Append(MakeUnique<Value>(list_value));
EXPECT_TRUE(main_dict.Get("bool", NULL));
EXPECT_TRUE(main_dict.Get("int", NULL));