summaryrefslogtreecommitdiff
path: root/base/task/sequence_manager
diff options
context:
space:
mode:
Diffstat (limited to 'base/task/sequence_manager')
-rw-r--r--base/task/sequence_manager/enqueue_order.cc17
-rw-r--r--base/task/sequence_manager/enqueue_order.h71
-rw-r--r--base/task/sequence_manager/graceful_queue_shutdown_helper.cc42
-rw-r--r--base/task/sequence_manager/graceful_queue_shutdown_helper.h50
-rw-r--r--base/task/sequence_manager/intrusive_heap.h229
-rw-r--r--base/task/sequence_manager/intrusive_heap_unittest.cc378
-rw-r--r--base/task/sequence_manager/lazily_deallocated_deque.h364
-rw-r--r--base/task/sequence_manager/lazily_deallocated_deque_unittest.cc364
-rw-r--r--base/task/sequence_manager/lazy_now.cc36
-rw-r--r--base/task/sequence_manager/lazy_now.h41
-rw-r--r--base/task/sequence_manager/moveable_auto_lock.h41
-rw-r--r--base/task/sequence_manager/real_time_domain.cc48
-rw-r--r--base/task/sequence_manager/real_time_domain.h37
-rw-r--r--base/task/sequence_manager/sequence_manager.cc26
-rw-r--r--base/task/sequence_manager/sequence_manager.h132
-rw-r--r--base/task/sequence_manager/sequence_manager_impl.cc724
-rw-r--r--base/task/sequence_manager/sequence_manager_impl.h341
-rw-r--r--base/task/sequence_manager/sequence_manager_impl_unittest.cc3260
-rw-r--r--base/task/sequence_manager/sequence_manager_perftest.cc306
-rw-r--r--base/task/sequence_manager/sequenced_task_source.h37
-rw-r--r--base/task/sequence_manager/task_queue.cc289
-rw-r--r--base/task/sequence_manager/task_queue.h368
-rw-r--r--base/task/sequence_manager/task_queue_impl.cc1016
-rw-r--r--base/task/sequence_manager/task_queue_impl.h471
-rw-r--r--base/task/sequence_manager/task_queue_selector.cc407
-rw-r--r--base/task/sequence_manager/task_queue_selector.h225
-rw-r--r--base/task/sequence_manager/task_queue_selector_logic.h37
-rw-r--r--base/task/sequence_manager/task_queue_selector_unittest.cc885
-rw-r--r--base/task/sequence_manager/task_time_observer.h32
-rw-r--r--base/task/sequence_manager/test/fake_task.cc35
-rw-r--r--base/task/sequence_manager/test/fake_task.h31
-rw-r--r--base/task/sequence_manager/test/lazy_thread_controller_for_test.cc123
-rw-r--r--base/task/sequence_manager/test/lazy_thread_controller_for_test.h53
-rw-r--r--base/task/sequence_manager/test/mock_time_domain.cc39
-rw-r--r--base/task/sequence_manager/test/mock_time_domain.h38
-rw-r--r--base/task/sequence_manager/test/sequence_manager_for_test.cc79
-rw-r--r--base/task/sequence_manager/test/sequence_manager_for_test.h46
-rw-r--r--base/task/sequence_manager/test/test_task_queue.cc23
-rw-r--r--base/task/sequence_manager/test/test_task_queue.h33
-rw-r--r--base/task/sequence_manager/test/test_task_time_observer.h23
-rw-r--r--base/task/sequence_manager/thread_controller.h85
-rw-r--r--base/task/sequence_manager/thread_controller_impl.cc269
-rw-r--r--base/task/sequence_manager/thread_controller_impl.h130
-rw-r--r--base/task/sequence_manager/thread_controller_with_message_pump_impl.cc205
-rw-r--r--base/task/sequence_manager/thread_controller_with_message_pump_impl.h109
-rw-r--r--base/task/sequence_manager/time_domain.cc136
-rw-r--r--base/task/sequence_manager/time_domain.h139
-rw-r--r--base/task/sequence_manager/time_domain_unittest.cc324
-rw-r--r--base/task/sequence_manager/work_queue.cc236
-rw-r--r--base/task/sequence_manager/work_queue.h152
-rw-r--r--base/task/sequence_manager/work_queue_sets.cc172
-rw-r--r--base/task/sequence_manager/work_queue_sets.h102
-rw-r--r--base/task/sequence_manager/work_queue_sets_unittest.cc328
-rw-r--r--base/task/sequence_manager/work_queue_unittest.cc475
54 files changed, 13659 insertions, 0 deletions
diff --git a/base/task/sequence_manager/enqueue_order.cc b/base/task/sequence_manager/enqueue_order.cc
new file mode 100644
index 0000000000..066ef0382e
--- /dev/null
+++ b/base/task/sequence_manager/enqueue_order.cc
@@ -0,0 +1,17 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/enqueue_order.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
// Starts the counter at kFirst (2): values 0 and 1 are reserved for
// EnqueueOrder::none() and EnqueueOrder::blocking_fence(), so generated
// orders never collide with the special values.
EnqueueOrder::Generator::Generator() : counter_(kFirst) {}

EnqueueOrder::Generator::~Generator() = default;
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/enqueue_order.h b/base/task/sequence_manager/enqueue_order.h
new file mode 100644
index 0000000000..fac1d179b0
--- /dev/null
+++ b/base/task/sequence_manager/enqueue_order.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// 64-bit number which is used to order tasks.
+// SequenceManager assumes this number will never overflow.
+class EnqueueOrder {
+ public:
+ EnqueueOrder() : value_(kNone) {}
+ ~EnqueueOrder() = default;
+
+ static EnqueueOrder none() { return EnqueueOrder(kNone); }
+ static EnqueueOrder blocking_fence() { return EnqueueOrder(kBlockingFence); }
+
+ // It's okay to use EnqueueOrder in boolean expressions keeping in mind
+ // that some non-zero values have a special meaning.
+ operator uint64_t() const { return value_; }
+
+ static EnqueueOrder FromIntForTesting(uint64_t value) {
+ return EnqueueOrder(value);
+ }
+
+ // EnqueueOrder can't be created from a raw number in non-test code.
+ // Generator is used to create it with strictly monotonic guarantee.
+ class BASE_EXPORT Generator {
+ public:
+ Generator();
+ ~Generator();
+
+ // Can be called from any thread.
+ EnqueueOrder GenerateNext() {
+ return EnqueueOrder(std::atomic_fetch_add_explicit(
+ &counter_, uint64_t(1), std::memory_order_relaxed));
+ }
+
+ private:
+ std::atomic<uint64_t> counter_;
+ DISALLOW_COPY_AND_ASSIGN(Generator);
+ };
+
+ private:
+ explicit EnqueueOrder(uint64_t value) : value_(value) {}
+
+ enum SpecialValues : uint64_t {
+ kNone = 0,
+ kBlockingFence = 1,
+ kFirst = 2,
+ };
+
+ uint64_t value_;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
diff --git a/base/task/sequence_manager/graceful_queue_shutdown_helper.cc b/base/task/sequence_manager/graceful_queue_shutdown_helper.cc
new file mode 100644
index 0000000000..9a8c893e93
--- /dev/null
+++ b/base/task/sequence_manager/graceful_queue_shutdown_helper.cc
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/graceful_queue_shutdown_helper.h"
+
+#include "base/task/sequence_manager/task_queue_impl.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+GracefulQueueShutdownHelper::GracefulQueueShutdownHelper()
+ : sequence_manager_deleted_(false) {}
+
+GracefulQueueShutdownHelper::~GracefulQueueShutdownHelper() = default;
+
+void GracefulQueueShutdownHelper::GracefullyShutdownTaskQueue(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+ AutoLock lock(lock_);
+ if (sequence_manager_deleted_)
+ return;
+ queues_.push_back(std::move(task_queue));
+}
+
+void GracefulQueueShutdownHelper::OnSequenceManagerDeleted() {
+ AutoLock lock(lock_);
+ sequence_manager_deleted_ = true;
+ queues_.clear();
+}
+
+std::vector<std::unique_ptr<internal::TaskQueueImpl>>
+GracefulQueueShutdownHelper::TakeQueues() {
+ AutoLock lock(lock_);
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> result;
+ result.swap(queues_);
+ return result;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/graceful_queue_shutdown_helper.h b/base/task/sequence_manager/graceful_queue_shutdown_helper.h
new file mode 100644
index 0000000000..108eb827b2
--- /dev/null
+++ b/base/task/sequence_manager/graceful_queue_shutdown_helper.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class TaskQueueImpl;
+
// Thread-safe helper to shutdown queues from any thread.
class GracefulQueueShutdownHelper
    : public RefCountedThreadSafe<GracefulQueueShutdownHelper> {
 public:
  GracefulQueueShutdownHelper();

  // Hands |queue| over for deferred deletion. If OnSequenceManagerDeleted()
  // has already been called the queue is destroyed immediately instead.
  void GracefullyShutdownTaskQueue(
      std::unique_ptr<internal::TaskQueueImpl> queue);

  // Marks the owning SequenceManager as gone and drops any pending queues.
  void OnSequenceManagerDeleted();

  // Returns (and removes) all queues handed over so far.
  std::vector<std::unique_ptr<internal::TaskQueueImpl>> TakeQueues();

 private:
  // This class is ref-counted so it controls its own lifetime.
  ~GracefulQueueShutdownHelper();
  friend class RefCountedThreadSafe<GracefulQueueShutdownHelper>;

  // |lock_| guards both members below; every method may run on any thread.
  Lock lock_;
  bool sequence_manager_deleted_;
  std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues_;

  DISALLOW_COPY_AND_ASSIGN(GracefulQueueShutdownHelper);
};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
diff --git a/base/task/sequence_manager/intrusive_heap.h b/base/task/sequence_manager/intrusive_heap.h
new file mode 100644
index 0000000000..eb2fc8a454
--- /dev/null
+++ b/base/task/sequence_manager/intrusive_heap.h
@@ -0,0 +1,229 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
+#define BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+template <typename T>
+class IntrusiveHeap;
+
// Intended as an opaque wrapper around |index_|.
// Handed out by IntrusiveHeap so that elements can later be erased or
// re-keyed without a linear search. Index 0 is reserved as "invalid"
// because IntrusiveHeap uses 1-based indexing.
class HeapHandle {
 public:
  HeapHandle() : index_(0u) {}

  // True iff this handle currently refers to a heap slot.
  bool IsValid() const { return index_ != 0u; }

 private:
  template <typename T>
  friend class IntrusiveHeap;

  // explicit: prevents accidental size_t -> HeapHandle conversions; only
  // IntrusiveHeap (a friend) may mint non-default handles.
  explicit HeapHandle(size_t index) : index_(index) {}

  size_t index_;
};
+
+// A standard min-heap with the following assumptions:
+// 1. T has operator <=
+// 2. T has method void SetHeapHandle(HeapHandle handle)
+// 3. T has method void ClearHeapHandle()
+// 4. T is moveable
+// 5. T is default constructible
+// 6. The heap size never gets terribly big so reclaiming memory on pop/erase
+// isn't a priority.
+//
+// The reason IntrusiveHeap exists is to provide similar performance to
+// std::priority_queue while allowing removal of arbitrary elements.
+template <typename T>
+class IntrusiveHeap {
+ public:
+ IntrusiveHeap() : nodes_(kMinimumHeapSize), size_(0) {}
+
+ ~IntrusiveHeap() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ }
+
+ bool empty() const { return size_ == 0; }
+
+ size_t size() const { return size_; }
+
+ void Clear() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ nodes_.resize(kMinimumHeapSize);
+ size_ = 0;
+ }
+
+ const T& Min() const {
+ DCHECK_GE(size_, 1u);
+ return nodes_[1];
+ }
+
+ void Pop() {
+ DCHECK_GE(size_, 1u);
+ MakeHole(1u);
+ size_t top_index = size_--;
+ if (!empty())
+ MoveHoleDownAndFillWithLeafElement(1u, std::move(nodes_[top_index]));
+ }
+
+ void insert(T&& element) {
+ size_++;
+ if (size_ >= nodes_.size())
+ nodes_.resize(nodes_.size() * 2);
+ // Notionally we have a hole in the tree at index |size_|, move this up
+ // to find the right insertion point.
+ MoveHoleUpAndFillWithElement(size_, std::move(element));
+ }
+
+ void erase(HeapHandle handle) {
+ DCHECK_GT(handle.index_, 0u);
+ DCHECK_LE(handle.index_, size_);
+ MakeHole(handle.index_);
+ size_t top_index = size_--;
+ if (empty() || top_index == handle.index_)
+ return;
+ if (nodes_[handle.index_] <= nodes_[top_index]) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_,
+ std::move(nodes_[top_index]));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(nodes_[top_index]));
+ }
+ }
+
+ void ReplaceMin(T&& element) {
+ // Note |element| might not be a leaf node so we can't use
+ // MoveHoleDownAndFillWithLeafElement.
+ MoveHoleDownAndFillWithElement(1u, std::move(element));
+ }
+
+ void ChangeKey(HeapHandle handle, T&& element) {
+ if (nodes_[handle.index_] <= element) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_, std::move(element));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(element));
+ }
+ }
+
+ // Caution mutating the heap invalidates the iterators.
+ const T* begin() const { return &nodes_[1u]; }
+ const T* end() const { return begin() + size_; }
+
+ private:
+ enum {
+ // The majority of sets in the scheduler have 0-3 items in them (a few will
+ // have perhaps up to 100), so this means we usually only have to allocate
+ // memory once.
+ kMinimumHeapSize = 4u
+ };
+
+ friend class IntrusiveHeapTest;
+
+ size_t MoveHole(size_t new_hole_pos, size_t old_hole_pos) {
+ DCHECK_GT(new_hole_pos, 0u);
+ DCHECK_LE(new_hole_pos, size_);
+ DCHECK_GT(new_hole_pos, 0u);
+ DCHECK_LE(new_hole_pos, size_);
+ DCHECK_NE(old_hole_pos, new_hole_pos);
+ nodes_[old_hole_pos] = std::move(nodes_[new_hole_pos]);
+ nodes_[old_hole_pos].SetHeapHandle(HeapHandle(old_hole_pos));
+ return new_hole_pos;
+ }
+
+ // Notionally creates a hole in the tree at |index|.
+ void MakeHole(size_t index) {
+ DCHECK_GT(index, 0u);
+ DCHECK_LE(index, size_);
+ nodes_[index].ClearHeapHandle();
+ }
+
+ void FillHole(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ nodes_[hole] = std::move(element);
+ nodes_[hole].SetHeapHandle(HeapHandle(hole));
+ DCHECK(std::is_heap(begin(), end(), CompareNodes));
+ }
+
+ // is_heap requires a strict comparator.
+ static bool CompareNodes(const T& a, const T& b) { return !(a <= b); }
+
+ // Moves the |hole| up the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleUpAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ while (hole >= 2u) {
+ size_t parent_pos = hole / 2;
+ if (nodes_[parent_pos] <= element)
+ break;
+
+ hole = MoveHole(parent_pos, hole);
+ }
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleDownAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ if (nodes_[child_pos + 1] <= nodes_[child_pos])
+ child_pos++;
+
+ if (element <= nodes_[child_pos])
+ break;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_ && !(element <= nodes_[child_pos]))
+ hole = MoveHole(child_pos, hole);
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |leaf_element| is moved in. Faster than MoveHoleDownAndFillWithElement
+ // (it does one key comparison per level instead of two) but only valid for
+ // leaf elements (i.e. one of the max values).
+ void MoveHoleDownAndFillWithLeafElement(size_t hole, T&& leaf_element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ size_t second_child = child_pos + 1;
+ if (nodes_[second_child] <= nodes_[child_pos])
+ child_pos = second_child;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_)
+ hole = MoveHole(child_pos, hole);
+ MoveHoleUpAndFillWithElement(hole, std::move(leaf_element));
+ }
+
+ std::vector<T> nodes_; // NOTE we use 1-based indexing
+ size_t size_;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_INTRUSIVE_HEAP_H_
diff --git a/base/task/sequence_manager/intrusive_heap_unittest.cc b/base/task/sequence_manager/intrusive_heap_unittest.cc
new file mode 100644
index 0000000000..3c1323a76f
--- /dev/null
+++ b/base/task/sequence_manager/intrusive_heap_unittest.cc
@@ -0,0 +1,378 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
// Minimal element type satisfying the IntrusiveHeap contract: default
// constructible, moveable, has operator<= and the two heap-handle callbacks.
// |handle| (optional) mirrors the element's current HeapHandle so tests can
// observe where the heap placed it.
struct TestElement {
  int key;
  HeapHandle* handle;

  bool operator<=(const TestElement& other) const { return key <= other.key; }

  // Invoked by the heap whenever the element is (re)positioned.
  void SetHeapHandle(HeapHandle h) {
    if (handle)
      *handle = h;
  }

  // Invoked by the heap when the element is removed.
  void ClearHeapHandle() {
    if (handle)
      *handle = HeapHandle();
  }
};
+
+} // namespace
+
// Fixture; exposes IntrusiveHeap's private CompareNodes (it is a friend).
class IntrusiveHeapTest : public testing::Test {
 protected:
  static bool CompareNodes(const TestElement& a, const TestElement& b) {
    return IntrusiveHeap<TestElement>::CompareNodes(a, b);
  }
};
+
// A fresh heap is empty.
TEST_F(IntrusiveHeapTest, Basic) {
  IntrusiveHeap<TestElement> heap;

  EXPECT_TRUE(heap.empty());
  EXPECT_EQ(0u, heap.size());
}

// Clear() empties the heap and invalidates outstanding handles.
TEST_F(IntrusiveHeapTest, Clear) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index1;

  heap.insert({11, &index1});
  EXPECT_EQ(1u, heap.size());
  EXPECT_TRUE(index1.IsValid());

  heap.Clear();
  EXPECT_EQ(0u, heap.size());
  EXPECT_FALSE(index1.IsValid());
}

// Destroying the heap invalidates outstanding handles.
TEST_F(IntrusiveHeapTest, Destructor) {
  HeapHandle index1;

  {
    IntrusiveHeap<TestElement> heap;

    heap.insert({11, &index1});
    EXPECT_EQ(1u, heap.size());
    EXPECT_TRUE(index1.IsValid());
  }

  EXPECT_FALSE(index1.IsValid());
}

// Min() returns the smallest key regardless of insertion order.
TEST_F(IntrusiveHeapTest, Min) {
  IntrusiveHeap<TestElement> heap;

  heap.insert({9, nullptr});
  heap.insert({10, nullptr});
  heap.insert({8, nullptr});
  heap.insert({2, nullptr});
  heap.insert({7, nullptr});
  heap.insert({15, nullptr});
  heap.insert({22, nullptr});
  heap.insert({3, nullptr});

  EXPECT_FALSE(heap.empty());
  EXPECT_EQ(8u, heap.size());
  EXPECT_EQ(2, heap.Min().key);
}
+
+TEST_F(IntrusiveHeapTest, InsertAscending) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+
+ for (int i = 0; i < 50; i++)
+ heap.insert({i, nullptr});
+
+ EXPECT_EQ(0, heap.Min().key);
+ EXPECT_EQ(50u, heap.size());
+}
+
// Inserting keys in descending order still keeps the smallest at the root.
TEST_F(IntrusiveHeapTest, InsertDescending) {
  IntrusiveHeap<TestElement> heap;

  for (int i = 0; i < 50; i++)
    heap.insert({50 - i, nullptr});

  EXPECT_EQ(1, heap.Min().key);
  EXPECT_EQ(50u, heap.size());
}

// Every inserted element receives a valid handle.
TEST_F(IntrusiveHeapTest, HeapIndex) {
  HeapHandle index5;
  HeapHandle index4;
  HeapHandle index3;
  HeapHandle index2;
  HeapHandle index1;
  IntrusiveHeap<TestElement> heap;

  EXPECT_FALSE(index1.IsValid());
  EXPECT_FALSE(index2.IsValid());
  EXPECT_FALSE(index3.IsValid());
  EXPECT_FALSE(index4.IsValid());
  EXPECT_FALSE(index5.IsValid());

  heap.insert({15, &index5});
  heap.insert({14, &index4});
  heap.insert({13, &index3});
  heap.insert({12, &index2});
  heap.insert({11, &index1});

  EXPECT_TRUE(index1.IsValid());
  EXPECT_TRUE(index2.IsValid());
  EXPECT_TRUE(index3.IsValid());
  EXPECT_TRUE(index4.IsValid());
  EXPECT_TRUE(index5.IsValid());

  EXPECT_FALSE(heap.empty());
}
+
// Pop() removes the min and clears only that element's handle.
TEST_F(IntrusiveHeapTest, Pop) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index1;
  HeapHandle index2;

  heap.insert({11, &index1});
  heap.insert({12, &index2});
  EXPECT_EQ(2u, heap.size());
  EXPECT_TRUE(index1.IsValid());
  EXPECT_TRUE(index2.IsValid());

  heap.Pop();
  EXPECT_EQ(1u, heap.size());
  EXPECT_FALSE(index1.IsValid());
  EXPECT_TRUE(index2.IsValid());

  heap.Pop();
  EXPECT_EQ(0u, heap.size());
  EXPECT_FALSE(index1.IsValid());
  EXPECT_FALSE(index2.IsValid());
}

// 500 inserts followed by 500 pops come out in sorted order.
TEST_F(IntrusiveHeapTest, PopMany) {
  IntrusiveHeap<TestElement> heap;

  for (int i = 0; i < 500; i++)
    heap.insert({i, nullptr});

  EXPECT_FALSE(heap.empty());
  EXPECT_EQ(500u, heap.size());
  for (int i = 0; i < 500; i++) {
    EXPECT_EQ(i, heap.Min().key);
    heap.Pop();
  }
  EXPECT_TRUE(heap.empty());
}

// erase() removes an arbitrary element via its handle; order of the
// remaining elements is preserved.
TEST_F(IntrusiveHeapTest, Erase) {
  IntrusiveHeap<TestElement> heap;

  HeapHandle index12;

  heap.insert({15, nullptr});
  heap.insert({14, nullptr});
  heap.insert({13, nullptr});
  heap.insert({12, &index12});
  heap.insert({11, nullptr});

  EXPECT_EQ(5u, heap.size());
  EXPECT_TRUE(index12.IsValid());
  heap.erase(index12);
  EXPECT_EQ(4u, heap.size());
  EXPECT_FALSE(index12.IsValid());

  EXPECT_EQ(11, heap.Min().key);
  heap.Pop();
  EXPECT_EQ(13, heap.Min().key);
  heap.Pop();
  EXPECT_EQ(14, heap.Min().key);
  heap.Pop();
  EXPECT_EQ(15, heap.Min().key);
  heap.Pop();
  EXPECT_TRUE(heap.empty());
}

// ReplaceMin() swaps out the root repeatedly without corrupting the heap.
TEST_F(IntrusiveHeapTest, ReplaceMin) {
  IntrusiveHeap<TestElement> heap;

  for (int i = 0; i < 500; i++)
    heap.insert({500 - i, nullptr});

  EXPECT_EQ(1, heap.Min().key);

  for (int i = 0; i < 500; i++)
    heap.ReplaceMin({1000 + i, nullptr});

  EXPECT_EQ(1000, heap.Min().key);
}

// ReplaceMin() with a value that must sink into the middle of the tree
// (i.e. not a leaf) still lands correctly.
TEST_F(IntrusiveHeapTest, ReplaceMinWithNonLeafNode) {
  IntrusiveHeap<TestElement> heap;

  for (int i = 0; i < 50; i++) {
    heap.insert({i, nullptr});
    heap.insert({200 + i, nullptr});
  }

  EXPECT_EQ(0, heap.Min().key);

  for (int i = 0; i < 50; i++)
    heap.ReplaceMin({100 + i, nullptr});

  for (int i = 0; i < 50; i++) {
    EXPECT_EQ((100 + i), heap.Min().key);
    heap.Pop();
  }
  for (int i = 0; i < 50; i++) {
    EXPECT_EQ((200 + i), heap.Min().key);
    heap.Pop();
  }
  EXPECT_TRUE(heap.empty());
}
+
+TEST_F(IntrusiveHeapTest, ReplaceMinCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ReplaceMin({j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 0);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
// ChangeKey() with a larger key sifts the element down (away from the root).
TEST_F(IntrusiveHeapTest, ChangeKeyUp) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index[10];

  for (size_t i = 0; i < 10; i++) {
    heap.insert({static_cast<int>(i) * 2, &index[i]});
  }

  heap.ChangeKey(index[5], {17, &index[5]});

  std::vector<int> results;
  while (!heap.empty()) {
    results.push_back(heap.Min().key);
    heap.Pop();
  }

  EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 12, 14, 16, 17, 18));
}

// ChangeKey() to a key that keeps the element's rank leaves order intact.
TEST_F(IntrusiveHeapTest, ChangeKeyUpButDoesntMove) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index[10];

  for (size_t i = 0; i < 10; i++) {
    heap.insert({static_cast<int>(i) * 2, &index[i]});
  }

  heap.ChangeKey(index[5], {11, &index[5]});

  std::vector<int> results;
  while (!heap.empty()) {
    results.push_back(heap.Min().key);
    heap.Pop();
  }

  EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 11, 12, 14, 16, 18));
}
+
// ChangeKey() with a smaller key sifts the element up (towards the root).
TEST_F(IntrusiveHeapTest, ChangeKeyDown) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index[10];

  for (size_t i = 0; i < 10; i++) {
    heap.insert({static_cast<int>(i) * 2, &index[i]});
  }

  heap.ChangeKey(index[5], {1, &index[5]});

  std::vector<int> results;
  while (!heap.empty()) {
    results.push_back(heap.Min().key);
    heap.Pop();
  }

  EXPECT_THAT(results, testing::ElementsAre(0, 1, 2, 4, 6, 8, 12, 14, 16, 18));
}

// ChangeKey() to a smaller key that keeps the element's rank is a no-op
// positionally.
TEST_F(IntrusiveHeapTest, ChangeKeyDownButDoesntMove) {
  IntrusiveHeap<TestElement> heap;
  HeapHandle index[10];

  for (size_t i = 0; i < 10; i++) {
    heap.insert({static_cast<int>(i) * 2, &index[i]});
  }

  heap.ChangeKey(index[5], {9, &index[5]});

  std::vector<int> results;
  while (!heap.empty()) {
    results.push_back(heap.Min().key);
    heap.Pop();
  }

  EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 9, 12, 14, 16, 18));
}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[40], {j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 80);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
// CompareNodes must be a strict ordering (irreflexive) for std::is_heap.
TEST_F(IntrusiveHeapTest, CompareNodes) {
  TestElement five{5, nullptr}, six{6, nullptr};

  // Check that we have a strict comparator, otherwise std::is_heap()
  // (used in DCHECK) may fail. See http://crbug.com/661080.
  EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(six, six));

  EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(five, six));
  EXPECT_TRUE(IntrusiveHeapTest::CompareNodes(six, five));
}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/lazily_deallocated_deque.h b/base/task/sequence_manager/lazily_deallocated_deque.h
new file mode 100644
index 0000000000..7a4d7bad6a
--- /dev/null
+++ b/base/task/sequence_manager/lazily_deallocated_deque.h
@@ -0,0 +1,364 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
+// patterns. The queue generally grows while tasks are added and then removed
+// until empty and the cycle repeats.
+//
+// The main difference between sequence_manager::LazilyDeallocatedDeque and
+// others is memory management. For performance (memory allocation isn't free)
+// we don't automatically reclaim memory when the queue becomes empty.
+// Instead we rely on the surrounding code periodically calling
+// MaybeShrinkQueue, ideally when the queue is empty.
+//
+// We keep track of the maximum recent queue size and rate limit
+// MaybeShrinkQueue to avoid unnecessary churn.
+//
+// NB this queue isn't by itself thread safe.
+template <typename T>
+class LazilyDeallocatedDeque {
+ public:
+ enum {
+ // Minimum allocation for a ring. Note a ring of size 4 will only hold up to
+ // 3 elements.
+ kMinimumRingSize = 4,
+
+ // Maximum "wasted" capacity allowed when considering if we should resize
+ // the backing store.
+ kReclaimThreshold = 16,
+
+ // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
+ // queue.
+ kMinimumShrinkIntervalInSeconds = 5
+ };
+
+ LazilyDeallocatedDeque() {}
+
+ ~LazilyDeallocatedDeque() { clear(); }
+
+ bool empty() const { return size_ == 0; }
+
+ size_t max_size() const { return max_size_; }
+
+ size_t size() const { return size_; }
+
+ size_t capacity() const {
+ size_t capacity = 0;
+ for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
+ capacity += iter->capacity();
+ }
+ return capacity;
+ }
+
+ void clear() {
+ while (head_) {
+ head_ = std::move(head_->next_);
+ }
+
+ tail_ = nullptr;
+ size_ = 0;
+ }
+
+ // Assumed to be an uncommon operation.
+ void push_front(T t) {
+ if (!head_) {
+ head_ = std::make_unique<Ring>(kMinimumRingSize);
+ tail_ = head_.get();
+ }
+
+ // Grow if needed, by the minimum amount.
+ if (!head_->CanPush()) {
+ std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
+ new_ring->next_ = std::move(head_);
+ head_ = std::move(new_ring);
+ }
+
+ head_->push_front(std::move(t));
+ max_size_ = std::max(max_size_, ++size_);
+ }
+
+ // Assumed to be a common operation.
+ void push_back(T t) {
+ if (!head_) {
+ head_ = std::make_unique<Ring>(kMinimumRingSize);
+ tail_ = head_.get();
+ }
+
+ // Grow if needed.
+ if (!tail_->CanPush()) {
+ tail_->next_ = std::make_unique<Ring>(tail_->capacity() * 2);
+ tail_ = tail_->next_.get();
+ }
+
+ tail_->push_back(std::move(t));
+ max_size_ = std::max(max_size_, ++size_);
+ }
+
+ T& front() {
+ DCHECK(head_);
+ return head_->front();
+ }
+
+ const T& front() const {
+ DCHECK(head_);
+ return head_->front();
+ }
+
+ T& back() {
+ DCHECK(tail_);
+ return tail_->back();
+ }
+
+ const T& back() const {
+ DCHECK(tail_);
+ return tail_->back();
+ }
+
+ void pop_front() {
+ DCHECK(tail_);
+ DCHECK_GT(size_, 0u);
+ head_->pop_front();
+
+ // If the ring has become empty and we have several rings then, remove the
+ // head one (which we expect to have lower capacity than the remaining
+ // ones).
+ if (head_->empty() && head_->next_) {
+ head_ = std::move(head_->next_);
+ }
+
+ --size_;
+ }
+
+ void swap(LazilyDeallocatedDeque& other) {
+ std::swap(head_, other.head_);
+ std::swap(tail_, other.tail_);
+ std::swap(size_, other.size_);
+ std::swap(max_size_, other.max_size_);
+ std::swap(next_resize_time_, other.next_resize_time_);
+ }
+
+ void MaybeShrinkQueue() {
+ if (!tail_)
+ return;
+
+ DCHECK_GE(max_size_, size_);
+
+ // Rate limit how often we shrink the queue because it's somewhat expensive.
+ TimeTicks current_time = TimeTicks::Now();
+ if (current_time < next_resize_time_)
+ return;
+
+ // Due to the way the Ring works we need 1 more slot than is used.
+ size_t new_capacity = max_size_ + 1;
+ if (new_capacity < kMinimumRingSize)
+ new_capacity = kMinimumRingSize;
+
+ // Reset |max_size_| so that unless usage has spiked up we will consider
+ // reclaiming it next time.
+ max_size_ = size_;
+
+ // Only realloc if the current capacity is sufficiently the observed maximum
+ // size for the previous period.
+ if (new_capacity + kReclaimThreshold >= capacity())
+ return;
+
+ SetCapacity(new_capacity);
+ next_resize_time_ =
+ current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
+ }
+
+ void SetCapacity(size_t new_capacity) {
+ std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);
+
+ DCHECK_GE(new_capacity, size_ + 1);
+
+ // Preserve the |size_| which counts down to zero in the while loop.
+ size_t real_size = size_;
+
+ while (!empty()) {
+ DCHECK(new_ring->CanPush());
+ new_ring->push_back(std::move(head_->front()));
+ pop_front();
+ }
+
+ size_ = real_size;
+
+ DCHECK_EQ(head_.get(), tail_);
+ head_ = std::move(new_ring);
+ tail_ = head_.get();
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
+ FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);
+
+ struct Ring {
+ explicit Ring(size_t capacity)
+ : capacity_(capacity),
+ front_index_(0),
+ back_index_(0),
+ data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
+ next_(nullptr) {
+ DCHECK_GE(capacity_, kMinimumRingSize);
+ }
+
+ ~Ring() {
+ while (!empty()) {
+ pop_front();
+ }
+ delete[] reinterpret_cast<char*>(data_);
+ }
+
+ bool empty() const { return back_index_ == front_index_; }
+
+ size_t capacity() const { return capacity_; }
+
+ bool CanPush() const {
+ return front_index_ != CircularIncrement(back_index_);
+ }
+
+ void push_front(T&& t) {
+ // Mustn't appear to become empty.
+ DCHECK_NE(CircularDecrement(front_index_), back_index_);
+ new (&data_[front_index_]) T(std::move(t));
+ front_index_ = CircularDecrement(front_index_);
+ }
+
+ void push_back(T&& t) {
+ back_index_ = CircularIncrement(back_index_);
+ DCHECK(!empty()); // Mustn't appear to become empty.
+ new (&data_[back_index_]) T(std::move(t));
+ }
+
+ bool CanPop() const { return front_index_ != back_index_; }
+
+ void pop_front() {
+ DCHECK(!empty());
+ front_index_ = CircularIncrement(front_index_);
+ data_[front_index_].~T();
+ }
+
+ T& front() {
+ DCHECK(!empty());
+ return data_[CircularIncrement(front_index_)];
+ }
+
+ const T& front() const {
+ DCHECK(!empty());
+ return data_[CircularIncrement(front_index_)];
+ }
+
+ T& back() {
+ DCHECK(!empty());
+ return data_[back_index_];
+ }
+
+ const T& back() const {
+ DCHECK(!empty());
+ return data_[back_index_];
+ }
+
+ size_t CircularDecrement(size_t index) const {
+ if (index == 0)
+ return capacity_ - 1;
+ return index - 1;
+ }
+
+ size_t CircularIncrement(size_t index) const {
+ DCHECK_LT(index, capacity_);
+ ++index;
+ if (index == capacity_)
+ return 0;
+ return index;
+ }
+
+ size_t capacity_;
+ size_t front_index_;
+ size_t back_index_;
+ T* data_;
+ std::unique_ptr<Ring> next_;
+
+ DISALLOW_COPY_AND_ASSIGN(Ring);
+ };
+
+ public:
+ class Iterator {
+ public:
+ using value_type = T;
+ using pointer = const T*;
+ using reference = const T&;
+
+ const T& operator->() const { return ring_->data_[index_]; }
+ const T& operator*() const { return ring_->data_[index_]; }
+
+ Iterator& operator++() {
+ if (index_ == ring_->back_index_) {
+ ring_ = ring_->next_.get();
+ index_ = 0;
+ } else {
+ index_ = ring_->CircularIncrement(index_);
+ }
+ return *this;
+ }
+
+ operator bool() const { return !!ring_; }
+
+ private:
+ explicit Iterator(const Ring* ring) {
+ if (!ring || ring->empty()) {
+ ring_ = nullptr;
+ index_ = 0;
+ return;
+ }
+
+ ring_ = ring;
+ index_ = ring_->CircularIncrement(ring->front_index_);
+ }
+
+ const Ring* ring_;
+ size_t index_;
+
+ friend class LazilyDeallocatedDeque;
+ };
+
+ Iterator begin() const { return Iterator(head_.get()); }
+
+ Iterator end() const { return Iterator(nullptr); }
+
+ private:
+ // We maintain a list of Ring buffers, to enable us to grow without copying,
+ // but most of the time we aim to have only one active Ring.
+ std::unique_ptr<Ring> head_;
+ Ring* tail_ = nullptr;
+
+ size_t size_ = 0;
+ size_t max_size_ = 0;
+ TimeTicks next_resize_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(LazilyDeallocatedDeque);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
diff --git a/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc b/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc
new file mode 100644
index 0000000000..2afa048ac9
--- /dev/null
+++ b/base/task/sequence_manager/lazily_deallocated_deque_unittest.cc
@@ -0,0 +1,364 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/lazily_deallocated_deque.h"
+
+#include "base/time/time_override.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class LazilyDeallocatedDequeTest : public testing::Test {};
+
+TEST_F(LazilyDeallocatedDequeTest, InitiallyEmpty) {
+ LazilyDeallocatedDeque<int> d;
+
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndPopFront1) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_back(123);
+
+ EXPECT_FALSE(d.empty());
+ EXPECT_EQ(1u, d.size());
+
+ EXPECT_EQ(123, d.front());
+
+ d.pop_front();
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndPopFront1000) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+
+ EXPECT_EQ(0, d.front());
+ EXPECT_EQ(999, d.back());
+ EXPECT_EQ(1000u, d.size());
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(i, d.front());
+ d.pop_front();
+ }
+
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushFrontBackAndPopFront1) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_front(123);
+
+ EXPECT_FALSE(d.empty());
+ EXPECT_EQ(1u, d.size());
+
+ EXPECT_EQ(123, d.front());
+
+ d.pop_front();
+ EXPECT_TRUE(d.empty());
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushFrontAndPopFront1000) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_front(i);
+ }
+
+ EXPECT_EQ(999, d.front());
+ EXPECT_EQ(0, d.back());
+ EXPECT_EQ(1000u, d.size());
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(999 - i, d.front());
+ d.pop_front();
+ }
+
+ EXPECT_EQ(0u, d.size());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueWithLargeSizeDrop) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1000u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // Drop most elements.
+ for (int i = 0; i < 990; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(10u, d.size());
+ EXPECT_EQ(512u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // This won't do anything since the max size is greater than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(512u, d.capacity());
+ EXPECT_EQ(10u, d.max_size());
+
+ // This will shrink because the max size is now much less than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(11u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueWithSmallSizeDrop) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1010; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1010u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1010u, d.max_size());
+
+ // Drop a couple of elements.
+ d.pop_front();
+ d.pop_front();
+ EXPECT_EQ(1008u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1010u, d.max_size());
+
+ // This won't do anything since the max size is only slightly lower than the
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1008u, d.max_size());
+
+ // Ditto. Nothing changed so no point shrinking.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(1008u, d.max_size());
+ EXPECT_EQ(1020u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueToEmpty) {
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_front(i);
+ }
+
+ for (int i = 0; i < 1000; i++) {
+ d.pop_front();
+ }
+
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(0u, d.max_size());
+ EXPECT_EQ(LazilyDeallocatedDeque<int>::kMinimumRingSize, d.capacity());
+}
+
+namespace {
+TimeTicks fake_now;
+}
+
+TEST_F(LazilyDeallocatedDequeTest, MaybeShrinkQueueRateLimiting) {
+ subtle::ScopedTimeClockOverrides time_overrides(
+ nullptr, []() { return fake_now; }, nullptr);
+ LazilyDeallocatedDeque<int> d;
+
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+ EXPECT_EQ(1000u, d.size());
+ EXPECT_EQ(1020u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // Drop some elements.
+ for (int i = 0; i < 100; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(900u, d.size());
+ EXPECT_EQ(960u, d.capacity());
+ EXPECT_EQ(1000u, d.max_size());
+
+ // This won't do anything since the max size is greater than the current
+ // capacity.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(960u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // This will shrink to fit.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(901u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // Drop some more elements.
+ for (int i = 0; i < 100; i++) {
+ d.pop_front();
+ }
+ EXPECT_EQ(800u, d.size());
+ EXPECT_EQ(901u, d.capacity());
+ EXPECT_EQ(900u, d.max_size());
+
+ // Not enough time has passed so max_size is untouched and not shrunk.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(900u, d.max_size());
+ EXPECT_EQ(901u, d.capacity());
+
+ // After time passes we re-sample max_size.
+ fake_now += TimeDelta::FromSeconds(
+ LazilyDeallocatedDeque<int>::kMinimumShrinkIntervalInSeconds);
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(800u, d.max_size());
+ EXPECT_EQ(901u, d.capacity());
+
+ // And the next call to MaybeShrinkQueue actually shrinks the queue.
+ d.MaybeShrinkQueue();
+ EXPECT_EQ(800u, d.max_size());
+ EXPECT_EQ(801u, d.capacity());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, Iterators) {
+ LazilyDeallocatedDeque<int> d;
+
+ d.push_back(1);
+ d.push_back(2);
+ d.push_back(3);
+
+ auto iter = d.begin();
+ EXPECT_EQ(1, *iter);
+ EXPECT_NE(++iter, d.end());
+
+ EXPECT_EQ(2, *iter);
+ EXPECT_NE(++iter, d.end());
+
+ EXPECT_EQ(3, *iter);
+ EXPECT_EQ(++iter, d.end());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, PushBackAndFront) {
+ LazilyDeallocatedDeque<int> d;
+
+ int j = 1;
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_back(j++);
+ d.push_front(-i);
+ }
+
+ for (int i = -999; i < 4000; i++) {
+ EXPECT_EQ(d.front(), i);
+ d.pop_front();
+ }
+}
+
+TEST_F(LazilyDeallocatedDequeTest, SetCapacity) {
+ LazilyDeallocatedDeque<int> d;
+ for (int i = 0; i < 1000; i++) {
+ d.push_back(i);
+ }
+
+ EXPECT_EQ(1020u, d.capacity());
+
+ // We need 1 more spot than the size due to the way the Ring works.
+ d.SetCapacity(1001);
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(d.front(), i);
+ d.pop_front();
+ }
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushFront) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ r.push_front(1);
+ r.push_front(2);
+ r.push_front(3);
+
+ EXPECT_EQ(3, r.front());
+ EXPECT_EQ(1, r.back());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushBack) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ r.push_back(1);
+ r.push_back(2);
+ r.push_back(3);
+
+ EXPECT_EQ(1, r.front());
+ EXPECT_EQ(3, r.back());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingCanPush) {
+ LazilyDeallocatedDeque<int>::Ring r1(4);
+ LazilyDeallocatedDeque<int>::Ring r2(4);
+
+ for (int i = 0; i < 3; i++) {
+ EXPECT_TRUE(r1.CanPush());
+ r1.push_back(0);
+
+ EXPECT_TRUE(r2.CanPush());
+ r2.push_back(0);
+ }
+
+ EXPECT_FALSE(r1.CanPush());
+ EXPECT_FALSE(r2.CanPush());
+}
+
+TEST_F(LazilyDeallocatedDequeTest, RingPushPopPushPop) {
+ LazilyDeallocatedDeque<int>::Ring r(4);
+
+ EXPECT_FALSE(r.CanPop());
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(1);
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(2);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(3);
+ EXPECT_FALSE(r.CanPush());
+
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(1, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(2, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(3, r.front());
+ r.pop_front();
+ EXPECT_FALSE(r.CanPop());
+
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(10);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(20);
+ EXPECT_TRUE(r.CanPush());
+ r.push_back(30);
+ EXPECT_FALSE(r.CanPush());
+
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(10, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(20, r.front());
+ r.pop_front();
+ EXPECT_TRUE(r.CanPop());
+ EXPECT_EQ(30, r.front());
+ r.pop_front();
+
+ EXPECT_FALSE(r.CanPop());
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/lazy_now.cc b/base/task/sequence_manager/lazy_now.cc
new file mode 100644
index 0000000000..b391b32a4e
--- /dev/null
+++ b/base/task/sequence_manager/lazy_now.cc
@@ -0,0 +1,36 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/lazy_now.h"
+
+#include "base/time/tick_clock.h"
+
+namespace base {
+namespace sequence_manager {
+
+LazyNow::LazyNow(TimeTicks now) : tick_clock_(nullptr), now_(now) {}
+
+LazyNow::LazyNow(const TickClock* tick_clock)
+ : tick_clock_(tick_clock), now_() {
+ DCHECK(tick_clock);
+}
+
+LazyNow::LazyNow(LazyNow&& move_from) noexcept
+ : tick_clock_(move_from.tick_clock_), now_(move_from.now_) {
+ move_from.tick_clock_ = nullptr;
+ move_from.now_ = nullopt;
+}
+
+TimeTicks LazyNow::Now() {
+ // It looks tempting to avoid using Optional and to rely on is_null() instead,
+ // but in some test environments clock intentionally starts from zero.
+ if (!now_) {
+ DCHECK(tick_clock_); // It can fire only on use after std::move.
+ now_ = tick_clock_->NowTicks();
+ }
+ return *now_;
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/lazy_now.h b/base/task/sequence_manager/lazy_now.h
new file mode 100644
index 0000000000..d9ace8bf24
--- /dev/null
+++ b/base/task/sequence_manager/lazy_now.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
+#define BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
+
+#include "base/base_export.h"
+#include "base/optional.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class TickClock;
+
+namespace sequence_manager {
+
+// Now() is somewhat expensive so it makes sense not to call Now() unless we
+// really need to and to avoid subsequent calls if already called once.
+// LazyNow objects are expected to be short-living to represent accurate time.
+class BASE_EXPORT LazyNow {
+ public:
+ explicit LazyNow(TimeTicks now);
+ explicit LazyNow(const TickClock* tick_clock);
+
+ LazyNow(LazyNow&& move_from) noexcept;
+
+ // Result will not be updated on any subsequent calls.
+ TimeTicks Now();
+
+ private:
+ const TickClock* tick_clock_; // Not owned.
+ Optional<TimeTicks> now_;
+
+ DISALLOW_COPY_AND_ASSIGN(LazyNow);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
diff --git a/base/task/sequence_manager/moveable_auto_lock.h b/base/task/sequence_manager/moveable_auto_lock.h
new file mode 100644
index 0000000000..a80d5f8a74
--- /dev/null
+++ b/base/task/sequence_manager/moveable_auto_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
+#define BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
+
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace sequence_manager {
+
+class MoveableAutoLock {
+ public:
+ explicit MoveableAutoLock(Lock& lock) : lock_(lock), moved_(false) {
+ lock_.Acquire();
+ }
+
+ MoveableAutoLock(MoveableAutoLock&& other) noexcept
+ : lock_(other.lock_), moved_(other.moved_) {
+ lock_.AssertAcquired();
+ other.moved_ = true;
+ }
+
+ ~MoveableAutoLock() {
+ if (moved_)
+ return;
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ Lock& lock_;
+ bool moved_;
+ DISALLOW_COPY_AND_ASSIGN(MoveableAutoLock);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_MOVEABLE_AUTO_LOCK_H_
diff --git a/base/task/sequence_manager/real_time_domain.cc b/base/task/sequence_manager/real_time_domain.cc
new file mode 100644
index 0000000000..6a6caf094d
--- /dev/null
+++ b/base/task/sequence_manager/real_time_domain.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/real_time_domain.h"
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+RealTimeDomain::RealTimeDomain() {}
+
+RealTimeDomain::~RealTimeDomain() = default;
+
+LazyNow RealTimeDomain::CreateLazyNow() const {
+ return LazyNow(sequence_manager()->GetTickClock());
+}
+
+TimeTicks RealTimeDomain::Now() const {
+ return sequence_manager()->NowTicks();
+}
+
+Optional<TimeDelta> RealTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
+ Optional<TimeTicks> next_run_time = NextScheduledRunTime();
+ if (!next_run_time)
+ return nullopt;
+
+ TimeTicks now = lazy_now->Now();
+ if (now >= next_run_time) {
+ // Overdue work needs to be run immediately.
+ return TimeDelta();
+ }
+
+ TimeDelta delay = *next_run_time - now;
+ TRACE_EVENT1("sequence_manager", "RealTimeDomain::DelayTillNextTask",
+ "delay_ms", delay.InMillisecondsF());
+ return delay;
+}
+
+const char* RealTimeDomain::GetName() const {
+ return "RealTimeDomain";
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/real_time_domain.h b/base/task/sequence_manager/real_time_domain.h
new file mode 100644
index 0000000000..4923ebf06e
--- /dev/null
+++ b/base/task/sequence_manager/real_time_domain.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
+#define BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/time_domain.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class BASE_EXPORT RealTimeDomain : public TimeDomain {
+ public:
+ RealTimeDomain();
+ ~RealTimeDomain() override;
+
+ // TimeDomain implementation:
+ LazyNow CreateLazyNow() const override;
+ TimeTicks Now() const override;
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
+
+ protected:
+ const char* GetName() const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RealTimeDomain);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
diff --git a/base/task/sequence_manager/sequence_manager.cc b/base/task/sequence_manager/sequence_manager.cc
new file mode 100644
index 0000000000..3451f98fe8
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+namespace base {
+namespace sequence_manager {
+
+SequenceManager::MetricRecordingSettings::MetricRecordingSettings() {}
+
+SequenceManager::MetricRecordingSettings::MetricRecordingSettings(
+ bool cpu_time_for_each_task,
+ double task_thread_time_sampling_rate)
+ : records_cpu_time_for_each_task(base::ThreadTicks::IsSupported() &&
+ cpu_time_for_each_task),
+ task_sampling_rate_for_recording_cpu_time(
+ task_thread_time_sampling_rate) {
+ if (records_cpu_time_for_each_task)
+ task_sampling_rate_for_recording_cpu_time = 1;
+ if (!base::ThreadTicks::IsSupported())
+ task_sampling_rate_for_recording_cpu_time = 0;
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/sequence_manager.h b/base/task/sequence_manager/sequence_manager.h
new file mode 100644
index 0000000000..41e56ec03a
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager.h
@@ -0,0 +1,132 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_time_observer.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TimeDomain;
+
+// SequenceManager manages TaskQueues which have different properties
+// (e.g. priority, common task type) multiplexing all posted tasks into
+// a single backing sequence (currently bound to a single thread, which is
+// refererred as *main thread* in the comments below). SequenceManager
+// implementation can be used in a various ways to apply scheduling logic.
+class SequenceManager {
+ public:
+ class Observer {
+ public:
+ virtual ~Observer() = default;
+ // Called back on the main thread.
+ virtual void OnBeginNestedRunLoop() = 0;
+ virtual void OnExitNestedRunLoop() = 0;
+ };
+
+ struct MetricRecordingSettings {
+ MetricRecordingSettings();
+ // Note: These parameters are desired and MetricRecordingSettings will
+ // update them for consistency (e.g. setting values to false when
+ // ThreadTicks are not supported).
+ MetricRecordingSettings(bool records_cpu_time_for_each_task,
+ double task_sampling_rate_for_recording_cpu_time);
+
+ // True if cpu time is measured for each task, so the integral
+ // metrics (as opposed to per-task metrics) can be recorded.
+ bool records_cpu_time_for_each_task = false;
+ // The proportion of the tasks for which the cpu time will be
+ // sampled or 0 if this is not enabled.
+ // This value is always 1 if the |records_cpu_time_for_each_task| is true.
+ double task_sampling_rate_for_recording_cpu_time = 0;
+ };
+
+ virtual ~SequenceManager() = default;
+
+ // TODO(kraynov): Bring back CreateOnCurrentThread static method here
+ // when the move is done. It's not here yet to reduce PLATFORM_EXPORT
+ // macros hacking during the move.
+
+ // Must be called on the main thread.
+ // Can be called only once, before creating TaskQueues.
+ // Observer must outlive the SequenceManager.
+ virtual void SetObserver(Observer* observer) = 0;
+
+ // Must be called on the main thread.
+ virtual void AddTaskObserver(MessageLoop::TaskObserver* task_observer) = 0;
+ virtual void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) = 0;
+ virtual void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+ virtual void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+
+ // Registers a TimeDomain with SequenceManager.
+ // TaskQueues must only be created with a registered TimeDomain.
+ // Conversely, any TimeDomain must remain registered until no
+ // TaskQueues (using that TimeDomain) remain.
+ virtual void RegisterTimeDomain(TimeDomain* time_domain) = 0;
+ virtual void UnregisterTimeDomain(TimeDomain* time_domain) = 0;
+
+ virtual TimeDomain* GetRealTimeDomain() const = 0;
+ virtual const TickClock* GetTickClock() const = 0;
+ virtual TimeTicks NowTicks() const = 0;
+
+ // Sets the SingleThreadTaskRunner that will be returned by
+ // ThreadTaskRunnerHandle::Get on the main thread.
+ virtual void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) = 0;
+
+ // Removes all canceled delayed tasks.
+ virtual void SweepCanceledDelayedTasks() = 0;
+
+ // Returns true if no tasks were executed in TaskQueues that monitor
+ // quiescence since the last call to this method.
+ virtual bool GetAndClearSystemIsQuiescentBit() = 0;
+
+ // Set the number of tasks executed in a single SequenceManager invocation.
+ // Increasing this number reduces the overhead of the tasks dispatching
+ // logic at the cost of a potentially worse latency. 1 by default.
+ virtual void SetWorkBatchSize(int work_batch_size) = 0;
+
+ // Enables crash keys that can be set in the scope of a task which help
+ // to identify the culprit if upcoming work results in a crash.
+ // Key names must be thread-specific to avoid races and corrupted crash dumps.
+ virtual void EnableCrashKeys(const char* file_name_crash_key,
+ const char* function_name_crash_key) = 0;
+
+ // Returns the metric recording configuration for the current SequenceManager.
+ virtual const MetricRecordingSettings& GetMetricRecordingSettings() const = 0;
+
+ // Creates a task queue with the given type, |spec| and args.
+ // Must be called on the main thread.
+ // TODO(scheduler-dev): SequenceManager should not create TaskQueues.
+ template <typename TaskQueueType, typename... Args>
+ scoped_refptr<TaskQueueType> CreateTaskQueue(const TaskQueue::Spec& spec,
+ Args&&... args) {
+ return WrapRefCounted(new TaskQueueType(CreateTaskQueueImpl(spec), spec,
+ std::forward<Args>(args)...));
+ }
+
+ protected:
+ virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+ const TaskQueue::Spec& spec) = 0;
+};
+
+// Create SequenceManager using MessageLoop on the current thread.
+// Implementation is located in sequence_manager_impl.cc.
+// TODO(scheduler-dev): Rename to TakeOverCurrentThread when we'll stop using
+// MessageLoop and will actually take over a thread.
+BASE_EXPORT std::unique_ptr<SequenceManager>
+CreateSequenceManagerOnCurrentThread();
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
diff --git a/base/task/sequence_manager/sequence_manager_impl.cc b/base/task/sequence_manager/sequence_manager_impl.cc
new file mode 100644
index 0000000000..7afea9c3fc
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager_impl.cc
@@ -0,0 +1,724 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+
+#include <queue>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bit_cast.h"
+#include "base/compiler_specific.h"
+#include "base/debug/crash_logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_time_observer.h"
+#include "base/task/sequence_manager/thread_controller_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace sequence_manager {
+
+std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread() {
+ return internal::SequenceManagerImpl::CreateOnCurrentThread();
+}
+
+namespace internal {
+
+namespace {
+
+constexpr base::TimeDelta kLongTaskTraceEventThreshold =
+ base::TimeDelta::FromMilliseconds(50);
+// Proportion of tasks which will record thread time for metrics.
+const double kTaskSamplingRateForRecordingCPUTime = 0.01;
+// Proprortion of SequenceManagers which will record thread time for each task,
+// enabling advanced metrics.
+const double kThreadSamplingRateForRecordingCPUTime = 0.0001;
+
+// Magic value to protect against memory corruption and bail out
+// early when detected.
+constexpr int kMemoryCorruptionSentinelValue = 0xdeadbeef;
+
+void SweepCanceledDelayedTasksInQueue(
+ internal::TaskQueueImpl* queue,
+ std::map<TimeDomain*, TimeTicks>* time_domain_now) {
+ TimeDomain* time_domain = queue->GetTimeDomain();
+ if (time_domain_now->find(time_domain) == time_domain_now->end())
+ time_domain_now->insert(std::make_pair(time_domain, time_domain->Now()));
+ queue->SweepCanceledDelayedTasks(time_domain_now->at(time_domain));
+}
+
+SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings() {
+ bool cpu_time_recording_always_on =
+ base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
+ return SequenceManager::MetricRecordingSettings(
+ cpu_time_recording_always_on, kTaskSamplingRateForRecordingCPUTime);
+}
+
+} // namespace
+
+SequenceManagerImpl::SequenceManagerImpl(
+ std::unique_ptr<internal::ThreadController> controller)
+ : graceful_shutdown_helper_(new internal::GracefulQueueShutdownHelper()),
+ controller_(std::move(controller)),
+ metric_recording_settings_(InitializeMetricRecordingSettings()),
+ memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
+ weak_factory_(this) {
+ // TODO(altimin): Create a sequence checker here.
+ DCHECK(controller_->RunsTasksInCurrentSequence());
+
+ TRACE_EVENT_WARMUP_CATEGORY("sequence_manager");
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("sequence_manager"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots"));
+
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
+ main_thread_only().selector.SetTaskQueueSelectorObserver(this);
+
+ RegisterTimeDomain(main_thread_only().real_time_domain.get());
+
+ controller_->SetSequencedTaskSource(this);
+ controller_->AddNestingObserver(this);
+}
+
+SequenceManagerImpl::~SequenceManagerImpl() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
+
+ // TODO(altimin): restore default task runner automatically when
+ // ThreadController is destroyed.
+ controller_->RestoreDefaultTaskRunner();
+
+ for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
+ main_thread_only().selector.RemoveQueue(queue);
+ queue->UnregisterTaskQueue();
+ }
+
+ main_thread_only().active_queues.clear();
+ main_thread_only().queues_to_gracefully_shutdown.clear();
+
+ graceful_shutdown_helper_->OnSequenceManagerDeleted();
+
+ main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);
+ controller_->RemoveNestingObserver(this);
+}
+
+SequenceManagerImpl::AnyThread::AnyThread() = default;
+
+SequenceManagerImpl::AnyThread::~AnyThread() = default;
+
+SequenceManagerImpl::MainThreadOnly::MainThreadOnly()
+ : random_generator(RandUint64()),
+ uniform_distribution(0.0, 1.0),
+ real_time_domain(new internal::RealTimeDomain()) {}
+
+SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
+
+// static
+std::unique_ptr<SequenceManagerImpl>
+SequenceManagerImpl::CreateOnCurrentThread() {
+ return WrapUnique(
+ new SequenceManagerImpl(internal::ThreadControllerImpl::Create(
+ MessageLoop::current(), DefaultTickClock::GetInstance())));
+}
+
+void SequenceManagerImpl::RegisterTimeDomain(TimeDomain* time_domain) {
+ main_thread_only().time_domains.insert(time_domain);
+ time_domain->OnRegisterWithSequenceManager(this);
+}
+
+void SequenceManagerImpl::UnregisterTimeDomain(TimeDomain* time_domain) {
+ main_thread_only().time_domains.erase(time_domain);
+}
+
+TimeDomain* SequenceManagerImpl::GetRealTimeDomain() const {
+ return main_thread_only().real_time_domain.get();
+}
+
+std::unique_ptr<internal::TaskQueueImpl>
+SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ TimeDomain* time_domain = spec.time_domain
+ ? spec.time_domain
+ : main_thread_only().real_time_domain.get();
+ DCHECK(main_thread_only().time_domains.find(time_domain) !=
+ main_thread_only().time_domains.end());
+ std::unique_ptr<internal::TaskQueueImpl> task_queue =
+ std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
+ main_thread_only().active_queues.insert(task_queue.get());
+ main_thread_only().selector.AddQueue(task_queue.get());
+ return task_queue;
+}
+
+void SequenceManagerImpl::SetObserver(Observer* observer) {
+ main_thread_only().observer = observer;
+}
+
+bool SequenceManagerImpl::AddToIncomingImmediateWorkList(
+ internal::TaskQueueImpl* task_queue,
+ internal::EnqueueOrder enqueue_order) {
+ AutoLock lock(any_thread_lock_);
+ // Check if |task_queue| is already in the linked list.
+ if (task_queue->immediate_work_list_storage()->queue)
+ return false;
+
+ // Insert into the linked list.
+ task_queue->immediate_work_list_storage()->queue = task_queue;
+ task_queue->immediate_work_list_storage()->order = enqueue_order;
+ task_queue->immediate_work_list_storage()->next =
+ any_thread().incoming_immediate_work_list;
+ any_thread().incoming_immediate_work_list =
+ task_queue->immediate_work_list_storage();
+ return true;
+}
+
+void SequenceManagerImpl::RemoveFromIncomingImmediateWorkList(
+ internal::TaskQueueImpl* task_queue) {
+ AutoLock lock(any_thread_lock_);
+ internal::IncomingImmediateWorkList** prev =
+ &any_thread().incoming_immediate_work_list;
+ while (*prev) {
+ if ((*prev)->queue == task_queue) {
+ *prev = (*prev)->next;
+ break;
+ }
+ prev = &(*prev)->next;
+ }
+
+ task_queue->immediate_work_list_storage()->next = nullptr;
+ task_queue->immediate_work_list_storage()->queue = nullptr;
+}
+
+void SequenceManagerImpl::UnregisterTaskQueueImpl(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+ TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
+ "queue_name", task_queue->GetName());
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+ main_thread_only().selector.RemoveQueue(task_queue.get());
+
+ // After UnregisterTaskQueue returns no new tasks can be posted.
+ // It's important to call it first to avoid race condition between removing
+ // the task queue from various lists here and adding it to the same lists
+ // when posting a task.
+ task_queue->UnregisterTaskQueue();
+
+ // Remove |task_queue| from the linked list if present.
+ // This is O(n). We assume this will be a relatively infrequent operation.
+ RemoveFromIncomingImmediateWorkList(task_queue.get());
+
+ // Add |task_queue| to |main_thread_only().queues_to_delete| so we can prevent
+ // it from being freed while any of our structures hold a raw pointer to
+ // it.
+ main_thread_only().active_queues.erase(task_queue.get());
+ main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
+}
+
+void SequenceManagerImpl::ReloadEmptyWorkQueues() {
+ // There are two cases where a queue needs reloading. First, it might be
+ // completely empty and we've just posted a task (this method handles that
+ // case). Secondly, if the work queue becomes empty when calling
+ // WorkQueue::TakeTaskFromWorkQueue (handled there).
+ for (internal::TaskQueueImpl* queue : main_thread_only().queues_to_reload) {
+ queue->ReloadImmediateWorkQueueIfEmpty();
+ }
+}
+
+void SequenceManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManagerImpl::WakeUpReadyDelayedQueues");
+
+ for (TimeDomain* time_domain : main_thread_only().time_domains) {
+ if (time_domain == main_thread_only().real_time_domain.get()) {
+ time_domain->WakeUpReadyDelayedQueues(lazy_now);
+ } else {
+ LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
+ time_domain->WakeUpReadyDelayedQueues(&time_domain_lazy_now);
+ }
+ }
+}
+
+void SequenceManagerImpl::OnBeginNestedRunLoop() {
+ main_thread_only().nesting_depth++;
+ if (main_thread_only().observer)
+ main_thread_only().observer->OnBeginNestedRunLoop();
+}
+
+// RunLoop::NestingObserver implementation; called when a nested run loop
+// exits on this thread.
+void SequenceManagerImpl::OnExitNestedRunLoop() {
+  main_thread_only().nesting_depth--;
+  DCHECK_GE(main_thread_only().nesting_depth, 0);
+  if (main_thread_only().nesting_depth == 0) {
+    // While we were nested some non-nestable tasks may have been deferred.
+    // Each one is pushed back onto the *front* of its original work queue, so
+    // we drain |non_nestable_task_queue| from the back (most recently deferred
+    // first); that restores the original FIFO ordering at the queue fronts.
+    while (!main_thread_only().non_nestable_task_queue.empty()) {
+      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
+          main_thread_only().non_nestable_task_queue.back();
+      non_nestable_task.task_queue->RequeueDeferredNonNestableTask(
+          std::move(non_nestable_task));
+      main_thread_only().non_nestable_task_queue.pop_back();
+    }
+  }
+  if (main_thread_only().observer)
+    main_thread_only().observer->OnExitNestedRunLoop();
+}
+
+// Can be called from any thread. Only schedules a DoWork when |queue| was not
+// already in the incoming-work list (de-duplication) and isn't blocked.
+void SequenceManagerImpl::OnQueueHasIncomingImmediateWork(
+    internal::TaskQueueImpl* queue,
+    internal::EnqueueOrder enqueue_order,
+    bool queue_is_blocked) {
+  if (AddToIncomingImmediateWorkList(queue, enqueue_order) && !queue_is_blocked)
+    controller_->ScheduleWork();
+}
+
+void SequenceManagerImpl::MaybeScheduleImmediateWork(
+    const Location& from_here) {
+  // |from_here| is currently unused; ScheduleWork requests are de-duplicated
+  // by the controller.
+  controller_->ScheduleWork();
+}
+
+// Forwards the next wake-up request to the thread controller. Per the header,
+// this must only be called by a TimeDomain.
+void SequenceManagerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
+                                               TimeTicks run_time) {
+  controller_->SetNextDelayedDoWork(lazy_now, run_time);
+}
+
+// SequencedTaskSource implementation. Selects the next task to run and pushes
+// it onto |task_execution_stack|; returns nullopt when there is no runnable
+// work. Each returned task must be paired with a DidRunTask() call.
+Optional<PendingTask> SequenceManagerImpl::TakeTask() {
+  CHECK(Validate());
+
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::TakeTask");
+
+  {
+    AutoLock lock(any_thread_lock_);
+    // Under the lock, snapshot (and clear) the cross-thread list of queues
+    // with newly posted immediate work; they are reloaded below.
+    main_thread_only().queues_to_reload.clear();
+
+    for (internal::IncomingImmediateWorkList* iter =
+             any_thread().incoming_immediate_work_list;
+         iter; iter = iter->next) {
+      main_thread_only().queues_to_reload.push_back(iter->queue);
+      iter->queue = nullptr;
+    }
+
+    any_thread().incoming_immediate_work_list = nullptr;
+  }
+
+  // It's important we call ReloadEmptyWorkQueues outside of the lock to
+  // avoid a lock order inversion.
+  ReloadEmptyWorkQueues();
+  LazyNow lazy_now(controller_->GetClock());
+  WakeUpReadyDelayedQueues(&lazy_now);
+
+  while (true) {
+    internal::WorkQueue* work_queue = nullptr;
+    bool should_run =
+        main_thread_only().selector.SelectWorkQueueToService(&work_queue);
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
+        this, AsValueWithSelectorResult(should_run, work_queue));
+
+    if (!should_run)
+      return nullopt;
+
+    // If the head task was canceled, remove it and run the selector again.
+    if (work_queue->RemoveAllCanceledTasksFromFront())
+      continue;
+
+    if (work_queue->GetFrontTask()->nestable == Nestable::kNonNestable &&
+        main_thread_only().nesting_depth > 0) {
+      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
+      // the additional delay should not be a problem.
+      // Note because we don't delete queues while nested, it's perfectly OK to
+      // store the raw pointer for |queue| here.
+      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
+          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
+          work_queue->queue_type()};
+      main_thread_only().non_nestable_task_queue.push_back(
+          std::move(deferred_task));
+      continue;
+    }
+
+    main_thread_only().task_execution_stack.emplace_back(
+        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
+        InitializeTaskTiming(work_queue->task_queue()));
+
+    UMA_HISTOGRAM_COUNTS_1000("TaskQueueManager.ActiveQueuesCount",
+                              main_thread_only().active_queues.size());
+
+    ExecutingTask& executing_task =
+        *main_thread_only().task_execution_stack.rbegin();
+    NotifyWillProcessTask(&executing_task, &lazy_now);
+    return std::move(executing_task.pending_task);
+  }
+}
+
+// SequencedTaskSource implementation; pairs with a preceding TakeTask().
+void SequenceManagerImpl::DidRunTask() {
+  LazyNow lazy_now(controller_->GetClock());
+  ExecutingTask& executing_task =
+      *main_thread_only().task_execution_stack.rbegin();
+  NotifyDidProcessTask(&executing_task, &lazy_now);
+  main_thread_only().task_execution_stack.pop_back();
+
+  // Queue deletion is deferred while nested (see TakeTask's raw-pointer note).
+  if (main_thread_only().nesting_depth == 0)
+    CleanUpQueues();
+}
+
+// Returns TimeDelta() when immediate work is available, TimeDelta::Max() when
+// there is no pending work, or the delay until the next delayed task.
+TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+  // If the selector has non-empty queues we trivially know there is immediate
+  // work to be done.
+  if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
+    return TimeDelta();
+
+  // It's possible the selector's state is dirty because ReloadEmptyWorkQueues
+  // hasn't been called yet. This check catches the case of fresh incoming work.
+  {
+    AutoLock lock(any_thread_lock_);
+    for (const internal::IncomingImmediateWorkList* iter =
+             any_thread().incoming_immediate_work_list;
+         iter; iter = iter->next) {
+      if (iter->queue->CouldTaskRun(iter->order))
+        return TimeDelta();
+    }
+  }
+
+  // Otherwise we need to find the shortest delay, if any. NB we don't need to
+  // call WakeUpReadyDelayedQueues because it's assumed DelayTillNextTask will
+  // return TimeDelta() if the delayed task is due to run now.
+  TimeDelta delay_till_next_task = TimeDelta::Max();
+  for (TimeDomain* time_domain : main_thread_only().time_domains) {
+    Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
+    if (!delay)
+      continue;
+
+    if (*delay < delay_till_next_task)
+      delay_till_next_task = *delay;
+  }
+  return delay_till_next_task;
+}
+
+// Gives the controller a chance to annotate |pending_task| before it is
+// enqueued (see header comment).
+void SequenceManagerImpl::WillQueueTask(
+    internal::TaskQueueImpl::Task* pending_task) {
+  controller_->WillQueueTask(pending_task);
+}
+
+// Decides whether wall time and/or thread (CPU) time should be recorded for
+// the next task from |task_queue|. Thread time is sampled (see
+// ShouldRecordCPUTimeForTask) and only recorded when wall time is.
+TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
+    internal::TaskQueueImpl* task_queue) {
+  bool records_wall_time =
+      (task_queue->GetShouldNotifyObservers() &&
+       main_thread_only().task_time_observers.might_have_observers()) ||
+      task_queue->RequiresTaskTiming();
+  bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
+  return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
+}
+
+// Notifies task/time observers and the owning queue that |executing_task| is
+// about to run, and records the task's start time.
+void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
+                                                LazyNow* time_before_task) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+               "SequenceManagerImpl::NotifyWillProcessTaskObservers");
+  if (executing_task->task_queue->GetQuiescenceMonitored())
+    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;
+
+#if !defined(OS_NACL)
+  // Record the task's posting location in crash keys so crashes during the
+  // task can be attributed (see EnableCrashKeys).
+  debug::SetCrashKeyString(
+      main_thread_only().file_name_crash_key,
+      executing_task->pending_task.posted_from.file_name());
+  debug::SetCrashKeyString(
+      main_thread_only().function_name_crash_key,
+      executing_task->pending_task.posted_from.function_name());
+#endif  // OS_NACL
+
+  executing_task->task_timing.RecordTaskStart(time_before_task);
+
+  if (!executing_task->task_queue->GetShouldNotifyObservers())
+    return;
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.WillProcessTaskObservers");
+    for (auto& observer : main_thread_only().task_observers)
+      observer.WillProcessTask(executing_task->pending_task);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.QueueNotifyWillProcessTask");
+    executing_task->task_queue->NotifyWillProcessTask(
+        executing_task->pending_task);
+  }
+
+  bool notify_time_observers =
+      main_thread_only().task_time_observers.might_have_observers() ||
+      executing_task->task_queue->RequiresTaskTiming();
+
+  if (!notify_time_observers)
+    return;
+
+  // Time observers are only notified for top-level (non-nested) tasks.
+  if (main_thread_only().nesting_depth == 0) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.WillProcessTaskTimeObservers");
+    for (auto& observer : main_thread_only().task_time_observers)
+      observer.WillProcessTask(executing_task->task_timing.start_time());
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.QueueOnTaskStarted");
+    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
+                                              executing_task->task_timing);
+  }
+}
+
+// Records the task's end time and notifies observers and the owning queue
+// that |executing_task| finished running.
+void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
+                                               LazyNow* time_after_task) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+               "SequenceManagerImpl::NotifyDidProcessTaskObservers");
+
+  executing_task->task_timing.RecordTaskEnd(time_after_task);
+
+  const TaskQueue::TaskTiming& task_timing = executing_task->task_timing;
+
+  if (!executing_task->task_queue->GetShouldNotifyObservers())
+    return;
+
+  // Mirrors NotifyWillProcessTask: time observers only fire for top-level
+  // tasks and only when wall time was recorded.
+  if (task_timing.has_wall_time() && main_thread_only().nesting_depth == 0) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.DidProcessTaskTimeObservers");
+    for (auto& observer : main_thread_only().task_time_observers) {
+      observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
+    }
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.DidProcessTaskObservers");
+    for (auto& observer : main_thread_only().task_observers)
+      observer.DidProcessTask(executing_task->pending_task);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.QueueNotifyDidProcessTask");
+    executing_task->task_queue->NotifyDidProcessTask(
+        executing_task->pending_task);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+                 "SequenceManager.QueueOnTaskCompleted");
+    if (task_timing.has_wall_time())
+      executing_task->task_queue->OnTaskCompleted(executing_task->pending_task,
+                                                  task_timing);
+  }
+
+  // TODO(altimin): Move this back to blink.
+  if (task_timing.has_wall_time() &&
+      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
+      main_thread_only().nesting_depth == 0) {
+    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
+                         "duration", task_timing.wall_duration().InSecondsF());
+  }
+}
+
+void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  // A batch size below one would make DoWork run nothing.
+  DCHECK_GE(work_batch_size, 1);
+  controller_->SetWorkBatchSize(work_batch_size);
+}
+
+// Main-thread only; |task_observer| is not owned and must outlive use.
+void SequenceManagerImpl::AddTaskObserver(
+    MessageLoop::TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+// Main-thread only counterpart of AddTaskObserver.
+void SequenceManagerImpl::RemoveTaskObserver(
+    MessageLoop::TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+// Main-thread only; |task_time_observer| is not owned and must outlive use.
+void SequenceManagerImpl::AddTaskTimeObserver(
+    TaskTimeObserver* task_time_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_time_observers.AddObserver(task_time_observer);
+}
+
+// Main-thread only counterpart of AddTaskTimeObserver.
+void SequenceManagerImpl::RemoveTaskTimeObserver(
+    TaskTimeObserver* task_time_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
+}
+
+// Returns true if the system is quiescent, i.e. no task ran on a
+// quiescence-monitored queue since the last call. Resets the bit.
+bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
+  bool task_was_run =
+      main_thread_only().task_was_run_on_quiescence_monitored_queue;
+  main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
+  return !task_was_run;
+}
+
+// Returns a monotonically increasing enqueue order for the next task.
+internal::EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
+  return enqueue_order_generator_.GenerateNext();
+}
+
+// Serializes the manager's state (queues, selector, time domains, incoming
+// work) into a TracedValue snapshot for tracing/debugging.
+std::unique_ptr<trace_event::ConvertableToTraceFormat>
+SequenceManagerImpl::AsValueWithSelectorResult(
+    bool should_run,
+    internal::WorkQueue* selected_work_queue) const {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  std::unique_ptr<trace_event::TracedValue> state(
+      new trace_event::TracedValue());
+  TimeTicks now = NowTicks();
+  state->BeginArray("active_queues");
+  for (auto* const queue : main_thread_only().active_queues)
+    queue->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginArray("queues_to_gracefully_shutdown");
+  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+    pair.first->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginArray("queues_to_delete");
+  for (const auto& pair : main_thread_only().queues_to_delete)
+    pair.first->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginDictionary("selector");
+  main_thread_only().selector.AsValueInto(state.get());
+  state->EndDictionary();
+  // |selected_work_queue| is only valid when |should_run| is true.
+  if (should_run) {
+    state->SetString("selected_queue",
+                     selected_work_queue->task_queue()->GetName());
+    state->SetString("work_queue_name", selected_work_queue->name());
+  }
+
+  state->BeginArray("time_domains");
+  for (auto* time_domain : main_thread_only().time_domains)
+    time_domain->AsValueInto(state.get());
+  state->EndArray();
+  {
+    AutoLock lock(any_thread_lock_);
+    state->BeginArray("has_incoming_immediate_work");
+    for (const internal::IncomingImmediateWorkList* iter =
+             any_thread().incoming_immediate_work_list;
+         iter; iter = iter->next) {
+      state->AppendString(iter->queue->GetName());
+    }
+    state->EndArray();
+  }
+  return std::move(state);
+}
+
+// TaskQueueSelector::Observer implementation.
+void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  DCHECK(queue->IsQueueEnabled());
+  // Only schedule DoWork if there's something to do.
+  if (queue->HasTaskToRunImmediately() && !queue->BlockedByFence())
+    MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+void SequenceManagerImpl::SweepCanceledDelayedTasks() {
+  // |time_domain_now| memoizes Now() per time domain across the sweep.
+  std::map<TimeDomain*, TimeTicks> time_domain_now;
+  for (auto* const queue : main_thread_only().active_queues)
+    SweepCanceledDelayedTasksInQueue(queue, &time_domain_now);
+  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+    SweepCanceledDelayedTasksInQueue(pair.first, &time_domain_now);
+}
+
+// Transfers ownership of queues pending graceful shutdown from the
+// cross-thread helper into |main_thread_only().queues_to_gracefully_shutdown|.
+void SequenceManagerImpl::TakeQueuesToGracefullyShutdownFromHelper() {
+  std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues =
+      graceful_shutdown_helper_->TakeQueues();
+  for (std::unique_ptr<internal::TaskQueueImpl>& queue : queues) {
+    main_thread_only().queues_to_gracefully_shutdown[queue.get()] =
+        std::move(queue);
+  }
+}
+
+// Deletes queues marked for deletion and unregisters gracefully-shutting-down
+// queues once they are empty. Only called when not nested (see DidRunTask).
+void SequenceManagerImpl::CleanUpQueues() {
+  TakeQueuesToGracefullyShutdownFromHelper();
+
+  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
+       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
+    if (it->first->IsEmpty()) {
+      UnregisterTaskQueueImpl(std::move(it->second));
+      main_thread_only().active_queues.erase(it->first);
+      // Post-increment before erase keeps the iterator valid.
+      main_thread_only().queues_to_gracefully_shutdown.erase(it++);
+    } else {
+      ++it;
+    }
+  }
+  main_thread_only().queues_to_delete.clear();
+}
+
+// Accessor for the ref-counted shutdown helper shared with task queues.
+scoped_refptr<internal::GracefulQueueShutdownHelper>
+SequenceManagerImpl::GetGracefulQueueShutdownHelper() const {
+  return graceful_shutdown_helper_;
+}
+
+WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
+  return weak_factory_.GetWeakPtr();
+}
+
+// Delegates to the controller, which installs |task_runner| on the thread.
+void SequenceManagerImpl::SetDefaultTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  controller_->SetDefaultTaskRunner(task_runner);
+}
+
+const TickClock* SequenceManagerImpl::GetTickClock() const {
+  return controller_->GetClock();
+}
+
+TimeTicks SequenceManagerImpl::NowTicks() const {
+  return controller_->GetClock()->NowTicks();
+}
+
+// Randomly samples tasks for thread-time recording at the configured rate;
+// always false on platforms without ThreadTicks support.
+bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
+  return ThreadTicks::IsSupported() &&
+         main_thread_only().uniform_distribution(
+             main_thread_only().random_generator) <
+             metric_recording_settings_
+                 .task_sampling_rate_for_recording_cpu_time;
+}
+
+const SequenceManager::MetricRecordingSettings&
+SequenceManagerImpl::GetMetricRecordingSettings() const {
+  return metric_recording_settings_;
+}
+
+// Memory-corruption tripwire (https://crbug.com/757940). Optimizations are
+// disabled so the sentinel comparison can't be folded away by MSVC.
+MSVC_DISABLE_OPTIMIZE()
+bool SequenceManagerImpl::Validate() {
+  return memory_corruption_sentinel_ == kMemoryCorruptionSentinelValue;
+}
+MSVC_ENABLE_OPTIMIZE()
+
+// Allocates the crash keys that NotifyWillProcessTask fills in with the
+// running task's posting location. May only be called once.
+void SequenceManagerImpl::EnableCrashKeys(
+    const char* file_name_crash_key_name,
+    const char* function_name_crash_key_name) {
+  DCHECK(!main_thread_only().file_name_crash_key);
+  DCHECK(!main_thread_only().function_name_crash_key);
+#if !defined(OS_NACL)
+  main_thread_only().file_name_crash_key = debug::AllocateCrashKeyString(
+      file_name_crash_key_name, debug::CrashKeySize::Size64);
+  main_thread_only().function_name_crash_key = debug::AllocateCrashKeyString(
+      function_name_crash_key_name, debug::CrashKeySize::Size64);
+#endif  // OS_NACL
+}
+
+// Returns the queue of the innermost currently-running task, or nullptr when
+// no task is executing.
+internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
+    const {
+  if (main_thread_only().task_execution_stack.empty())
+    return nullptr;
+  return main_thread_only().task_execution_stack.rbegin()->task_queue;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/sequence_manager_impl.h b/base/task/sequence_manager/sequence_manager_impl.h
new file mode 100644
index 0000000000..b42dc72798
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager_impl.h
@@ -0,0 +1,341 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <random>
+#include <set>
+#include <unordered_map>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/cancelable_callback.h"
+#include "base/containers/circular_deque.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/graceful_queue_shutdown_helper.h"
+#include "base/task/sequence_manager/moveable_auto_lock.h"
+#include "base/task/sequence_manager/sequence_manager.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_queue_selector.h"
+#include "base/task/sequence_manager/thread_controller.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+namespace debug {
+struct CrashKeyString;
+} // namespace debug
+
+namespace trace_event {
+class ConvertableToTraceFormat;
+} // namespace trace_event
+
+namespace sequence_manager {
+
+class SequenceManagerForTest;
+class TaskQueue;
+class TaskTimeObserver;
+class TimeDomain;
+
+namespace internal {
+
+class RealTimeDomain;
+class TaskQueueImpl;
+
+// The task queue manager provides N task queues and a selector interface for
+// choosing which task queue to service next. Each task queue consists of two
+// sub queues:
+//
+// 1. Incoming task queue. Tasks that are posted get immediately appended here.
+// When a task is appended into an empty incoming queue, the task manager
+// work function (DoWork()) is scheduled to run on the main task runner.
+//
+// 2. Work queue. If a work queue is empty when DoWork() is entered, tasks from
+// the incoming task queue (if any) are moved here. The work queues are
+// registered with the selector as input to the scheduling decision.
+//
+class BASE_EXPORT SequenceManagerImpl
+    : public SequenceManager,
+      public internal::SequencedTaskSource,
+      public internal::TaskQueueSelector::Observer,
+      public RunLoop::NestingObserver {
+ public:
+  using Observer = SequenceManager::Observer;
+
+  ~SequenceManagerImpl() override;
+
+  // Assume direct control over current thread and create a SequenceManager.
+  // This function should be called only once per thread.
+  // This function assumes that a MessageLoop is initialized for
+  // the current thread.
+  static std::unique_ptr<SequenceManagerImpl> CreateOnCurrentThread();
+
+  // SequenceManager implementation:
+  void SetObserver(Observer* observer) override;
+  void AddTaskObserver(MessageLoop::TaskObserver* task_observer) override;
+  void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) override;
+  void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+  void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+  void RegisterTimeDomain(TimeDomain* time_domain) override;
+  void UnregisterTimeDomain(TimeDomain* time_domain) override;
+  TimeDomain* GetRealTimeDomain() const override;
+  const TickClock* GetTickClock() const override;
+  TimeTicks NowTicks() const override;
+  void SetDefaultTaskRunner(
+      scoped_refptr<SingleThreadTaskRunner> task_runner) override;
+  void SweepCanceledDelayedTasks() override;
+  bool GetAndClearSystemIsQuiescentBit() override;
+  void SetWorkBatchSize(int work_batch_size) override;
+  void EnableCrashKeys(const char* file_name_crash_key,
+                       const char* function_name_crash_key) override;
+  const MetricRecordingSettings& GetMetricRecordingSettings() const override;
+
+  // Implementation of SequencedTaskSource:
+  Optional<PendingTask> TakeTask() override;
+  void DidRunTask() override;
+  TimeDelta DelayTillNextTask(LazyNow* lazy_now) override;
+
+  // Requests that a task to process work is posted on the main task runner.
+  // These tasks are de-duplicated in two buckets: main-thread and all other
+  // threads. This distinction is done to reduce the overhead from locks, we
+  // assume the main-thread path will be hot.
+  void MaybeScheduleImmediateWork(const Location& from_here);
+
+  // Requests that a delayed task to process work is posted on the main task
+  // runner. These delayed tasks are de-duplicated. Must be called on the thread
+  // this class was created on.
+
+  // Schedules next wake-up at the given time, cancels any previous requests.
+  // Use TimeTicks::Max() to cancel a wake-up.
+  // Must be called from a TimeDomain only.
+  void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
+
+  // Returns the currently executing TaskQueue if any. Must be called on the
+  // thread this class was created on.
+  internal::TaskQueueImpl* currently_executing_task_queue() const;
+
+  // Unregisters a TaskQueue previously created by |NewTaskQueue()|.
+  // No tasks will run on this queue after this call.
+  void UnregisterTaskQueueImpl(
+      std::unique_ptr<internal::TaskQueueImpl> task_queue);
+
+  scoped_refptr<internal::GracefulQueueShutdownHelper>
+  GetGracefulQueueShutdownHelper() const;
+
+  WeakPtr<SequenceManagerImpl> GetWeakPtr();
+
+ protected:
+  // Create a task queue manager where |controller| controls the thread
+  // on which the tasks are eventually run.
+  explicit SequenceManagerImpl(
+      std::unique_ptr<internal::ThreadController> controller);
+
+  friend class internal::TaskQueueImpl;
+  friend class ::base::sequence_manager::SequenceManagerForTest;
+
+ private:
+  enum class ProcessTaskResult {
+    kDeferred,
+    kExecuted,
+    kSequenceManagerDeleted,
+  };
+
+  // State which may be accessed from any thread; guarded by any_thread_lock_.
+  struct AnyThread {
+    AnyThread();
+    ~AnyThread();
+
+    // Task queues with newly available work on the incoming queue.
+    internal::IncomingImmediateWorkList* incoming_immediate_work_list = nullptr;
+  };
+
+  // SequenceManager maintains a queue of non-nestable tasks since they're
+  // uncommon and allocating an extra deque per TaskQueue will waste the memory.
+  using NonNestableTaskDeque =
+      circular_deque<internal::TaskQueueImpl::DeferredNonNestableTask>;
+
+  // We have to track reentrancy because we support nested runloops but the
+  // selector interface is unaware of those. This struct keeps track of all
+  // task related state needed to make pairs of TakeTask() / DidRunTask() work.
+  struct ExecutingTask {
+    ExecutingTask(internal::TaskQueueImpl::Task&& pending_task,
+                  internal::TaskQueueImpl* task_queue,
+                  TaskQueue::TaskTiming task_timing)
+        : pending_task(std::move(pending_task)),
+          task_queue(task_queue),
+          task_timing(task_timing) {}
+
+    internal::TaskQueueImpl::Task pending_task;
+    internal::TaskQueueImpl* task_queue = nullptr;
+    TaskQueue::TaskTiming task_timing;
+  };
+
+  // State which may only be accessed on the thread this class was created on;
+  // guarded by main_thread_checker_.
+  struct MainThreadOnly {
+    MainThreadOnly();
+    ~MainThreadOnly();
+
+    int nesting_depth = 0;
+    NonNestableTaskDeque non_nestable_task_queue;
+    // TODO(altimin): Switch to instruction pointer crash key when it's
+    // available.
+    debug::CrashKeyString* file_name_crash_key = nullptr;
+    debug::CrashKeyString* function_name_crash_key = nullptr;
+
+    // Used to sample tasks for CPU-time recording.
+    std::mt19937_64 random_generator;
+    std::uniform_real_distribution<double> uniform_distribution;
+
+    internal::TaskQueueSelector selector;
+    ObserverList<MessageLoop::TaskObserver> task_observers;
+    ObserverList<TaskTimeObserver> task_time_observers;
+    std::set<TimeDomain*> time_domains;
+    std::unique_ptr<internal::RealTimeDomain> real_time_domain;
+
+    // List of task queues managed by this SequenceManager.
+    // - active_queues contains queues that are still running tasks.
+    //   Most often they are owned by relevant TaskQueues, but
+    //   queues_to_gracefully_shutdown_ are included here too.
+    // - queues_to_gracefully_shutdown contains queues which should be deleted
+    //   when they become empty.
+    // - queues_to_delete contains soon-to-be-deleted queues, because some
+    //   internal scheduling code does not expect queues to be pulled
+    //   from underneath.
+
+    std::set<internal::TaskQueueImpl*> active_queues;
+    std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+        queues_to_gracefully_shutdown;
+    std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+        queues_to_delete;
+
+    // Scratch space used to store the contents of
+    // any_thread().incoming_immediate_work_list for use by
+    // ReloadEmptyWorkQueues. We keep hold of this vector to avoid unnecessary
+    // memory allocations.
+    std::vector<internal::TaskQueueImpl*> queues_to_reload;
+
+    bool task_was_run_on_quiescence_monitored_queue = false;
+
+    // Due to nested runloops more than one task can be executing concurrently.
+    std::list<ExecutingTask> task_execution_stack;
+
+    Observer* observer = nullptr;  // NOT OWNED
+  };
+
+  // TaskQueueSelector::Observer:
+  void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
+
+  // RunLoop::NestingObserver:
+  void OnBeginNestedRunLoop() override;
+  void OnExitNestedRunLoop() override;
+
+  // Called by the task queue to inform this SequenceManager of a task that's
+  // about to be queued. This SequenceManager may use this opportunity to add
+  // metadata to |pending_task| before it is moved into the queue.
+  void WillQueueTask(internal::TaskQueueImpl::Task* pending_task);
+
+  // Delayed Tasks with run_times <= Now() are enqueued onto the work queue and
+  // reloads any empty work queues.
+  void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+  void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
+  void NotifyDidProcessTask(ExecutingTask* task, LazyNow* time_after_task);
+
+  internal::EnqueueOrder GetNextSequenceNumber();
+
+  std::unique_ptr<trace_event::ConvertableToTraceFormat>
+  AsValueWithSelectorResult(bool should_run,
+                            internal::WorkQueue* selected_work_queue) const;
+
+  // Adds |queue| to |any_thread().incoming_immediate_work_list| and if
+  // |queue_is_blocked| is false it makes sure a DoWork is posted.
+  // Can be called from any thread.
+  void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue,
+                                       internal::EnqueueOrder enqueue_order,
+                                       bool queue_is_blocked);
+
+  // Returns true if |task_queue| was added to the list, or false if it was
+  // already in the list. If |task_queue| was inserted, the |order| is set
+  // with |enqueue_order|.
+  bool AddToIncomingImmediateWorkList(internal::TaskQueueImpl* task_queue,
+                                      internal::EnqueueOrder enqueue_order);
+  void RemoveFromIncomingImmediateWorkList(internal::TaskQueueImpl* task_queue);
+
+  // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in
+  // |main_thread_only().queues_to_reload|.
+  void ReloadEmptyWorkQueues();
+
+  std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+      const TaskQueue::Spec& spec) override;
+
+  void TakeQueuesToGracefullyShutdownFromHelper();
+
+  // Deletes queues marked for deletion and empty queues marked for shutdown.
+  void CleanUpQueues();
+
+  bool ShouldRecordCPUTimeForTask();
+
+  // Determines if wall time or thread time should be recorded for the next
+  // task.
+  TaskQueue::TaskTiming InitializeTaskTiming(
+      internal::TaskQueueImpl* task_queue);
+
+  const scoped_refptr<internal::GracefulQueueShutdownHelper>
+      graceful_shutdown_helper_;
+
+  internal::EnqueueOrder::Generator enqueue_order_generator_;
+
+  std::unique_ptr<internal::ThreadController> controller_;
+
+  mutable Lock any_thread_lock_;
+  AnyThread any_thread_;
+
+  struct AnyThread& any_thread() {
+    any_thread_lock_.AssertAcquired();
+    return any_thread_;
+  }
+  const struct AnyThread& any_thread() const {
+    any_thread_lock_.AssertAcquired();
+    return any_thread_;
+  }
+
+  const MetricRecordingSettings metric_recording_settings_;
+
+  // A check to bail out early during memory corruption.
+  // https://crbug.com/757940
+  bool Validate();
+
+  int32_t memory_corruption_sentinel_;
+
+  THREAD_CHECKER(main_thread_checker_);
+  MainThreadOnly main_thread_only_;
+  MainThreadOnly& main_thread_only() {
+    DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+    return main_thread_only_;
+  }
+  const MainThreadOnly& main_thread_only() const {
+    DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+    return main_thread_only_;
+  }
+
+  WeakPtrFactory<SequenceManagerImpl> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceManagerImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
diff --git a/base/task/sequence_manager/sequence_manager_impl_unittest.cc b/base/task/sequence_manager/sequence_manager_impl_unittest.cc
new file mode 100644
index 0000000000..e1587ef09c
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager_impl_unittest.cc
@@ -0,0 +1,3260 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
+#include "base/location.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/optional.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/task_queue_selector.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/test/sequence_manager_for_test.h"
+#include "base/task/sequence_manager/test/test_task_queue.h"
+#include "base/task/sequence_manager/test/test_task_time_observer.h"
+#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/trace_event_analyzer.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/blame_context.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyNumber;
+using testing::Contains;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Mock;
+using testing::Not;
+using testing::_;
+using base::sequence_manager::internal::EnqueueOrder;
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+// To avoid symbol collisions in jumbo builds.
+namespace sequence_manager_impl_unittest {
+
+enum class TestType : int {
+ kCustom = 0,
+ kUseMockTaskRunner = 1,
+ kUseMessageLoop = 2,
+ kUseMessagePump = 3,
+};
+
+class SequenceManagerTestBase : public testing::TestWithParam<TestType> {
+ protected:
+ void TearDown() override {
+ // SequenceManager should be deleted before an underlying task runner.
+ manager_.reset();
+ }
+
+ scoped_refptr<TestTaskQueue> CreateTaskQueue(
+ TaskQueue::Spec spec = TaskQueue::Spec("test")) {
+ return manager_->CreateTaskQueue<TestTaskQueue>(spec);
+ }
+
+ void CreateTaskQueues(size_t num_queues) {
+ for (size_t i = 0; i < num_queues; i++)
+ runners_.push_back(CreateTaskQueue());
+ }
+
+ std::unique_ptr<SequenceManagerForTest> manager_;
+ std::vector<scoped_refptr<TestTaskQueue>> runners_;
+ TimeTicks start_time_;
+ TestTaskTimeObserver test_task_time_observer_;
+};
+
+// SequenceManagerImpl uses TestMockTimeTaskRunner which controls
+// both task execution and mock clock.
+// TODO(kraynov): Make this class support all TestTypes.
+// It will allow us to re-run tests in various environments before we'll
+// eventually move to MessagePump and remove current ThreadControllerImpl.
+class SequenceManagerTest : public SequenceManagerTestBase {
+ public:
+ void DeleteSequenceManagerTask() { manager_.reset(); }
+
+ protected:
+ void SetUp() override {
+ ASSERT_EQ(GetParam(), TestType::kUseMockTaskRunner);
+ test_task_runner_ = WrapRefCounted(new TestMockTimeTaskRunner(
+ TestMockTimeTaskRunner::Type::kBoundToThread));
+ // A null clock triggers some assertions.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(1));
+ start_time_ = GetTickClock()->NowTicks();
+
+ manager_ =
+ SequenceManagerForTest::Create(nullptr, ThreadTaskRunnerHandle::Get(),
+ test_task_runner_->GetMockTickClock());
+ }
+
+ const TickClock* GetTickClock() {
+ return test_task_runner_->GetMockTickClock();
+ }
+
+ void RunPendingTasks() {
+ // We should only run tasks already posted by that moment.
+ RunLoop run_loop;
+ test_task_runner_->PostTask(FROM_HERE, run_loop.QuitClosure());
+ // TestMockTimeTaskRunner will fast-forward mock clock if necessary.
+ run_loop.Run();
+ }
+
+ // Runs all immediate tasks until there is no more work to do and advances
+ // time if there is a pending delayed task. |per_run_time_callback| is called
+ // when the clock advances.
+ // The only difference to FastForwardUntilNoTasksRemain is that time
+ // advancing isn't driven by the test task runner, but uses time domain's
+ // next scheduled run time instead. It allows us to double-check consistency
+ // and allows to count such bursts of doing work, which is a test subject.
+ void RunUntilManagerIsIdle(RepeatingClosure per_run_time_callback) {
+ for (;;) {
+ // Advance time if we've run out of immediate work to do.
+ if (!manager_->HasImmediateWork()) {
+ LazyNow lazy_now(GetTickClock());
+ Optional<TimeDelta> delay =
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now);
+ if (delay) {
+ test_task_runner_->AdvanceMockTickClock(*delay);
+ per_run_time_callback.Run();
+ } else {
+ break;
+ }
+ }
+ RunPendingTasks();
+ }
+ }
+
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+// SequenceManagerImpl is being initialized with real MessageLoop
+// at cost of less control over a task runner.
+// It also runs a version with experimental MessagePump support.
+// TODO(kraynov): Generalize as many tests as possible to run it
+// in all supported environments.
+class SequenceManagerTestWithMessageLoop : public SequenceManagerTestBase {
+ protected:
+ void SetUp() override {
+ switch (GetParam()) {
+ case TestType::kUseMessageLoop:
+ SetUpWithMessageLoop();
+ break;
+ case TestType::kUseMessagePump:
+ SetUpWithMessagePump();
+ break;
+ default:
+ FAIL();
+ }
+ }
+
+ void SetUpWithMessageLoop() {
+ message_loop_.reset(new MessageLoop());
+ // A null clock triggers some assertions.
+ mock_clock_.Advance(TimeDelta::FromMilliseconds(1));
+ start_time_ = mock_clock_.NowTicks();
+
+ manager_ = SequenceManagerForTest::Create(
+ message_loop_.get(), ThreadTaskRunnerHandle::Get(), &mock_clock_);
+ }
+
+ void SetUpWithMessagePump() {
+ mock_clock_.Advance(TimeDelta::FromMilliseconds(1));
+ start_time_ = mock_clock_.NowTicks();
+ manager_ = std::make_unique<SequenceManagerForTest>(
+ std::make_unique<ThreadControllerWithMessagePumpImpl>(&mock_clock_));
+    // ThreadControllerWithMessagePumpImpl doesn't provide a default task runner.
+ scoped_refptr<TaskQueue> default_task_queue =
+ manager_->CreateTaskQueue<TestTaskQueue>(TaskQueue::Spec("default"));
+ manager_->SetDefaultTaskRunner(default_task_queue);
+ }
+
+ const TickClock* GetTickClock() { return &mock_clock_; }
+
+ std::unique_ptr<MessageLoop> message_loop_;
+ SimpleTestTickClock mock_clock_;
+};
+
+class SequenceManagerTestWithCustomInitialization
+ : public SequenceManagerTestWithMessageLoop {
+ protected:
+ void SetUp() override { ASSERT_EQ(GetParam(), TestType::kCustom); }
+};
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTest,
+ testing::Values(TestType::kUseMockTaskRunner));
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTestWithMessageLoop,
+ testing::Values(TestType::kUseMessageLoop,
+ TestType::kUseMessagePump));
+
+INSTANTIATE_TEST_CASE_P(,
+ SequenceManagerTestWithCustomInitialization,
+ testing::Values(TestType::kCustom));
+
+void PostFromNestedRunloop(SingleThreadTaskRunner* runner,
+ std::vector<std::pair<OnceClosure, bool>>* tasks) {
+ for (std::pair<OnceClosure, bool>& pair : *tasks) {
+ if (pair.second) {
+ runner->PostTask(FROM_HERE, std::move(pair.first));
+ } else {
+ runner->PostNonNestableTask(FROM_HERE, std::move(pair.first));
+ }
+ }
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+}
+
+void NopTask() {}
+
+class TestCountUsesTimeSource : public TickClock {
+ public:
+ TestCountUsesTimeSource() = default;
+ ~TestCountUsesTimeSource() override = default;
+
+ TimeTicks NowTicks() const override {
+ now_calls_count_++;
+ // Don't return 0, as it triggers some assertions.
+ return TimeTicks() + TimeDelta::FromSeconds(1);
+ }
+
+ int now_calls_count() const { return now_calls_count_; }
+
+ private:
+ mutable int now_calls_count_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(TestCountUsesTimeSource);
+};
+
+TEST_P(SequenceManagerTestWithCustomInitialization,
+ NowCalledMinimumNumberOfTimesToComputeTaskDurations) {
+ message_loop_.reset(new MessageLoop());
+ // This memory is managed by the SequenceManager, but we need to hold a
+ // pointer to this object to read out how many times Now was called.
+ TestCountUsesTimeSource test_count_uses_time_source;
+
+ manager_ = SequenceManagerForTest::Create(
+ nullptr, ThreadTaskRunnerHandle::Get(), &test_count_uses_time_source);
+ manager_->SetWorkBatchSize(6);
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+ for (size_t i = 0; i < 3; i++)
+ runners_.push_back(CreateTaskQueue());
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ RunLoop().RunUntilIdle();
+ // Now is called each time a task is queued, when first task is started
+ // running, and when a task is completed. 6 * 3 = 18 calls.
+ EXPECT_EQ(18, test_count_uses_time_source.now_calls_count());
+}
+
+void NullTask() {}
+
+void TestTask(uint64_t value, std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+}
+
+void DisableQueueTestTask(uint64_t value,
+ std::vector<EnqueueOrder>* out_result,
+ TaskQueue::QueueEnabledVoter* voter) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+ voter->SetQueueEnabled(false);
+}
+
+TEST_P(SequenceManagerTest, SingleQueuePosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, MultiQueuePosting) {
+ CreateTaskQueues(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, NonNestableTaskPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ NonNestableTaskExecutesInExpectedOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ BindOnce(&TestTask, 5, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u));
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop,
+ NonNestableTasksDoesntExecuteInNestedLoop) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 3, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 4, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 5, &run_order), true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 6, &run_order), true));
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+
+ RunLoop().RunUntilIdle();
+ // Note we expect tasks 3 & 4 to run last because they're non-nestable.
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 5u, 6u, 3u, 4u));
+}
+
+namespace {
+
+void InsertFenceAndPostTestTask(int id,
+ std::vector<EnqueueOrder>* run_order,
+ scoped_refptr<TestTaskQueue> task_queue) {
+ run_order->push_back(EnqueueOrder::FromIntForTesting(id));
+ task_queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ task_queue->PostTask(FROM_HERE, BindOnce(&TestTask, id + 1, run_order));
+
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ task_queue->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTestWithMessageLoop, TaskQueueDisabledFromNestedLoop) {
+ CreateTaskQueues(1u);
+ std::vector<EnqueueOrder> run_order;
+
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 1, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(std::make_pair(
+ BindOnce(&InsertFenceAndPostTestTask, 2, &run_order, runners_[0]), true));
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ RunLoop().RunUntilIdle();
+
+ // Task 1 shouldn't run first due to it being non-nestable and queue gets
+ // blocked after task 2. Task 1 runs after existing nested message loop
+ // due to being posted before inserting a fence.
+  // This test checks that nothing breaks when a nestable task is pushed into
+  // a redo queue.
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u));
+
+ runners_[0]->RemoveFence();
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u, 3u));
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_ImmediateTask) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |immediate_work_queue|.
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(
+ runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_DelayedTask) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ test_task_runner_->AdvanceMockTickClock(delay);
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |delayed_work_queue|.
+ LazyNow lazy_now(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now);
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->delayed_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(10),
+ test_task_runner_->NextPendingTaskDelay());
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ EXPECT_TRUE(run_order.empty());
+
+ // The task doesn't run before the delay has completed.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(9));
+ EXPECT_TRUE(run_order.empty());
+
+ // After the delay has completed, the task runs normally.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1u));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskExecutedInOneMessageLoopTask) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(10));
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+ EXPECT_EQ(0u, test_task_runner_->GetPendingTaskCount());
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting_MultipleTasks_DecendingOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(8));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(5));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(5),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(3u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(3),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(3));
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(2));
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u, 1u));
+}
+
+TEST_P(SequenceManagerTest, DelayedTaskPosting_MultipleTasks_AscendingOrder) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(1));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(5));
+
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(4),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(4));
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(5),
+ test_task_runner_->NextPendingTaskDelay());
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, PostDelayedTask_SharesUnderlyingDelayedTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ delay);
+
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+}
+
+class TestObject {
+ public:
+ ~TestObject() { destructor_count__++; }
+
+ void Run() { FAIL() << "TestObject::Run should not be called"; }
+
+ static int destructor_count__;
+};
+
+int TestObject::destructor_count__ = 0;
+
+TEST_P(SequenceManagerTest, PendingDelayedTasksRemovedOnShutdown) {
+ CreateTaskQueues(1u);
+
+ TestObject::destructor_count__ = 0;
+
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&TestObject::Run, Owned(new TestObject())), delay);
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&TestObject::Run, Owned(new TestObject())));
+
+ manager_.reset();
+
+ EXPECT_EQ(2, TestObject::destructor_count__);
+}
+
+TEST_P(SequenceManagerTest, InsertAndRemoveFence) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a task when pumping is disabled doesn't result in work getting
+ // posted.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+ // However polling still works.
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // After removing the fence the task runs normally.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceForDisabledQueueDoesNotPostDoWork) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+TEST_P(SequenceManagerTest, EnablingFencedQueueDoesNotPostDoWork) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ voter->SetQueueEnabled(true);
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_BeforePosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_AfterPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ voter->SetQueueEnabled(false);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, DenyRunning_AfterRemovingFence) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ runners_[0]->RemoveFence();
+ voter->SetQueueEnabled(true);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceWithDelayedTask) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a delayed task when fenced will apply the delay, but won't cause
+  // work to be executed afterwards.
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+  // The task does not run even though its delay is up.
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the task to run.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTask());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, RemovingFenceWithMultipleDelayedTasks) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a delayed task when fenced will apply the delay, but won't cause
+  // work to be executed afterwards.
+ TimeDelta delay1(TimeDelta::FromMilliseconds(1));
+ TimeDelta delay2(TimeDelta::FromMilliseconds(10));
+ TimeDelta delay3(TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay1);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay2);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ delay3);
+
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(15));
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the ready tasks to run.
+ runners_[0]->RemoveFence();
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, InsertFencePreventsDelayedTasksFromRunning) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay(TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+}
+
+TEST_P(SequenceManagerTest, MultipleFences) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ // Subsequent tasks should be blocked.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+TEST_P(SequenceManagerTest, InsertFenceThenImmediatlyRemoveDoesNotBlock) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->RemoveFence();
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, InsertFencePostThenRemoveDoesNotBlock) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->RemoveFence();
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+TEST_P(SequenceManagerTest, MultipleFencesWithInitiallyEmptyQueue) {
+ CreateTaskQueues(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+TEST_P(SequenceManagerTest, BlockedByFence) {
+ CreateTaskQueues(1u);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+}
+
+TEST_P(SequenceManagerTest, BlockedByFence_BothTypesOfFence) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+}
+
+namespace {
+
+void RecordTimeTask(std::vector<TimeTicks>* run_times, const TickClock* clock) {
+ run_times->push_back(clock->NowTicks());
+}
+
+void RecordTimeAndQueueTask(
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, TimeTicks>>* run_times,
+ scoped_refptr<TestTaskQueue> task_queue,
+ const TickClock* clock) {
+ run_times->emplace_back(task_queue, clock->NowTicks());
+}
+
+} // namespace
+
+TEST_P(SequenceManagerTest, DelayedFence_DelayedTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(100));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(200));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+ TimeDelta::FromMilliseconds(300));
+
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(300)));
+}
+
+// Verifies that a delayed fence (at 250ms) also blocks immediate tasks posted
+// after it activates, and that removing the fence lets the backlog run.
+TEST_P(SequenceManagerTest, DelayedFence_ImmediateTasks) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
+ // Post one immediate task every 100ms; the fence activates at 250ms, so
+ // iterations 0-2 (at 0/100/200ms) run and iterations 3-4 are blocked.
+ for (int i = 0; i < 5; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ if (i < 2) {
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ } else {
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ }
+ }
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(start_time_, start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ // Both blocked tasks run immediately once unblocked (at the 500ms mark).
+ EXPECT_THAT(run_times,
+ ElementsAre(start_time_ + TimeDelta::FromMilliseconds(500),
+ start_time_ + TimeDelta::FromMilliseconds(500)));
+}
+
+// Verifies that once a delayed fence has been removed it never (re)activates:
+// tasks posted after removal all run without blocking.
+TEST_P(SequenceManagerTest, DelayedFence_RemovedFenceDoesNotActivate) {
+ CreateTaskQueues(1u);
+
+ std::vector<TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
+ // Until 250ms the fence stays inactive; these three tasks run normally.
+ for (int i = 0; i < 3; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ }
+
+ // The fence activated at 250ms; remove it before posting further tasks.
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ runners_[0]->RemoveFence();
+
+ for (int i = 0; i < 2; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()));
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(100));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ }
+
+ // All five tasks ran at their posting times; nothing was held back.
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(start_time_, start_time_ + TimeDelta::FromMilliseconds(100),
+ start_time_ + TimeDelta::FromMilliseconds(200),
+ start_time_ + TimeDelta::FromMilliseconds(300),
+ start_time_ + TimeDelta::FromMilliseconds(400)));
+}
+
+TEST_P(SequenceManagerTest, DelayedFence_TakeIncomingImmediateQueue) {
+ // This test checks that everything works correctly when a work queue
+ // is swapped with an immediate incoming queue and a delayed fence
+ // is activated, forcing a different queue to become active.
+ CreateTaskQueues(2u);
+
+ scoped_refptr<TestTaskQueue> queue1 = runners_[0];
+ scoped_refptr<TestTaskQueue> queue2 = runners_[1];
+
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, TimeTicks>> run_times;
+
+ // Fence ensures that the task posted after advancing time is blocked.
+ queue1->InsertFenceAt(GetTickClock()->NowTicks() +
+ TimeDelta::FromMilliseconds(250));
+
+ // This task should not be blocked and should run immediately after
+ // advancing time at 301ms.
+ queue1->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue1, GetTickClock()));
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ queue1->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(300));
+
+ // This task should be blocked.
+ queue1->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue1, GetTickClock()));
+ // This task on a different runner should run as expected.
+ queue2->PostTask(FROM_HERE, BindOnce(&RecordTimeAndQueueTask, &run_times,
+ queue2, GetTickClock()));
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ // Only the pre-fence queue1 task and the (unfenced) queue2 task ran.
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(std::make_pair(
+ queue1, start_time_ + TimeDelta::FromMilliseconds(300)),
+ std::make_pair(
+ queue2, start_time_ + TimeDelta::FromMilliseconds(300))));
+}
+
+namespace {
+
+// Test helper: records |countdown| and, while it is still non-zero after
+// decrementing, re-posts itself — producing the run order N, N-1, ..., 1.
+void ReentrantTestTask(scoped_refptr<SingleThreadTaskRunner> runner,
+ int countdown,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(countdown));
+ if (--countdown) {
+ runner->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runner, countdown, out_result));
+ }
+}
+
+} // namespace
+
+// Verifies that a task which posts another task to its own queue gets each
+// successive task executed, in posting order (3, 2, 1).
+TEST_P(SequenceManagerTest, ReentrantPosting) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runners_[0], 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(3u, 2u, 1u));
+}
+
+// Verifies that no tasks run once the SequenceManager is destroyed — neither
+// tasks posted before destruction nor ones posted after.
+TEST_P(SequenceManagerTest, NoTasksAfterShutdown) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ manager_.reset();
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+}
+
+// Test helper: posts a single TestTask to |runner|; used to exercise
+// cross-thread posting from another thread's task runner.
+void PostTaskToRunner(scoped_refptr<SingleThreadTaskRunner> runner,
+ std::vector<EnqueueOrder>* run_order) {
+ runner->PostTask(FROM_HERE, BindOnce(&TestTask, 1, run_order));
+}
+
+// Verifies that a task posted to a SequenceManager queue from a different
+// thread is run on the main thread.
+TEST_P(SequenceManagerTestWithMessageLoop, PostFromThread) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ Thread thread("TestThread");
+ thread.Start();
+ thread.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&PostTaskToRunner, runners_[0], &run_order));
+ // Stop() joins the thread, guaranteeing the cross-thread post completed.
+ thread.Stop();
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+}
+
+// Test helper: bumps |run_count| and re-posts itself to |runner|, creating an
+// endless chain of self-posting tasks for the batching test below.
+void RePostingTestTask(scoped_refptr<SingleThreadTaskRunner> runner,
+ int* run_count) {
+ (*run_count)++;
+ runner->PostTask(FROM_HERE, BindOnce(&RePostingTestTask,
+ Unretained(runner.get()), run_count));
+}
+
+// Verifies that a self-reposting task results in exactly one pending DoWork
+// and one task execution per pump — DoWork must not fan out multiple posts.
+TEST_P(SequenceManagerTest, DoWorkCantPostItselfMultipleTimes) {
+ CreateTaskQueues(1u);
+
+ int run_count = 0;
+ runners_[0]->PostTask(FROM_HERE,
+ BindOnce(&RePostingTestTask, runners_[0], &run_count));
+
+ RunPendingTasks();
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ EXPECT_EQ(1, run_count);
+}
+
+// Verifies ordering when a task is posted from inside a nested run loop:
+// the already-queued tasks (0, 2) run first, then the nested-posted task (1).
+TEST_P(SequenceManagerTestWithMessageLoop, PostFromNestedRunloop) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&TestTask, 1, &run_order), true));
+
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 0, &run_order));
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(0u, 2u, 1u));
+}
+
+// Verifies that with a work batch size of 2 each DoWork invocation executes
+// two queued tasks, so four tasks drain in two pumps of the host loop.
+TEST_P(SequenceManagerTest, WorkBatching) {
+ CreateTaskQueues(1u);
+
+ manager_->SetWorkBatchSize(2);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+ // Running one task in the host message loop should cause two posted tasks to
+ // get executed.
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ // The second task runs the remaining two posted tasks.
+ EXPECT_EQ(1u, test_task_runner_->GetPendingTaskCount());
+ RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+}
+
+// GoogleMock implementation of MessageLoop::TaskObserver used to count
+// Will/DidProcessTask notifications in the observer tests below.
+class MockTaskObserver : public MessageLoop::TaskObserver {
+ public:
+ MOCK_METHOD1(DidProcessTask, void(const PendingTask& task));
+ MOCK_METHOD1(WillProcessTask, void(const PendingTask& task));
+};
+
+// Verifies that a manager-level task observer receives Will/DidProcessTask
+// for every task that runs.
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverAdding) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(2);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(2);
+ RunLoop().RunUntilIdle();
+}
+
+// Verifies that a manager-level task observer gets no notifications after it
+// has been removed.
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverRemoving) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+ manager_->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+// Test helper: removes |observer| from |manager| from within a running task.
+void RemoveObserverTask(SequenceManagerImpl* manager,
+ MessageLoop::TaskObserver* observer) {
+ manager->RemoveTaskObserver(observer);
+}
+
+// Verifies that an observer removed by the task it is observing still gets
+// WillProcessTask for that task but not DidProcessTask.
+TEST_P(SequenceManagerTestWithMessageLoop, TaskObserverRemovingInsideTask) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(3);
+ manager_->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RemoveObserverTask, manager_.get(), &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+// Verifies that a per-queue task observer is only notified for tasks on its
+// own queue, not for tasks on other queues.
+TEST_P(SequenceManagerTestWithMessageLoop, QueueTaskObserverAdding) {
+ CreateTaskQueues(2u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+ // Only the task on runners_[0] should be observed.
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(1);
+ RunLoop().RunUntilIdle();
+}
+
+// Verifies that a per-queue task observer gets no notifications after it has
+// been removed from the queue.
+TEST_P(SequenceManagerTestWithMessageLoop, QueueTaskObserverRemoving) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+ runners_[0]->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+
+ RunLoop().RunUntilIdle();
+}
+
+// Test helper: removes |observer| from |queue| from within a running task.
+void RemoveQueueObserverTask(scoped_refptr<TaskQueue> queue,
+ MessageLoop::TaskObserver* observer) {
+ queue->RemoveTaskObserver(observer);
+}
+
+// Verifies that a per-queue observer removed by the observed task itself still
+// gets WillProcessTask for that task but not DidProcessTask.
+TEST_P(SequenceManagerTestWithMessageLoop,
+ QueueTaskObserverRemovingInsideTask) {
+ CreateTaskQueues(1u);
+ MockTaskObserver observer;
+ runners_[0]->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&RemoveQueueObserverTask, runners_[0], &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ RunLoop().RunUntilIdle();
+}
+
+// Verifies that RunsTasksInCurrentSequence() keeps answering correctly on a
+// queue even after the SequenceManager has been destroyed.
+TEST_P(SequenceManagerTest, ThreadCheckAfterTermination) {
+ CreateTaskQueues(1u);
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+ manager_.reset();
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+}
+
+// Verifies RealTimeDomain::DelayTillNextTask: nullopt-like result with no
+// delayed tasks, and otherwise the delay of the soonest delayed task across
+// all queues, shrinking as time advances.
+TEST_P(SequenceManagerTest, TimeDomain_NextScheduledRunTime) {
+ CreateTaskQueues(2u);
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMicroseconds(10000));
+ LazyNow lazy_now_1(GetTickClock());
+
+ // With no delayed tasks.
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With a non-delayed task.
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With a delayed task.
+ TimeDelta expected_delay = TimeDelta::FromMilliseconds(50);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in the same queue with a longer delay.
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ TimeDelta::FromMilliseconds(100));
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in the same queue with a shorter delay.
+ expected_delay = TimeDelta::FromMilliseconds(20);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // With another delayed task in a different queue with a shorter delay.
+ expected_delay = TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), expected_delay);
+ EXPECT_EQ(expected_delay,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_1));
+
+ // Test it updates as time progresses
+ test_task_runner_->AdvanceMockTickClock(expected_delay);
+ LazyNow lazy_now_2(GetTickClock());
+ EXPECT_EQ(TimeDelta(),
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now_2));
+}
+
+// Verifies that DelayTillNextTask reports the minimum delay (delay2 = 5ms)
+// across several queues with different delayed tasks.
+TEST_P(SequenceManagerTest, TimeDomain_NextScheduledRunTime_MultipleQueues) {
+ CreateTaskQueues(3u);
+
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(50);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(5);
+ TimeDelta delay3 = TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay2);
+ runners_[2]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay3);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ LazyNow lazy_now(GetTickClock());
+ EXPECT_EQ(delay2,
+ manager_->GetRealTimeDomain()->DelayTillNextTask(&lazy_now));
+}
+
+// Verifies that deleting the SequenceManager from within one of its own tasks
+// does not crash.
+TEST_P(SequenceManagerTest, DeleteSequenceManagerInsideATask) {
+ CreateTaskQueues(1u);
+
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&SequenceManagerTest::DeleteSequenceManagerTask,
+ Unretained(this)));
+
+ // This should not crash, assuming DoWork detects the SequenceManager has
+ // been deleted.
+ RunLoop().RunUntilIdle();
+}
+
+// Verifies the quiescence bit: it is cleared when tasks run on a queue with
+// ShouldMonitorQuiescence set, stays set for unmonitored queues, and reading
+// it resets it back to quiescent.
+TEST_P(SequenceManagerTest, GetAndClearSystemIsQuiescentBit) {
+ CreateTaskQueues(3u);
+
+ scoped_refptr<TaskQueue> queue0 =
+ CreateTaskQueue(TaskQueue::Spec("test").SetShouldMonitorQuiescence(true));
+ scoped_refptr<TaskQueue> queue1 =
+ CreateTaskQueue(TaskQueue::Spec("test").SetShouldMonitorQuiescence(true));
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue1->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ // queue2 is not monitored, so its task leaves the system "quiescent".
+ queue2->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, BindOnce(&NopTask));
+ queue1->PostTask(FROM_HERE, BindOnce(&NopTask));
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+}
+
+// Verifies HasTaskToRunImmediately(): false on an empty queue, true once an
+// immediate task is posted, false again after it runs.
+TEST_P(SequenceManagerTest, HasPendingImmediateWork) {
+ CreateTaskQueues(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, BindOnce(NullTask));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+// Verifies that a delayed task only counts as "immediate work" once its delay
+// has elapsed and the delayed queue wake-up has moved it to the work queue.
+TEST_P(SequenceManagerTest, HasPendingImmediateWork_DelayedTasks) {
+ CreateTaskQueues(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(NullTask),
+ TimeDelta::FromMilliseconds(12));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move time forwards until just before the delayed task should run.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(10));
+ LazyNow lazy_now_1(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now_1);
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Force the delayed task onto the work queue.
+ test_task_runner_->AdvanceMockTickClock(TimeDelta::FromMilliseconds(2));
+ LazyNow lazy_now_2(GetTickClock());
+ manager_->WakeUpReadyDelayedQueues(&lazy_now_2);
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+// Test helper: records |value| then advances the mock clock by 1ms to
+// simulate a task that takes a millisecond of wall time to execute.
+void ExpensiveTestTask(int value,
+ scoped_refptr<TestMockTimeTaskRunner> test_task_runner,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(EnqueueOrder::FromIntForTesting(value));
+ test_task_runner->FastForwardBy(TimeDelta::FromMilliseconds(1));
+}
+
+// Verifies anti-starvation interleaving: when due delayed tasks (10-18) and
+// immediate tasks (0-8) compete, immediate work is periodically interleaved
+// so delayed tasks cannot starve it.
+TEST_P(SequenceManagerTest, ImmediateAndDelayedTaskInterleaving) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ for (int i = 10; i < 19; i++) {
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&ExpensiveTestTask, i, test_task_runner_, &run_order), delay);
+ }
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(10));
+
+ for (int i = 0; i < 9; i++) {
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ExpensiveTestTask, i,
+ test_task_runner_, &run_order));
+ }
+
+ test_task_runner_->FastForwardUntilNoTasksRemain();
+
+ // Delayed tasks are not allowed to starve out immediate work which is why
+ // some of the immediate tasks run out of order.
+ uint64_t expected_run_order[] = {10u, 11u, 12u, 13u, 0u, 14u, 15u, 16u, 1u,
+ 17u, 18u, 2u, 3u, 4u, 5u, 6u, 7u, 8u};
+ EXPECT_THAT(run_order, ElementsAreArray(expected_run_order));
+}
+
+// Verifies that a delayed task that has become due does not jump ahead of
+// immediate tasks already queued on the same queue.
+TEST_P(SequenceManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_SameQueue) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->AdvanceMockTickClock(delay * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 3u, 1u));
+}
+
+// Verifies that a due delayed task on one queue does not jump ahead of
+// immediate tasks queued earlier on a different queue.
+TEST_P(SequenceManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_DifferentQueues) {
+ CreateTaskQueues(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay);
+
+ test_task_runner_->AdvanceMockTickClock(delay * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 3u, 1u));
+}
+
+// Verifies that when two delayed tasks on different queues both become due,
+// the one with the shorter (earlier) deadline runs first.
+TEST_P(SequenceManagerTest, DelayedTaskDoesNotSkipAHeadOfShorterDelayedTask) {
+ CreateTaskQueues(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(5);
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ delay1);
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ delay2);
+
+ test_task_runner_->AdvanceMockTickClock(delay1 * 2);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2u, 1u));
+}
+
+// Test helper: captures whether the current thread is inside a nested RunLoop
+// at the moment this task executes.
+void CheckIsNested(bool* is_nested) {
+ *is_nested = RunLoop::IsNestedOnCurrentThread();
+}
+
+// Test helper: from inside a task, posts the quit closure followed by a
+// nesting check, then runs |run_loop| nested. The quit runs first, so the
+// CheckIsNested task executes after the nested loop has exited.
+void PostAndQuitFromNestedRunloop(RunLoop* run_loop,
+ SingleThreadTaskRunner* runner,
+ bool* was_nested) {
+ runner->PostTask(FROM_HERE, run_loop->QuitClosure());
+ runner->PostTask(FROM_HERE, BindOnce(&CheckIsNested, was_nested));
+ run_loop->Run();
+}
+
+TEST_P(SequenceManagerTestWithMessageLoop, QuitWhileNested) {
+ // This test makes sure we don't continue running a work batch after a nested
+ // run loop has been exited in the middle of the batch.
+ CreateTaskQueues(1u);
+ manager_->SetWorkBatchSize(2);
+
+ bool was_nested = true;
+ RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostAndQuitFromNestedRunloop, Unretained(&run_loop),
+ RetainedRef(runners_[0]), Unretained(&was_nested)));
+
+ RunLoop().RunUntilIdle();
+ // The CheckIsNested task must have run outside the nested loop.
+ EXPECT_FALSE(was_nested);
+}
+
+// Task observer that records the sequence_num of every processed PendingTask,
+// letting tests assert the order in which sequence numbers were assigned.
+class SequenceNumberCapturingTaskObserver : public MessageLoop::TaskObserver {
+ public:
+ // MessageLoop::TaskObserver overrides.
+ void WillProcessTask(const PendingTask& pending_task) override {}
+ void DidProcessTask(const PendingTask& pending_task) override {
+ sequence_numbers_.push_back(pending_task.sequence_num);
+ }
+
+ const std::vector<int>& sequence_numbers() const { return sequence_numbers_; }
+
+ private:
+ std::vector<int> sequence_numbers_;
+};
+
+// Verifies that PendingTask::sequence_num reflects posting order (tasks run in
+// reverse order here, so observed sequence numbers descend).
+TEST_P(SequenceManagerTest, SequenceNumSetWhenTaskIsPosted) {
+ CreateTaskQueues(1u);
+
+ SequenceNumberCapturingTaskObserver observer;
+ manager_->AddTaskObserver(&observer);
+
+ // Register four tasks that will run in reverse order.
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(4u, 3u, 2u, 1u));
+
+ // The sequence numbers are a one-based monotonically incrememting counter
+ // which should be set when the task is posted rather than when it's enqueued
+ // onto the Incoming queue. This counter starts with 2.
+ EXPECT_THAT(observer.sequence_numbers(), ElementsAre(5, 4, 3, 2));
+
+ manager_->RemoveTaskObserver(&observer);
+}
+
+// Verifies that queues created after construction are distinct and all run
+// their tasks in posting order.
+TEST_P(SequenceManagerTest, NewTaskQueues) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+}
+
+// Verifies that shutting down a queue drops its already-posted tasks while
+// other queues' tasks still run.
+TEST_P(SequenceManagerTest, ShutdownTaskQueue) {
+ CreateTaskQueues(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ queue2->ShutdownTaskQueue();
+ RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1u, 3u));
+}
+
+// Verifies that shutting down a queue also cancels its pending delayed tasks.
+TEST_P(SequenceManagerTest, ShutdownTaskQueue_WithDelayedTasks) {
+ CreateTaskQueues(2u);
+
+ // Register three delayed tasks
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->ShutdownTaskQueue();
+ RunLoop().RunUntilIdle();
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(1u, 3u));
+}
+
+namespace {
+// Test helper: shuts down |queue| from within a running task.
+void ShutdownQueue(scoped_refptr<TaskQueue> queue) {
+ queue->ShutdownTaskQueue();
+}
+} // namespace
+
+// Verifies that queues shut down from within a task drop their pending tasks:
+// only the first task (on the surviving queue) runs.
+TEST_P(SequenceManagerTest, ShutdownTaskQueue_InTasks) {
+ CreateTaskQueues(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ShutdownQueue, runners_[1]));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&ShutdownQueue, runners_[2]));
+ runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+ runners_[2]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+ RunLoop().RunUntilIdle();
+ ASSERT_THAT(run_order, ElementsAre(1u));
+}
+
+namespace {
+
+// GoogleMock implementation of SequenceManager::Observer for verifying
+// blocked-task and nested-run-loop notifications.
+class MockObserver : public SequenceManager::Observer {
+ public:
+ MOCK_METHOD0(OnTriedToExecuteBlockedTask, void());
+ MOCK_METHOD0(OnBeginNestedRunLoop, void());
+ MOCK_METHOD0(OnExitNestedRunLoop, void());
+};
+
+} // namespace
+
+// Verifies that shutting down a task queue from inside a nested run loop does
+// not crash (the manager must defer releasing its reference appropriately).
+TEST_P(SequenceManagerTestWithMessageLoop, ShutdownTaskQueueInNestedLoop) {
+ CreateTaskQueues(1u);
+
+ // We retain a reference to the task queue even when the manager has deleted
+ // its reference.
+ scoped_refptr<TaskQueue> task_queue = CreateTaskQueue();
+
+ std::vector<bool> log;
+ std::vector<std::pair<OnceClosure, bool>> tasks_to_post_from_nested_loop;
+
+ // Inside a nested run loop, call task_queue->ShutdownTaskQueue, bookended
+ // by calls to HasOneRefTask to make sure the manager doesn't release its
+ // reference until the nested run loop exits.
+ // NB: This first HasOneRefTask is a sanity check.
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&NopTask), true));
+ tasks_to_post_from_nested_loop.push_back(std::make_pair(
+ BindOnce(&TaskQueue::ShutdownTaskQueue, Unretained(task_queue.get())),
+ true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(BindOnce(&NopTask), true));
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&PostFromNestedRunloop, RetainedRef(runners_[0]),
+ Unretained(&tasks_to_post_from_nested_loop)));
+ RunLoop().RunUntilIdle();
+
+ // Just make sure that we don't crash.
+}
+
+// Verifies that two queues on separate mock time domains advance
+// independently: moving one domain's clock releases only that queue's delayed
+// tasks.
+TEST_P(SequenceManagerTest, TimeDomainsAreIndependant) {
+ CreateTaskQueues(2u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[1]->SetTimeDomain(domain_b.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ // Advance only domain_b: just queue 1's tasks (4, 5, 6) become runnable.
+ domain_b->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4u, 5u, 6u));
+
+ domain_a->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4u, 5u, 6u, 1u, 2u, 3u));
+
+ // Queues must be shut down before their time domains are unregistered.
+ runners_[0]->ShutdownTaskQueue();
+ runners_[1]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// Verifies that a queue's remaining delayed tasks survive migration to a new
+// time domain and run once that domain's clock reaches their deadlines.
+TEST_P(SequenceManagerTest, TimeDomainMigration) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(40));
+
+ // Run the first two tasks on domain_a, then migrate to domain_b.
+ domain_a->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(20));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ domain_b->SetNowTicks(start_time_ticks + TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// Verifies that an immediate task posted before a time-domain switch still
+// runs after the queue moves to the new domain.
+TEST_P(SequenceManagerTest, TimeDomainMigrationWithIncomingImmediateTasks) {
+ CreateTaskQueues(1u);
+
+ TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<MockTimeDomain> domain_a =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ std::unique_ptr<MockTimeDomain> domain_b =
+ std::make_unique<MockTimeDomain>(start_time_ticks);
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// Verifies delayed-task ordering is by deadline even when each task was
+// posted while the queue belonged to a different (real) time domain.
+TEST_P(SequenceManagerTest,
+ PostDelayedTasksReverseOrderAlternatingTimeDomains) {
+ CreateTaskQueues(1u);
+
+ std::vector<EnqueueOrder> run_order;
+
+ std::unique_ptr<internal::RealTimeDomain> domain_a =
+ std::make_unique<internal::RealTimeDomain>();
+ std::unique_ptr<internal::RealTimeDomain> domain_b =
+ std::make_unique<internal::RealTimeDomain>();
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ // Alternate domains between posts; delays descend (40, 30, 20, 10ms).
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order),
+ TimeDelta::FromMilliseconds(40));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order),
+ TimeDelta::FromMilliseconds(30));
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order),
+ TimeDelta::FromMilliseconds(20));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order),
+ TimeDelta::FromMilliseconds(10));
+
+ test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(40));
+ EXPECT_THAT(run_order, ElementsAre(4u, 3u, 2u, 1u));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+namespace {
+
+// GoogleMock implementation of TaskQueue::Observer used to verify
+// OnQueueNextWakeUpChanged notifications.
+class MockTaskQueueObserver : public TaskQueue::Observer {
+ public:
+ ~MockTaskQueueObserver() override = default;
+
+ MOCK_METHOD2(OnQueueNextWakeUpChanged, void(TaskQueue*, TimeTicks));
+};
+
+} // namespace
+
+// Verifies that TaskQueue::Observer is notified when an immediate task is
+// posted to an empty queue, but not for subsequent posts while work remains.
+TEST_P(SequenceManagerTest, TaskQueueObserver_ImmediateTask) {
+ CreateTaskQueues(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a task is posted on an empty queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // But not subsequently.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Unless the immediate work queue is emptied.
+ runners_[0]->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+// Checks that delayed posts notify the observer only when they move the
+// queue's next wake-up earlier, and that re-enabling a disabled queue
+// re-notifies with the still-pending earliest wake-up.
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTask) {
+  CreateTaskQueues(1u);
+
+  TimeTicks start_time = manager_->NowTicks();
+  TimeDelta delay10s(TimeDelta::FromSeconds(10));
+  TimeDelta delay100s(TimeDelta::FromSeconds(100));
+  TimeDelta delay1s(TimeDelta::FromSeconds(1));
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+
+  // We should get a notification when a delayed task is posted on an empty
+  // queue.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+                                                 start_time + delay10s));
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay10s);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // We should not get a notification for a longer delay.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay100s);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // We should get a notification for a shorter delay.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+                                                 start_time + delay1s));
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // When a queue has been re-enabled, we may get a notification if the
+  // TimeDomain's next scheduled wake-up has changed.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+                                                 start_time + delay1s));
+  voter->SetQueueEnabled(true);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Tidy up.
+  runners_[0]->ShutdownTaskQueue();
+}
+
+// Checks that wake-up notifications are delivered per queue: disabling a
+// queue is silent, while re-enabling re-notifies with that queue's own
+// pending wake-up time.
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTaskMultipleQueues) {
+  CreateTaskQueues(2u);
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+  runners_[1]->SetObserver(&observer);
+
+  TimeTicks start_time = manager_->NowTicks();
+  TimeDelta delay1s(TimeDelta::FromSeconds(1));
+  TimeDelta delay10s(TimeDelta::FromSeconds(10));
+
+  EXPECT_CALL(observer,
+              OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1s))
+      .Times(1);
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+                                                 start_time + delay10s))
+      .Times(1);
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+  runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay10s);
+  testing::Mock::VerifyAndClearExpectations(&observer);
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+      runners_[1]->CreateQueueEnabledVoter();
+
+  // Disabling a queue should not trigger a notification.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+  voter0->SetQueueEnabled(false);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Re-enabling it should trigger a notification.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+                                                 start_time + delay1s));
+  voter0->SetQueueEnabled(true);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Disabling a queue should not trigger a notification.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+  voter1->SetQueueEnabled(false);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Re-enabling it should trigger a notification.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+                                                 start_time + delay10s));
+  voter1->SetQueueEnabled(true);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Tidy up.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(AnyNumber());
+  runners_[0]->ShutdownTaskQueue();
+  runners_[1]->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
+  // This test checks that when delayed work becomes available
+  // the notification still fires. This usually happens when time advances
+  // and task becomes available in the middle of the scheduling code.
+  // For this test we rely on the fact that notification dispatching code
+  // is the same in all conditions and just change a time domain to
+  // trigger notification.
+
+  CreateTaskQueues(1u);
+
+  TimeDelta delay1s(TimeDelta::FromSeconds(1));
+  TimeDelta delay10s(TimeDelta::FromSeconds(10));
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+
+  // We should get a notification when a delayed task is posted on an empty
+  // queue.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
+  Mock::VerifyAndClearExpectations(&observer);
+
+  std::unique_ptr<TimeDomain> mock_time_domain =
+      std::make_unique<internal::RealTimeDomain>();
+  manager_->RegisterTimeDomain(mock_time_domain.get());
+
+  // Advance time past the pending delayed task's run time, so that switching
+  // time domains re-evaluates the wake-up while the task is already runnable.
+  test_task_runner_->AdvanceMockTickClock(delay10s);
+
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+  runners_[0]->SetTimeDomain(mock_time_domain.get());
+  Mock::VerifyAndClearExpectations(&observer);
+
+  // Tidy up.
+  runners_[0]->ShutdownTaskQueue();
+}
+
+// Test helper whose posted callbacks can be canceled by invalidating the
+// weak pointers handed out by |weak_factory_|. RecordTimeTask() appends the
+// current tick time, letting tests verify exactly when (and whether) a
+// delayed task ran.
+class CancelableTask {
+ public:
+  explicit CancelableTask(const TickClock* clock)
+      : clock_(clock), weak_factory_(this) {}
+
+  // Appends the current time to |run_times|; skipped entirely if the bound
+  // weak pointer has been invalidated.
+  void RecordTimeTask(std::vector<TimeTicks>* run_times) {
+    run_times->push_back(clock_->NowTicks());
+  }
+
+  const TickClock* clock_;
+  WeakPtrFactory<CancelableTask> weak_factory_;
+};
+
+// Checks that sweeping canceled delayed tasks re-notifies the observer with
+// the next still-valid wake-up time.
+TEST_P(SequenceManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
+  CreateTaskQueues(1u);
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+
+  TimeTicks start_time = manager_->NowTicks();
+  TimeDelta delay1(TimeDelta::FromSeconds(5));
+  TimeDelta delay2(TimeDelta::FromSeconds(10));
+
+  EXPECT_CALL(observer,
+              OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1))
+      .Times(1);
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  std::vector<TimeTicks> run_times;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task1.weak_factory_.GetWeakPtr(), &run_times),
+      delay1);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task2.weak_factory_.GetWeakPtr(), &run_times),
+      delay2);
+
+  // Cancel the earlier task so only |task2|'s wake-up remains valid.
+  task1.weak_factory_.InvalidateWeakPtrs();
+
+  // Sweeping away canceled delayed tasks should trigger a notification.
+  EXPECT_CALL(observer,
+              OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay2))
+      .Times(1);
+  manager_->SweepCanceledDelayedTasks();
+}
+
+namespace {
+// Asserts that the underlying task runner never holds more than one pending
+// task at the moment this task runs.
+void ChromiumRunloopInspectionTask(
+    scoped_refptr<TestMockTimeTaskRunner> test_task_runner) {
+  // We don't expect more than 1 pending task at any time.
+  EXPECT_GE(1u, test_task_runner->GetPendingTaskCount());
+}
+}  // namespace
+
+// Posts 99 delayed tasks in ascending-delay order and verifies the underlying
+// run loop never accumulates more than one pending task at a time.
+TEST_P(SequenceManagerTest, NumberOfPendingTasksOnChromiumRunLoop) {
+  CreateTaskQueues(1u);
+
+  // NOTE because tasks posted to the Chromium run loop are not cancellable, we
+  // will end up with a lot more tasks posted if the delayed tasks were posted
+  // in the reverse order.
+  // TODO(alexclarke): Consider talking to the message pump directly.
+  for (int i = 1; i < 100; i++) {
+    runners_[0]->PostDelayedTask(
+        FROM_HERE, BindOnce(&ChromiumRunloopInspectionTask, test_task_runner_),
+        TimeDelta::FromMilliseconds(i));
+  }
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+}
+
+namespace {
+
+// Self-reposting task that posts TWO copies of itself per run (so the number
+// of pending tasks grows geometrically), advancing mock time 5ms each run.
+// Used by the starvation tests below to generate delayed/immediate load.
+class QuadraticTask {
+ public:
+  QuadraticTask(scoped_refptr<TaskQueue> task_queue,
+                TimeDelta delay,
+                scoped_refptr<TestMockTimeTaskRunner> test_task_runner)
+      : count_(0),
+        task_queue_(task_queue),
+        delay_(delay),
+        test_task_runner_(test_task_runner) {}
+
+  // Must be called before Run(); the callback stops the repost cascade.
+  void SetShouldExit(RepeatingCallback<bool()> should_exit) {
+    should_exit_ = should_exit;
+  }
+
+  void Run() {
+    if (should_exit_.Run())
+      return;
+    count_++;
+    task_queue_->PostDelayedTask(
+        FROM_HERE, BindOnce(&QuadraticTask::Run, Unretained(this)), delay_);
+    task_queue_->PostDelayedTask(
+        FROM_HERE, BindOnce(&QuadraticTask::Run, Unretained(this)), delay_);
+    test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+  }
+
+  // Number of times Run() executed past the exit check.
+  int Count() const { return count_; }
+
+ private:
+  int count_;
+  scoped_refptr<TaskQueue> task_queue_;
+  TimeDelta delay_;
+  RepeatingCallback<bool()> should_exit_;
+  scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+// Self-reposting task that posts ONE copy of itself per run (linear growth),
+// advancing mock time 5ms each run. Counterpart to QuadraticTask.
+class LinearTask {
+ public:
+  LinearTask(scoped_refptr<TaskQueue> task_queue,
+             TimeDelta delay,
+             scoped_refptr<TestMockTimeTaskRunner> test_task_runner)
+      : count_(0),
+        task_queue_(task_queue),
+        delay_(delay),
+        test_task_runner_(test_task_runner) {}
+
+  // Must be called before Run(); the callback stops the repost chain.
+  void SetShouldExit(RepeatingCallback<bool()> should_exit) {
+    should_exit_ = should_exit;
+  }
+
+  void Run() {
+    if (should_exit_.Run())
+      return;
+    count_++;
+    task_queue_->PostDelayedTask(
+        FROM_HERE, BindOnce(&LinearTask::Run, Unretained(this)), delay_);
+    test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(5));
+  }
+
+  // Number of times Run() executed past the exit check.
+  int Count() const { return count_; }
+
+ private:
+  int count_;
+  scoped_refptr<TaskQueue> task_queue_;
+  TimeDelta delay_;
+  RepeatingCallback<bool()> should_exit_;
+  scoped_refptr<TestMockTimeTaskRunner> test_task_runner_;
+};
+
+// Terminates both repost cascades once either task has run 1000 times.
+bool ShouldExit(QuadraticTask* quadratic_task, LinearTask* linear_task) {
+  return quadratic_task->Count() == 1000 || linear_task->Count() == 1000;
+}
+
+}  // namespace
+
+// Verifies fairness on a single queue: a geometrically-growing delayed-task
+// load must not starve an immediate-task chain; their run counts should stay
+// within a factor of ~3 of each other.
+TEST_P(SequenceManagerTest,
+       DelayedTasksDontBadlyStarveNonDelayedWork_SameQueue) {
+  CreateTaskQueues(1u);
+
+  QuadraticTask quadratic_delayed_task(
+      runners_[0], TimeDelta::FromMilliseconds(10), test_task_runner_);
+  LinearTask linear_immediate_task(runners_[0], TimeDelta(), test_task_runner_);
+  // Use &ShouldExit (not ShouldExit) for consistency with the other
+  // starvation tests in this file.
+  RepeatingCallback<bool()> should_exit = BindRepeating(
+      &ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+  quadratic_delayed_task.SetShouldExit(should_exit);
+  linear_immediate_task.SetShouldExit(should_exit);
+
+  quadratic_delayed_task.Run();
+  linear_immediate_task.Run();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  double ratio = static_cast<double>(linear_immediate_task.Count()) /
+                 static_cast<double>(quadratic_delayed_task.Count());
+
+  EXPECT_GT(ratio, 0.333);
+  EXPECT_LT(ratio, 1.1);
+}
+
+// Verifies the deliberate asymmetry on a single queue: a geometrically-
+// growing immediate-task load IS allowed to starve delayed tasks almost
+// completely (ratio < 0.1).
+TEST_P(SequenceManagerTest, ImmediateWorkCanStarveDelayedTasks_SameQueue) {
+  CreateTaskQueues(1u);
+
+  QuadraticTask quadratic_immediate_task(runners_[0], TimeDelta(),
+                                         test_task_runner_);
+  LinearTask linear_delayed_task(runners_[0], TimeDelta::FromMilliseconds(10),
+                                 test_task_runner_);
+  RepeatingCallback<bool()> should_exit = BindRepeating(
+      &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+  quadratic_immediate_task.SetShouldExit(should_exit);
+  linear_delayed_task.SetShouldExit(should_exit);
+
+  quadratic_immediate_task.Run();
+  linear_delayed_task.Run();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  double ratio = static_cast<double>(linear_delayed_task.Count()) /
+                 static_cast<double>(quadratic_immediate_task.Count());
+
+  // This is by design, we want to enforce a strict ordering in task execution
+  // whereby delayed tasks can not skip ahead of non-delayed work.
+  EXPECT_GT(ratio, 0.0);
+  EXPECT_LT(ratio, 0.1);
+}
+
+// Same fairness check as the _SameQueue variant, but with the delayed and
+// immediate loads on two different queues.
+TEST_P(SequenceManagerTest,
+       DelayedTasksDontBadlyStarveNonDelayedWork_DifferentQueue) {
+  CreateTaskQueues(2u);
+
+  QuadraticTask quadratic_delayed_task(
+      runners_[0], TimeDelta::FromMilliseconds(10), test_task_runner_);
+  LinearTask linear_immediate_task(runners_[1], TimeDelta(), test_task_runner_);
+  // Use &ShouldExit (not ShouldExit) for consistency with the other
+  // starvation tests in this file.
+  RepeatingCallback<bool()> should_exit = BindRepeating(
+      &ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+  quadratic_delayed_task.SetShouldExit(should_exit);
+  linear_immediate_task.SetShouldExit(should_exit);
+
+  quadratic_delayed_task.Run();
+  linear_immediate_task.Run();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  double ratio = static_cast<double>(linear_immediate_task.Count()) /
+                 static_cast<double>(quadratic_delayed_task.Count());
+
+  EXPECT_GT(ratio, 0.333);
+  EXPECT_LT(ratio, 1.1);
+}
+
+// Same starvation check as the _SameQueue variant, but with the immediate and
+// delayed loads on two different queues.
+TEST_P(SequenceManagerTest, ImmediateWorkCanStarveDelayedTasks_DifferentQueue) {
+  CreateTaskQueues(2u);
+
+  QuadraticTask quadratic_immediate_task(runners_[0], TimeDelta(),
+                                         test_task_runner_);
+  LinearTask linear_delayed_task(runners_[1], TimeDelta::FromMilliseconds(10),
+                                 test_task_runner_);
+  RepeatingCallback<bool()> should_exit = BindRepeating(
+      &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+  quadratic_immediate_task.SetShouldExit(should_exit);
+  linear_delayed_task.SetShouldExit(should_exit);
+
+  quadratic_immediate_task.Run();
+  linear_delayed_task.Run();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  double ratio = static_cast<double>(linear_delayed_task.Count()) /
+                 static_cast<double>(quadratic_immediate_task.Count());
+
+  // This is by design, we want to enforce a strict ordering in task execution
+  // whereby delayed tasks can not skip ahead of non-delayed work.
+  EXPECT_GT(ratio, 0.0);
+  EXPECT_LT(ratio, 0.1);
+}
+
+// With no task running, currently_executing_task_queue() must be null.
+TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_NoTaskRunning) {
+  CreateTaskQueues(1u);
+
+  EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+// Records which queue the manager reports as currently executing at the time
+// this task runs.
+void CurrentlyExecutingTaskQueueTestTask(
+    SequenceManagerImpl* sequence_manager,
+    std::vector<internal::TaskQueueImpl*>* task_sources) {
+  task_sources->push_back(sequence_manager->currently_executing_task_queue());
+}
+}  // namespace
+
+// While tasks run, currently_executing_task_queue() reports the queue that
+// posted each task, and reverts to null once the run loop is idle.
+TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_TaskRunning) {
+  CreateTaskQueues(2u);
+
+  TestTaskQueue* queue0 = runners_[0].get();
+  TestTaskQueue* queue1 = runners_[1].get();
+
+  std::vector<internal::TaskQueueImpl*> task_sources;
+  queue0->PostTask(FROM_HERE, BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+                                       manager_.get(), &task_sources));
+  queue1->PostTask(FROM_HERE, BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+                                       manager_.get(), &task_sources));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_THAT(task_sources, ElementsAre(queue0->GetTaskQueueImpl(),
+                                        queue1->GetTaskQueueImpl()));
+  EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+// Records the currently-executing queue, posts the supplied closures to their
+// queues, spins a nested run loop until idle, then records the currently-
+// executing queue again (which should be back to this task's queue).
+void RunloopCurrentlyExecutingTaskQueueTestTask(
+    SequenceManagerImpl* sequence_manager,
+    std::vector<internal::TaskQueueImpl*>* task_sources,
+    std::vector<std::pair<OnceClosure, TestTaskQueue*>>* tasks) {
+  task_sources->push_back(sequence_manager->currently_executing_task_queue());
+
+  for (std::pair<OnceClosure, TestTaskQueue*>& pair : *tasks) {
+    pair.second->PostTask(FROM_HERE, std::move(pair.first));
+  }
+
+  RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+  task_sources->push_back(sequence_manager->currently_executing_task_queue());
+}
+}  // namespace
+
+// currently_executing_task_queue() must track queue changes across a nested
+// run loop and restore the outer queue when the nested loop exits.
+TEST_P(SequenceManagerTestWithMessageLoop,
+       CurrentlyExecutingTaskQueue_NestedLoop) {
+  CreateTaskQueues(3u);
+
+  TestTaskQueue* queue0 = runners_[0].get();
+  TestTaskQueue* queue1 = runners_[1].get();
+  TestTaskQueue* queue2 = runners_[2].get();
+
+  std::vector<internal::TaskQueueImpl*> task_sources;
+  std::vector<std::pair<OnceClosure, TestTaskQueue*>>
+      tasks_to_post_from_nested_loop;
+  tasks_to_post_from_nested_loop.push_back(
+      std::make_pair(BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+                              manager_.get(), &task_sources),
+                     queue1));
+  tasks_to_post_from_nested_loop.push_back(
+      std::make_pair(BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+                              manager_.get(), &task_sources),
+                     queue2));
+
+  queue0->PostTask(
+      FROM_HERE,
+      BindOnce(&RunloopCurrentlyExecutingTaskQueueTestTask, manager_.get(),
+               &task_sources, &tasks_to_post_from_nested_loop));
+
+  RunLoop().RunUntilIdle();
+  // Expected order: queue0 before the nested loop, queue1 and queue2 inside
+  // it, then queue0 again after the nested loop unwinds.
+  EXPECT_THAT(
+      task_sources,
+      ElementsAre(queue0->GetTaskQueueImpl(), queue1->GetTaskQueueImpl(),
+                  queue2->GetTaskQueueImpl(), queue0->GetTaskQueueImpl()));
+  EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+// Running a task on a queue with a BlameContext should emit exactly one
+// ENTER_CONTEXT / LEAVE_CONTEXT trace-event pair.
+TEST_P(SequenceManagerTestWithMessageLoop, BlameContextAttribution) {
+  using trace_analyzer::Query;
+
+  CreateTaskQueues(1u);
+  TestTaskQueue* queue = runners_[0].get();
+
+  trace_analyzer::Start("*");
+  {
+    trace_event::BlameContext blame_context("cat", "name", "type", "scope", 0,
+                                            nullptr);
+    blame_context.Initialize();
+    queue->SetBlameContext(&blame_context);
+    queue->PostTask(FROM_HERE, BindOnce(&NopTask));
+    RunLoop().RunUntilIdle();
+  }
+  auto analyzer = trace_analyzer::Stop();
+
+  trace_analyzer::TraceEventVector events;
+  Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_ENTER_CONTEXT) ||
+            Query::EventPhaseIs(TRACE_EVENT_PHASE_LEAVE_CONTEXT);
+  analyzer->FindEvents(q, &events);
+
+  // One enter + one leave for the single task.
+  EXPECT_EQ(2u, events.size());
+}
+
+// Posting four delayed tasks (ascending delays) and canceling the middle two
+// should produce wake-ups only for the surviving tasks' run times.
+TEST_P(SequenceManagerTest, NoWakeUpsForCanceledDelayedTasks) {
+  CreateTaskQueues(1u);
+
+  TimeTicks start_time = manager_->NowTicks();
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  CancelableTask task3(GetTickClock());
+  CancelableTask task4(GetTickClock());
+  TimeDelta delay1(TimeDelta::FromSeconds(5));
+  TimeDelta delay2(TimeDelta::FromSeconds(10));
+  TimeDelta delay3(TimeDelta::FromSeconds(15));
+  TimeDelta delay4(TimeDelta::FromSeconds(30));
+  std::vector<TimeTicks> run_times;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task1.weak_factory_.GetWeakPtr(), &run_times),
+      delay1);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task2.weak_factory_.GetWeakPtr(), &run_times),
+      delay2);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task3.weak_factory_.GetWeakPtr(), &run_times),
+      delay3);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task4.weak_factory_.GetWeakPtr(), &run_times),
+      delay4);
+
+  // Cancel the two middle tasks; no wake-ups should occur for their delays.
+  task2.weak_factory_.InvalidateWeakPtrs();
+  task3.weak_factory_.InvalidateWeakPtrs();
+
+  std::set<TimeTicks> wake_up_times;
+
+  // The per-iteration callback records every time the manager wakes up.
+  RunUntilManagerIsIdle(BindRepeating(
+      [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+        wake_up_times->insert(clock->NowTicks());
+      },
+      &wake_up_times, GetTickClock()));
+
+  EXPECT_THAT(wake_up_times,
+              ElementsAre(start_time + delay1, start_time + delay4));
+  EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+// Same as NoWakeUpsForCanceledDelayedTasks, but with the tasks posted in
+// descending-delay order to exercise the delayed-queue insertion path.
+TEST_P(SequenceManagerTest, NoWakeUpsForCanceledDelayedTasksReversePostOrder) {
+  CreateTaskQueues(1u);
+
+  TimeTicks start_time = manager_->NowTicks();
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  CancelableTask task3(GetTickClock());
+  CancelableTask task4(GetTickClock());
+  TimeDelta delay1(TimeDelta::FromSeconds(5));
+  TimeDelta delay2(TimeDelta::FromSeconds(10));
+  TimeDelta delay3(TimeDelta::FromSeconds(15));
+  TimeDelta delay4(TimeDelta::FromSeconds(30));
+  std::vector<TimeTicks> run_times;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task4.weak_factory_.GetWeakPtr(), &run_times),
+      delay4);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task3.weak_factory_.GetWeakPtr(), &run_times),
+      delay3);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task2.weak_factory_.GetWeakPtr(), &run_times),
+      delay2);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task1.weak_factory_.GetWeakPtr(), &run_times),
+      delay1);
+
+  // Cancel the two middle tasks; no wake-ups should occur for their delays.
+  task2.weak_factory_.InvalidateWeakPtrs();
+  task3.weak_factory_.InvalidateWeakPtrs();
+
+  std::set<TimeTicks> wake_up_times;
+
+  // The per-iteration callback records every time the manager wakes up.
+  RunUntilManagerIsIdle(BindRepeating(
+      [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+        wake_up_times->insert(clock->NowTicks());
+      },
+      &wake_up_times, GetTickClock()));
+
+  EXPECT_THAT(wake_up_times,
+              ElementsAre(start_time + delay1, start_time + delay4));
+  EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+// A wake-up time shared by a canceled task and a live task must NOT be
+// canceled: the wake-up at |delay3| survives because a second, non-canceled
+// task was posted with the same delay.
+TEST_P(SequenceManagerTest, TimeDomainWakeUpOnlyCancelledIfAllUsesCancelled) {
+  CreateTaskQueues(1u);
+
+  TimeTicks start_time = manager_->NowTicks();
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  CancelableTask task3(GetTickClock());
+  CancelableTask task4(GetTickClock());
+  TimeDelta delay1(TimeDelta::FromSeconds(5));
+  TimeDelta delay2(TimeDelta::FromSeconds(10));
+  TimeDelta delay3(TimeDelta::FromSeconds(15));
+  TimeDelta delay4(TimeDelta::FromSeconds(30));
+  std::vector<TimeTicks> run_times;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task1.weak_factory_.GetWeakPtr(), &run_times),
+      delay1);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task2.weak_factory_.GetWeakPtr(), &run_times),
+      delay2);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task3.weak_factory_.GetWeakPtr(), &run_times),
+      delay3);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task4.weak_factory_.GetWeakPtr(), &run_times),
+      delay4);
+
+  // Post a non-canceled task with |delay3|. So we should still get a wake-up at
+  // |delay3| even though we cancel |task3|.
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask, Unretained(&task3), &run_times),
+      delay3);
+
+  task2.weak_factory_.InvalidateWeakPtrs();
+  task3.weak_factory_.InvalidateWeakPtrs();
+  task1.weak_factory_.InvalidateWeakPtrs();
+
+  std::set<TimeTicks> wake_up_times;
+
+  // The per-iteration callback records every time the manager wakes up.
+  RunUntilManagerIsIdle(BindRepeating(
+      [](std::set<TimeTicks>* wake_up_times, const TickClock* clock) {
+        wake_up_times->insert(clock->NowTicks());
+      },
+      &wake_up_times, GetTickClock()));
+
+  EXPECT_THAT(wake_up_times,
+              ElementsAre(start_time + delay1, start_time + delay3,
+                          start_time + delay4));
+
+  EXPECT_THAT(run_times, ElementsAre(start_time + delay3, start_time + delay4));
+}
+
+// A queue is enabled iff NO voter votes to disable it; deleting a voter
+// implicitly withdraws its disable vote.
+TEST_P(SequenceManagerTest, TaskQueueVoters) {
+  CreateTaskQueues(1u);
+
+  // The task queue should be initially enabled.
+  EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter3 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter4 =
+      runners_[0]->CreateQueueEnabledVoter();
+
+  // Voters should initially vote for the queue to be enabled.
+  EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+  // If any voter wants to disable, the queue is disabled.
+  voter1->SetQueueEnabled(false);
+  EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+  // If the voter is deleted then the queue should be re-enabled.
+  voter1.reset();
+  EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+  // If any of the remaining voters wants to disable, the queue should be
+  // disabled.
+  voter2->SetQueueEnabled(false);
+  EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+  // If another voter votes to disable, nothing happens because it's already
+  // disabled.
+  voter3->SetQueueEnabled(false);
+  EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+  // There are two votes to disable, so one of them voting to enable does
+  // nothing.
+  voter2->SetQueueEnabled(true);
+  EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+  // If all voters vote to enable then the queue is enabled.
+  voter3->SetQueueEnabled(true);
+  EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+}
+
+// Deleting an enabled-voting voter after its queue has been shut down must
+// not crash or DCHECK.
+TEST_P(SequenceManagerTest, ShutdownQueueBeforeEnabledVoterDeleted) {
+  CreateTaskQueues(1u);
+
+  scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      queue->CreateQueueEnabledVoter();
+
+  voter->SetQueueEnabled(true);  // NOP
+  queue->ShutdownTaskQueue();
+
+  // This should complete without DCHECKing.
+  voter.reset();
+}
+
+// Deleting a disable-voting voter after its queue has been shut down must
+// not crash or DCHECK (the implicit re-enable has no queue to act on).
+TEST_P(SequenceManagerTest, ShutdownQueueBeforeDisabledVoterDeleted) {
+  CreateTaskQueues(1u);
+
+  scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      queue->CreateQueueEnabledVoter();
+
+  voter->SetQueueEnabled(false);
+  queue->ShutdownTaskQueue();
+
+  // This should complete without DCHECKing.
+  voter.reset();
+}
+
+// SweepCanceledDelayedTasks() must remove exactly the canceled pending
+// delayed tasks, leaving the live ones queued.
+TEST_P(SequenceManagerTest, SweepCanceledDelayedTasks) {
+  CreateTaskQueues(1u);
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  CancelableTask task3(GetTickClock());
+  CancelableTask task4(GetTickClock());
+  TimeDelta delay1(TimeDelta::FromSeconds(5));
+  TimeDelta delay2(TimeDelta::FromSeconds(10));
+  TimeDelta delay3(TimeDelta::FromSeconds(15));
+  TimeDelta delay4(TimeDelta::FromSeconds(30));
+  std::vector<TimeTicks> run_times;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task1.weak_factory_.GetWeakPtr(), &run_times),
+      delay1);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task2.weak_factory_.GetWeakPtr(), &run_times),
+      delay2);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task3.weak_factory_.GetWeakPtr(), &run_times),
+      delay3);
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&CancelableTask::RecordTimeTask,
+               task4.weak_factory_.GetWeakPtr(), &run_times),
+      delay4);
+
+  EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+  // Canceling alone does not remove the tasks from the queue...
+  task2.weak_factory_.InvalidateWeakPtrs();
+  task3.weak_factory_.InvalidateWeakPtrs();
+  EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+
+  // ...sweeping does.
+  manager_->SweepCanceledDelayedTasks();
+  EXPECT_EQ(2u, runners_[0]->GetNumberOfPendingTasks());
+
+  task1.weak_factory_.InvalidateWeakPtrs();
+  task4.weak_factory_.InvalidateWeakPtrs();
+
+  manager_->SweepCanceledDelayedTasks();
+  EXPECT_EQ(0u, runners_[0]->GetNumberOfPendingTasks());
+}
+
+// DelayTillNextTask() must report the minimum delay across all queues, Max()
+// when nothing is pending, and zero once immediate work exists.
+TEST_P(SequenceManagerTest, DelayTillNextTask) {
+  CreateTaskQueues(2u);
+
+  LazyNow lazy_now(GetTickClock());
+  EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromSeconds(10));
+
+  EXPECT_EQ(TimeDelta::FromSeconds(10), manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromSeconds(15));
+
+  EXPECT_EQ(TimeDelta::FromSeconds(10), manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromSeconds(5));
+
+  EXPECT_EQ(TimeDelta::FromSeconds(5), manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+  EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// Work on a disabled queue must not count towards DelayTillNextTask().
+TEST_P(SequenceManagerTest, DelayTillNextTask_Disabled) {
+  CreateTaskQueues(1u);
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+  LazyNow lazy_now(GetTickClock());
+  EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// Work blocked behind a fence must not count towards DelayTillNextTask().
+TEST_P(SequenceManagerTest, DelayTillNextTask_Fence) {
+  CreateTaskQueues(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+  LazyNow lazy_now(GetTickClock());
+  EXPECT_EQ(TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// Re-inserting a kNow fence moves it past the already-posted task, unblocking
+// it, so DelayTillNextTask() reports immediate work.
+TEST_P(SequenceManagerTest, DelayTillNextTask_FenceUnblocking) {
+  CreateTaskQueues(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+  LazyNow lazy_now(GetTickClock());
+  EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// A delayed task whose run time has already passed counts as immediately
+// runnable: DelayTillNextTask() must return zero, not a negative delay.
+TEST_P(SequenceManagerTest, DelayTillNextTask_DelayedTaskReady) {
+  CreateTaskQueues(1u);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromSeconds(1));
+
+  test_task_runner_->AdvanceMockTickClock(TimeDelta::FromSeconds(10));
+
+  LazyNow lazy_now(GetTickClock());
+  EXPECT_EQ(TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+namespace {
+// Spins a nested run loop that is quit by a delayed task; the clock is
+// advanced past the quit task's delay before entering the loop.
+void MessageLoopTaskWithDelayedQuit(SimpleTestTickClock* now_src,
+                                    scoped_refptr<TaskQueue> task_queue) {
+  RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+  task_queue->PostDelayedTask(FROM_HERE, run_loop.QuitClosure(),
+                              TimeDelta::FromMilliseconds(100));
+  now_src->Advance(TimeDelta::FromMilliseconds(200));
+  run_loop.Run();
+}
+}  // namespace
+
+// A delayed task whose delay has elapsed must run inside a nested run loop
+// (otherwise the nested loop would never quit and this test would hang).
+TEST_P(SequenceManagerTestWithMessageLoop, DelayedTaskRunsInNestedMessageLoop) {
+  CreateTaskQueues(1u);
+  RunLoop run_loop;
+  runners_[0]->PostTask(FROM_HERE,
+                        BindOnce(&MessageLoopTaskWithDelayedQuit, &mock_clock_,
+                                 RetainedRef(runners_[0])));
+  run_loop.RunUntilIdle();
+}
+
+namespace {
+// Spins a nested run loop quit by an immediate task, then runs the supplied
+// closure to quit the outer (non-nested) loop.
+void MessageLoopTaskWithImmediateQuit(OnceClosure non_nested_quit_closure,
+                                      scoped_refptr<TaskQueue> task_queue) {
+  RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+  // Needed because entering the nested run loop causes a DoWork to get
+  // posted.
+  task_queue->PostTask(FROM_HERE, BindOnce(&NopTask));
+  task_queue->PostTask(FROM_HERE, run_loop.QuitClosure());
+  run_loop.Run();
+  std::move(non_nested_quit_closure).Run();
+}
+}  // namespace
+
+// Entering a nested run loop from a delayed task must not block immediate
+// tasks posted inside the nested loop from running.
+TEST_P(SequenceManagerTestWithMessageLoop,
+       DelayedNestedMessageLoopDoesntPreventTasksRunning) {
+  CreateTaskQueues(1u);
+  RunLoop run_loop;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&MessageLoopTaskWithImmediateQuit, run_loop.QuitClosure(),
+               RetainedRef(runners_[0])),
+      TimeDelta::FromMilliseconds(100));
+
+  mock_clock_.Advance(TimeDelta::FromMilliseconds(200));
+  run_loop.Run();
+}
+
+// CouldTaskRun() must track the queue's enabled state: false while disabled,
+// true again after re-enabling.
+TEST_P(SequenceManagerTest, CouldTaskRun_DisableAndReenable) {
+  CreateTaskQueues(1u);
+
+  EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  voter->SetQueueEnabled(true);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// CouldTaskRun() must respect fences: a kNow fence inserted after the
+// sequence number still allows the task; a kBeginningOfTime fence blocks it;
+// removing the fence unblocks it.
+TEST_P(SequenceManagerTest, CouldTaskRun_Fence) {
+  CreateTaskQueues(1u);
+
+  EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->RemoveFence();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// A sequence number taken after a kNow fence is blocked, but re-inserting the
+// fence at the new "now" moves it past that number and unblocks it.
+TEST_P(SequenceManagerTest, CouldTaskRun_FenceBeforeThenAfter) {
+  CreateTaskQueues(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+  EnqueueOrder enqueue_order = manager_->GetNextSequenceNumber();
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// Disabling the only queue with a pending delayed task must cancel the
+// scheduled DoWork; re-enabling must reschedule it with the original delay.
+TEST_P(SequenceManagerTest, DelayedDoWorkNotPostedForDisabledQueue) {
+  CreateTaskQueues(1u);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromMilliseconds(1));
+  ASSERT_TRUE(test_task_runner_->HasPendingTask());
+  EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+            test_task_runner_->NextPendingTaskDelay());
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  EXPECT_FALSE(test_task_runner_->HasPendingTask());
+
+  voter->SetQueueEnabled(true);
+  ASSERT_TRUE(test_task_runner_->HasPendingTask());
+  EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+            test_task_runner_->NextPendingTaskDelay());
+}
+
+// As queues are disabled one by one, the next DoWork delay should advance to
+// the earliest delayed task among the queues that remain enabled, and no
+// DoWork should be pending once all queues are disabled.
+TEST_P(SequenceManagerTest, DisablingQueuesChangesDelayTillNextDoWork) {
+  CreateTaskQueues(3u);
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromMilliseconds(1));
+  runners_[1]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromMilliseconds(10));
+  runners_[2]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromMilliseconds(100));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+      runners_[1]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+      runners_[2]->CreateQueueEnabledVoter();
+
+  ASSERT_TRUE(test_task_runner_->HasPendingTask());
+  EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+            test_task_runner_->NextPendingTaskDelay());
+
+  voter0->SetQueueEnabled(false);
+  ASSERT_TRUE(test_task_runner_->HasPendingTask());
+  EXPECT_EQ(TimeDelta::FromMilliseconds(10),
+            test_task_runner_->NextPendingTaskDelay());
+
+  voter1->SetQueueEnabled(false);
+  ASSERT_TRUE(test_task_runner_->HasPendingTask());
+  EXPECT_EQ(TimeDelta::FromMilliseconds(100),
+            test_task_runner_->NextPendingTaskDelay());
+
+  voter2->SetQueueEnabled(false);
+  EXPECT_FALSE(test_task_runner_->HasPendingTask());
+}
+
+// GetNextScheduledWakeUp() should track the earliest pending delayed task,
+// report nullopt while the queue is disabled, and be unaffected by immediate
+// tasks and fences.
+TEST_P(SequenceManagerTest, GetNextScheduledWakeUp) {
+  CreateTaskQueues(1u);
+
+  EXPECT_EQ(nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+  TimeTicks start_time = manager_->NowTicks();
+  TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+  TimeDelta delay2 = TimeDelta::FromMilliseconds(2);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1);
+  EXPECT_EQ(start_time + delay1, runners_[0]->GetNextScheduledWakeUp());
+
+  // A shorter delay becomes the new earliest wake-up.
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay2);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // We don't have wake-ups scheduled for disabled queues.
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  EXPECT_EQ(nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+  voter->SetQueueEnabled(true);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // Immediate tasks shouldn't make any difference.
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&NopTask));
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // Neither should fences.
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+}
+
+// Switching the time domain of a disabled queue must not notify the queue's
+// observer about a wake-up change, since the queue has no scheduled wake-up.
+TEST_P(SequenceManagerTest, SetTimeDomainForDisabledQueue) {
+  CreateTaskQueues(1u);
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+                               TimeDelta::FromMilliseconds(1));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+
+  // We should not get a notification for a disabled queue.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+
+  std::unique_ptr<MockTimeDomain> domain =
+      std::make_unique<MockTimeDomain>(manager_->NowTicks());
+  manager_->RegisterTimeDomain(domain.get());
+  runners_[0]->SetTimeDomain(domain.get());
+
+  // Tidy up.
+  runners_[0]->ShutdownTaskQueue();
+  manager_->UnregisterTimeDomain(domain.get());
+}
+
+namespace {
+// Installs handlers on |task_queue| that bump |start_counter| /
+// |complete_counter| every time a task on that queue starts / completes.
+void SetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue,
+                       int* start_counter,
+                       int* complete_counter) {
+  task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(BindRepeating(
+      [](int* counter, const TaskQueue::Task& task,
+         const TaskQueue::TaskTiming& task_timing) { ++(*counter); },
+      start_counter));
+  task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(BindRepeating(
+      [](int* counter, const TaskQueue::Task& task,
+         const TaskQueue::TaskTiming& task_timing) { ++(*counter); },
+      complete_counter));
+}
+
+// Clears both handlers by installing null (default-constructed) callbacks.
+void UnsetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue) {
+  task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(
+      internal::TaskQueueImpl::OnTaskStartedHandler());
+  // Fixed: previously constructed an OnTaskStartedHandler here, which only
+  // compiled because the two callback aliases share a signature.
+  task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(
+      internal::TaskQueueImpl::OnTaskCompletedHandler());
+}
+}  // namespace
+
+// On-task-started/completed handlers should fire for tasks run while they are
+// installed and stop firing (counters frozen) once they are removed.
+TEST_P(SequenceManagerTest, ProcessTasksWithoutTaskTimeObservers) {
+  CreateTaskQueues(1u);
+  int start_counter = 0;
+  int complete_counter = 0;
+  std::vector<EnqueueOrder> run_order;
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 3);
+  EXPECT_EQ(complete_counter, 3);
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u));
+
+  // After removal the counters must not advance, but tasks still run.
+  UnsetOnTaskHandlers(runners_[0]);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 3);
+  EXPECT_EQ(complete_counter, 3);
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+}
+
+// Exercises the interaction between a manager-level TaskTimeObserver and
+// queue-level task handlers: RequiresTaskTiming() must reflect whether the
+// handlers are installed, and the counters only advance while they are.
+TEST_P(SequenceManagerTest, ProcessTasksWithTaskTimeObservers) {
+  CreateTaskQueues(1u);
+  int start_counter = 0;
+  int complete_counter = 0;
+
+  manager_->AddTaskTimeObserver(&test_task_time_observer_);
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  std::vector<EnqueueOrder> run_order;
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+
+  // Handlers removed; the time observer alone must not bump the counters.
+  UnsetOnTaskHandlers(runners_[0]);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 3, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 4, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u));
+
+  manager_->RemoveTaskTimeObserver(&test_task_time_observer_);
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 5, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 6, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u));
+
+  // Re-installing the handlers re-enables counting without an observer.
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 7, &run_order));
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 8, &run_order));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(start_counter, 4);
+  EXPECT_EQ(complete_counter, 4);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u, 3u, 4u, 5u, 6u, 7u, 8u));
+  UnsetOnTaskHandlers(runners_[0]);
+}
+
+// Dropping the last reference to a TaskQueue moves it to the
+// queues-to-shutdown list; its already-posted tasks still run to completion
+// before the queue is finally unregistered and deleted.
+TEST_P(SequenceManagerTest, GracefulShutdown) {
+  std::vector<TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+  WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+        TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+  main_tq = nullptr;
+  // Ensure that task queue went away.
+  EXPECT_FALSE(main_tq_weak_ptr.get());
+
+  test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  // Even with TaskQueue gone, tasks are executed.
+  EXPECT_THAT(run_times,
+              ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+                          start_time_ + TimeDelta::FromMilliseconds(200),
+                          start_time_ + TimeDelta::FromMilliseconds(300),
+                          start_time_ + TimeDelta::FromMilliseconds(400),
+                          start_time_ + TimeDelta::FromMilliseconds(500)));
+
+  EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+}
+
+// Destroying the SequenceManager while many queues are pending graceful
+// shutdown must not leak, and tasks scheduled after destruction never run
+// (only the two tasks that fired before the fast-forward stop are recorded).
+TEST_P(SequenceManagerTest, GracefulShutdown_ManagerDeletedInFlight) {
+  std::vector<TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> control_tq = CreateTaskQueue();
+  std::vector<scoped_refptr<TestTaskQueue>> main_tqs;
+  std::vector<WeakPtr<TestTaskQueue>> main_tq_weak_ptrs;
+
+  // There might be a race condition - async task queues should be unregistered
+  // first. Increase the number of task queues to surely detect that.
+  // The problem is that pointers are compared in a set and generally for
+  // a small number of allocations value of the pointers increases
+  // monotonically. 100 is large enough to force allocations from different
+  // pages.
+  const int N = 100;
+  for (int i = 0; i < N; ++i) {
+    scoped_refptr<TestTaskQueue> tq = CreateTaskQueue();
+    main_tq_weak_ptrs.push_back(tq->GetWeakPtr());
+    main_tqs.push_back(std::move(tq));
+  }
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tqs[0]->PostDelayedTask(
+        FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+        TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+  main_tqs.clear();
+  // Ensure that task queues went away.
+  for (int i = 0; i < N; ++i) {
+    EXPECT_FALSE(main_tq_weak_ptrs[i].get());
+  }
+
+  // No leaks should occur when TQM was destroyed before processing
+  // shutdown task and TaskQueueImpl should be safely deleted on a correct
+  // thread.
+  manager_.reset();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  EXPECT_THAT(run_times,
+              ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+                          start_time_ + TimeDelta::FromMilliseconds(200)));
+}
+
+// Same as GracefulShutdown, but the manager is destroyed while the queue is
+// still on the queues-to-shutdown list; the queue must be unregistered
+// cleanly and no further tasks run after the manager is gone.
+TEST_P(SequenceManagerTest,
+       GracefulShutdown_ManagerDeletedWithQueuesToShutdown) {
+  std::vector<TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+  WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+        TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(250));
+
+  main_tq = nullptr;
+  // Ensure that task queue went away.
+  EXPECT_FALSE(main_tq_weak_ptr.get());
+
+  test_task_runner_->FastForwardBy(TimeDelta::FromMilliseconds(1));
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  // Ensure that all queues-to-gracefully-shutdown are properly unregistered.
+  manager_.reset();
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  EXPECT_THAT(run_times,
+              ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+                          start_time_ + TimeDelta::FromMilliseconds(200)));
+}
+
+// SetDefaultTaskRunner() should override the MessageLoop's task runner for
+// the manager's lifetime and restore the original runner when the manager is
+// destroyed.
+TEST_P(SequenceManagerTestWithCustomInitialization, DefaultTaskRunnerSupport) {
+  MessageLoop message_loop;
+  scoped_refptr<SingleThreadTaskRunner> original_task_runner =
+      message_loop.task_runner();
+  scoped_refptr<SingleThreadTaskRunner> custom_task_runner =
+      MakeRefCounted<TestSimpleTaskRunner>();
+  {
+    std::unique_ptr<SequenceManagerForTest> manager =
+        SequenceManagerForTest::Create(&message_loop,
+                                       message_loop.task_runner(), nullptr);
+    manager->SetDefaultTaskRunner(custom_task_runner);
+    // EXPECT_EQ rather than DCHECK_EQ: DCHECK compiles to nothing in
+    // non-DCHECK builds, which would silently skip this verification.
+    EXPECT_EQ(custom_task_runner, message_loop.task_runner());
+  }
+  EXPECT_EQ(original_task_runner, message_loop.task_runner());
+}
+
+// Canceling the first tasks in queue 0 must not let queue 0's remaining task
+// jump ahead of queue 1's task in the cross-queue ordering.
+TEST_P(SequenceManagerTest, CanceledTasksInQueueCantMakeOtherTasksSkipAhead) {
+  CreateTaskQueues(2u);
+
+  CancelableTask task1(GetTickClock());
+  CancelableTask task2(GetTickClock());
+  std::vector<TimeTicks> run_times;
+
+  runners_[0]->PostTask(FROM_HERE,
+                        BindOnce(&CancelableTask::RecordTimeTask,
+                                 task1.weak_factory_.GetWeakPtr(), &run_times));
+  runners_[0]->PostTask(FROM_HERE,
+                        BindOnce(&CancelableTask::RecordTimeTask,
+                                 task2.weak_factory_.GetWeakPtr(), &run_times));
+
+  std::vector<EnqueueOrder> run_order;
+  runners_[1]->PostTask(FROM_HERE, BindOnce(&TestTask, 1, &run_order));
+
+  runners_[0]->PostTask(FROM_HERE, BindOnce(&TestTask, 2, &run_order));
+
+  // Invalidating the weak pointers cancels the first two queue-0 tasks.
+  task1.weak_factory_.InvalidateWeakPtrs();
+  task2.weak_factory_.InvalidateWeakPtrs();
+  RunLoop().RunUntilIdle();
+
+  EXPECT_THAT(run_order, ElementsAre(1u, 2u));
+}
+
+// Releasing the last TaskQueue reference on a different thread should still
+// trigger graceful shutdown on the main thread; posted tasks keep running.
+TEST_P(SequenceManagerTest, TaskQueueDeletedOnAnotherThread) {
+  std::vector<TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+  int start_counter = 0;
+  int complete_counter = 0;
+  SetOnTaskHandlers(main_tq, &start_counter, &complete_counter);
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, BindOnce(&RecordTimeTask, &run_times, GetTickClock()),
+        TimeDelta::FromMilliseconds(i * 100));
+  }
+
+  // TODO(altimin): do not do this after switching to weak pointer-based
+  // task handlers.
+  UnsetOnTaskHandlers(main_tq);
+
+  WaitableEvent task_queue_deleted(WaitableEvent::ResetPolicy::MANUAL,
+                                   WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<Thread> thread = std::make_unique<Thread>("test thread");
+  thread->StartAndWaitForTesting();
+
+  // The lambda takes ownership of the last reference and drops it off-thread.
+  thread->task_runner()->PostTask(
+      FROM_HERE, BindOnce(
+                     [](scoped_refptr<SingleThreadTaskRunner> task_queue,
+                        WaitableEvent* task_queue_deleted) {
+                       task_queue = nullptr;
+                       task_queue_deleted->Signal();
+                     },
+                     std::move(main_tq), &task_queue_deleted));
+  task_queue_deleted.Wait();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  test_task_runner_->FastForwardUntilNoTasksRemain();
+
+  // Even with TaskQueue gone, tasks are executed.
+  EXPECT_THAT(run_times,
+              ElementsAre(start_time_ + TimeDelta::FromMilliseconds(100),
+                          start_time_ + TimeDelta::FromMilliseconds(200),
+                          start_time_ + TimeDelta::FromMilliseconds(300),
+                          start_time_ + TimeDelta::FromMilliseconds(400),
+                          start_time_ + TimeDelta::FromMilliseconds(500)));
+
+  EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  thread->Stop();
+}
+
+namespace {
+
+void DoNothing() {}
+
+// Helper whose destructor posts a task back to the queue it holds; used to
+// verify that posting from a task destructor after shutdown doesn't deadlock.
+class PostTaskInDestructor {
+ public:
+  explicit PostTaskInDestructor(scoped_refptr<TaskQueue> task_queue)
+      : task_queue_(task_queue) {}
+
+  ~PostTaskInDestructor() {
+    task_queue_->PostTask(FROM_HERE, BindOnce(&DoNothing));
+  }
+
+  void Do() {}
+
+ private:
+  scoped_refptr<TaskQueue> task_queue_;
+};
+
+}  // namespace
+
+TEST_P(SequenceManagerTest, TaskQueueUsedInTaskDestructorAfterShutdown) {
+  // This test checks that when a task is posted to a shutdown queue and
+  // destroyed, it can try to post a task to the same queue without deadlocks.
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+  WaitableEvent test_executed(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<Thread> thread = std::make_unique<Thread>("test thread");
+  thread->StartAndWaitForTesting();
+
+  // Shut the manager down first so the posts below hit a dead queue.
+  manager_.reset();
+
+  thread->task_runner()->PostTask(
+      FROM_HERE, BindOnce(
+                     [](scoped_refptr<SingleThreadTaskRunner> task_queue,
+                        std::unique_ptr<PostTaskInDestructor> test_object,
+                        WaitableEvent* test_executed) {
+                       task_queue->PostTask(FROM_HERE,
+                                            BindOnce(&PostTaskInDestructor::Do,
+                                                     std::move(test_object)));
+                       test_executed->Signal();
+                     },
+                     main_tq, std::make_unique<PostTaskInDestructor>(main_tq),
+                     &test_executed));
+  test_executed.Wait();
+}
+
+} // namespace sequence_manager_impl_unittest
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/sequence_manager_perftest.cc b/base/task/sequence_manager/sequence_manager_perftest.cc
new file mode 100644
index 0000000000..c5cd1a00f1
--- /dev/null
+++ b/base/task/sequence_manager/sequence_manager_perftest.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/sequence_manager.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/test/sequence_manager_for_test.h"
+#include "base/task/sequence_manager/test/test_task_queue.h"
+#include "base/task/sequence_manager/test/test_task_time_observer.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/default_tick_clock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+namespace sequence_manager {
+
+// To reduce noise related to the OS timer, we use a mock time domain to
+// fast forward the timers.
+class PerfTestTimeDomain : public MockTimeDomain {
+ public:
+  PerfTestTimeDomain() : MockTimeDomain(TimeTicks::Now()) {}
+  ~PerfTestTimeDomain() override = default;
+
+  // Jumps the mock clock straight to the next scheduled run time, so the
+  // benchmark never waits on a real timer.
+  Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override {
+    Optional<TimeTicks> run_time = NextScheduledRunTime();
+    if (!run_time)
+      return nullopt;
+    SetNowTicks(*run_time);
+    // Makes the SequenceManager continue immediately.
+    return TimeDelta();
+  }
+
+  void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override {
+    // De-dupe DoWorks: only request one when the first wake-up gets scheduled.
+    if (NumberOfScheduledWakeUps() == 1u)
+      RequestDoWork();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PerfTestTimeDomain);
+};
+
+// Benchmark fixture. TestDelayedTask()/TestImmediateTask() previously
+// duplicated ~20 lines of task-posting logic (with a stale copy-pasted
+// comment in the immediate variant); the shared logic now lives in
+// PostNextBatch().
+class SequenceManagerPerfTest : public testing::Test {
+ public:
+  SequenceManagerPerfTest()
+      : num_queues_(0),
+        max_tasks_in_flight_(0),
+        num_tasks_in_flight_(0),
+        num_tasks_to_post_(0),
+        num_tasks_to_run_(0) {}
+
+  void SetUp() override {
+    if (ThreadTicks::IsSupported())
+      ThreadTicks::WaitUntilInitialized();
+  }
+
+  void TearDown() override {
+    queues_.clear();
+    manager_->UnregisterTimeDomain(time_domain_.get());
+    manager_.reset();
+  }
+
+  // Creates |num_queues| task queues driven by the mock time domain so
+  // delayed tasks can run without real waits.
+  void Initialize(size_t num_queues) {
+    num_queues_ = num_queues;
+    message_loop_.reset(new MessageLoop());
+    manager_ = SequenceManagerForTest::Create(message_loop_.get(),
+                                              message_loop_->task_runner(),
+                                              DefaultTickClock::GetInstance());
+    manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+    time_domain_.reset(new PerfTestTimeDomain());
+    manager_->RegisterTimeDomain(time_domain_.get());
+
+    for (size_t i = 0; i < num_queues; i++) {
+      queues_.push_back(manager_->CreateTaskQueue<TestTaskQueue>(
+          TaskQueue::Spec("test").SetTimeDomain(time_domain_.get())));
+    }
+
+    delayed_task_closure_ = BindRepeating(
+        &SequenceManagerPerfTest::TestDelayedTask, Unretained(this));
+
+    immediate_task_closure_ = BindRepeating(
+        &SequenceManagerPerfTest::TestImmediateTask, Unretained(this));
+  }
+
+  // Task body for the delayed-task benchmarks.
+  void TestDelayedTask() {
+    if (--num_tasks_to_run_ == 0) {
+      run_loop_->QuitWhenIdle();
+      return;
+    }
+    PostNextBatch(/*delayed=*/true);
+  }
+
+  // Task body for the immediate-task benchmarks.
+  void TestImmediateTask() {
+    if (--num_tasks_to_run_ == 0) {
+      run_loop_->QuitWhenIdle();
+      return;
+    }
+    PostNextBatch(/*delayed=*/false);
+  }
+
+  // Shared posting logic for TestDelayedTask() and TestImmediateTask().
+  // NOTE there are only up to max_tasks_in_flight_ pending tasks at any one
+  // time. Thanks to the lower_num_tasks_to_post going to zero if there are a
+  // lot of tasks in flight, the total number of tasks in flight at any one
+  // time is very variable.
+  void PostNextBatch(bool delayed) {
+    num_tasks_in_flight_--;
+    unsigned int lower_num_tasks_to_post =
+        num_tasks_in_flight_ < (max_tasks_in_flight_ / 2) ? 1 : 0;
+    unsigned int max_tasks_to_post =
+        num_tasks_to_post_ % 2 ? lower_num_tasks_to_post : 10;
+    for (unsigned int i = 0;
+         i < max_tasks_to_post && num_tasks_in_flight_ < max_tasks_in_flight_ &&
+         num_tasks_to_post_ > 0;
+         i++) {
+      // Choose a queue weighted towards queue 0.
+      unsigned int queue = num_tasks_to_post_ % (num_queues_ + 1);
+      if (queue == num_queues_) {
+        queue = 0;
+      }
+      if (delayed) {
+        // Simulate a mix of short and longer delays.
+        unsigned int delay =
+            num_tasks_to_post_ % 2 ? 1 : (10 + num_tasks_to_post_ % 10);
+        queues_[queue]->PostDelayedTask(FROM_HERE, delayed_task_closure_,
+                                        TimeDelta::FromMilliseconds(delay));
+      } else {
+        queues_[queue]->PostTask(FROM_HERE, immediate_task_closure_);
+      }
+      num_tasks_in_flight_++;
+      num_tasks_to_post_--;
+    }
+  }
+
+  void ResetAndCallTestDelayedTask(unsigned int num_tasks_to_run) {
+    num_tasks_in_flight_ = 1;
+    num_tasks_to_post_ = num_tasks_to_run;
+    num_tasks_to_run_ = num_tasks_to_run;
+    TestDelayedTask();
+  }
+
+  void ResetAndCallTestImmediateTask(unsigned int num_tasks_to_run) {
+    num_tasks_in_flight_ = 1;
+    num_tasks_to_post_ = num_tasks_to_run;
+    num_tasks_to_run_ = num_tasks_to_run;
+    TestImmediateTask();
+  }
+
+  // Runs |test_task| repeatedly for ~5 wall seconds and reports the mean
+  // time per iteration.
+  void Benchmark(const std::string& trace, const RepeatingClosure& test_task) {
+    ThreadTicks start = ThreadTicks::Now();
+    ThreadTicks now;
+    unsigned long long num_iterations = 0;
+    do {
+      test_task.Run();
+      run_loop_.reset(new RunLoop());
+      run_loop_->Run();
+      now = ThreadTicks::Now();
+      num_iterations++;
+    } while (now - start < TimeDelta::FromSeconds(5));
+    perf_test::PrintResult(
+        "task", "", trace,
+        (now - start).InMicroseconds() / static_cast<double>(num_iterations),
+        "us/run", true);
+  }
+
+  size_t num_queues_;
+  unsigned int max_tasks_in_flight_;
+  unsigned int num_tasks_in_flight_;
+  unsigned int num_tasks_to_post_;
+  unsigned int num_tasks_to_run_;
+  std::unique_ptr<MessageLoop> message_loop_;
+  std::unique_ptr<SequenceManager> manager_;
+  std::unique_ptr<RunLoop> run_loop_;
+  std::unique_ptr<TimeDomain> time_domain_;
+  std::vector<scoped_refptr<SingleThreadTaskRunner>> queues_;
+  RepeatingClosure delayed_task_closure_;
+  RepeatingClosure immediate_task_closure_;
+  // TODO(alexclarke): parameterize so we can measure with and without a
+  // TaskTimeObserver.
+  TestTaskTimeObserver test_task_time_observer_;
+};
+
+// Delayed-task throughput benchmarks at 1/4/8/32 queues. ThreadTicks is
+// required for timing; each run posts 10000 tasks with up to 200 in flight.
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_OneQueue) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(1u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with one queue",
+            BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+                          Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_FourQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(4u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with four queues",
+            BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+                          Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_EightQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(8u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with eight queues",
+            BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+                          Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandDelayedTasks_ThirtyTwoQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(32u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with thirty two queues",
+            BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestDelayedTask,
+                          Unretained(this), 10000));
+}
+
+// Immediate-task throughput benchmarks at 1/4/8/32 queues, mirroring the
+// delayed-task benchmarks above.
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_OneQueue) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(1u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark(
+      "run 10000 immediate tasks with one queue",
+      BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+                    Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_FourQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(4u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark(
+      "run 10000 immediate tasks with four queues",
+      BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+                    Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_EightQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(8u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark(
+      "run 10000 immediate tasks with eight queues",
+      BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+                    Unretained(this), 10000));
+}
+
+TEST_F(SequenceManagerPerfTest, RunTenThousandImmediateTasks_ThirtyTwoQueues) {
+  if (!ThreadTicks::IsSupported())
+    return;
+  Initialize(32u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark(
+      "run 10000 immediate tasks with thirty two queues",
+      BindRepeating(&SequenceManagerPerfTest::ResetAndCallTestImmediateTask,
+                    Unretained(this), 10000));
+}
+
+// TODO(alexclarke): Add additional tests with different mixes of non-delayed vs
+// delayed tasks.
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/sequenced_task_source.h b/base/task/sequence_manager/sequenced_task_source.h
new file mode 100644
index 0000000000..285f85377d
--- /dev/null
+++ b/base/task/sequence_manager/sequenced_task_source.h
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
+
+#include "base/optional.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/lazy_now.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// Interface to pass tasks to ThreadController.
+class SequencedTaskSource {
+ public:
+  // Virtual destructor: implementations may be destroyed through a
+  // SequencedTaskSource pointer; without this that would be UB.
+  virtual ~SequencedTaskSource() = default;
+
+  // Returns the next task to run from this source or nullopt if
+  // there're no more tasks ready to run. If a task is returned,
+  // DidRunTask() must be invoked before the next call to TakeTask().
+  virtual Optional<PendingTask> TakeTask() = 0;
+
+  // Notifies this source that the task previously obtained
+  // from TakeTask() has been completed.
+  virtual void DidRunTask() = 0;
+
+  // Returns the delay till the next task or TimeDelta::Max()
+  // if there are no tasks left.
+  virtual TimeDelta DelayTillNextTask(LazyNow* lazy_now) = 0;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
diff --git a/base/task/sequence_manager/task_queue.cc b/base/task/sequence_manager/task_queue.cc
new file mode 100644
index 0000000000..2d3d1525d6
--- /dev/null
+++ b/base/task/sequence_manager/task_queue.cc
@@ -0,0 +1,289 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue.h"
+
+#include "base/bind.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec)
+ : impl_(std::move(impl)),
+ thread_id_(PlatformThread::CurrentId()),
+ sequence_manager_(impl_ ? impl_->GetSequenceManagerWeakPtr() : nullptr),
+ graceful_queue_shutdown_helper_(
+ impl_ ? impl_->GetGracefulQueueShutdownHelper() : nullptr) {}
+
+TaskQueue::~TaskQueue() {
+ // scoped_refptr guarantees us that this object isn't used.
+ if (!impl_)
+ return;
+ if (impl_->IsUnregistered())
+ return;
+ graceful_queue_shutdown_helper_->GracefullyShutdownTaskQueue(
+ TakeTaskQueueImpl());
+}
+
+TaskQueue::Task::Task(TaskQueue::PostedTask task, TimeTicks desired_run_time)
+ : PendingTask(task.posted_from,
+ std::move(task.callback),
+ desired_run_time,
+ task.nestable),
+ task_type_(task.task_type) {}
+
+TaskQueue::TaskTiming::TaskTiming(bool has_wall_time, bool has_thread_time)
+ : has_wall_time_(has_wall_time), has_thread_time_(has_thread_time) {}
+
+void TaskQueue::TaskTiming::RecordTaskStart(LazyNow* now) {
+ if (has_wall_time())
+ start_time_ = now->Now();
+ if (has_thread_time())
+ start_thread_time_ = base::ThreadTicks::Now();
+}
+
+void TaskQueue::TaskTiming::RecordTaskEnd(LazyNow* now) {
+ if (has_wall_time())
+ end_time_ = now->Now();
+ if (has_thread_time())
+ end_thread_time_ = base::ThreadTicks::Now();
+}
+
+TaskQueue::PostedTask::PostedTask(OnceClosure callback,
+ Location posted_from,
+ TimeDelta delay,
+ Nestable nestable,
+ int task_type)
+ : callback(std::move(callback)),
+ posted_from(posted_from),
+ delay(delay),
+ nestable(nestable),
+ task_type(task_type) {}
+
+TaskQueue::PostedTask::PostedTask(PostedTask&& move_from)
+ : callback(std::move(move_from.callback)),
+ posted_from(move_from.posted_from),
+ delay(move_from.delay),
+ nestable(move_from.nestable),
+ task_type(move_from.task_type) {}
+
+TaskQueue::PostedTask::~PostedTask() = default;
+
+void TaskQueue::ShutdownTaskQueue() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ AutoLock lock(impl_lock_);
+ if (!impl_)
+ return;
+ if (!sequence_manager_) {
+ impl_.reset();
+ return;
+ }
+ impl_->SetBlameContext(nullptr);
+ impl_->SetOnTaskStartedHandler(
+ internal::TaskQueueImpl::OnTaskStartedHandler());
+ impl_->SetOnTaskCompletedHandler(
+ internal::TaskQueueImpl::OnTaskCompletedHandler());
+ sequence_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
+}
+
+bool TaskQueue::RunsTasksInCurrentSequence() const {
+ return IsOnMainThread();
+}
+
+bool TaskQueue::PostDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
+ return PostTaskWithMetadata(
+ PostedTask(std::move(task), from_here, delay, Nestable::kNestable));
+}
+
+bool TaskQueue::PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
+ return PostTaskWithMetadata(
+ PostedTask(std::move(task), from_here, delay, Nestable::kNonNestable));
+}
+
+bool TaskQueue::PostTaskWithMetadata(PostedTask task) {
+ Optional<MoveableAutoLock> lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return false;
+ internal::TaskQueueImpl::PostTaskResult result(
+ impl_->PostDelayedTask(std::move(task)));
+ if (result.success)
+ return true;
+ // If posting task was unsuccessful then |result| will contain
+ // the original task which should be destructed outside of the lock.
+ lock = nullopt;
+ // Task gets implicitly destructed here.
+ return false;
+}
+
+std::unique_ptr<TaskQueue::QueueEnabledVoter>
+TaskQueue::CreateQueueEnabledVoter() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullptr;
+ return impl_->CreateQueueEnabledVoter(this);
+}
+
+bool TaskQueue::IsQueueEnabled() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->IsQueueEnabled();
+}
+
+bool TaskQueue::IsEmpty() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return true;
+ return impl_->IsEmpty();
+}
+
+size_t TaskQueue::GetNumberOfPendingTasks() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return 0;
+ return impl_->GetNumberOfPendingTasks();
+}
+
+bool TaskQueue::HasTaskToRunImmediately() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->HasTaskToRunImmediately();
+}
+
+Optional<TimeTicks> TaskQueue::GetNextScheduledWakeUp() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullopt;
+ return impl_->GetNextScheduledWakeUp();
+}
+
+void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetQueuePriority(priority);
+}
+
+TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return TaskQueue::QueuePriority::kLowPriority;
+ return impl_->GetQueuePriority();
+}
+
+void TaskQueue::AddTaskObserver(MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->AddTaskObserver(task_observer);
+}
+
+void TaskQueue::RemoveTaskObserver(MessageLoop::TaskObserver* task_observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->RemoveTaskObserver(task_observer);
+}
+
+void TaskQueue::SetTimeDomain(TimeDomain* time_domain) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetTimeDomain(time_domain);
+}
+
+TimeDomain* TaskQueue::GetTimeDomain() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return nullptr;
+ return impl_->GetTimeDomain();
+}
+
+void TaskQueue::SetBlameContext(trace_event::BlameContext* blame_context) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->SetBlameContext(blame_context);
+}
+
+void TaskQueue::InsertFence(InsertFencePosition position) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->InsertFence(position);
+}
+
+void TaskQueue::InsertFenceAt(TimeTicks time) {
+ impl_->InsertFenceAt(time);
+}
+
+void TaskQueue::RemoveFence() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->RemoveFence();
+}
+
+bool TaskQueue::HasActiveFence() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->HasActiveFence();
+}
+
+bool TaskQueue::BlockedByFence() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return false;
+ return impl_->BlockedByFence();
+}
+
+const char* TaskQueue::GetName() const {
+ auto lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return "";
+ return impl_->GetName();
+}
+
+void TaskQueue::SetObserver(Observer* observer) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ if (observer) {
+ // Observer is guaranteed to outlive TaskQueue and TaskQueueImpl lifecycle
+ // is controlled by |this|.
+ impl_->SetOnNextWakeUpChangedCallback(
+ BindRepeating(&TaskQueue::Observer::OnQueueNextWakeUpChanged,
+ Unretained(observer), Unretained(this)));
+ } else {
+ impl_->SetOnNextWakeUpChangedCallback(RepeatingCallback<void(TimeTicks)>());
+ }
+}
+
+bool TaskQueue::IsOnMainThread() const {
+ return thread_id_ == PlatformThread::CurrentId();
+}
+
+Optional<MoveableAutoLock> TaskQueue::AcquireImplReadLockIfNeeded() const {
+ if (IsOnMainThread())
+ return nullopt;
+ return MoveableAutoLock(impl_lock_);
+}
+
+std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
+ DCHECK(impl_);
+ return std::move(impl_);
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/task_queue.h b/base/task/sequence_manager/task_queue.h
new file mode 100644
index 0000000000..af6b4dd5da
--- /dev/null
+++ b/base/task/sequence_manager/task_queue.h
@@ -0,0 +1,368 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/optional.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/moveable_auto_lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace trace_event {
+class BlameContext;
+}
+
+namespace sequence_manager {
+
+namespace internal {
+class GracefulQueueShutdownHelper;
+class SequenceManagerImpl;
+class TaskQueueImpl;
+} // namespace internal
+
+class TimeDomain;
+
+class BASE_EXPORT TaskQueue : public SingleThreadTaskRunner {
+ public:
+ class Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Notify observer that the time at which this queue wants to run
+  // the next task has changed. |next_wake_up| can be in the past
+ // (e.g. TimeTicks() can be used to notify about immediate work).
+  // Can be called on any thread.
+ // All methods but SetObserver, SetTimeDomain and GetTimeDomain can be
+ // called on |queue|.
+ //
+ // TODO(altimin): Make it Optional<TimeTicks> to tell
+ // observer about cancellations.
+ virtual void OnQueueNextWakeUpChanged(TaskQueue* queue,
+ TimeTicks next_wake_up) = 0;
+ };
+
+ // A wrapper around OnceClosure with additional metadata to be passed
+ // to PostTask and plumbed until PendingTask is created.
+ struct BASE_EXPORT PostedTask {
+ PostedTask(OnceClosure callback,
+ Location posted_from,
+ TimeDelta delay = TimeDelta(),
+ Nestable nestable = Nestable::kNestable,
+ int task_type = 0);
+ PostedTask(PostedTask&& move_from);
+ PostedTask(const PostedTask& copy_from) = delete;
+ ~PostedTask();
+
+ OnceClosure callback;
+ Location posted_from;
+ TimeDelta delay;
+ Nestable nestable;
+ int task_type;
+ };
+
+ // Prepare the task queue to get released.
+ // All tasks posted after this call will be discarded.
+ virtual void ShutdownTaskQueue();
+
+ // TODO(scheduler-dev): Could we define a more clear list of priorities?
+ // See https://crbug.com/847858.
+ enum QueuePriority {
+ // Queues with control priority will run before any other queue, and will
+ // explicitly starve other queues. Typically this should only be used for
+ // private queues which perform control operations.
+ kControlPriority,
+
+ // The selector will prioritize highest over high, normal and low; and
+ // high over normal and low; and normal over low. However it will ensure
+ // neither of the lower priority queues can be completely starved by higher
+ // priority tasks. All three of these queues will always take priority over
+ // and can starve the best effort queue.
+ kHighestPriority,
+
+ kHighPriority,
+
+ // Queues with normal priority are the default.
+ kNormalPriority,
+ kLowPriority,
+
+ // Queues with best effort priority will only be run if all other queues are
+ // empty. They can be starved by the other queues.
+ kBestEffortPriority,
+ // Must be the last entry.
+ kQueuePriorityCount,
+ kFirstQueuePriority = kControlPriority,
+ };
+
+ // Can be called on any thread.
+ static const char* PriorityToString(QueuePriority priority);
+
+ // Options for constructing a TaskQueue.
+ struct Spec {
+ explicit Spec(const char* name)
+ : name(name),
+ should_monitor_quiescence(false),
+ time_domain(nullptr),
+ should_notify_observers(true) {}
+
+ Spec SetShouldMonitorQuiescence(bool should_monitor) {
+ should_monitor_quiescence = should_monitor;
+ return *this;
+ }
+
+ Spec SetShouldNotifyObservers(bool run_observers) {
+ should_notify_observers = run_observers;
+ return *this;
+ }
+
+ Spec SetTimeDomain(TimeDomain* domain) {
+ time_domain = domain;
+ return *this;
+ }
+
+ const char* name;
+ bool should_monitor_quiescence;
+ TimeDomain* time_domain;
+ bool should_notify_observers;
+ };
+
+ // Interface to pass per-task metadata to RendererScheduler.
+ class BASE_EXPORT Task : public PendingTask {
+ public:
+ Task(PostedTask posted_task, TimeTicks desired_run_time);
+
+ int task_type() const { return task_type_; }
+
+ private:
+ int task_type_;
+ };
+
+ // Information about task execution.
+ //
+ // Wall-time related methods (start_time, end_time, wall_duration) can be
+ // called only when |has_wall_time()| is true.
+  // Thread-time related methods (start_thread_time, end_thread_time,
+ // thread_duration) can be called only when |has_thread_time()| is true.
+ //
+ // start_* should be called after RecordTaskStart.
+ // end_* and *_duration should be called after RecordTaskEnd.
+ class BASE_EXPORT TaskTiming {
+ public:
+ TaskTiming(bool has_wall_time, bool has_thread_time);
+
+ bool has_wall_time() const { return has_wall_time_; }
+ bool has_thread_time() const { return has_thread_time_; }
+
+ base::TimeTicks start_time() const {
+ DCHECK(has_wall_time());
+ return start_time_;
+ }
+ base::TimeTicks end_time() const {
+ DCHECK(has_wall_time());
+ return end_time_;
+ }
+ base::TimeDelta wall_duration() const {
+ DCHECK(has_wall_time());
+ return end_time_ - start_time_;
+ }
+ base::ThreadTicks start_thread_time() const {
+ DCHECK(has_thread_time());
+ return start_thread_time_;
+ }
+ base::ThreadTicks end_thread_time() const {
+ DCHECK(has_thread_time());
+ return end_thread_time_;
+ }
+ base::TimeDelta thread_duration() const {
+ DCHECK(has_thread_time());
+ return end_thread_time_ - start_thread_time_;
+ }
+
+ void RecordTaskStart(LazyNow* now);
+ void RecordTaskEnd(LazyNow* now);
+
+ // Protected for tests.
+ protected:
+ bool has_wall_time_;
+ bool has_thread_time_;
+
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
+ base::ThreadTicks start_thread_time_;
+ base::ThreadTicks end_thread_time_;
+ };
+
+ // An interface that lets the owner vote on whether or not the associated
+ // TaskQueue should be enabled.
+ class QueueEnabledVoter {
+ public:
+ QueueEnabledVoter() = default;
+ virtual ~QueueEnabledVoter() = default;
+
+ // Votes to enable or disable the associated TaskQueue. The TaskQueue will
+ // only be enabled if all the voters agree it should be enabled, or if there
+ // are no voters.
+ // NOTE this must be called on the thread the associated TaskQueue was
+ // created on.
+ virtual void SetQueueEnabled(bool enabled) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(QueueEnabledVoter);
+ };
+
+ // Returns an interface that allows the caller to vote on whether or not this
+ // TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
+ // or if all agree it should be enabled.
+ // NOTE this must be called on the thread this TaskQueue was created by.
+ std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();
+
+ // NOTE this must be called on the thread this TaskQueue was created by.
+ bool IsQueueEnabled() const;
+
+ // Returns true if the queue is completely empty.
+ bool IsEmpty() const;
+
+ // Returns the number of pending tasks in the queue.
+ size_t GetNumberOfPendingTasks() const;
+
+ // Returns true if the queue has work that's ready to execute now.
+ // NOTE: this must be called on the thread this TaskQueue was created by.
+ bool HasTaskToRunImmediately() const;
+
+ // Returns requested run time of next scheduled wake-up for a delayed task
+ // which is not ready to run. If there are no such tasks (immediate tasks
+ // don't count) or the queue is disabled it returns nullopt.
+ // NOTE: this must be called on the thread this TaskQueue was created by.
+ Optional<TimeTicks> GetNextScheduledWakeUp();
+
+ // Can be called on any thread.
+ virtual const char* GetName() const;
+
+ // Set the priority of the queue to |priority|. NOTE this must be called on
+ // the thread this TaskQueue was created by.
+ void SetQueuePriority(QueuePriority priority);
+
+ // Returns the current queue priority.
+ QueuePriority GetQueuePriority() const;
+
+ // These functions can only be called on the same thread that the task queue
+ // manager executes its tasks on.
+ void AddTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer);
+
+ // Set the blame context which is entered and left while executing tasks from
+ // this task queue. |blame_context| must be null or outlive this task queue.
+ // Must be called on the thread this TaskQueue was created by.
+ void SetBlameContext(trace_event::BlameContext* blame_context);
+
+ // Removes the task queue from the previous TimeDomain and adds it to
+ // |domain|. This is a moderately expensive operation.
+ void SetTimeDomain(TimeDomain* domain);
+
+ // Returns the queue's current TimeDomain. Can be called from any thread.
+ TimeDomain* GetTimeDomain() const;
+
+ enum class InsertFencePosition {
+    kNow, // Tasks posted on the queue up to this point may run.
+ // All further tasks are blocked.
+ kBeginningOfTime, // No tasks posted on this queue may run.
+ };
+
+ // Inserts a barrier into the task queue which prevents tasks with an enqueue
+ // order greater than the fence from running until either the fence has been
+ // removed or a subsequent fence has unblocked some tasks within the queue.
+ // Note: delayed tasks get their enqueue order set once their delay has
+ // expired, and non-delayed tasks get their enqueue order set when posted.
+ //
+ // Fences come in three flavours:
+ // - Regular (InsertFence(NOW)) - all tasks posted after this moment
+ // are blocked.
+ // - Fully blocking (InsertFence(kBeginningOfTime)) - all tasks including
+ // already posted are blocked.
+ // - Delayed (InsertFenceAt(timestamp)) - blocks all tasks posted after given
+ // point in time (must be in the future).
+ //
+ // Only one fence can be scheduled at a time. Inserting a new fence
+ // will automatically remove the previous one, regardless of fence type.
+ void InsertFence(InsertFencePosition position);
+ void InsertFenceAt(TimeTicks time);
+
+ // Removes any previously added fence and unblocks execution of any tasks
+ // blocked by it.
+ void RemoveFence();
+
+ // Returns true if the queue has a fence but it isn't necessarily blocking
+ // execution of tasks (it may be the case if tasks enqueue order hasn't
+ // reached the number set for a fence).
+ bool HasActiveFence();
+
+ // Returns true if the queue has a fence which is blocking execution of tasks.
+ bool BlockedByFence() const;
+
+ void SetObserver(Observer* observer);
+
+ // SingleThreadTaskRunner implementation
+ bool RunsTasksInCurrentSequence() const override;
+ bool PostDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) override;
+
+ bool PostTaskWithMetadata(PostedTask task);
+
+ protected:
+ TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec);
+ ~TaskQueue() override;
+
+ internal::TaskQueueImpl* GetTaskQueueImpl() const { return impl_.get(); }
+
+ private:
+ friend class internal::SequenceManagerImpl;
+ friend class internal::TaskQueueImpl;
+
+ bool IsOnMainThread() const;
+
+ Optional<MoveableAutoLock> AcquireImplReadLockIfNeeded() const;
+
+ // TaskQueue has ownership of an underlying implementation but in certain
+ // cases (e.g. detached frames) their lifetime may diverge.
+ // This method should be used to take away the impl for graceful shutdown.
+ // TaskQueue will disregard any calls or posting tasks thereafter.
+ std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();
+
+ // |impl_| can be written to on the main thread but can be read from
+ // any thread.
+ // |impl_lock_| must be acquired when writing to |impl_| or when accessing
+ // it from non-main thread. Reading from the main thread does not require
+ // a lock.
+ mutable Lock impl_lock_;
+ std::unique_ptr<internal::TaskQueueImpl> impl_;
+
+ const PlatformThreadId thread_id_;
+
+ const WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
+
+ const scoped_refptr<internal::GracefulQueueShutdownHelper>
+ graceful_queue_shutdown_helper_;
+
+ THREAD_CHECKER(main_thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
diff --git a/base/task/sequence_manager/task_queue_impl.cc b/base/task/sequence_manager/task_queue_impl.cc
new file mode 100644
index 0000000000..250e8c438c
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_impl.cc
@@ -0,0 +1,1016 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_impl.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/strings/stringprintf.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/time_domain.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/time/time.h"
+#include "base/trace_event/blame_context.h"
+
+namespace base {
+namespace sequence_manager {
+
+// static
+const char* TaskQueue::PriorityToString(TaskQueue::QueuePriority priority) {
+ switch (priority) {
+ case kControlPriority:
+ return "control";
+ case kHighestPriority:
+ return "highest";
+ case kHighPriority:
+ return "high";
+ case kNormalPriority:
+ return "normal";
+ case kLowPriority:
+ return "low";
+ case kBestEffortPriority:
+ return "best_effort";
+ default:
+ NOTREACHED();
+ return nullptr;
+ }
+}
+
+namespace internal {
+
+TaskQueueImpl::TaskQueueImpl(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec)
+ : name_(spec.name),
+ thread_id_(PlatformThread::CurrentId()),
+ any_thread_(sequence_manager, time_domain),
+ main_thread_only_(sequence_manager, this, time_domain),
+ should_monitor_quiescence_(spec.should_monitor_quiescence),
+ should_notify_observers_(spec.should_notify_observers) {
+ DCHECK(time_domain);
+}
+
+TaskQueueImpl::~TaskQueueImpl() {
+#if DCHECK_IS_ON()
+ AutoLock lock(any_thread_lock_);
+ // NOTE this check shouldn't fire because |SequenceManagerImpl::queues_|
+ // contains a strong reference to this TaskQueueImpl and the
+ // SequenceManagerImpl destructor calls UnregisterTaskQueue on all task
+ // queues.
+ DCHECK(!any_thread().sequence_manager)
+ << "UnregisterTaskQueue must be called first!";
+#endif
+}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult()
+ : success(false), task(OnceClosure(), Location()) {}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult(bool success,
+ TaskQueue::PostedTask task)
+ : success(success), task(std::move(task)) {}
+
+TaskQueueImpl::PostTaskResult::PostTaskResult(PostTaskResult&& move_from)
+ : success(move_from.success), task(std::move(move_from.task)) {}
+
+TaskQueueImpl::PostTaskResult::~PostTaskResult() = default;
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Success() {
+ return PostTaskResult(true, TaskQueue::PostedTask(OnceClosure(), Location()));
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Fail(
+ TaskQueue::PostedTask task) {
+ return PostTaskResult(false, std::move(task));
+}
+
+TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number)
+ : TaskQueue::Task(std::move(task), desired_run_time) {
+ // It might wrap around to a negative number but it's handled properly.
+ sequence_num = static_cast<int>(sequence_number);
+}
+
+TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
+ TimeTicks desired_run_time,
+ EnqueueOrder sequence_number,
+ EnqueueOrder enqueue_order)
+ : TaskQueue::Task(std::move(task), desired_run_time),
+ enqueue_order_(enqueue_order) {
+ // It might wrap around to a negative number but it's handled properly.
+ sequence_num = static_cast<int>(sequence_number);
+}
+
+TaskQueueImpl::AnyThread::AnyThread(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain)
+ : sequence_manager(sequence_manager), time_domain(time_domain) {}
+
+TaskQueueImpl::AnyThread::~AnyThread() = default;
+
+TaskQueueImpl::MainThreadOnly::MainThreadOnly(
+ SequenceManagerImpl* sequence_manager,
+ TaskQueueImpl* task_queue,
+ TimeDomain* time_domain)
+ : sequence_manager(sequence_manager),
+ time_domain(time_domain),
+ delayed_work_queue(
+ new WorkQueue(task_queue, "delayed", WorkQueue::QueueType::kDelayed)),
+ immediate_work_queue(new WorkQueue(task_queue,
+ "immediate",
+ WorkQueue::QueueType::kImmediate)),
+ set_index(0),
+ is_enabled_refcount(0),
+ voter_refcount(0),
+ blame_context(nullptr),
+ is_enabled_for_test(true) {}
+
+TaskQueueImpl::MainThreadOnly::~MainThreadOnly() = default;
+
+void TaskQueueImpl::UnregisterTaskQueue() {
+ TaskDeque immediate_incoming_queue;
+
+ {
+ AutoLock lock(any_thread_lock_);
+ AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+
+ if (main_thread_only().time_domain)
+ main_thread_only().time_domain->UnregisterQueue(this);
+
+ if (!any_thread().sequence_manager)
+ return;
+
+ main_thread_only().on_task_completed_handler = OnTaskCompletedHandler();
+ any_thread().time_domain = nullptr;
+ main_thread_only().time_domain = nullptr;
+
+ any_thread().sequence_manager = nullptr;
+ main_thread_only().sequence_manager = nullptr;
+ any_thread().on_next_wake_up_changed_callback =
+ OnNextWakeUpChangedCallback();
+ main_thread_only().on_next_wake_up_changed_callback =
+ OnNextWakeUpChangedCallback();
+ immediate_incoming_queue.swap(immediate_incoming_queue_);
+ }
+
+ // It is possible for a task to hold a scoped_refptr to this, which
+ // will lead to TaskQueueImpl destructor being called when deleting a task.
+ // To avoid use-after-free, we need to clear all fields of a task queue
+ // before starting to delete the tasks.
+ // All work queues and priority queues containing tasks should be moved to
+ // local variables on stack (std::move for unique_ptrs and swap for queues)
+ // before clearing them and deleting tasks.
+
+ // Flush the queues outside of the lock because TSAN complains about a lock
+ // order inversion for tasks that are posted from within a lock, with a
+ // destructor that acquires the same lock.
+
+ std::priority_queue<Task> delayed_incoming_queue;
+ delayed_incoming_queue.swap(main_thread_only().delayed_incoming_queue);
+
+ std::unique_ptr<WorkQueue> immediate_work_queue =
+ std::move(main_thread_only().immediate_work_queue);
+ std::unique_ptr<WorkQueue> delayed_work_queue =
+ std::move(main_thread_only().delayed_work_queue);
+}
+
+const char* TaskQueueImpl::GetName() const {
+ return name_;
+}
+
+bool TaskQueueImpl::RunsTasksInCurrentSequence() const {
+ return PlatformThread::CurrentId() == thread_id_;
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTask(
+ TaskQueue::PostedTask task) {
+ if (task.delay.is_zero())
+ return PostImmediateTaskImpl(std::move(task));
+
+ return PostDelayedTaskImpl(std::move(task));
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostImmediateTaskImpl(
+ TaskQueue::PostedTask task) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.callback);
+ AutoLock lock(any_thread_lock_);
+ if (!any_thread().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+
+ PushOntoImmediateIncomingQueueLocked(Task(std::move(task),
+ any_thread().time_domain->Now(),
+ sequence_number, sequence_number));
+ return PostTaskResult::Success();
+}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTaskImpl(
+ TaskQueue::PostedTask task) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.callback);
+ DCHECK_GT(task.delay, TimeDelta());
+ if (PlatformThread::CurrentId() == thread_id_) {
+ // Lock-free fast path for delayed tasks posted from the main thread.
+ if (!main_thread_only().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ main_thread_only().sequence_manager->GetNextSequenceNumber();
+
+ TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+ TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueFromMainThread(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number),
+ time_domain_now);
+ } else {
+ // NOTE posting a delayed task from a different thread is not expected to
+ // be common. This pathway is less optimal than perhaps it could be
+ // because it causes two main thread tasks to be run. Should this
+ // assumption prove to be false in future, we may need to revisit this.
+ AutoLock lock(any_thread_lock_);
+ if (!any_thread().sequence_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+
+ TimeTicks time_domain_now = any_thread().time_domain->Now();
+ TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueLocked(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number));
+ }
+ return PostTaskResult::Success();
+}
+
+void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
+ Task pending_task,
+ TimeTicks now) {
+ main_thread_only().sequence_manager->WillQueueTask(&pending_task);
+ main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+
+ LazyNow lazy_now(now);
+ UpdateDelayedWakeUp(&lazy_now);
+
+ TraceQueueSize();
+}
+
+void TaskQueueImpl::PushOntoDelayedIncomingQueueLocked(Task pending_task) {
+ any_thread().sequence_manager->WillQueueTask(&pending_task);
+
+ EnqueueOrder thread_hop_task_sequence_number =
+ any_thread().sequence_manager->GetNextSequenceNumber();
+ // TODO(altimin): Add a copy method to Task to capture metadata here.
+ PushOntoImmediateIncomingQueueLocked(Task(
+ TaskQueue::PostedTask(BindOnce(&TaskQueueImpl::ScheduleDelayedWorkTask,
+ Unretained(this), std::move(pending_task)),
+ FROM_HERE, TimeDelta(), Nestable::kNonNestable,
+ pending_task.task_type()),
+ TimeTicks(), thread_hop_task_sequence_number,
+ thread_hop_task_sequence_number));
+}
+
+void TaskQueueImpl::ScheduleDelayedWorkTask(Task pending_task) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ TimeTicks delayed_run_time = pending_task.delayed_run_time;
+ TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+ if (delayed_run_time <= time_domain_now) {
+ // If |delayed_run_time| is in the past then push it onto the work queue
+ // immediately. To ensure the right task ordering we need to temporarily
+ // push it onto the |delayed_incoming_queue|.
+ delayed_run_time = time_domain_now;
+ pending_task.delayed_run_time = time_domain_now;
+ main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+ LazyNow lazy_now(time_domain_now);
+ WakeUpForDelayedWork(&lazy_now);
+ } else {
+ // If |delayed_run_time| is in the future we can queue it as normal.
+ PushOntoDelayedIncomingQueueFromMainThread(std::move(pending_task),
+ time_domain_now);
+ }
+ TraceQueueSize();
+}
+
+// Appends |task| to |immediate_incoming_queue| and kicks the sequence manager
+// if the queue transitioned from empty. Callers are presumed to hold
+// |any_thread_lock_| (the "Locked" suffix; any_thread() is accessed here) —
+// the incoming deque itself is guarded by |immediate_incoming_queue_lock_|.
+void TaskQueueImpl::PushOntoImmediateIncomingQueueLocked(Task task) {
+  // If the |immediate_incoming_queue| is empty we need a DoWork posted to make
+  // it run.
+  bool was_immediate_incoming_queue_empty;
+
+  // Snapshot these before |task| is moved into the deque below.
+  EnqueueOrder sequence_number = task.enqueue_order();
+  TimeTicks desired_run_time = task.delayed_run_time;
+
+  {
+    AutoLock lock(immediate_incoming_queue_lock_);
+    was_immediate_incoming_queue_empty = immediate_incoming_queue().empty();
+    any_thread().sequence_manager->WillQueueTask(&task);
+    immediate_incoming_queue().push_back(std::move(task));
+  }
+
+  if (was_immediate_incoming_queue_empty) {
+    // However there's no point posting a DoWork for a blocked queue. NB we can
+    // only tell if it's disabled from the main thread.
+    bool queue_is_blocked =
+        RunsTasksInCurrentSequence() &&
+        (!IsQueueEnabled() || main_thread_only().current_fence);
+    any_thread().sequence_manager->OnQueueHasIncomingImmediateWork(
+        this, sequence_number, queue_is_blocked);
+    if (!any_thread().on_next_wake_up_changed_callback.is_null())
+      any_thread().on_next_wake_up_changed_callback.Run(desired_run_time);
+  }
+
+  TraceQueueSize();
+}
+
+// Refills the main-thread |immediate_work_queue| from the incoming queue, but
+// only once the work queue has been fully drained (this keeps locking rare).
+void TaskQueueImpl::ReloadImmediateWorkQueueIfEmpty() {
+  if (main_thread_only().immediate_work_queue->Empty())
+    main_thread_only().immediate_work_queue->ReloadEmptyImmediateQueue();
+}
+
+// Swaps the cross-thread |immediate_incoming_queue| into |queue| (which must
+// be empty), and promotes a pending delayed fence to a real fence if one of
+// the incoming tasks crosses it.
+void TaskQueueImpl::ReloadEmptyImmediateQueue(TaskDeque* queue) {
+  DCHECK(queue->empty());
+
+  AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+  queue->swap(immediate_incoming_queue());
+
+  // Activate delayed fence if necessary. This is ideologically similar to
+  // ActivateDelayedFenceIfNeeded, but due to immediate tasks being posted
+  // from any thread we can't generate an enqueue order for the fence there,
+  // so we have to check all immediate tasks and use their enqueue order for
+  // a fence.
+  if (main_thread_only().delayed_fence) {
+    for (const Task& task : *queue) {
+      if (task.delayed_run_time >= main_thread_only().delayed_fence.value()) {
+        main_thread_only().delayed_fence = nullopt;
+        DCHECK(!main_thread_only().current_fence);
+        main_thread_only().current_fence = task.enqueue_order();
+        // Do not trigger WorkQueueSets notification when taking incoming
+        // immediate queue.
+        main_thread_only().immediate_work_queue->InsertFenceSilently(
+            main_thread_only().current_fence);
+        main_thread_only().delayed_work_queue->InsertFenceSilently(
+            main_thread_only().current_fence);
+        break;
+      }
+    }
+  }
+}
+
+// True iff all four queues are empty. Checks the cheap main-thread-only
+// queues first and only then takes |immediate_incoming_queue_lock_|.
+bool TaskQueueImpl::IsEmpty() const {
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().delayed_incoming_queue.empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return false;
+  }
+
+  AutoLock lock(immediate_incoming_queue_lock_);
+  return immediate_incoming_queue().empty();
+}
+
+// Sums pending tasks across all four queues (includes canceled tasks that
+// have not been swept yet).
+size_t TaskQueueImpl::GetNumberOfPendingTasks() const {
+  size_t task_count = 0;
+  task_count += main_thread_only().delayed_work_queue->Size();
+  task_count += main_thread_only().delayed_incoming_queue.size();
+  task_count += main_thread_only().immediate_work_queue->Size();
+
+  AutoLock lock(immediate_incoming_queue_lock_);
+  task_count += immediate_incoming_queue().size();
+  return task_count;
+}
+
+// True iff some task is eligible to run right now (ignoring fences and the
+// enabled state).
+bool TaskQueueImpl::HasTaskToRunImmediately() const {
+  // Any work queue tasks count as immediate work.
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return true;
+  }
+
+  // Tasks on |delayed_incoming_queue| that could run now, count as
+  // immediate work.
+  if (!main_thread_only().delayed_incoming_queue.empty() &&
+      main_thread_only().delayed_incoming_queue.top().delayed_run_time <=
+          main_thread_only().time_domain->CreateLazyNow().Now()) {
+    return true;
+  }
+
+  // Finally tasks on |immediate_incoming_queue| count as immediate work.
+  AutoLock lock(immediate_incoming_queue_lock_);
+  return !immediate_incoming_queue().empty();
+}
+
+// Returns the wake-up (time + sequence number) for the earliest delayed task,
+// or nullopt if there is none.
+Optional<TaskQueueImpl::DelayedWakeUp>
+TaskQueueImpl::GetNextScheduledWakeUpImpl() {
+  // Note we don't schedule a wake-up for disabled queues.
+  if (main_thread_only().delayed_incoming_queue.empty() || !IsQueueEnabled())
+    return nullopt;
+
+  return main_thread_only().delayed_incoming_queue.top().delayed_wake_up();
+}
+
+// Convenience wrapper exposing only the time component of the next wake-up.
+Optional<TimeTicks> TaskQueueImpl::GetNextScheduledWakeUp() {
+  if (Optional<DelayedWakeUp> wake_up = GetNextScheduledWakeUpImpl())
+    return wake_up->time;
+  return nullopt;
+}
+
+// Moves all ripe, non-canceled tasks from |delayed_incoming_queue| into
+// |delayed_work_queue|, assigning each its final enqueue order, then updates
+// the scheduled wake-up for whatever remains.
+void TaskQueueImpl::WakeUpForDelayedWork(LazyNow* lazy_now) {
+  // Enqueue all delayed tasks that should be running now, skipping any that
+  // have been canceled.
+  while (!main_thread_only().delayed_incoming_queue.empty()) {
+    // const_cast so the element can be moved out; it is popped immediately
+    // afterwards, and set_enqueue_order() is presumed not to affect the
+    // priority_queue ordering key (ordering uses delayed_run_time).
+    Task& task =
+        const_cast<Task&>(main_thread_only().delayed_incoming_queue.top());
+    if (!task.task || task.task.IsCancelled()) {
+      main_thread_only().delayed_incoming_queue.pop();
+      continue;
+    }
+    if (task.delayed_run_time > lazy_now->Now())
+      break;
+    ActivateDelayedFenceIfNeeded(task.delayed_run_time);
+    task.set_enqueue_order(
+        main_thread_only().sequence_manager->GetNextSequenceNumber());
+    main_thread_only().delayed_work_queue->Push(std::move(task));
+    main_thread_only().delayed_incoming_queue.pop();
+
+    // Normally WakeUpForDelayedWork is called inside DoWork, but it also
+    // can be called elsewhere (e.g. tests and fast-path for posting
+    // delayed tasks). Ensure that there is a DoWork posting. No-op inside
+    // existing DoWork due to DoWork deduplication.
+    if (IsQueueEnabled() || !main_thread_only().current_fence) {
+      main_thread_only().sequence_manager->MaybeScheduleImmediateWork(
+          FROM_HERE);
+    }
+  }
+
+  UpdateDelayedWakeUp(lazy_now);
+}
+
+// Emits the total queued-task count as a trace counter; no-op unless the
+// "sequence_manager" tracing category is enabled and we're on the main thread.
+void TaskQueueImpl::TraceQueueSize() const {
+  bool is_tracing;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), &is_tracing);
+  if (!is_tracing)
+    return;
+
+  // It's only safe to access the work queues from the main thread.
+  // TODO(alexclarke): We should find another way of tracing this
+  if (PlatformThread::CurrentId() != thread_id_)
+    return;
+
+  AutoLock lock(immediate_incoming_queue_lock_);
+  TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"), GetName(),
+                 immediate_incoming_queue().size() +
+                     main_thread_only().immediate_work_queue->Size() +
+                     main_thread_only().delayed_work_queue->Size() +
+                     main_thread_only().delayed_incoming_queue.size());
+}
+
+// Forwards a priority change to the selector; no-op when the queue is
+// unregistered or the priority is unchanged.
+void TaskQueueImpl::SetQueuePriority(TaskQueue::QueuePriority priority) {
+  if (!main_thread_only().sequence_manager || priority == GetQueuePriority())
+    return;
+  main_thread_only()
+      .sequence_manager->main_thread_only()
+      .selector.SetQueuePriority(this, priority);
+}
+
+// The priority is encoded as the WorkQueueSet index, which both work queues
+// must agree on.
+TaskQueue::QueuePriority TaskQueueImpl::GetQueuePriority() const {
+  size_t set_index = immediate_work_queue()->work_queue_set_index();
+  DCHECK_EQ(set_index, delayed_work_queue()->work_queue_set_index());
+  return static_cast<TaskQueue::QueuePriority>(set_index);
+}
+
+// Serializes queue state for tracing. Lock order here (|any_thread_lock_|
+// before |immediate_incoming_queue_lock_|) must match other call sites.
+void TaskQueueImpl::AsValueInto(TimeTicks now,
+                                trace_event::TracedValue* state) const {
+  AutoLock lock(any_thread_lock_);
+  AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+  state->BeginDictionary();
+  state->SetString("name", GetName());
+  // A null sequence_manager means UnregisterTaskQueue has run; emit a stub.
+  if (!main_thread_only().sequence_manager) {
+    state->SetBoolean("unregistered", true);
+    state->EndDictionary();
+    return;
+  }
+  DCHECK(main_thread_only().time_domain);
+  DCHECK(main_thread_only().delayed_work_queue);
+  DCHECK(main_thread_only().immediate_work_queue);
+
+  state->SetString(
+      "task_queue_id",
+      StringPrintf("0x%" PRIx64,
+                   static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this))));
+  state->SetBoolean("enabled", IsQueueEnabled());
+  state->SetString("time_domain_name",
+                   main_thread_only().time_domain->GetName());
+  state->SetInteger("immediate_incoming_queue_size",
+                    immediate_incoming_queue().size());
+  state->SetInteger("delayed_incoming_queue_size",
+                    main_thread_only().delayed_incoming_queue.size());
+  state->SetInteger("immediate_work_queue_size",
+                    main_thread_only().immediate_work_queue->Size());
+  state->SetInteger("delayed_work_queue_size",
+                    main_thread_only().delayed_work_queue->Size());
+
+  if (!main_thread_only().delayed_incoming_queue.empty()) {
+    TimeDelta delay_to_next_task =
+        (main_thread_only().delayed_incoming_queue.top().delayed_run_time -
+         main_thread_only().time_domain->CreateLazyNow().Now());
+    state->SetDouble("delay_to_next_task_ms",
+                     delay_to_next_task.InMillisecondsF());
+  }
+  if (main_thread_only().current_fence)
+    state->SetInteger("current_fence", main_thread_only().current_fence);
+  if (main_thread_only().delayed_fence) {
+    state->SetDouble(
+        "delayed_fence_seconds_from_now",
+        (main_thread_only().delayed_fence.value() - now).InSecondsF());
+  }
+
+  // Per-task dumps are expensive; gated on the verbose tracing category.
+  bool verbose = false;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots"),
+      &verbose);
+
+  if (verbose) {
+    state->BeginArray("immediate_incoming_queue");
+    QueueAsValueInto(immediate_incoming_queue(), now, state);
+    state->EndArray();
+    state->BeginArray("delayed_work_queue");
+    main_thread_only().delayed_work_queue->AsValueInto(now, state);
+    state->EndArray();
+    state->BeginArray("immediate_work_queue");
+    main_thread_only().immediate_work_queue->AsValueInto(now, state);
+    state->EndArray();
+    state->BeginArray("delayed_incoming_queue");
+    QueueAsValueInto(main_thread_only().delayed_incoming_queue, now, state);
+    state->EndArray();
+  }
+  state->SetString("priority", TaskQueue::PriorityToString(GetQueuePriority()));
+  state->EndDictionary();
+}
+
+// Registers an observer notified around every task run on this queue.
+void TaskQueueImpl::AddTaskObserver(MessageLoop::TaskObserver* task_observer) {
+  main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+void TaskQueueImpl::RemoveTaskObserver(
+    MessageLoop::TaskObserver* task_observer) {
+  main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+// Enters the blame context (if any) before the observer callbacks; the
+// matching Leave() happens in NotifyDidProcessTask.
+void TaskQueueImpl::NotifyWillProcessTask(const PendingTask& pending_task) {
+  DCHECK(should_notify_observers_);
+  if (main_thread_only().blame_context)
+    main_thread_only().blame_context->Enter();
+  for (auto& observer : main_thread_only().task_observers)
+    observer.WillProcessTask(pending_task);
+}
+
+void TaskQueueImpl::NotifyDidProcessTask(const PendingTask& pending_task) {
+  DCHECK(should_notify_observers_);
+  for (auto& observer : main_thread_only().task_observers)
+    observer.DidProcessTask(pending_task);
+  if (main_thread_only().blame_context)
+    main_thread_only().blame_context->Leave();
+}
+
+// Moves this queue to a different TimeDomain and re-registers its wake-up.
+// Main-thread only; the |any_thread()| copy is updated under the lock for
+// cross-thread readers, while the queue migration happens outside it.
+void TaskQueueImpl::SetTimeDomain(TimeDomain* time_domain) {
+  {
+    AutoLock lock(any_thread_lock_);
+    DCHECK(time_domain);
+    // NOTE this is similar to checking |any_thread().sequence_manager| but
+    // the TaskQueueSelectorTests constructs TaskQueueImpl directly with a null
+    // sequence_manager. Instead we check |any_thread().time_domain| which is
+    // another way of asserting that UnregisterTaskQueue has not been called.
+    DCHECK(any_thread().time_domain);
+    if (!any_thread().time_domain)
+      return;
+    DCHECK(main_thread_checker_.CalledOnValidThread());
+    if (time_domain == main_thread_only().time_domain)
+      return;
+
+    any_thread().time_domain = time_domain;
+  }
+
+  main_thread_only().time_domain->UnregisterQueue(this);
+  main_thread_only().time_domain = time_domain;
+
+  LazyNow lazy_now = time_domain->CreateLazyNow();
+  // Clear scheduled wake up to ensure that new notifications are issued
+  // correctly.
+  // TODO(altimin): Remove this when we won't have to support changing time
+  // domains.
+  main_thread_only().scheduled_wake_up = nullopt;
+  UpdateDelayedWakeUp(&lazy_now);
+}
+
+// Returns the queue's TimeDomain; lock-free fast path on the main thread,
+// otherwise reads the |any_thread()| copy under the lock.
+TimeDomain* TaskQueueImpl::GetTimeDomain() const {
+  if (PlatformThread::CurrentId() == thread_id_)
+    return main_thread_only().time_domain;
+
+  AutoLock lock(any_thread_lock_);
+  return any_thread().time_domain;
+}
+
+// Sets (or clears, with nullptr) the blame context entered around task runs.
+void TaskQueueImpl::SetBlameContext(trace_event::BlameContext* blame_context) {
+  main_thread_only().blame_context = blame_context;
+}
+
+// Inserts a fence blocking tasks with a higher enqueue order from running.
+// kNow fences take the next sequence number; kBeginningOfTime fences block
+// everything. Replaces any existing (delayed or immediate) fence.
+void TaskQueueImpl::InsertFence(TaskQueue::InsertFencePosition position) {
+  if (!main_thread_only().sequence_manager)
+    return;
+
+  // Only one fence may be present at a time.
+  main_thread_only().delayed_fence = nullopt;
+
+  EnqueueOrder previous_fence = main_thread_only().current_fence;
+  EnqueueOrder current_fence =
+      position == TaskQueue::InsertFencePosition::kNow
+          ? main_thread_only().sequence_manager->GetNextSequenceNumber()
+          : EnqueueOrder::blocking_fence();
+
+  // Tasks posted after this point will have a strictly higher enqueue order
+  // and will be blocked from running.
+  main_thread_only().current_fence = current_fence;
+  bool task_unblocked =
+      main_thread_only().immediate_work_queue->InsertFence(current_fence);
+  task_unblocked |=
+      main_thread_only().delayed_work_queue->InsertFence(current_fence);
+
+  // A task in the incoming queue that sat between the old and new fence
+  // positions also counts as newly unblocked.
+  if (!task_unblocked && previous_fence && previous_fence < current_fence) {
+    AutoLock lock(immediate_incoming_queue_lock_);
+    if (!immediate_incoming_queue().empty() &&
+        immediate_incoming_queue().front().enqueue_order() > previous_fence &&
+        immediate_incoming_queue().front().enqueue_order() < current_fence) {
+      task_unblocked = true;
+    }
+  }
+
+  if (IsQueueEnabled() && task_unblocked) {
+    main_thread_only().sequence_manager->MaybeScheduleImmediateWork(FROM_HERE);
+  }
+}
+
+// Arms a fence to be activated when time reaches |time| (see
+// ActivateDelayedFenceIfNeeded / ReloadEmptyImmediateQueue).
+void TaskQueueImpl::InsertFenceAt(TimeTicks time) {
+  // Task queue can have only one fence, delayed or not.
+  RemoveFence();
+  main_thread_only().delayed_fence = time;
+}
+
+// Clears any fence (delayed or immediate) and schedules work if that
+// unblocked a task.
+void TaskQueueImpl::RemoveFence() {
+  if (!main_thread_only().sequence_manager)
+    return;
+
+  EnqueueOrder previous_fence = main_thread_only().current_fence;
+  main_thread_only().current_fence = EnqueueOrder::none();
+  main_thread_only().delayed_fence = nullopt;
+
+  bool task_unblocked = main_thread_only().immediate_work_queue->RemoveFence();
+  task_unblocked |= main_thread_only().delayed_work_queue->RemoveFence();
+
+  if (!task_unblocked && previous_fence) {
+    AutoLock lock(immediate_incoming_queue_lock_);
+    if (!immediate_incoming_queue().empty() &&
+        immediate_incoming_queue().front().enqueue_order() > previous_fence) {
+      task_unblocked = true;
+    }
+  }
+
+  if (IsQueueEnabled() && task_unblocked) {
+    main_thread_only().sequence_manager->MaybeScheduleImmediateWork(FROM_HERE);
+  }
+}
+
+// True iff a fence is present and every pending task (work queues plus the
+// incoming immediate queue) is behind it.
+bool TaskQueueImpl::BlockedByFence() const {
+  if (!main_thread_only().current_fence)
+    return false;
+
+  if (!main_thread_only().immediate_work_queue->BlockedByFence() ||
+      !main_thread_only().delayed_work_queue->BlockedByFence()) {
+    return false;
+  }
+
+  AutoLock lock(immediate_incoming_queue_lock_);
+  if (immediate_incoming_queue().empty())
+    return true;
+
+  return immediate_incoming_queue().front().enqueue_order() >
+         main_thread_only().current_fence;
+}
+
+// True iff an immediate fence exists or a delayed fence's time has passed
+// (even if it hasn't been converted into an immediate fence yet).
+bool TaskQueueImpl::HasActiveFence() {
+  if (main_thread_only().delayed_fence &&
+      main_thread_only().time_domain->Now() >
+          main_thread_only().delayed_fence.value()) {
+    return true;
+  }
+  return !!main_thread_only().current_fence;
+}
+
+// A task could run iff the queue is enabled and its enqueue order precedes
+// any active fence. Main thread only.
+bool TaskQueueImpl::CouldTaskRun(EnqueueOrder enqueue_order) const {
+  if (!IsQueueEnabled())
+    return false;
+
+  const EnqueueOrder fence = main_thread_only().current_fence;
+  return !fence || enqueue_order < fence;
+}
+
+// static
+// Serializes every task in a deque-backed queue for tracing.
+void TaskQueueImpl::QueueAsValueInto(const TaskDeque& queue,
+                                     TimeTicks now,
+                                     trace_event::TracedValue* state) {
+  for (const Task& task : queue) {
+    TaskAsValueInto(task, now, state);
+  }
+}
+
+// static
+// Serializes a priority_queue by draining it and rebuilding it afterwards
+// (std::priority_queue offers no iteration).
+void TaskQueueImpl::QueueAsValueInto(const std::priority_queue<Task>& queue,
+                                     TimeTicks now,
+                                     trace_event::TracedValue* state) {
+  // Remove const to search |queue| in the destructive manner. Restore the
+  // content from |visited| later.
+  std::priority_queue<Task>* mutable_queue =
+      const_cast<std::priority_queue<Task>*>(&queue);
+  std::priority_queue<Task> visited;
+  while (!mutable_queue->empty()) {
+    TaskAsValueInto(mutable_queue->top(), now, state);
+    visited.push(std::move(const_cast<Task&>(mutable_queue->top())));
+    mutable_queue->pop();
+  }
+  *mutable_queue = std::move(visited);
+}
+
+// static
+// Serializes a single task as one trace dictionary.
+void TaskQueueImpl::TaskAsValueInto(const Task& task,
+                                    TimeTicks now,
+                                    trace_event::TracedValue* state) {
+  state->BeginDictionary();
+  state->SetString("posted_from", task.posted_from.ToString());
+  if (task.enqueue_order_set())
+    state->SetInteger("enqueue_order", task.enqueue_order());
+  state->SetInteger("sequence_num", task.sequence_num);
+  state->SetBoolean("nestable", task.nestable == Nestable::kNestable);
+  state->SetBoolean("is_high_res", task.is_high_res);
+  state->SetBoolean("is_cancelled", task.task.IsCancelled());
+  state->SetDouble("delayed_run_time",
+                   (task.delayed_run_time - TimeTicks()).InMillisecondsF());
+  state->SetDouble("delayed_run_time_milliseconds_from_now",
+                   (task.delayed_run_time - now).InMillisecondsF());
+  state->EndDictionary();
+}
+
+// A voter starts out voting "enabled" (matching the refcount bump done in
+// CreateQueueEnabledVoter).
+TaskQueueImpl::QueueEnabledVoterImpl::QueueEnabledVoterImpl(
+    scoped_refptr<TaskQueue> task_queue)
+    : task_queue_(task_queue), enabled_(true) {}
+
+TaskQueueImpl::QueueEnabledVoterImpl::~QueueEnabledVoterImpl() {
+  if (task_queue_->GetTaskQueueImpl())
+    task_queue_->GetTaskQueueImpl()->RemoveQueueEnabledVoter(this);
+}
+
+void TaskQueueImpl::QueueEnabledVoterImpl::SetQueueEnabled(bool enabled) {
+  if (enabled_ == enabled)
+    return;
+
+  task_queue_->GetTaskQueueImpl()->OnQueueEnabledVoteChanged(enabled);
+  enabled_ = enabled;
+}
+
+// Drops a voter's contribution to both refcounts and toggles the queue with
+// the selector if the overall enabled state changed.
+void TaskQueueImpl::RemoveQueueEnabledVoter(
+    const QueueEnabledVoterImpl* voter) {
+  // Bail out if we're being called from TaskQueueImpl::UnregisterTaskQueue.
+  if (!main_thread_only().time_domain)
+    return;
+
+  bool was_enabled = IsQueueEnabled();
+  if (voter->enabled_) {
+    main_thread_only().is_enabled_refcount--;
+    DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+  }
+
+  main_thread_only().voter_refcount--;
+  DCHECK_GE(main_thread_only().voter_refcount, 0);
+
+  bool is_enabled = IsQueueEnabled();
+  if (was_enabled != is_enabled)
+    EnableOrDisableWithSelector(is_enabled);
+}
+
+// Enabled iff every voter votes enabled (and the test override allows it).
+bool TaskQueueImpl::IsQueueEnabled() const {
+  // By default is_enabled_refcount and voter_refcount both equal zero.
+  return (main_thread_only().is_enabled_refcount ==
+          main_thread_only().voter_refcount) &&
+         main_thread_only().is_enabled_for_test;
+}
+
+void TaskQueueImpl::OnQueueEnabledVoteChanged(bool enabled) {
+  bool was_enabled = IsQueueEnabled();
+  if (enabled) {
+    main_thread_only().is_enabled_refcount++;
+    DCHECK_LE(main_thread_only().is_enabled_refcount,
+              main_thread_only().voter_refcount);
+  } else {
+    main_thread_only().is_enabled_refcount--;
+    DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+  }
+
+  bool is_enabled = IsQueueEnabled();
+  if (was_enabled != is_enabled)
+    EnableOrDisableWithSelector(is_enabled);
+}
+
+// Applies an enabled-state flip: refreshes the wake-up (disabled queues don't
+// schedule wake-ups, see GetNextScheduledWakeUpImpl) and tells the selector.
+void TaskQueueImpl::EnableOrDisableWithSelector(bool enable) {
+  if (!main_thread_only().sequence_manager)
+    return;
+
+  LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+  UpdateDelayedWakeUp(&lazy_now);
+
+  if (enable) {
+    if (HasPendingImmediateWork() &&
+        !main_thread_only().on_next_wake_up_changed_callback.is_null()) {
+      // Delayed work notification will be issued via time domain.
+      main_thread_only().on_next_wake_up_changed_callback.Run(TimeTicks());
+    }
+
+    // Note the selector calls SequenceManager::OnTaskQueueEnabled which posts
+    // a DoWork if needed.
+    main_thread_only()
+        .sequence_manager->main_thread_only()
+        .selector.EnableQueue(this);
+  } else {
+    main_thread_only()
+        .sequence_manager->main_thread_only()
+        .selector.DisableQueue(this);
+  }
+}
+
+// Each voter contributes one to both refcounts up front (voters default to
+// "enabled"); see QueueEnabledVoterImpl's constructor.
+std::unique_ptr<TaskQueue::QueueEnabledVoter>
+TaskQueueImpl::CreateQueueEnabledVoter(scoped_refptr<TaskQueue> task_queue) {
+  DCHECK_EQ(task_queue->GetTaskQueueImpl(), this);
+  main_thread_only().voter_refcount++;
+  main_thread_only().is_enabled_refcount++;
+  return std::make_unique<QueueEnabledVoterImpl>(task_queue);
+}
+
+// Drops canceled tasks from |delayed_incoming_queue| by rebuilding the heap
+// (std::priority_queue has no erase), then refreshes the wake-up since the
+// earliest task may have changed.
+void TaskQueueImpl::SweepCanceledDelayedTasks(TimeTicks now) {
+  if (main_thread_only().delayed_incoming_queue.empty())
+    return;
+
+  // Remove canceled tasks.
+  std::priority_queue<Task> remaining_tasks;
+  while (!main_thread_only().delayed_incoming_queue.empty()) {
+    if (!main_thread_only().delayed_incoming_queue.top().task.IsCancelled()) {
+      remaining_tasks.push(std::move(
+          const_cast<Task&>(main_thread_only().delayed_incoming_queue.top())));
+    }
+    main_thread_only().delayed_incoming_queue.pop();
+  }
+
+  main_thread_only().delayed_incoming_queue = std::move(remaining_tasks);
+
+  LazyNow lazy_now(now);
+  UpdateDelayedWakeUp(&lazy_now);
+}
+
+// Test-only direct enqueue; bypasses WillQueueTask and DoWork scheduling.
+void TaskQueueImpl::PushImmediateIncomingTaskForTest(
+    TaskQueueImpl::Task&& task) {
+  AutoLock lock(immediate_incoming_queue_lock_);
+  immediate_incoming_queue().push_back(std::move(task));
+}
+
+// Puts a deferred non-nestable task back at the front of the work queue it
+// came from, preserving its original enqueue order.
+void TaskQueueImpl::RequeueDeferredNonNestableTask(
+    DeferredNonNestableTask task) {
+  DCHECK(task.task.nestable == Nestable::kNonNestable);
+  // The re-queued tasks have to be pushed onto the front because we'd otherwise
+  // violate the strict monotonically increasing enqueue order within the
+  // WorkQueue. We can't assign them a new enqueue order here because that will
+  // not behave correctly with fences and things will break (e.g Idle TQ).
+  if (task.work_queue_type == WorkQueueType::kDelayed) {
+    main_thread_only().delayed_work_queue->PushNonNestableTaskToFront(
+        std::move(task.task));
+  } else {
+    main_thread_only().immediate_work_queue->PushNonNestableTaskToFront(
+        std::move(task.task));
+  }
+}
+
+// Installs the wake-up observer callback. Both the |any_thread()| and
+// |main_thread_only()| copies are kept in sync so either thread context can
+// invoke it.
+void TaskQueueImpl::SetOnNextWakeUpChangedCallback(
+    TaskQueueImpl::OnNextWakeUpChangedCallback callback) {
+#if DCHECK_IS_ON()
+  if (callback) {
+    DCHECK(main_thread_only().on_next_wake_up_changed_callback.is_null())
+        << "Can't assign two different observers to "
+           "blink::scheduler::TaskQueue";
+  }
+#endif
+  AutoLock lock(any_thread_lock_);
+  any_thread().on_next_wake_up_changed_callback = callback;
+  main_thread_only().on_next_wake_up_changed_callback = callback;
+}
+
+// Recomputes the next wake-up from the delayed incoming queue and propagates
+// it to the time domain.
+void TaskQueueImpl::UpdateDelayedWakeUp(LazyNow* lazy_now) {
+  return UpdateDelayedWakeUpImpl(lazy_now, GetNextScheduledWakeUpImpl());
+}
+
+void TaskQueueImpl::UpdateDelayedWakeUpImpl(
+    LazyNow* lazy_now,
+    Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+  // Deduplicate: nothing to do if the wake-up is unchanged.
+  if (main_thread_only().scheduled_wake_up == wake_up)
+    return;
+  main_thread_only().scheduled_wake_up = wake_up;
+
+  // Observers are only told about delayed work when there is no immediate
+  // work that would already cause a notification.
+  if (wake_up &&
+      !main_thread_only().on_next_wake_up_changed_callback.is_null() &&
+      !HasPendingImmediateWork()) {
+    main_thread_only().on_next_wake_up_changed_callback.Run(wake_up->time);
+  }
+
+  main_thread_only().time_domain->SetNextWakeUpForQueue(this, wake_up,
+                                                        lazy_now);
+}
+
+// Test-only: force a specific wake-up, bypassing GetNextScheduledWakeUpImpl.
+void TaskQueueImpl::SetDelayedWakeUpForTesting(
+    Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+  LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+  UpdateDelayedWakeUpImpl(&lazy_now, wake_up);
+}
+
+// Like HasTaskToRunImmediately but ignores ripe delayed tasks; used to decide
+// whether delayed-work notifications are needed.
+bool TaskQueueImpl::HasPendingImmediateWork() {
+  // Any work queue tasks count as immediate work.
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return true;
+  }
+
+  // Finally tasks on |immediate_incoming_queue| count as immediate work.
+  AutoLock lock(immediate_incoming_queue_lock_);
+  return !immediate_incoming_queue().empty();
+}
+
+// Handlers invoked around each task for timing instrumentation; see
+// RequiresTaskTiming.
+void TaskQueueImpl::SetOnTaskStartedHandler(
+    TaskQueueImpl::OnTaskStartedHandler handler) {
+  main_thread_only().on_task_started_handler = std::move(handler);
+}
+
+void TaskQueueImpl::OnTaskStarted(const TaskQueue::Task& task,
+                                  const TaskQueue::TaskTiming& task_timing) {
+  if (!main_thread_only().on_task_started_handler.is_null())
+    main_thread_only().on_task_started_handler.Run(task, task_timing);
+}
+
+void TaskQueueImpl::SetOnTaskCompletedHandler(
+    TaskQueueImpl::OnTaskCompletedHandler handler) {
+  main_thread_only().on_task_completed_handler = std::move(handler);
+}
+
+void TaskQueueImpl::OnTaskCompleted(const TaskQueue::Task& task,
+                                    const TaskQueue::TaskTiming& task_timing) {
+  if (!main_thread_only().on_task_completed_handler.is_null())
+    main_thread_only().on_task_completed_handler.Run(task, task_timing);
+}
+
+// Task timing need only be recorded when someone is listening.
+bool TaskQueueImpl::RequiresTaskTiming() const {
+  return !main_thread_only().on_task_started_handler.is_null() ||
+         !main_thread_only().on_task_completed_handler.is_null();
+}
+
+// A null |any_thread().sequence_manager| marks an unregistered queue.
+bool TaskQueueImpl::IsUnregistered() const {
+  AutoLock lock(any_thread_lock_);
+  return !any_thread().sequence_manager;
+}
+
+WeakPtr<SequenceManagerImpl> TaskQueueImpl::GetSequenceManagerWeakPtr() {
+  return main_thread_only().sequence_manager->GetWeakPtr();
+}
+
+scoped_refptr<GracefulQueueShutdownHelper>
+TaskQueueImpl::GetGracefulQueueShutdownHelper() {
+  return main_thread_only().sequence_manager->GetGracefulQueueShutdownHelper();
+}
+
+// Test-only override that can force-disable the queue regardless of voters
+// (see IsQueueEnabled).
+void TaskQueueImpl::SetQueueEnabledForTest(bool enabled) {
+  main_thread_only().is_enabled_for_test = enabled;
+  EnableOrDisableWithSelector(IsQueueEnabled());
+}
+
+// Converts an armed delayed fence (InsertFenceAt) into a real fence once its
+// time has been reached; InsertFence clears |delayed_fence| again.
+void TaskQueueImpl::ActivateDelayedFenceIfNeeded(TimeTicks now) {
+  if (!main_thread_only().delayed_fence)
+    return;
+  if (main_thread_only().delayed_fence.value() > now)
+    return;
+  InsertFence(TaskQueue::InsertFencePosition::kNow);
+  main_thread_only().delayed_fence = nullopt;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/task_queue_impl.h b/base/task/sequence_manager/task_queue_impl.h
new file mode 100644
index 0000000000..b64dd9fd46
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_impl.h
@@ -0,0 +1,471 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/callback.h"
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/lazily_deallocated_deque.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/task_queue.h"
+#include "base/threading/thread_checker.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+
+class LazyNow;
+class TimeDomain;
+
+namespace internal {
+
+class SequenceManagerImpl;
+class WorkQueue;
+class WorkQueueSets;
+
+// Intrusive singly-linked-list node recording a queue with incoming immediate
+// work and the enqueue order it was observed at. NOTE(review): the list
+// management presumably lives in SequenceManagerImpl — confirm there; it is
+// not visible in this header.
+struct IncomingImmediateWorkList {
+  IncomingImmediateWorkList* next = nullptr;
+  TaskQueueImpl* queue = nullptr;
+  internal::EnqueueOrder order;
+};
+
+// TaskQueueImpl has four main queues:
+//
+// Immediate (non-delayed) tasks:
+// |immediate_incoming_queue| - PostTask enqueues tasks here.
+// |immediate_work_queue| - SequenceManager takes immediate tasks here.
+//
+// Delayed tasks
+// |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
+// |delayed_work_queue| - SequenceManager takes delayed tasks here.
+//
+// The |immediate_incoming_queue| can be accessed from any thread, the other
+// queues are main-thread only. To reduce the overhead of locking,
+// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
+// |immediate_work_queue| becomes empty.
+//
+// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
+// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
+// calls UpdateDelayedWorkQueue and ready delayed tasks are moved into the
+// |delayed_work_queue|. Note the EnqueueOrder (used for ordering) for a delayed
+// task is not set until it's moved into the |delayed_work_queue|.
+//
+// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
+// prioritization. Task selection is done by the TaskQueueSelector and when a
+// queue is selected, it round-robins between the |immediate_work_queue| and
+// |delayed_work_queue|. The reason for this is we want to make sure delayed
+// tasks (normally the most common type) don't starve out immediate work.
+class BASE_EXPORT TaskQueueImpl {
+ public:
+ TaskQueueImpl(SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec);
+
+ ~TaskQueueImpl();
+
+  // Represents a time at which a task wants to run. Tasks scheduled for the
+  // same point in time will be ordered by their sequence numbers.
+  struct DelayedWakeUp {
+    TimeTicks time;
+    // Breaks ties between wake-ups at the same |time| (see operator<=).
+    int sequence_num;
+
+    bool operator!=(const DelayedWakeUp& other) const {
+      return time != other.time || other.sequence_num != sequence_num;
+    }
+
+    bool operator==(const DelayedWakeUp& other) const {
+      return !(*this != other);
+    }
+
+    bool operator<=(const DelayedWakeUp& other) const {
+      if (time == other.time) {
+        // Debug gcc builds can compare an element against itself.
+        DCHECK(sequence_num != other.sequence_num || this == &other);
+        // |PostedTask::sequence_num| is int and might wrap around to
+        // a negative number when casted from EnqueueOrder.
+        // This subtraction-based comparison handles that wraparound
+        // correctly.
+        return (sequence_num - other.sequence_num) <= 0;
+      }
+      return time < other.time;
+    }
+  };
+
+  // A TaskQueue::Task extended with the scheduler's |enqueue_order_|.
+  class BASE_EXPORT Task : public TaskQueue::Task {
+   public:
+    Task(TaskQueue::PostedTask task,
+         TimeTicks desired_run_time,
+         EnqueueOrder sequence_number);
+
+    // Variant that also sets |enqueue_order_| up front (immediate tasks).
+    Task(TaskQueue::PostedTask task,
+         TimeTicks desired_run_time,
+         EnqueueOrder sequence_number,
+         EnqueueOrder enqueue_order);
+
+    DelayedWakeUp delayed_wake_up() const {
+      // Since we use |sequence_num| in DelayedWakeUp for ordering purposes
+      // and integer overflow handling is type-sensitive it's worth protecting
+      // it from an unnoticed potential change in the PendingTask base class.
+      static_assert(std::is_same<decltype(sequence_num), int>::value, "");
+      return DelayedWakeUp{delayed_run_time, sequence_num};
+    }
+
+    // DCHECKs that the order has been assigned (delayed tasks get it late).
+    EnqueueOrder enqueue_order() const {
+      DCHECK(enqueue_order_);
+      return enqueue_order_;
+    }
+
+    // May only be set once (DCHECKed).
+    void set_enqueue_order(EnqueueOrder enqueue_order) {
+      DCHECK(!enqueue_order_);
+      enqueue_order_ = enqueue_order;
+    }
+
+    bool enqueue_order_set() const { return enqueue_order_; }
+
+   private:
+    // Similar to sequence number, but ultimately the |enqueue_order_| is what
+    // the scheduler uses for task ordering. For immediate tasks |enqueue_order|
+    // is set when posted, but for delayed tasks it's not defined until they are
+    // enqueued on the |delayed_work_queue_|. This is because otherwise delayed
+    // tasks could run before an immediate task posted after the delayed task.
+    EnqueueOrder enqueue_order_;
+  };
+
+  // A result returned by PostDelayedTask. When the scheduler failed to post a
+  // task due to being shutdown, the task is handed back so it can be
+  // destroyed outside the lock.
+  struct PostTaskResult {
+    PostTaskResult();
+    PostTaskResult(bool success, TaskQueue::PostedTask task);
+    PostTaskResult(PostTaskResult&& move_from);
+    PostTaskResult(const PostTaskResult& copy_from) = delete;
+    ~PostTaskResult();
+
+    static PostTaskResult Success();
+    static PostTaskResult Fail(TaskQueue::PostedTask task);
+
+    // True iff the task was accepted; on failure |task| holds it.
+    bool success;
+    TaskQueue::PostedTask task;
+  };
+
+ // Types of queues TaskQueueImpl is maintaining internally.
+ enum class WorkQueueType { kImmediate, kDelayed };
+
+ // Non-nestable tasks may get deferred but such queue is being maintained on
+ // SequenceManager side, so we need to keep information how to requeue it.
+ struct DeferredNonNestableTask {
+ internal::TaskQueueImpl::Task task;
+ internal::TaskQueueImpl* task_queue;
+ WorkQueueType work_queue_type;
+ };
+
+ using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
+ using OnTaskStartedHandler =
+ RepeatingCallback<void(const TaskQueue::Task&,
+ const TaskQueue::TaskTiming&)>;
+ using OnTaskCompletedHandler =
+ RepeatingCallback<void(const TaskQueue::Task&,
+ const TaskQueue::TaskTiming&)>;
+
+ // TaskQueue implementation.
+ const char* GetName() const;
+ bool RunsTasksInCurrentSequence() const;
+ PostTaskResult PostDelayedTask(TaskQueue::PostedTask task);
+ // Require a reference to enclosing task queue for lifetime control.
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> CreateQueueEnabledVoter(
+ scoped_refptr<TaskQueue> owning_task_queue);
+ bool IsQueueEnabled() const;
+ bool IsEmpty() const;
+ size_t GetNumberOfPendingTasks() const;
+ bool HasTaskToRunImmediately() const;
+ Optional<TimeTicks> GetNextScheduledWakeUp();
+ Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
+ void SetQueuePriority(TaskQueue::QueuePriority priority);
+ TaskQueue::QueuePriority GetQueuePriority() const;
+ void AddTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void RemoveTaskObserver(MessageLoop::TaskObserver* task_observer);
+ void SetTimeDomain(TimeDomain* time_domain);
+ TimeDomain* GetTimeDomain() const;
+ void SetBlameContext(trace_event::BlameContext* blame_context);
+ void InsertFence(TaskQueue::InsertFencePosition position);
+ void InsertFenceAt(TimeTicks time);
+ void RemoveFence();
+ bool HasActiveFence();
+ bool BlockedByFence() const;
+ // Implementation of TaskQueue::SetObserver.
+ void SetOnNextWakeUpChangedCallback(OnNextWakeUpChangedCallback callback);
+
+ void UnregisterTaskQueue();
+
+ // Returns true if a (potentially hypothetical) task with the specified
+ // |enqueue_order| could run on the queue. Must be called from the main
+ // thread.
+ bool CouldTaskRun(EnqueueOrder enqueue_order) const;
+
+ // Must only be called from the thread this task queue was created on.
+ void ReloadImmediateWorkQueueIfEmpty();
+
+ void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+
+ bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
+ bool GetShouldNotifyObservers() const { return should_notify_observers_; }
+
+ void NotifyWillProcessTask(const PendingTask& pending_task);
+ void NotifyDidProcessTask(const PendingTask& pending_task);
+
+ // Check for available tasks in immediate work queues.
+ // Used to check if we need to generate notifications about delayed work.
+ bool HasPendingImmediateWork();
+
+ WorkQueue* delayed_work_queue() {
+ return main_thread_only().delayed_work_queue.get();
+ }
+
+ const WorkQueue* delayed_work_queue() const {
+ return main_thread_only().delayed_work_queue.get();
+ }
+
+ WorkQueue* immediate_work_queue() {
+ return main_thread_only().immediate_work_queue.get();
+ }
+
+ const WorkQueue* immediate_work_queue() const {
+ return main_thread_only().immediate_work_queue.get();
+ }
+
+ // Protected by SequenceManagerImpl's AnyThread lock.
+ IncomingImmediateWorkList* immediate_work_list_storage() {
+ return &immediate_work_list_storage_;
+ }
+
+ // Enqueues any delayed tasks which should be run now on the
+ // |delayed_work_queue|.
+ // Must be called from the main thread.
+ void WakeUpForDelayedWork(LazyNow* lazy_now);
+
+ HeapHandle heap_handle() const { return main_thread_only().heap_handle; }
+
+ void set_heap_handle(HeapHandle heap_handle) {
+ main_thread_only().heap_handle = heap_handle;
+ }
+
+ // Pushes |task| onto the front of the specified work queue. Caution must be
+ // taken with this API because you could easily starve out other work.
+ // TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
+ void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);
+
+ void PushImmediateIncomingTaskForTest(TaskQueueImpl::Task&& task);
+
+ class QueueEnabledVoterImpl : public TaskQueue::QueueEnabledVoter {
+ public:
+ explicit QueueEnabledVoterImpl(scoped_refptr<TaskQueue> task_queue);
+ ~QueueEnabledVoterImpl() override;
+
+ // QueueEnabledVoter implementation.
+ void SetQueueEnabled(bool enabled) override;
+
+ TaskQueueImpl* GetTaskQueueForTest() const {
+ return task_queue_->GetTaskQueueImpl();
+ }
+
+ private:
+ friend class TaskQueueImpl;
+
+ scoped_refptr<TaskQueue> task_queue_;
+ bool enabled_;
+ };
+
+ // Iterates over |delayed_incoming_queue| removing canceled tasks.
+ void SweepCanceledDelayedTasks(TimeTicks now);
+
+ // Allows wrapping TaskQueue to set a handler to subscribe for notifications
+ // about started and completed tasks.
+ void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
+ void OnTaskStarted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing);
+ void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
+ void OnTaskCompleted(const TaskQueue::Task& task,
+ const TaskQueue::TaskTiming& task_timing);
+ bool RequiresTaskTiming() const;
+
+ WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();
+
+ scoped_refptr<GracefulQueueShutdownHelper> GetGracefulQueueShutdownHelper();
+
+ // Returns true if this queue is unregistered or task queue manager is deleted
+ // and this queue can be safely deleted on any thread.
+ bool IsUnregistered() const;
+
+ // Disables queue for testing purposes, when a QueueEnabledVoter can't be
+ // constructed due to not having TaskQueue.
+ void SetQueueEnabledForTest(bool enabled);
+
+ protected:
+ void SetDelayedWakeUpForTesting(Optional<DelayedWakeUp> wake_up);
+
+ private:
+ friend class WorkQueue;
+ friend class WorkQueueTest;
+
+ struct AnyThread {
+ AnyThread(SequenceManagerImpl* sequence_manager, TimeDomain* time_domain);
+ ~AnyThread();
+
+ // SequenceManagerImpl, TimeDomain and Observer are maintained in two
+ // copies: inside AnyThread and inside MainThreadOnly. They can be changed
+ // only from main thread, so it should be locked before accessing from other
+ // threads.
+ SequenceManagerImpl* sequence_manager;
+ TimeDomain* time_domain;
+ // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+ OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+ };
+
+ struct MainThreadOnly {
+ MainThreadOnly(SequenceManagerImpl* sequence_manager,
+ TaskQueueImpl* task_queue,
+ TimeDomain* time_domain);
+ ~MainThreadOnly();
+
+ // Another copy of SequenceManagerImpl, TimeDomain and Observer
+ // for lock-free access from the main thread.
+ // See description inside struct AnyThread for details.
+ SequenceManagerImpl* sequence_manager;
+ TimeDomain* time_domain;
+ // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+ OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+
+ std::unique_ptr<WorkQueue> delayed_work_queue;
+ std::unique_ptr<WorkQueue> immediate_work_queue;
+ std::priority_queue<TaskQueueImpl::Task> delayed_incoming_queue;
+ ObserverList<MessageLoop::TaskObserver> task_observers;
+ size_t set_index;
+ HeapHandle heap_handle;
+ int is_enabled_refcount;
+ int voter_refcount;
+ trace_event::BlameContext* blame_context; // Not owned.
+ EnqueueOrder current_fence;
+ Optional<TimeTicks> delayed_fence;
+ OnTaskStartedHandler on_task_started_handler;
+ OnTaskCompletedHandler on_task_completed_handler;
+ // Last reported wake up, used only in UpdateWakeUp to avoid
+ // excessive calls.
+ Optional<DelayedWakeUp> scheduled_wake_up;
+ // If false, queue will be disabled. Used only for tests.
+ bool is_enabled_for_test;
+ };
+
+ PostTaskResult PostImmediateTaskImpl(TaskQueue::PostedTask task);
+ PostTaskResult PostDelayedTaskImpl(TaskQueue::PostedTask task);
+
+ // Push the task onto the |delayed_incoming_queue|. Lock-free main thread
+ // only fast path.
+ void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
+ TimeTicks now);
+
+ // Push the task onto the |delayed_incoming_queue|. Slow path from other
+ // threads.
+ void PushOntoDelayedIncomingQueueLocked(Task pending_task);
+
+ void ScheduleDelayedWorkTask(Task pending_task);
+
+ void MoveReadyImmediateTasksToImmediateWorkQueueLocked();
+
+ // Push the task onto the |immediate_incoming_queue| and for auto pumped
+ // queues it calls MaybePostDoWorkOnMainRunner if the Incoming queue was
+ // empty.
+ void PushOntoImmediateIncomingQueueLocked(Task task);
+
+ using TaskDeque = circular_deque<Task>;
+
+ // Extracts all the tasks from the immediate incoming queue and swaps it with
+ // |queue| which must be empty.
+ // Can be called from any thread.
+ void ReloadEmptyImmediateQueue(TaskDeque* queue);
+
+ void TraceQueueSize() const;
+ static void QueueAsValueInto(const TaskDeque& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+ static void QueueAsValueInto(const std::priority_queue<Task>& queue,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+ static void TaskAsValueInto(const Task& task,
+ TimeTicks now,
+ trace_event::TracedValue* state);
+
+ void RemoveQueueEnabledVoter(const QueueEnabledVoterImpl* voter);
+ void OnQueueEnabledVoteChanged(bool enabled);
+ void EnableOrDisableWithSelector(bool enable);
+
+ // Schedules delayed work on time domain and calls the observer.
+ void UpdateDelayedWakeUp(LazyNow* lazy_now);
+ void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
+ Optional<DelayedWakeUp> wake_up);
+
+ // Activate a delayed fence if a time has come.
+ void ActivateDelayedFenceIfNeeded(TimeTicks now);
+
+ const char* name_;
+
+ const PlatformThreadId thread_id_;
+
+ mutable Lock any_thread_lock_;
+ AnyThread any_thread_;
+ struct AnyThread& any_thread() {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+ const struct AnyThread& any_thread() const {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+
+ ThreadChecker main_thread_checker_;
+ MainThreadOnly main_thread_only_;
+ MainThreadOnly& main_thread_only() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ return main_thread_only_;
+ }
+ const MainThreadOnly& main_thread_only() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ return main_thread_only_;
+ }
+
+ mutable Lock immediate_incoming_queue_lock_;
+ TaskDeque immediate_incoming_queue_;
+ TaskDeque& immediate_incoming_queue() {
+ immediate_incoming_queue_lock_.AssertAcquired();
+ return immediate_incoming_queue_;
+ }
+ const TaskDeque& immediate_incoming_queue() const {
+ immediate_incoming_queue_lock_.AssertAcquired();
+ return immediate_incoming_queue_;
+ }
+
+ // Protected by SequenceManagerImpl's AnyThread lock.
+ IncomingImmediateWorkList immediate_work_list_storage_;
+
+ const bool should_monitor_quiescence_;
+ const bool should_notify_observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
diff --git a/base/task/sequence_manager/task_queue_selector.cc b/base/task/sequence_manager/task_queue_selector.cc
new file mode 100644
index 0000000000..30a88bd9a9
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_selector.cc
@@ -0,0 +1,407 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_selector.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+// Maps a TaskQueue priority onto the selector-logic histogram bucket that is
+// recorded when a task from a queue of that priority is selected.
+TaskQueueSelectorLogic QueuePriorityToSelectorLogic(
+    TaskQueue::QueuePriority priority) {
+  switch (priority) {
+    case TaskQueue::kControlPriority:
+      return TaskQueueSelectorLogic::kControlPriorityLogic;
+    case TaskQueue::kHighestPriority:
+      return TaskQueueSelectorLogic::kHighestPriorityLogic;
+    case TaskQueue::kHighPriority:
+      return TaskQueueSelectorLogic::kHighPriorityLogic;
+    case TaskQueue::kNormalPriority:
+      return TaskQueueSelectorLogic::kNormalPriorityLogic;
+    case TaskQueue::kLowPriority:
+      return TaskQueueSelectorLogic::kLowPriorityLogic;
+    case TaskQueue::kBestEffortPriority:
+      return TaskQueueSelectorLogic::kBestEffortPriorityLogic;
+    default:
+      // Unreachable for valid priorities; kCount is only a sentinel.
+      NOTREACHED();
+      return TaskQueueSelectorLogic::kCount;
+  }
+}
+
+// Helper function used to report the number of times a selector logic is
+// triggered. This will create a histogram for the enumerated data.
+void ReportTaskSelectionLogic(TaskQueueSelectorLogic selector_logic) {
+  UMA_HISTOGRAM_ENUMERATION("TaskQueueSelector.TaskServicedPerSelectorLogic",
+                            selector_logic, TaskQueueSelectorLogic::kCount);
+}
+
+} // namespace
+
+// All starvation scores/counters start at zero; the prioritizing selector is
+// labelled "enabled" (the name is forwarded to its work-queue sets).
+TaskQueueSelector::TaskQueueSelector()
+    : prioritizing_selector_(this, "enabled"),
+      immediate_starvation_count_(0),
+      high_priority_starvation_score_(0),
+      normal_priority_starvation_score_(0),
+      low_priority_starvation_score_(0),
+      task_queue_selector_observer_(nullptr) {}
+
+TaskQueueSelector::~TaskQueueSelector() = default;
+
+// Registers |queue| for selection at the default (normal) priority.
+// Main thread only; |queue| must be enabled.
+void TaskQueueSelector::AddQueue(internal::TaskQueueImpl* queue) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  DCHECK(queue->IsQueueEnabled());
+  prioritizing_selector_.AddQueue(queue, TaskQueue::kNormalPriority);
+}
+
+// Unregisters |queue|. Disabled queues are not present in the selector's
+// work-queue sets, so there is nothing to remove for them.
+void TaskQueueSelector::RemoveQueue(internal::TaskQueueImpl* queue) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  if (queue->IsQueueEnabled()) {
+    prioritizing_selector_.RemoveQueue(queue);
+  }
+}
+
+// Re-adds |queue| to the selector's sets at its current priority and notifies
+// the observer (if set) that the queue became eligible for selection again.
+void TaskQueueSelector::EnableQueue(internal::TaskQueueImpl* queue) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  DCHECK(queue->IsQueueEnabled());
+  prioritizing_selector_.AddQueue(queue, queue->GetQueuePriority());
+  if (task_queue_selector_observer_)
+    task_queue_selector_observer_->OnTaskQueueEnabled(queue);
+}
+
+// Removes |queue| from the selector's sets so it can no longer be selected.
+// |queue| must already be marked disabled by the caller.
+void TaskQueueSelector::DisableQueue(internal::TaskQueueImpl* queue) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  DCHECK(!queue->IsQueueEnabled());
+  prioritizing_selector_.RemoveQueue(queue);
+}
+
+// Moves |queue| to the work-queue set for |priority|. For disabled queues
+// (which are in no set) the set index is written directly onto the work
+// queues so the new priority takes effect when the queue is re-enabled.
+void TaskQueueSelector::SetQueuePriority(internal::TaskQueueImpl* queue,
+                                         TaskQueue::QueuePriority priority) {
+  DCHECK_LT(priority, TaskQueue::kQueuePriorityCount);
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  if (queue->IsQueueEnabled()) {
+    prioritizing_selector_.ChangeSetIndex(queue, priority);
+  } else {
+    // Disabled queue is not in any set so we can't use ChangeSetIndex here
+    // and have to assign priority for the queue itself.
+    queue->delayed_work_queue()->AssignSetIndex(priority);
+    queue->immediate_work_queue()->AssignSetIndex(priority);
+  }
+  DCHECK_EQ(priority, queue->GetQueuePriority());
+}
+
+// Returns the priority that follows |priority| in selection order
+// (numerically the next enum value; lower priorities have higher values).
+TaskQueue::QueuePriority TaskQueueSelector::NextPriority(
+    TaskQueue::QueuePriority priority) {
+  DCHECK(priority < TaskQueue::kQueuePriorityCount);
+  return static_cast<TaskQueue::QueuePriority>(static_cast<int>(priority) + 1);
+}
+
+// Creates one delayed and one immediate work-queue set per priority level;
+// |name| is forwarded to both sets (used for identification/tracing).
+TaskQueueSelector::PrioritizingSelector::PrioritizingSelector(
+    TaskQueueSelector* task_queue_selector,
+    const char* name)
+    : task_queue_selector_(task_queue_selector),
+      delayed_work_queue_sets_(TaskQueue::kQueuePriorityCount, name),
+      immediate_work_queue_sets_(TaskQueue::kQueuePriorityCount, name) {}
+
+// Inserts both of |queue|'s work queues (delayed and immediate) into the
+// sets for |priority|. The queue must not already be present.
+void TaskQueueSelector::PrioritizingSelector::AddQueue(
+    internal::TaskQueueImpl* queue,
+    TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+  DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+  delayed_work_queue_sets_.AddQueue(queue->delayed_work_queue(), priority);
+  immediate_work_queue_sets_.AddQueue(queue->immediate_work_queue(), priority);
+#if DCHECK_IS_ON()
+  DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+// Moves both of |queue|'s work queues to the sets for |priority|. The queue
+// must already be registered with this selector.
+void TaskQueueSelector::PrioritizingSelector::ChangeSetIndex(
+    internal::TaskQueueImpl* queue,
+    TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+  DCHECK(CheckContainsQueueForTest(queue));
+#endif
+  delayed_work_queue_sets_.ChangeSetIndex(queue->delayed_work_queue(),
+                                          priority);
+  immediate_work_queue_sets_.ChangeSetIndex(queue->immediate_work_queue(),
+                                            priority);
+#if DCHECK_IS_ON()
+  DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+// Removes both of |queue|'s work queues from their sets. The queue must be
+// registered with this selector when called.
+void TaskQueueSelector::PrioritizingSelector::RemoveQueue(
+    internal::TaskQueueImpl* queue) {
+#if DCHECK_IS_ON()
+  DCHECK(CheckContainsQueueForTest(queue));
+#endif
+  delayed_work_queue_sets_.RemoveQueue(queue->delayed_work_queue());
+  immediate_work_queue_sets_.RemoveQueue(queue->immediate_work_queue());
+
+#if DCHECK_IS_ON()
+  DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+}
+
+// Finds the immediate work queue holding the oldest pending task at
+// |priority|. Returns false if every immediate queue at that priority is
+// empty.
+bool TaskQueueSelector::PrioritizingSelector::
+    ChooseOldestImmediateTaskWithPriority(TaskQueue::QueuePriority priority,
+                                          WorkQueue** out_work_queue) const {
+  return immediate_work_queue_sets_.GetOldestQueueInSet(priority,
+                                                        out_work_queue);
+}
+
+// Finds the delayed work queue holding the oldest pending task at
+// |priority|. Returns false if every delayed queue at that priority is empty.
+bool TaskQueueSelector::PrioritizingSelector::
+    ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+                                        WorkQueue** out_work_queue) const {
+  return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+// Picks between the oldest immediate and the oldest delayed task at
+// |priority|, preferring whichever has the smaller (older) enqueue order.
+// |*out_chose_delayed_over_immediate| is set to true when a delayed queue is
+// chosen while immediate work was also available. Returns false if both sets
+// are empty at this priority.
+bool TaskQueueSelector::PrioritizingSelector::
+    ChooseOldestImmediateOrDelayedTaskWithPriority(
+        TaskQueue::QueuePriority priority,
+        bool* out_chose_delayed_over_immediate,
+        WorkQueue** out_work_queue) const {
+  WorkQueue* immediate_queue;
+  DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+  EnqueueOrder immediate_enqueue_order;
+  if (immediate_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+          priority, &immediate_queue, &immediate_enqueue_order)) {
+    WorkQueue* delayed_queue;
+    EnqueueOrder delayed_enqueue_order;
+    if (delayed_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+            priority, &delayed_queue, &delayed_enqueue_order)) {
+      if (immediate_enqueue_order < delayed_enqueue_order) {
+        *out_work_queue = immediate_queue;
+      } else {
+        *out_chose_delayed_over_immediate = true;
+        *out_work_queue = delayed_queue;
+      }
+    } else {
+      // Only immediate work available at this priority.
+      *out_work_queue = immediate_queue;
+    }
+    return true;
+  }
+  // No immediate work at this priority; fall back to delayed work only.
+  return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+// Chooses the oldest runnable task at |priority| across both work-queue
+// kinds. If delayed tasks have been chosen over immediate tasks at least
+// kMaxDelayedStarvationTasks times in a row, immediate work is force-selected
+// first to stop the starvation.
+bool TaskQueueSelector::PrioritizingSelector::ChooseOldestWithPriority(
+    TaskQueue::QueuePriority priority,
+    bool* out_chose_delayed_over_immediate,
+    WorkQueue** out_work_queue) const {
+  // Select an immediate work queue if we are starving immediate tasks.
+  if (task_queue_selector_->immediate_starvation_count_ >=
+      kMaxDelayedStarvationTasks) {
+    if (ChooseOldestImmediateTaskWithPriority(priority, out_work_queue))
+      return true;
+    return ChooseOldestDelayedTaskWithPriority(priority, out_work_queue);
+  }
+  return ChooseOldestImmediateOrDelayedTaskWithPriority(
+      priority, out_chose_delayed_over_immediate, out_work_queue);
+}
+
+// Chooses the next work queue to service among priorities strictly above
+// |max_priority| (numerically lower values). Order of consideration:
+// control queue, then the starvation overrides for low/normal/high priority
+// (each fires when its accumulated score crosses its threshold), and finally
+// plain priority order from kHighestPriority down. Each selection records a
+// histogram sample describing which rule fired. Returns false when no
+// eligible queue has work.
+bool TaskQueueSelector::PrioritizingSelector::SelectWorkQueueToService(
+    TaskQueue::QueuePriority max_priority,
+    WorkQueue** out_work_queue,
+    bool* out_chose_delayed_over_immediate) {
+  DCHECK(task_queue_selector_->main_thread_checker_.CalledOnValidThread());
+  DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+
+  // Always service the control queue if it has any work.
+  if (max_priority > TaskQueue::kControlPriority &&
+      ChooseOldestWithPriority(TaskQueue::kControlPriority,
+                               out_chose_delayed_over_immediate,
+                               out_work_queue)) {
+    ReportTaskSelectionLogic(TaskQueueSelectorLogic::kControlPriorityLogic);
+    return true;
+  }
+
+  // Select from the low priority queue if we are starving it.
+  if (max_priority > TaskQueue::kLowPriority &&
+      task_queue_selector_->low_priority_starvation_score_ >=
+          kMaxLowPriorityStarvationScore &&
+      ChooseOldestWithPriority(TaskQueue::kLowPriority,
+                               out_chose_delayed_over_immediate,
+                               out_work_queue)) {
+    ReportTaskSelectionLogic(
+        TaskQueueSelectorLogic::kLowPriorityStarvationLogic);
+    return true;
+  }
+
+  // Select from the normal priority queue if we are starving it.
+  if (max_priority > TaskQueue::kNormalPriority &&
+      task_queue_selector_->normal_priority_starvation_score_ >=
+          kMaxNormalPriorityStarvationScore &&
+      ChooseOldestWithPriority(TaskQueue::kNormalPriority,
+                               out_chose_delayed_over_immediate,
+                               out_work_queue)) {
+    ReportTaskSelectionLogic(
+        TaskQueueSelectorLogic::kNormalPriorityStarvationLogic);
+    return true;
+  }
+
+  // Select from the high priority queue if we are starving it.
+  if (max_priority > TaskQueue::kHighPriority &&
+      task_queue_selector_->high_priority_starvation_score_ >=
+          kMaxHighPriorityStarvationScore &&
+      ChooseOldestWithPriority(TaskQueue::kHighPriority,
+                               out_chose_delayed_over_immediate,
+                               out_work_queue)) {
+    ReportTaskSelectionLogic(
+        TaskQueueSelectorLogic::kHighPriorityStarvationLogic);
+    return true;
+  }
+
+  // Otherwise choose in priority order.
+  for (TaskQueue::QueuePriority priority = TaskQueue::kHighestPriority;
+       priority < max_priority; priority = NextPriority(priority)) {
+    if (ChooseOldestWithPriority(priority, out_chose_delayed_over_immediate,
+                                 out_work_queue)) {
+      ReportTaskSelectionLogic(QueuePriorityToSelectorLogic(priority));
+      return true;
+    }
+  }
+  return false;
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+// Test/debug helper: returns whether |queue|'s work queues are registered in
+// this selector. The delayed and immediate sets must agree (the queue is
+// always added to or removed from both together).
+bool TaskQueueSelector::PrioritizingSelector::CheckContainsQueueForTest(
+    const internal::TaskQueueImpl* queue) const {
+  bool contains_delayed_work_queue =
+      delayed_work_queue_sets_.ContainsWorkQueueForTest(
+          queue->delayed_work_queue());
+
+  bool contains_immediate_work_queue =
+      immediate_work_queue_sets_.ContainsWorkQueueForTest(
+          queue->immediate_work_queue());
+
+  DCHECK_EQ(contains_delayed_work_queue, contains_immediate_work_queue);
+  return contains_delayed_work_queue;
+}
+#endif
+
+// Public entry point: delegates to the prioritizing selector over all
+// priorities, then updates the starvation bookkeeping for the chosen
+// priority. Returns false when no queue has runnable work.
+bool TaskQueueSelector::SelectWorkQueueToService(WorkQueue** out_work_queue) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  bool chose_delayed_over_immediate = false;
+  bool found_queue = prioritizing_selector_.SelectWorkQueueToService(
+      TaskQueue::kQueuePriorityCount, out_work_queue,
+      &chose_delayed_over_immediate);
+  if (!found_queue)
+    return false;
+
+  // We could use |(*out_work_queue)->task_queue()->GetQueuePriority()| here but
+  // for re-queued non-nestable tasks |task_queue()| returns null.
+  DidSelectQueueWithPriority(static_cast<TaskQueue::QueuePriority>(
+                                 (*out_work_queue)->work_queue_set_index()),
+                             chose_delayed_over_immediate);
+  return true;
+}
+
+// Updates the anti-starvation bookkeeping after a queue of |priority| was
+// serviced: running a higher-priority task bumps the starvation scores of
+// lower priority levels that still have pending work, while servicing a
+// level resets its own score. |chose_delayed_over_immediate| drives the
+// immediate-task starvation counter consumed by ChooseOldestWithPriority.
+void TaskQueueSelector::DidSelectQueueWithPriority(
+    TaskQueue::QueuePriority priority,
+    bool chose_delayed_over_immediate) {
+  switch (priority) {
+    case TaskQueue::kControlPriority:
+      // Control tasks do not affect starvation accounting.
+      break;
+    case TaskQueue::kHighestPriority:
+      low_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kLowPriority)
+              ? kSmallScoreIncrementForLowPriorityStarvation
+              : 0;
+      normal_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kNormalPriority)
+              ? kSmallScoreIncrementForNormalPriorityStarvation
+              : 0;
+      high_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kHighPriority)
+              ? kSmallScoreIncrementForHighPriorityStarvation
+              : 0;
+      break;
+    case TaskQueue::kHighPriority:
+      low_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kLowPriority)
+              ? kLargeScoreIncrementForLowPriorityStarvation
+              : 0;
+      normal_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kNormalPriority)
+              ? kLargeScoreIncrementForNormalPriorityStarvation
+              : 0;
+      high_priority_starvation_score_ = 0;
+      break;
+    case TaskQueue::kNormalPriority:
+      low_priority_starvation_score_ +=
+          HasTasksWithPriority(TaskQueue::kLowPriority)
+              ? kLargeScoreIncrementForLowPriorityStarvation
+              : 0;
+      normal_priority_starvation_score_ = 0;
+      break;
+    case TaskQueue::kLowPriority:
+    case TaskQueue::kBestEffortPriority:
+      // The lowest levels ran, so nothing below them is being starved.
+      low_priority_starvation_score_ = 0;
+      high_priority_starvation_score_ = 0;
+      normal_priority_starvation_score_ = 0;
+      break;
+    default:
+      NOTREACHED();
+  }
+  if (chose_delayed_over_immediate) {
+    immediate_starvation_count_++;
+  } else {
+    immediate_starvation_count_ = 0;
+  }
+}
+
+// Serializes the selector's starvation state into |state| for tracing.
+void TaskQueueSelector::AsValueInto(trace_event::TracedValue* state) const {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  state->SetInteger("high_priority_starvation_score",
+                    high_priority_starvation_score_);
+  state->SetInteger("normal_priority_starvation_score",
+                    normal_priority_starvation_score_);
+  state->SetInteger("low_priority_starvation_score",
+                    low_priority_starvation_score_);
+  state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
+}
+
+// Sets (or clears, with nullptr) the observer notified when queues become
+// enabled. The observer is not owned.
+void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
+  task_queue_selector_observer_ = observer;
+}
+
+// Returns true only if every delayed and immediate work-queue set, at every
+// priority level, is empty. Disabled queues are not in any set and so are
+// not considered.
+bool TaskQueueSelector::AllEnabledWorkQueuesAreEmpty() const {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  for (TaskQueue::QueuePriority priority = TaskQueue::kControlPriority;
+       priority < TaskQueue::kQueuePriorityCount;
+       priority = NextPriority(priority)) {
+    if (!prioritizing_selector_.delayed_work_queue_sets()->IsSetEmpty(
+            priority) ||
+        !prioritizing_selector_.immediate_work_queue_sets()->IsSetEmpty(
+            priority)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Test hook: forces the immediate starvation counter so tests can exercise
+// the force-select-immediate path in ChooseOldestWithPriority.
+void TaskQueueSelector::SetImmediateStarvationCountForTest(
+    size_t immediate_starvation_count) {
+  immediate_starvation_count_ = immediate_starvation_count;
+}
+
+// Returns true if either the delayed or the immediate work-queue set at
+// |priority| has pending tasks.
+bool TaskQueueSelector::HasTasksWithPriority(
+    TaskQueue::QueuePriority priority) {
+  return !prioritizing_selector_.delayed_work_queue_sets()->IsSetEmpty(
+             priority) ||
+         !prioritizing_selector_.immediate_work_queue_sets()->IsSetEmpty(
+             priority);
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/task_queue_selector.h b/base/task/sequence_manager/task_queue_selector.h
new file mode 100644
index 0000000000..182158be3a
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_selector.h
@@ -0,0 +1,225 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/task_queue_selector_logic.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// TaskQueueSelector is used by the SchedulerHelper to enable prioritization
+// of particular task queues.
+class BASE_EXPORT TaskQueueSelector {
+ public:
+ TaskQueueSelector();
+ ~TaskQueueSelector();
+
+ // Called to register a queue that can be selected. This function is called
+ // on the main thread.
+ void AddQueue(internal::TaskQueueImpl* queue);
+
+ // The specified work will no longer be considered for selection. This
+ // function is called on the main thread.
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ // Make |queue| eligible for selection. This function is called on the main
+ // thread. Must only be called if |queue| is disabled.
+ void EnableQueue(internal::TaskQueueImpl* queue);
+
+ // Disable selection from |queue|. Must only be called if |queue| is enabled.
+ void DisableQueue(internal::TaskQueueImpl* queue);
+
+  // Called to set the priority of |queue|.
+ void SetQueuePriority(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+
+ // Called to choose the work queue from which the next task should be taken
+ // and run. Return true if |out_work_queue| indicates the queue to service or
+ // false to avoid running any task.
+ //
+ // This function is called on the main thread.
+ bool SelectWorkQueueToService(WorkQueue** out_work_queue);
+
+ // Serialize the selector state for tracing.
+ void AsValueInto(trace_event::TracedValue* state) const;
+
+ class BASE_EXPORT Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Called when |queue| transitions from disabled to enabled.
+ virtual void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) = 0;
+ };
+
+ // Called once to set the Observer. This function is called
+ // on the main thread. If |observer| is null, then no callbacks will occur.
+ void SetTaskQueueSelectorObserver(Observer* observer);
+
+ // Returns true if all the enabled work queues are empty. Returns false
+ // otherwise.
+ bool AllEnabledWorkQueuesAreEmpty() const;
+
+ protected:
+ class BASE_EXPORT PrioritizingSelector {
+ public:
+ PrioritizingSelector(TaskQueueSelector* task_queue_selector,
+ const char* name);
+
+ void ChangeSetIndex(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void AddQueue(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ bool SelectWorkQueueToService(TaskQueue::QueuePriority max_priority,
+ WorkQueue** out_work_queue,
+ bool* out_chose_delayed_over_immediate);
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return &delayed_work_queue_sets_;
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return &immediate_work_queue_sets_;
+ }
+
+ const WorkQueueSets* delayed_work_queue_sets() const {
+ return &delayed_work_queue_sets_;
+ }
+ const WorkQueueSets* immediate_work_queue_sets() const {
+ return &immediate_work_queue_sets_;
+ }
+
+ bool ChooseOldestWithPriority(TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+ bool CheckContainsQueueForTest(const internal::TaskQueueImpl* queue) const;
+#endif
+
+ private:
+ bool ChooseOldestImmediateTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+ bool ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+ // Return true if |out_queue| contains the queue with the oldest pending
+ // task from the set of queues of |priority|, or false if all queues of that
+ // priority are empty. In addition |out_chose_delayed_over_immediate| is set
+ // to true iff we chose a delayed work queue in favour of an immediate work
+ // queue.
+ bool ChooseOldestImmediateOrDelayedTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+ const TaskQueueSelector* task_queue_selector_;
+ WorkQueueSets delayed_work_queue_sets_;
+ WorkQueueSets immediate_work_queue_sets_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrioritizingSelector);
+ };
+
+  // Sets the immediate starvation count, allowing tests to simulate a run of
+  // delayed tasks having been chosen over immediate tasks so that the
+  // force-select-immediate path can be exercised.
+ void SetImmediateStarvationCountForTest(size_t immediate_starvation_count);
+
+ PrioritizingSelector* prioritizing_selector_for_test() {
+ return &prioritizing_selector_;
+ }
+
+ // Maximum score to accumulate before high priority tasks are run even in
+ // the presence of highest priority tasks.
+ static const size_t kMaxHighPriorityStarvationScore = 3;
+
+ // Increment to be applied to the high priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the high priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForHighPriorityStarvation = 1;
+
+ // Maximum score to accumulate before normal priority tasks are run even in
+ // the presence of higher priority tasks i.e. highest and high priority tasks.
+ static const size_t kMaxNormalPriorityStarvationScore = 5;
+
+ // Increment to be applied to the normal priority starvation score when a task
+ // should have a large effect on the score. E.g Only a few high priority
+ // priority tasks must run before the normal priority queue is considered
+ // starved.
+ static const size_t kLargeScoreIncrementForNormalPriorityStarvation = 2;
+
+ // Increment to be applied to the normal priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the normal priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForNormalPriorityStarvation = 1;
+
+ // Maximum score to accumulate before low priority tasks are run even in the
+ // presence of highest, high, or normal priority tasks.
+ static const size_t kMaxLowPriorityStarvationScore = 25;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have a large effect on the score. E.g. Only a few normal/high
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kLargeScoreIncrementForLowPriorityStarvation = 5;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have only a small effect on the score. E.g. A lot of highest
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForLowPriorityStarvation = 1;
+
+  // Maximum number of delayed tasks which can be run while there's a waiting
+  // non-delayed task.
+ static const size_t kMaxDelayedStarvationTasks = 3;
+
+ private:
+ // Returns the priority which is next after |priority|.
+ static TaskQueue::QueuePriority NextPriority(
+ TaskQueue::QueuePriority priority);
+
+ bool SelectWorkQueueToServiceInternal(WorkQueue** out_work_queue);
+
+ // Called whenever the selector chooses a task queue for execution with the
+ // priority |priority|.
+ void DidSelectQueueWithPriority(TaskQueue::QueuePriority priority,
+ bool chose_delayed_over_immediate);
+
+ // Returns true if there are pending tasks with priority |priority|.
+ bool HasTasksWithPriority(TaskQueue::QueuePriority priority);
+
+ ThreadChecker main_thread_checker_;
+
+ PrioritizingSelector prioritizing_selector_;
+ size_t immediate_starvation_count_;
+ size_t high_priority_starvation_score_;
+ size_t normal_priority_starvation_score_;
+ size_t low_priority_starvation_score_;
+
+ Observer* task_queue_selector_observer_; // Not owned.
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueSelector);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
diff --git a/base/task/sequence_manager/task_queue_selector_logic.h b/base/task/sequence_manager/task_queue_selector_logic.h
new file mode 100644
index 0000000000..8cf8933783
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_selector_logic.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// Used to describe the logic triggered when a task queue is selected to
+// service.
+// This enum is used for histograms and should not be renumbered.
+enum class TaskQueueSelectorLogic {
+
+ // Selected due to priority rules.
+ kControlPriorityLogic = 0,
+ kHighestPriorityLogic = 1,
+ kHighPriorityLogic = 2,
+ kNormalPriorityLogic = 3,
+ kLowPriorityLogic = 4,
+ kBestEffortPriorityLogic = 5,
+
+ // Selected due to starvation logic.
+ kHighPriorityStarvationLogic = 6,
+ kNormalPriorityStarvationLogic = 7,
+ kLowPriorityStarvationLogic = 8,
+
+ kCount = 9,
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
diff --git a/base/task/sequence_manager/task_queue_selector_unittest.cc b/base/task/sequence_manager/task_queue_selector_unittest.cc
new file mode 100644
index 0000000000..c3742a2b2e
--- /dev/null
+++ b/base/task/sequence_manager/task_queue_selector_unittest.cc
@@ -0,0 +1,885 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/task_queue_selector.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+// To avoid symbol collisions in jumbo builds.
+namespace task_queue_selector_unittest {
+
+class MockObserver : public TaskQueueSelector::Observer {
+ public:
+ MockObserver() = default;
+ ~MockObserver() override = default;
+
+ MOCK_METHOD1(OnTaskQueueEnabled, void(internal::TaskQueueImpl*));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockObserver);
+};
+
+class TaskQueueSelectorForTest : public TaskQueueSelector {
+ public:
+ using TaskQueueSelector::prioritizing_selector_for_test;
+ using TaskQueueSelector::PrioritizingSelector;
+ using TaskQueueSelector::SetImmediateStarvationCountForTest;
+
+ // Returns the number of highest priority tasks needed to starve high priority
+ // task.
+ static constexpr size_t NumberOfHighestPriorityToStarveHighPriority() {
+ return (kMaxHighPriorityStarvationScore +
+ kSmallScoreIncrementForHighPriorityStarvation - 1) /
+ kSmallScoreIncrementForHighPriorityStarvation;
+ }
+
+ // Returns the number of highest priority tasks needed to starve normal
+ // priority tasks.
+ static constexpr size_t NumberOfHighestPriorityToStarveNormalPriority() {
+ return (kMaxNormalPriorityStarvationScore +
+ kSmallScoreIncrementForNormalPriorityStarvation - 1) /
+ kSmallScoreIncrementForNormalPriorityStarvation;
+ }
+
+ // Returns the number of high priority tasks needed to starve normal priority
+ // tasks.
+ static constexpr size_t NumberOfHighPriorityToStarveNormalPriority() {
+ return (kMaxNormalPriorityStarvationScore +
+ kLargeScoreIncrementForNormalPriorityStarvation - 1) /
+ kLargeScoreIncrementForNormalPriorityStarvation;
+ }
+
+ // Returns the number of highest priority tasks needed to starve low priority
+ // ones.
+ static constexpr size_t NumberOfHighestPriorityToStarveLowPriority() {
+ return (kMaxLowPriorityStarvationScore +
+ kSmallScoreIncrementForLowPriorityStarvation - 1) /
+ kSmallScoreIncrementForLowPriorityStarvation;
+ }
+
+ // Returns the number of high/normal priority tasks needed to starve low
+ // priority ones.
+ static constexpr size_t NumberOfHighAndNormalPriorityToStarveLowPriority() {
+ return (kMaxLowPriorityStarvationScore +
+ kLargeScoreIncrementForLowPriorityStarvation - 1) /
+ kLargeScoreIncrementForLowPriorityStarvation;
+ }
+};
+
+class TaskQueueSelectorTest : public testing::Test {
+ public:
+ TaskQueueSelectorTest()
+ : test_closure_(BindRepeating(&TaskQueueSelectorTest::TestFunction)) {}
+ ~TaskQueueSelectorTest() override = default;
+
+ TaskQueueSelectorForTest::PrioritizingSelector* prioritizing_selector() {
+ return selector_.prioritizing_selector_for_test();
+ }
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return prioritizing_selector()->delayed_work_queue_sets();
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return prioritizing_selector()->immediate_work_queue_sets();
+ }
+
+ void PushTasks(const size_t queue_indices[], size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ EnqueueOrder::Generator enqueue_order_generator;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ enqueue_order_generator.GenerateNext()));
+ }
+ }
+
+ void PushTasksWithEnqueueOrder(const size_t queue_indices[],
+ const size_t enqueue_orders[],
+ size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(enqueue_orders[i])));
+ }
+ }
+
+ std::vector<size_t> PopTasks() {
+ std::vector<size_t> order;
+ WorkQueue* chosen_work_queue;
+ while (selector_.SelectWorkQueueToService(&chosen_work_queue)) {
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ order.push_back(chosen_queue_index);
+ chosen_work_queue->PopTaskForTesting();
+ immediate_work_queue_sets()->OnPopQueue(chosen_work_queue);
+ }
+ return order;
+ }
+
+ static void TestFunction() {}
+
+ protected:
+ void SetUp() final {
+ time_domain_ = std::make_unique<MockTimeDomain>(TimeTicks() +
+ TimeDelta::FromSeconds(1));
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ std::unique_ptr<TaskQueueImpl> task_queue =
+ std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+ selector_.AddQueue(task_queue.get());
+ task_queues_.push_back(std::move(task_queue));
+ }
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[i]->GetQueuePriority())
+ << i;
+ queue_to_index_map_.insert(std::make_pair(task_queues_[i].get(), i));
+ }
+ histogram_tester_.reset(new HistogramTester());
+ }
+
+ void TearDown() final {
+ for (std::unique_ptr<TaskQueueImpl>& task_queue : task_queues_) {
+ // Note since this test doesn't have a SequenceManager we need to
+ // manually remove |task_queue| from the |selector_|. Normally
+ // UnregisterTaskQueue would do that.
+ selector_.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ }
+ }
+
+ std::unique_ptr<TaskQueueImpl> NewTaskQueueWithBlockReporting() {
+ return std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+ }
+
+ const size_t kTaskQueueCount = 5;
+ RepeatingClosure test_closure_;
+ TaskQueueSelectorForTest selector_;
+ std::unique_ptr<TimeDomain> time_domain_;
+ std::vector<std::unique_ptr<TaskQueueImpl>> task_queues_;
+ std::map<TaskQueueImpl*, size_t> queue_to_index_map_;
+ std::unique_ptr<HistogramTester> histogram_tester_;
+};
+
+TEST_F(TaskQueueSelectorTest, TestDefaultPriority) {
+ size_t queue_order[] = {4, 3, 2, 1, 0};
+ PushTasks(queue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4, 3, 2, 1, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kNormalPriorityLogic)),
+ 5);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighestPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 1, 3, 4, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestLowPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3, 4, 2));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kLowPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(3, 1, 4, 2, 0));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kBestEffortPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[4].get(),
+ TaskQueue::kControlPriority);
+ EXPECT_EQ(TaskQueue::kControlPriority, task_queues_[4]->GetQueuePriority());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(4, 2, 0, 1, 3));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kControlPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithEnabledQueue) {
+ task_queues_[1]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[1].get());
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+ task_queues_[1]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[1].get());
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestObserverWithSetQueuePriorityAndQueueAlreadyEnabled) {
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(0);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableEnable) {
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[2].get());
+ task_queues_[4]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[4].get());
+ // Disabling a queue should not affect its priority.
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[4]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3));
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+ task_queues_[2]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[2].get());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(2));
+ task_queues_[4]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[4].get());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableChangePriorityThenEnable) {
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_TRUE(task_queues_[2]->immediate_work_queue()->Empty());
+
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_FALSE(task_queues_[2]->immediate_work_queue()->Empty());
+ task_queues_[2]->SetQueueEnabledForTest(true);
+
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestEmptyQueues) {
+ WorkQueue* chosen_work_queue = nullptr;
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Test only disabled queues.
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+ task_queues_[0]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[0].get());
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // These tests are unusual since there's no TQM. To avoid a later DCHECK when
+ // deleting the task queue, we re-enable the queue here so the selector
+ // doesn't get out of sync.
+ task_queues_[0]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[0].get());
+}
+
+TEST_F(TaskQueueSelectorTest, TestAge) {
+ size_t enqueue_order[] = {10, 1, 2, 9, 4};
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasksWithEnqueueOrder(queue_order, enqueue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(1, 2, 4, 3, 0));
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlStarvesOthers) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kControlPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[3].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHigh) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[1], 0ul); // Check highest doesn't starve high.
+ EXPECT_GT(counts[0], counts[1]); // Check highest gets more chance to run.
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHighOrNormal) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestHighestPriorityDoesNotStarveHighOrNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[2], counts[3]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[3], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormal) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check normal isn't starved.
+ EXPECT_GT(counts[1], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[1], counts[2]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestNormalPriorityDoesNotStarveLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[0], 0ul); // Check normal doesn't starve low.
+ EXPECT_GT(counts[2], counts[0]); // Check normal gets more chance to run.
+ EXPECT_EQ(0ul, counts[1]); // Check best effort is starved.
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortGetsStarved) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[1]->GetQueuePriority());
+
+ // Check that normal priority tasks starve best effort.
+ WorkQueue* chosen_work_queue = nullptr;
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that highest priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that high priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that low priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kLowPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that control priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kControlPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestHighPriorityStarvationScoreIncreasedOnlyWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve high priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighestPriorityToStarveHighPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a high priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the high priority task is not considered starved, and thus isn't
+ // processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestNormalPriorityStarvationScoreIncreasedOnlyWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve normal priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <= TaskQueueSelectorForTest::
+ NumberOfHighestPriorityToStarveNormalPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ // Run a number of high priority tasks needed to starve normal priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighPriorityToStarveNormalPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a normal priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the normal priority task is not considered starved, and thus
+ // isn't processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestLowPriorityTaskStarvationOnlyIncreasedWhenTasksArePresent) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+
+ // Run a number of highest priority tasks needed to starve low priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <=
+ TaskQueueSelectorForTest::NumberOfHighestPriorityToStarveLowPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+
+ // Run a number of high/normal priority tasks needed to starve low priority
+ // tasks (when present).
+ for (size_t num_tasks = 0;
+ num_tasks <= TaskQueueSelectorForTest::
+ NumberOfHighAndNormalPriorityToStarveLowPriority();
+ num_tasks++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ // Don't remove task from queue to simulate the queue is still full.
+ }
+
+ // Post a low priority task.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kLowPriority);
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Check that the low priority task is not considered starved, and thus
+ // isn't processed.
+ EXPECT_NE(
+ static_cast<int>(
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty) {
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+ PopTasks();
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty_ControlPriority) {
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kControlPriority);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_Empty) {
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_FALSE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyDelayed) {
+ task_queues_[0]->delayed_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->delayed_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyImmediate) {
+ task_queues_[0]->immediate_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->immediate_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+
+ TaskQueueImpl::Task task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(2));
+ task_queue->immediate_work_queue()->Push(std::move(task));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithTwoBlockedQueues) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ std::unique_ptr<TaskQueueImpl> task_queue2(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+ selector.AddQueue(task_queue2.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ task_queue2->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+ selector.DisableQueue(task_queue2.get());
+
+ selector.SetQueuePriority(task_queue2.get(), TaskQueue::kControlPriority);
+
+ TaskQueueImpl::Task task1(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder::FromIntForTesting(2),
+ EnqueueOrder::FromIntForTesting(2));
+ TaskQueueImpl::Task task2(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ TimeTicks(), EnqueueOrder::FromIntForTesting(3),
+ EnqueueOrder::FromIntForTesting(3));
+ task_queue->immediate_work_queue()->Push(std::move(task1));
+ task_queue2->immediate_work_queue()->Push(std::move(task2));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+ testing::Mock::VerifyAndClearExpectations(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue2->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue2.get());
+ selector.RemoveQueue(task_queue2.get());
+ task_queue2->UnregisterTaskQueue();
+}
+
+struct ChooseOldestWithPriorityTestParam {
+ int delayed_task_enqueue_order;
+ int immediate_task_enqueue_order;
+ int immediate_starvation_count;
+ const char* expected_work_queue_name;
+ bool expected_did_starve_immediate_queue;
+};
+
+static const ChooseOldestWithPriorityTestParam
+ kChooseOldestWithPriorityTestCases[] = {
+ {1, 2, 0, "delayed", true}, {1, 2, 1, "delayed", true},
+ {1, 2, 2, "delayed", true}, {1, 2, 3, "immediate", false},
+ {1, 2, 4, "immediate", false}, {2, 1, 4, "immediate", false},
+ {2, 1, 4, "immediate", false},
+};
+
+class ChooseOldestWithPriorityTest
+ : public TaskQueueSelectorTest,
+ public testing::WithParamInterface<ChooseOldestWithPriorityTestParam> {};
+
+TEST_P(ChooseOldestWithPriorityTest, RoundRobinTest) {
+ task_queues_[0]->immediate_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder::FromIntForTesting(GetParam().immediate_task_enqueue_order),
+ EnqueueOrder::FromIntForTesting(
+ GetParam().immediate_task_enqueue_order)));
+
+ task_queues_[0]->delayed_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder::FromIntForTesting(GetParam().delayed_task_enqueue_order),
+ EnqueueOrder::FromIntForTesting(GetParam().delayed_task_enqueue_order)));
+
+ selector_.SetImmediateStarvationCountForTest(
+ GetParam().immediate_starvation_count);
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue->task_queue(), task_queues_[0].get());
+ EXPECT_STREQ(chosen_work_queue->name(), GetParam().expected_work_queue_name);
+ EXPECT_EQ(chose_delayed_over_immediate,
+ GetParam().expected_did_starve_immediate_queue);
+}
+
+INSTANTIATE_TEST_CASE_P(ChooseOldestWithPriorityTest,
+ ChooseOldestWithPriorityTest,
+ testing::ValuesIn(kChooseOldestWithPriorityTestCases));
+
+} // namespace task_queue_selector_unittest
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/task_time_observer.h b/base/task/sequence_manager/task_time_observer.h
new file mode 100644
index 0000000000..151a94119b
--- /dev/null
+++ b/base/task/sequence_manager/task_time_observer.h
@@ -0,0 +1,32 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
+
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+// TaskTimeObserver provides an API for observing completion of tasks.
+class TaskTimeObserver {
+ public:
+ TaskTimeObserver() = default;
+ virtual ~TaskTimeObserver() = default;
+
+ // To be called when task is about to start.
+ virtual void WillProcessTask(TimeTicks start_time) = 0;
+
+ // To be called when task is completed.
+ virtual void DidProcessTask(TimeTicks start_time, TimeTicks end_time) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskTimeObserver);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
diff --git a/base/task/sequence_manager/test/fake_task.cc b/base/task/sequence_manager/test/fake_task.cc
new file mode 100644
index 0000000000..2ddd14eaf0
--- /dev/null
+++ b/base/task/sequence_manager/test/fake_task.cc
@@ -0,0 +1,35 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/test/fake_task.h"
+
+namespace base {
+namespace sequence_manager {
+
+FakeTask::FakeTask()
+ : TaskQueue::Task(TaskQueue::PostedTask(OnceClosure(), FROM_HERE),
+ TimeTicks()) {}
+
+FakeTaskTiming::FakeTaskTiming()
+ : TaskTiming(false /* has_wall_time */, false /* has_thread_time */) {}
+
+FakeTaskTiming::FakeTaskTiming(TimeTicks start, TimeTicks end)
+ : FakeTaskTiming() {
+ has_wall_time_ = true;
+ start_time_ = start;
+ end_time_ = end;
+}
+
+FakeTaskTiming::FakeTaskTiming(TimeTicks start,
+ TimeTicks end,
+ ThreadTicks thread_start,
+ ThreadTicks thread_end)
+ : FakeTaskTiming(start, end) {
+ has_thread_time_ = true;
+ start_thread_time_ = thread_start;
+ end_thread_time_ = thread_end;
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/test/fake_task.h b/base/task/sequence_manager/test/fake_task.h
new file mode 100644
index 0000000000..54cc3ac04d
--- /dev/null
+++ b/base/task/sequence_manager/test/fake_task.h
@@ -0,0 +1,31 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_FAKE_TASK_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_FAKE_TASK_H_
+
+#include "base/task/sequence_manager/task_queue.h"
+
+namespace base {
+namespace sequence_manager {
+
+class FakeTask : public TaskQueue::Task {
+ public:
+ FakeTask();
+};
+
+class FakeTaskTiming : public TaskQueue::TaskTiming {
+ public:
+ FakeTaskTiming();
+ FakeTaskTiming(TimeTicks start, TimeTicks end);
+ FakeTaskTiming(TimeTicks start,
+ TimeTicks end,
+ ThreadTicks thread_start,
+ ThreadTicks thread_end);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_FAKE_TASK_H_
diff --git a/base/task/sequence_manager/test/lazy_thread_controller_for_test.cc b/base/task/sequence_manager/test/lazy_thread_controller_for_test.cc
new file mode 100644
index 0000000000..39a7a1104f
--- /dev/null
+++ b/base/task/sequence_manager/test/lazy_thread_controller_for_test.cc
@@ -0,0 +1,123 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/test/lazy_thread_controller_for_test.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/time/default_tick_clock.h"
+
+namespace base {
+namespace sequence_manager {
+
+LazyThreadControllerForTest::LazyThreadControllerForTest()
+ : ThreadControllerImpl(MessageLoop::current(),
+ nullptr,
+ DefaultTickClock::GetInstance()),
+ thread_ref_(PlatformThread::CurrentRef()) {
+ if (message_loop_)
+ task_runner_ = message_loop_->task_runner();
+}
+
+LazyThreadControllerForTest::~LazyThreadControllerForTest() = default;
+
+void LazyThreadControllerForTest::EnsureMessageLoop() {
+ if (message_loop_)
+ return;
+ DCHECK(RunsTasksInCurrentSequence());
+ message_loop_ = MessageLoop::current();
+ DCHECK(message_loop_);
+ task_runner_ = message_loop_->task_runner();
+ if (pending_observer_) {
+ RunLoop::AddNestingObserverOnCurrentThread(this);
+ pending_observer_ = false;
+ }
+ if (pending_default_task_runner_) {
+ ThreadControllerImpl::SetDefaultTaskRunner(pending_default_task_runner_);
+ pending_default_task_runner_ = nullptr;
+ }
+}
+
+bool LazyThreadControllerForTest::HasMessageLoop() {
+ return !!message_loop_;
+}
+
+void LazyThreadControllerForTest::AddNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ // While |observer| _could_ be associated with the current thread regardless
+ // of the presence of a MessageLoop, the association is delayed until
+ // EnsureMessageLoop() is invoked. This works around a state issue where
+ // otherwise many tests fail because of the following sequence:
+ // 1) blink::scheduler::CreateRendererSchedulerForTests()
+ // -> SequenceManager::SequenceManager()
+ // -> LazySchedulerMessageLoopDelegateForTests::AddNestingObserver()
+ // 2) Any test framework with a MessageLoop member (and not caring
+ // about the blink scheduler) does:
+ // blink::scheduler::GetSingleThreadTaskRunnerForTesting()->PostTask(
+ // FROM_HERE, an_init_task_with_a_nested_loop);
+ // RunLoop.RunUntilIdle();
+ // 3) |a_task_with_a_nested_loop| triggers
+ // SequenceManager::OnBeginNestedLoop() which:
+ // a) flags any_thread().is_nested = true;
+ // b) posts a task to self, which triggers:
+ // LazySchedulerMessageLoopDelegateForTests::PostDelayedTask()
+ // 4) This self-task in turn triggers SequenceManager::DoWork()
+ // which expects to be the only one to trigger nested loops (doesn't
+ // support SequenceManager::OnBeginNestedLoop() being invoked before
+ // it kicks in), resulting in it hitting:
+ // DCHECK_EQ(any_thread().is_nested, delegate_->IsNested()); (1 vs 0).
+ // TODO(skyostil): fix this convolution as part of http://crbug.com/495659.
+ ThreadControllerImpl::nesting_observer_ = observer;
+ if (!HasMessageLoop()) {
+ DCHECK(!pending_observer_);
+ pending_observer_ = true;
+ return;
+ }
+ RunLoop::AddNestingObserverOnCurrentThread(this);
+}
+
+void LazyThreadControllerForTest::RemoveNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ ThreadControllerImpl::nesting_observer_ = nullptr;
+ if (!HasMessageLoop()) {
+ DCHECK(pending_observer_);
+ pending_observer_ = false;
+ return;
+ }
+ if (MessageLoop::current() != message_loop_)
+ return;
+ RunLoop::RemoveNestingObserverOnCurrentThread(this);
+}
+
+bool LazyThreadControllerForTest::RunsTasksInCurrentSequence() {
+ return thread_ref_ == PlatformThread::CurrentRef();
+}
+
+void LazyThreadControllerForTest::ScheduleWork() {
+ EnsureMessageLoop();
+ ThreadControllerImpl::ScheduleWork();
+}
+
+void LazyThreadControllerForTest::SetNextDelayedDoWork(LazyNow* lazy_now,
+ TimeTicks run_time) {
+ EnsureMessageLoop();
+ ThreadControllerImpl::SetNextDelayedDoWork(lazy_now, run_time);
+}
+
+void LazyThreadControllerForTest::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ if (!HasMessageLoop()) {
+ pending_default_task_runner_ = task_runner;
+ return;
+ }
+ ThreadControllerImpl::SetDefaultTaskRunner(task_runner);
+}
+
+void LazyThreadControllerForTest::RestoreDefaultTaskRunner() {
+ pending_default_task_runner_ = nullptr;
+ if (HasMessageLoop() && MessageLoop::current() == message_loop_)
+ ThreadControllerImpl::RestoreDefaultTaskRunner();
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/test/lazy_thread_controller_for_test.h b/base/task/sequence_manager/test/lazy_thread_controller_for_test.h
new file mode 100644
index 0000000000..6cc0e523b6
--- /dev/null
+++ b/base/task/sequence_manager/test/lazy_thread_controller_for_test.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_LAZY_THREAD_CONTROLLER_FOR_TEST_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_LAZY_THREAD_CONTROLLER_FOR_TEST_H_
+
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/thread_controller_impl.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace sequence_manager {
+
+// This class connects the scheduler to a MessageLoop, but unlike
+// ThreadControllerImpl it allows the message loop to be created lazily
+// after the scheduler has been brought up. This is needed in testing scenarios
+// where Blink is initialized before a MessageLoop has been created.
+//
+// TODO(skyostil): Fix the relevant test suites and remove this class
+// (crbug.com/495659).
+class LazyThreadControllerForTest : public internal::ThreadControllerImpl {
+ public:
+ LazyThreadControllerForTest();
+ ~LazyThreadControllerForTest() override;
+
+ // internal::ThreadControllerImpl:
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
+ bool RunsTasksInCurrentSequence() override;
+ void ScheduleWork() override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) override;
+ void RestoreDefaultTaskRunner() override;
+
+ private:
+ bool HasMessageLoop();
+ void EnsureMessageLoop();
+
+ PlatformThreadRef thread_ref_;
+
+ bool pending_observer_ = false;
+ scoped_refptr<SingleThreadTaskRunner> pending_default_task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(LazyThreadControllerForTest);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_LAZY_THREAD_CONTROLLER_FOR_TEST_H_
diff --git a/base/task/sequence_manager/test/mock_time_domain.cc b/base/task/sequence_manager/test/mock_time_domain.cc
new file mode 100644
index 0000000000..b0ced82a70
--- /dev/null
+++ b/base/task/sequence_manager/test/mock_time_domain.cc
@@ -0,0 +1,39 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/test/mock_time_domain.h"
+
+namespace base {
+namespace sequence_manager {
+
+MockTimeDomain::MockTimeDomain(TimeTicks initial_now_ticks)
+ : now_ticks_(initial_now_ticks) {}
+
+MockTimeDomain::~MockTimeDomain() = default;
+
+LazyNow MockTimeDomain::CreateLazyNow() const {
+ return LazyNow(now_ticks_);
+}
+
+TimeTicks MockTimeDomain::Now() const {
+ return now_ticks_;
+}
+
+void MockTimeDomain::SetNowTicks(TimeTicks now_ticks) {
+ now_ticks_ = now_ticks;
+}
+
+Optional<TimeDelta> MockTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
+ return nullopt;
+}
+
+void MockTimeDomain::SetNextDelayedDoWork(LazyNow* lazy_now,
+ TimeTicks run_time) {}
+
+const char* MockTimeDomain::GetName() const {
+ return "MockTimeDomain";
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/test/mock_time_domain.h b/base/task/sequence_manager/test/mock_time_domain.h
new file mode 100644
index 0000000000..0744e696a4
--- /dev/null
+++ b/base/task/sequence_manager/test/mock_time_domain.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_MOCK_TIME_DOMAIN_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_MOCK_TIME_DOMAIN_H_
+
+#include "base/task/sequence_manager/time_domain.h"
+
+namespace base {
+namespace sequence_manager {
+
+// TimeDomain with a mock clock and not invoking SequenceManager.
+// NOTE: All methods are main thread only.
+class MockTimeDomain : public TimeDomain {
+ public:
+ explicit MockTimeDomain(TimeTicks initial_now_ticks);
+ ~MockTimeDomain() override;
+
+ void SetNowTicks(TimeTicks now_ticks);
+
+ // TimeDomain implementation:
+ LazyNow CreateLazyNow() const override;
+ TimeTicks Now() const override;
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ const char* GetName() const override;
+
+ private:
+ TimeTicks now_ticks_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTimeDomain);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_MOCK_TIME_DOMAIN_H_
diff --git a/base/task/sequence_manager/test/sequence_manager_for_test.cc b/base/task/sequence_manager/test/sequence_manager_for_test.cc
new file mode 100644
index 0000000000..3442957f48
--- /dev/null
+++ b/base/task/sequence_manager/test/sequence_manager_for_test.cc
@@ -0,0 +1,79 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/test/sequence_manager_for_test.h"
+
+#include "base/task/sequence_manager/thread_controller_impl.h"
+
+namespace base {
+namespace sequence_manager {
+
+namespace {
+
+class ThreadControllerForTest : public internal::ThreadControllerImpl {
+ public:
+ ThreadControllerForTest(MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* time_source)
+ : ThreadControllerImpl(message_loop,
+ std::move(task_runner),
+ time_source) {}
+
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override {
+ if (!message_loop_)
+ return;
+ ThreadControllerImpl::AddNestingObserver(observer);
+ }
+
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override {
+ if (!message_loop_)
+ return;
+ ThreadControllerImpl::RemoveNestingObserver(observer);
+ }
+
+ ~ThreadControllerForTest() override = default;
+};
+
+} // namespace
+
+SequenceManagerForTest::SequenceManagerForTest(
+ std::unique_ptr<internal::ThreadController> thread_controller)
+ : SequenceManagerImpl(std::move(thread_controller)) {}
+
+// static
+std::unique_ptr<SequenceManagerForTest> SequenceManagerForTest::Create(
+ MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* clock) {
+ return std::make_unique<SequenceManagerForTest>(
+ std::make_unique<ThreadControllerForTest>(message_loop,
+ std::move(task_runner), clock));
+}
+
+size_t SequenceManagerForTest::ActiveQueuesCount() const {
+ return main_thread_only().active_queues.size();
+}
+
+bool SequenceManagerForTest::HasImmediateWork() const {
+ return !main_thread_only().selector.AllEnabledWorkQueuesAreEmpty();
+}
+
+size_t SequenceManagerForTest::PendingTasksCount() const {
+ size_t task_count = 0;
+ for (auto* const queue : main_thread_only().active_queues)
+ task_count += queue->GetNumberOfPendingTasks();
+ return task_count;
+}
+
+size_t SequenceManagerForTest::QueuesToDeleteCount() const {
+ return main_thread_only().queues_to_delete.size();
+}
+
+size_t SequenceManagerForTest::QueuesToShutdownCount() {
+ TakeQueuesToGracefullyShutdownFromHelper();
+ return main_thread_only().queues_to_gracefully_shutdown.size();
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/test/sequence_manager_for_test.h b/base/task/sequence_manager/test/sequence_manager_for_test.h
new file mode 100644
index 0000000000..442c9a8595
--- /dev/null
+++ b/base/task/sequence_manager/test/sequence_manager_for_test.h
@@ -0,0 +1,46 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_SEQUENCE_MANAGER_FOR_TEST_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_SEQUENCE_MANAGER_FOR_TEST_H_
+
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+class MessageLoop;
+
+namespace sequence_manager {
+
+class SequenceManagerForTest : public internal::SequenceManagerImpl {
+ public:
+ explicit SequenceManagerForTest(
+ std::unique_ptr<internal::ThreadController> thread_controller);
+
+ ~SequenceManagerForTest() override = default;
+
+ // Creates SequenceManagerImpl using ThreadControllerImpl constructed with
+ // the given arguments. ThreadControllerImpl is slightly overridden to skip
+ // nesting observers registration if message loop is absent.
+ static std::unique_ptr<SequenceManagerForTest> Create(
+ MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* clock);
+
+ size_t ActiveQueuesCount() const;
+ bool HasImmediateWork() const;
+ size_t PendingTasksCount() const;
+ size_t QueuesToDeleteCount() const;
+ size_t QueuesToShutdownCount();
+
+ using internal::SequenceManagerImpl::GetNextSequenceNumber;
+ using internal::SequenceManagerImpl::WakeUpReadyDelayedQueues;
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_SEQUENCE_MANAGER_FOR_TEST_H_
diff --git a/base/task/sequence_manager/test/test_task_queue.cc b/base/task/sequence_manager/test/test_task_queue.cc
new file mode 100644
index 0000000000..19abe11383
--- /dev/null
+++ b/base/task/sequence_manager/test/test_task_queue.cc
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/test/test_task_queue.h"
+
+#include "base/task/sequence_manager/task_queue_impl.h"
+
+namespace base {
+namespace sequence_manager {
+
+TestTaskQueue::TestTaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec)
+ : TaskQueue(std::move(impl), spec), weak_factory_(this) {}
+
+TestTaskQueue::~TestTaskQueue() = default;
+
+WeakPtr<TestTaskQueue> TestTaskQueue::GetWeakPtr() {
+ return weak_factory_.GetWeakPtr();
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/test/test_task_queue.h b/base/task/sequence_manager/test/test_task_queue.h
new file mode 100644
index 0000000000..2f5a64e16d
--- /dev/null
+++ b/base/task/sequence_manager/test/test_task_queue.h
@@ -0,0 +1,33 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_QUEUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_QUEUE_H_
+
+#include "base/memory/weak_ptr.h"
+#include "base/task/sequence_manager/task_queue.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TestTaskQueue : public TaskQueue {
+ public:
+ explicit TestTaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
+ const TaskQueue::Spec& spec);
+
+ using TaskQueue::GetTaskQueueImpl;
+
+ WeakPtr<TestTaskQueue> GetWeakPtr();
+
+ private:
+ ~TestTaskQueue() override; // Ref-counted.
+
+ // Used to ensure that task queue is deleted in tests.
+ WeakPtrFactory<TestTaskQueue> weak_factory_;
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_QUEUE_H_
diff --git a/base/task/sequence_manager/test/test_task_time_observer.h b/base/task/sequence_manager/test/test_task_time_observer.h
new file mode 100644
index 0000000000..54e4ff45fa
--- /dev/null
+++ b/base/task/sequence_manager/test/test_task_time_observer.h
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_TIME_OBSERVER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_TIME_OBSERVER_H_
+
+#include "base/task/sequence_manager/task_time_observer.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TestTaskTimeObserver : public TaskTimeObserver {
+ public:
+ void WillProcessTask(TimeTicks start_time) override {}
+ void DidProcessTask(TimeTicks start_time, TimeTicks end_time) override {}
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TEST_TEST_TASK_TIME_OBSERVER_H_
diff --git a/base/task/sequence_manager/thread_controller.h b/base/task/sequence_manager/thread_controller.h
new file mode 100644
index 0000000000..539530602b
--- /dev/null
+++ b/base/task/sequence_manager/thread_controller.h
@@ -0,0 +1,85 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
+
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class TickClock;
+struct PendingTask;
+
+namespace sequence_manager {
+namespace internal {
+
+class SequencedTaskSource;
+
+// Implementation of this interface is used by SequenceManager to schedule
+// actual work to be run. Hopefully we can stop using MessageLoop and this
+// interface will become more concise.
+class ThreadController {
+ public:
+ virtual ~ThreadController() = default;
+
+ // Sets the number of tasks executed in a single invocation of DoWork.
+ // Increasing the batch size can reduce the overhead of yielding back to the
+ // main message loop.
+ virtual void SetWorkBatchSize(int work_batch_size = 1) = 0;
+
+ // Notifies that |pending_task| is about to be enqueued. Needed for tracing
+  // purposes. The impl may use this opportunity to add metadata to
+  // |pending_task|
+ // before it is moved into the queue.
+ virtual void WillQueueTask(PendingTask* pending_task) = 0;
+
+ // Notify the controller that its associated sequence has immediate work
+ // to run. Shortly after this is called, the thread associated with this
+ // controller will run a task returned by sequence->TakeTask(). Can be called
+ // from any sequence.
+ //
+ // TODO(altimin): Change this to "the thread associated with this
+ // controller will run tasks returned by sequence->TakeTask() until it
+ // returns null or sequence->DidRunTask() returns false" once the
+ // code is changed to work that way.
+ virtual void ScheduleWork() = 0;
+
+ // Notify the controller that SequencedTaskSource will have a delayed work
+ // ready to be run at |run_time|. This call cancels any previously
+ // scheduled delayed work. Can only be called from the main sequence.
+ // NOTE: DelayTillNextTask might return a different value as it also takes
+ // immediate work into account.
+ // TODO(kraynov): Remove |lazy_now| parameter.
+ virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) = 0;
+
+ // Sets the sequenced task source from which to take tasks after
+ // a Schedule*Work() call is made.
+ // Must be called before the first call to Schedule*Work().
+ virtual void SetSequencedTaskSource(SequencedTaskSource*) = 0;
+
+ // TODO(altimin): Get rid of the methods below.
+ // These methods exist due to current integration of SequenceManager
+ // with MessageLoop.
+
+ virtual bool RunsTasksInCurrentSequence() = 0;
+
+ virtual const TickClock* GetClock() = 0;
+
+ virtual void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) = 0;
+
+ virtual void RestoreDefaultTaskRunner() = 0;
+
+ virtual void AddNestingObserver(RunLoop::NestingObserver* observer) = 0;
+
+ virtual void RemoveNestingObserver(RunLoop::NestingObserver* observer) = 0;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
diff --git a/base/task/sequence_manager/thread_controller_impl.cc b/base/task/sequence_manager/thread_controller_impl.cc
new file mode 100644
index 0000000000..efa80fb053
--- /dev/null
+++ b/base/task/sequence_manager/thread_controller_impl.cc
@@ -0,0 +1,269 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_impl.h"
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+ThreadControllerImpl::ThreadControllerImpl(
+ MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* time_source)
+ : message_loop_(message_loop),
+ task_runner_(task_runner),
+ message_loop_task_runner_(message_loop ? message_loop->task_runner()
+ : nullptr),
+ time_source_(time_source),
+ weak_factory_(this) {
+ immediate_do_work_closure_ =
+ BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ WorkType::kImmediate);
+ delayed_do_work_closure_ =
+ BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ WorkType::kDelayed);
+}
+
+ThreadControllerImpl::~ThreadControllerImpl() = default;
+
+ThreadControllerImpl::AnySequence::AnySequence() = default;
+
+ThreadControllerImpl::AnySequence::~AnySequence() = default;
+
+ThreadControllerImpl::MainSequenceOnly::MainSequenceOnly() = default;
+
+ThreadControllerImpl::MainSequenceOnly::~MainSequenceOnly() = default;
+
+std::unique_ptr<ThreadControllerImpl> ThreadControllerImpl::Create(
+ MessageLoop* message_loop,
+ const TickClock* time_source) {
+ return WrapUnique(new ThreadControllerImpl(
+ message_loop, message_loop->task_runner(), time_source));
+}
+
+void ThreadControllerImpl::SetSequencedTaskSource(
+ SequencedTaskSource* sequence) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence);
+ DCHECK(!sequence_);
+ sequence_ = sequence;
+}
+
+void ThreadControllerImpl::ScheduleWork() {
+ DCHECK(sequence_);
+ AutoLock lock(any_sequence_lock_);
+ // Don't post a DoWork if there's an immediate DoWork in flight or if we're
+ // inside a top level DoWork. We can rely on a continuation being posted as
+ // needed.
+ if (any_sequence().immediate_do_work_posted ||
+ (any_sequence().do_work_running_count > any_sequence().nesting_depth)) {
+ return;
+ }
+ any_sequence().immediate_do_work_posted = true;
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::ScheduleWork::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+}
+
+void ThreadControllerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
+ TimeTicks run_time) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ if (main_sequence_only().next_delayed_do_work == run_time)
+ return;
+
+ // Cancel DoWork if it was scheduled and we set an "infinite" delay now.
+ if (run_time == TimeTicks::Max()) {
+ cancelable_delayed_do_work_closure_.Cancel();
+ main_sequence_only().next_delayed_do_work = TimeTicks::Max();
+ return;
+ }
+
+ // If DoWork is running then we don't need to do anything because it will post
+ // a continuation as needed. Bailing out here is by far the most common case.
+ if (main_sequence_only().do_work_running_count >
+ main_sequence_only().nesting_depth) {
+ return;
+ }
+
+ // If DoWork is about to run then we also don't need to do anything.
+ {
+ AutoLock lock(any_sequence_lock_);
+ if (any_sequence().immediate_do_work_posted)
+ return;
+ }
+
+ base::TimeDelta delay = std::max(TimeDelta(), run_time - lazy_now->Now());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::SetNextDelayedDoWork::PostDelayedTask",
+ "delay_ms", delay.InMillisecondsF());
+
+ main_sequence_only().next_delayed_do_work = run_time;
+ // Reset also causes cancellation of the previous DoWork task.
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
+}
+
+bool ThreadControllerImpl::RunsTasksInCurrentSequence() {
+ return task_runner_->RunsTasksInCurrentSequence();
+}
+
+const TickClock* ThreadControllerImpl::GetClock() {
+ return time_source_;
+}
+
+void ThreadControllerImpl::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(task_runner);
+}
+
+void ThreadControllerImpl::RestoreDefaultTaskRunner() {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(message_loop_task_runner_);
+}
+
+void ThreadControllerImpl::WillQueueTask(PendingTask* pending_task) {
+ task_annotator_.WillQueueTask("SequenceManager::PostTask", pending_task);
+}
+
+void ThreadControllerImpl::DoWork(WorkType work_type) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ {
+ AutoLock lock(any_sequence_lock_);
+ if (work_type == WorkType::kImmediate)
+ any_sequence().immediate_do_work_posted = false;
+ any_sequence().do_work_running_count++;
+ }
+
+ main_sequence_only().do_work_running_count++;
+
+ WeakPtr<ThreadControllerImpl> weak_ptr = weak_factory_.GetWeakPtr();
+ // TODO(scheduler-dev): Consider moving to a time based work batch instead.
+ for (int i = 0; i < main_sequence_only().work_batch_size_; i++) {
+ Optional<PendingTask> task = sequence_->TakeTask();
+ if (!task)
+ break;
+
+ TRACE_TASK_EXECUTION("ThreadControllerImpl::DoWork", *task);
+ task_annotator_.RunTask("ThreadControllerImpl::DoWork", &*task);
+
+ if (!weak_ptr)
+ return;
+
+ sequence_->DidRunTask();
+
+ // NOTE: https://crbug.com/828835.
+ // When we're running inside a nested RunLoop it may quit anytime, so any
+ // outstanding pending tasks must run in the outer RunLoop
+ // (see SequenceManagerTestWithMessageLoop.QuitWhileNested test).
+    // Unfortunately, it's MessageLoop who's receiving that signal and we can't
+ // know it before we return from DoWork, hence, OnExitNestedRunLoop
+ // will be called later. Since we must implement ThreadController and
+ // SequenceManager in conformance with MessageLoop task runners, we need
+ // to disable this batching optimization while nested.
+ // Implementing RunLoop::Delegate ourselves will help to resolve this issue.
+ if (main_sequence_only().nesting_depth > 0)
+ break;
+ }
+
+ main_sequence_only().do_work_running_count--;
+
+ {
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().do_work_running_count--;
+ DCHECK_GE(any_sequence().do_work_running_count, 0);
+ LazyNow lazy_now(time_source_);
+ TimeDelta delay_till_next_task = sequence_->DelayTillNextTask(&lazy_now);
+ if (delay_till_next_task <= TimeDelta()) {
+ // The next task needs to run immediately, post a continuation if needed.
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ } else if (delay_till_next_task < TimeDelta::Max()) {
+ // The next task needs to run after a delay, post a continuation if
+ // needed.
+ TimeTicks next_task_at = lazy_now.Now() + delay_till_next_task;
+ if (next_task_at != main_sequence_only().next_delayed_do_work) {
+ main_sequence_only().next_delayed_do_work = next_task_at;
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(),
+ delay_till_next_task);
+ }
+ } else {
+ // There is no next task scheduled.
+ main_sequence_only().next_delayed_do_work = TimeTicks::Max();
+ }
+ }
+}
+
+void ThreadControllerImpl::AddNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ nesting_observer_ = observer;
+ RunLoop::AddNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::RemoveNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_EQ(observer, nesting_observer_);
+ nesting_observer_ = nullptr;
+ RunLoop::RemoveNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::OnBeginNestedRunLoop() {
+ main_sequence_only().nesting_depth++;
+ {
+ // We just entered a nested run loop, make sure there's a DoWork posted or
+ // the system will grind to a halt.
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth++;
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::OnBeginNestedRunLoop::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnBeginNestedRunLoop();
+}
+
+void ThreadControllerImpl::OnExitNestedRunLoop() {
+ main_sequence_only().nesting_depth--;
+ {
+ AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth--;
+ DCHECK_GE(any_sequence().nesting_depth, 0);
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnExitNestedRunLoop();
+}
+
+void ThreadControllerImpl::SetWorkBatchSize(int work_batch_size) {
+ main_sequence_only().work_batch_size_ = work_batch_size;
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/thread_controller_impl.h b/base/task/sequence_manager/thread_controller_impl.h
new file mode 100644
index 0000000000..794feefb4b
--- /dev/null
+++ b/base/task/sequence_manager/thread_controller_impl.h
@@ -0,0 +1,130 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
+
+#include "base/cancelable_callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
+#include "base/sequence_checker.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/thread_controller.h"
+
+namespace base {
+
+// TODO(kraynov): https://crbug.com/828835
+// Consider going away from using MessageLoop in the renderer process.
+class MessageLoop;
+
+namespace sequence_manager {
+namespace internal {
+
+// TODO(kraynov): Rename to ThreadControllerWithMessageLoopImpl.
+class BASE_EXPORT ThreadControllerImpl : public ThreadController,
+ public RunLoop::NestingObserver {
+ public:
+ ~ThreadControllerImpl() override;
+
+ static std::unique_ptr<ThreadControllerImpl> Create(
+ MessageLoop* message_loop,
+ const TickClock* time_source);
+
+ // ThreadController:
+ void SetWorkBatchSize(int work_batch_size) override;
+ void WillQueueTask(PendingTask* pending_task) override;
+ void ScheduleWork() override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ void SetSequencedTaskSource(SequencedTaskSource* sequence) override;
+ bool RunsTasksInCurrentSequence() override;
+ const TickClock* GetClock() override;
+ void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) override;
+ void RestoreDefaultTaskRunner() override;
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
+
+ // RunLoop::NestingObserver:
+ void OnBeginNestedRunLoop() override;
+ void OnExitNestedRunLoop() override;
+
+ protected:
+ ThreadControllerImpl(MessageLoop* message_loop,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const TickClock* time_source);
+
+ // TODO(altimin): Make these const. Blocked on removing
+ // lazy initialisation support.
+ MessageLoop* message_loop_;
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ RunLoop::NestingObserver* nesting_observer_ = nullptr;
+
+ private:
+ enum class WorkType { kImmediate, kDelayed };
+
+ void DoWork(WorkType work_type);
+
+ struct AnySequence {
+ AnySequence();
+ ~AnySequence();
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ bool immediate_do_work_posted = false;
+ };
+
+ mutable Lock any_sequence_lock_;
+ AnySequence any_sequence_;
+
+ struct AnySequence& any_sequence() {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+ const struct AnySequence& any_sequence() const {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+
+ struct MainSequenceOnly {
+ MainSequenceOnly();
+ ~MainSequenceOnly();
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ int work_batch_size_ = 1;
+
+ TimeTicks next_delayed_do_work = TimeTicks::Max();
+ };
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ MainSequenceOnly main_sequence_only_;
+ MainSequenceOnly& main_sequence_only() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+ const MainSequenceOnly& main_sequence_only() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> message_loop_task_runner_;
+ const TickClock* time_source_;
+ RepeatingClosure immediate_do_work_closure_;
+ RepeatingClosure delayed_do_work_closure_;
+ CancelableClosure cancelable_delayed_do_work_closure_;
+ SequencedTaskSource* sequence_ = nullptr; // Not owned.
+ debug::TaskAnnotator task_annotator_;
+
+ WeakPtrFactory<ThreadControllerImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
new file mode 100644
index 0000000000..fbed88b404
--- /dev/null
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -0,0 +1,205 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+
+#include "base/auto_reset.h"
+#include "base/message_loop/message_pump_default.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
+ TickClock* time_source)
+ : main_thread_id_(PlatformThread::CurrentId()),
+ pump_(new MessagePumpDefault()),
+ time_source_(time_source) {
+ RunLoop::RegisterDelegateForCurrentThread(this);
+}
+
+ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
+ // Destructors of RunLoop::Delegate and ThreadTaskRunnerHandle
+ // will do all the clean-up.
+}
+
+ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;
+
+ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
+ default;
+
+void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
+ SequencedTaskSource* task_source) {
+ DCHECK(task_source);
+ DCHECK(!main_thread_only().task_source);
+ main_thread_only().task_source = task_source;
+}
+
+void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
+ int work_batch_size) {
+ DCHECK_GE(work_batch_size, 1);
+ main_thread_only().batch_size = work_batch_size;
+}
+
+void ThreadControllerWithMessagePumpImpl::WillQueueTask(
+ PendingTask* pending_task) {
+ task_annotator_.WillQueueTask("ThreadController::Task", pending_task);
+}
+
+void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
+ // Continuation will be posted if necessary.
+ if (RunsTasksInCurrentSequence() && is_doing_work())
+ return;
+
+ pump_->ScheduleWork();
+}
+
+void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
+ LazyNow* lazy_now,
+ TimeTicks run_time) {
+ if (main_thread_only().next_delayed_work == run_time)
+ return;
+ main_thread_only().next_delayed_work = run_time;
+
+ if (run_time == TimeTicks::Max())
+ return;
+
+ // Continuation will be posted if necessary.
+ if (is_doing_work())
+ return;
+
+ // |lazy_now| will be removed in this method soon.
+ DCHECK_LT(time_source_->NowTicks(), run_time);
+ pump_->ScheduleDelayedWork(run_time);
+}
+
+const TickClock* ThreadControllerWithMessagePumpImpl::GetClock() {
+ return time_source_;
+}
+
+bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
+ return main_thread_id_ == PlatformThread::CurrentId();
+}
+
+void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ main_thread_only().thread_task_runner_handle =
+ std::make_unique<ThreadTaskRunnerHandle>(task_runner);
+}
+
+void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
+ // There's no default task runner unlike with the MessageLoop.
+ main_thread_only().thread_task_runner_handle.reset();
+}
+
+void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_LE(main_thread_only().run_depth, 1);
+ DCHECK(!main_thread_only().nesting_observer);
+ DCHECK(observer);
+ main_thread_only().nesting_observer = observer;
+}
+
+void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
+ RunLoop::NestingObserver* observer) {
+ DCHECK_EQ(main_thread_only().nesting_observer, observer);
+ main_thread_only().nesting_observer = nullptr;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoWork() {
+ DCHECK(main_thread_only().task_source);
+ bool task_ran = false;
+
+ {
+ AutoReset<int> do_work_scope(&main_thread_only().do_work_depth,
+ main_thread_only().do_work_depth + 1);
+
+ for (int i = 0; i < main_thread_only().batch_size; i++) {
+ Optional<PendingTask> task = main_thread_only().task_source->TakeTask();
+ if (!task)
+ break;
+
+ TRACE_TASK_EXECUTION("ThreadController::Task", *task);
+ task_annotator_.RunTask("ThreadController::Task", &*task);
+ task_ran = true;
+
+ main_thread_only().task_source->DidRunTask();
+
+ if (main_thread_only().quit_do_work) {
+ // When Quit() is called we must stop running the batch because
+ // caller expects per-task granularity.
+ main_thread_only().quit_do_work = false;
+ return true;
+ }
+ }
+ } // DoWorkScope.
+
+ LazyNow lazy_now(time_source_);
+ TimeDelta do_work_delay =
+ main_thread_only().task_source->DelayTillNextTask(&lazy_now);
+ DCHECK_GE(do_work_delay, TimeDelta());
+ // Schedule a continuation.
+ if (do_work_delay.is_zero()) {
+ // Need to run new work immediately.
+ pump_->ScheduleWork();
+ } else if (do_work_delay != TimeDelta::Max()) {
+ SetNextDelayedDoWork(&lazy_now, lazy_now.Now() + do_work_delay);
+ } else {
+ SetNextDelayedDoWork(&lazy_now, TimeTicks::Max());
+ }
+
+ return task_ran;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoDelayedWork(
+ TimeTicks* next_run_time) {
+ // Delayed work is getting processed in DoWork().
+ return false;
+}
+
+bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
+ // RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
+ if (ShouldQuitWhenIdle())
+ Quit();
+ return false;
+}
+
+void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed) {
+ // No system messages are being processed by this class.
+ DCHECK(application_tasks_allowed);
+
+ // We already have a MessagePump::Run() running, so we're in a nested RunLoop.
+ if (main_thread_only().run_depth > 0 && main_thread_only().nesting_observer)
+ main_thread_only().nesting_observer->OnBeginNestedRunLoop();
+
+ {
+ AutoReset<int> run_scope(&main_thread_only().run_depth,
+ main_thread_only().run_depth + 1);
+ // MessagePump::Run() blocks until Quit() called, but previously started
+ // Run() calls continue to block.
+ pump_->Run(this);
+ }
+
+ // We'll soon continue to run an outer MessagePump::Run() loop.
+ if (main_thread_only().run_depth > 0 && main_thread_only().nesting_observer)
+ main_thread_only().nesting_observer->OnExitNestedRunLoop();
+}
+
+void ThreadControllerWithMessagePumpImpl::Quit() {
+ // Interrupt a batch of work.
+ if (is_doing_work())
+ main_thread_only().quit_do_work = true;
+ // If we're in a nested RunLoop, continuation will be posted if necessary.
+ pump_->Quit();
+}
+
+void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
+ ScheduleWork();
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl.h b/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
new file mode 100644
index 0000000000..c19a2e8992
--- /dev/null
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
@@ -0,0 +1,109 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
+
+#include "base/debug/task_annotator.h"
+#include "base/message_loop/message_pump.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/thread_controller.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// EXPERIMENTAL ThreadController implementation which doesn't use
+// MessageLoop or a task runner to schedule its DoWork calls.
+// See https://crbug.com/828835.
+class BASE_EXPORT ThreadControllerWithMessagePumpImpl
+ : public ThreadController,
+ public MessagePump::Delegate,
+ public RunLoop::Delegate {
+ public:
+ explicit ThreadControllerWithMessagePumpImpl(TickClock* time_source);
+ ~ThreadControllerWithMessagePumpImpl() override;
+
+ // ThreadController implementation:
+ void SetSequencedTaskSource(SequencedTaskSource* task_source) override;
+ void SetWorkBatchSize(int work_batch_size) override;
+ void WillQueueTask(PendingTask* pending_task) override;
+ void ScheduleWork() override;
+ void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
+ const TickClock* GetClock() override;
+ bool RunsTasksInCurrentSequence() override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) override;
+ void RestoreDefaultTaskRunner() override;
+ void AddNestingObserver(RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
+
+ private:
+ friend class DoWorkScope;
+ friend class RunScope;
+
+ // MessagePump::Delegate implementation.
+ bool DoWork() override;
+ bool DoDelayedWork(TimeTicks* next_run_time) override;
+ bool DoIdleWork() override;
+
+ // RunLoop::Delegate implementation.
+ void Run(bool application_tasks_allowed) override;
+ void Quit() override;
+ void EnsureWorkScheduled() override;
+
+ struct MainThreadOnly {
+ MainThreadOnly();
+ ~MainThreadOnly();
+
+ SequencedTaskSource* task_source = nullptr; // Not owned.
+ RunLoop::NestingObserver* nesting_observer = nullptr; // Not owned.
+ std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle;
+
+ // Next delayed DoWork time for scheduling de-duplication purpose.
+ TimeTicks next_delayed_work;
+
+ // Indicates that we should yield DoWork ASAP.
+ bool quit_do_work = false;
+
+ // Number of tasks processed in a single DoWork invocation.
+ int batch_size = 1;
+
+ // Number of RunLoop layers currently running.
+ int run_depth = 0;
+
+ // Number of DoWork running, but only the inner-most one can take tasks.
+ // Must be equal to |run_depth| or |run_depth - 1|.
+ int do_work_depth = 0;
+ };
+
+ MainThreadOnly& main_thread_only() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+
+ // Returns true if there's a DoWork running on the inner-most nesting layer.
+ bool is_doing_work() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_.do_work_depth == main_thread_only_.run_depth &&
+ main_thread_only_.do_work_depth != 0;
+ }
+
+ MainThreadOnly main_thread_only_;
+ const PlatformThreadId main_thread_id_;
+ std::unique_ptr<MessagePump> pump_;
+ debug::TaskAnnotator task_annotator_;
+ TickClock* time_source_; // Not owned.
+
+ THREAD_CHECKER(main_thread_checker_);
+ DISALLOW_COPY_AND_ASSIGN(ThreadControllerWithMessagePumpImpl);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
diff --git a/base/task/sequence_manager/time_domain.cc b/base/task/sequence_manager/time_domain.cc
new file mode 100644
index 0000000000..8f47eb3a23
--- /dev/null
+++ b/base/task/sequence_manager/time_domain.cc
@@ -0,0 +1,136 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/time_domain.h"
+
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+
+namespace base {
+namespace sequence_manager {
+
+TimeDomain::TimeDomain() : sequence_manager_(nullptr) {}
+
+TimeDomain::~TimeDomain() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+}
+
+void TimeDomain::OnRegisterWithSequenceManager(
+ internal::SequenceManagerImpl* sequence_manager) {
+ DCHECK(sequence_manager);
+ DCHECK(!sequence_manager_);
+ sequence_manager_ = sequence_manager;
+}
+
+SequenceManager* TimeDomain::sequence_manager() const {
+ DCHECK(sequence_manager_);
+ return sequence_manager_;
+}
+
+// TODO(kraynov): https://crbug.com/857101 Consider making an interface
+// for SequenceManagerImpl which will expose SetNextDelayedDoWork and
+// MaybeScheduleImmediateWork methods to make the functions below pure-virtual.
+
+void TimeDomain::SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) {
+ sequence_manager_->SetNextDelayedDoWork(lazy_now, run_time);
+}
+
+void TimeDomain::RequestDoWork() {
+ sequence_manager_->MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+void TimeDomain::UnregisterQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+ LazyNow lazy_now(CreateLazyNow());
+ SetNextWakeUpForQueue(queue, nullopt, &lazy_now);
+}
+
+void TimeDomain::SetNextWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+ DCHECK(queue->IsQueueEnabled() || !wake_up);
+
+ Optional<TimeTicks> previous_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ previous_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
+ if (wake_up) {
+ // Insert a new wake-up into the heap.
+ if (queue->heap_handle().IsValid()) {
+ // O(log n)
+ delayed_wake_up_queue_.ChangeKey(queue->heap_handle(),
+ {wake_up.value(), queue});
+ } else {
+ // O(log n)
+ delayed_wake_up_queue_.insert({wake_up.value(), queue});
+ }
+ } else {
+ // Remove a wake-up from heap if present.
+ if (queue->heap_handle().IsValid())
+ delayed_wake_up_queue_.erase(queue->heap_handle());
+ }
+
+ Optional<TimeTicks> new_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ new_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
+ // TODO(kraynov): https://crbug.com/857101 Review the relationship with
+ // SequenceManager's time. Right now it's not an issue since
+ // VirtualTimeDomain doesn't invoke SequenceManager itself.
+
+ if (new_wake_up) {
+ if (new_wake_up != previous_wake_up) {
+ // Update the wake-up.
+ SetNextDelayedDoWork(lazy_now, new_wake_up.value());
+ }
+ } else {
+ if (previous_wake_up) {
+ // No new wake-up to be set, cancel the previous one.
+ SetNextDelayedDoWork(lazy_now, TimeTicks::Max());
+ }
+ }
+}
+
+void TimeDomain::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+  // Wake up any queues with pending delayed work. Note the intrusive heap
+  // keeps wake-ups ordered by time, so Min() always refers to the earliest
+  // queue to wake up.
+ while (!delayed_wake_up_queue_.empty() &&
+ delayed_wake_up_queue_.Min().wake_up.time <= lazy_now->Now()) {
+ internal::TaskQueueImpl* queue = delayed_wake_up_queue_.Min().queue;
+ queue->WakeUpForDelayedWork(lazy_now);
+ }
+}
+
+Optional<TimeTicks> TimeDomain::NextScheduledRunTime() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (delayed_wake_up_queue_.empty())
+ return nullopt;
+ return delayed_wake_up_queue_.Min().wake_up.time;
+}
+
+void TimeDomain::AsValueInto(trace_event::TracedValue* state) const {
+ state->BeginDictionary();
+ state->SetString("name", GetName());
+ state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
+ if (!delayed_wake_up_queue_.empty()) {
+ TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
+ state->SetDouble("next_delay_ms", delay.InMillisecondsF());
+ }
+ AsValueIntoInternal(state);
+ state->EndDictionary();
+}
+
+void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const {
+  // Can be overridden to trace some additional state.
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/time_domain.h b/base/task/sequence_manager/time_domain.h
new file mode 100644
index 0000000000..e9e487bd40
--- /dev/null
+++ b/base/task/sequence_manager/time_domain.h
@@ -0,0 +1,139 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
+#define BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
+
+#include <map>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace sequence_manager {
+
+class SequenceManager;
+
+namespace internal {
+class SequenceManagerImpl;
+class TaskQueueImpl;
+} // namespace internal
+
+// TimeDomain wakes up TaskQueues when their delayed tasks are due to run.
+// This class allows overrides to enable clock overriding on some TaskQueues
+// (e.g. auto-advancing virtual time, throttled clock, etc).
+//
+// TaskQueue maintains its own next wake-up time and communicates it
+// to the TimeDomain, which aggregates wake-ups across registered TaskQueues
+// into a global wake-up, which ultimately gets passed to the ThreadController.
+class BASE_EXPORT TimeDomain {
+ public:
+ virtual ~TimeDomain();
+
+ // Returns LazyNow in TimeDomain's time.
+ // Can be called from any thread.
+ // TODO(alexclarke): Make this main thread only.
+ virtual LazyNow CreateLazyNow() const = 0;
+
+ // Evaluates TimeDomain's time.
+ // Can be called from any thread.
+ // TODO(alexclarke): Make this main thread only.
+ virtual TimeTicks Now() const = 0;
+
+ // Computes the delay until the time when TimeDomain needs to wake up
+ // some TaskQueue. Specific time domains (e.g. virtual or throttled) may
+  // return TimeDelta() if TaskQueues have any delayed tasks they deem
+  // eligible to run. It's also allowed to advance the time domain's internal
+  // clock when this method is called.
+ // Can be called from main thread only.
+ // NOTE: |lazy_now| and the return value are in the SequenceManager's time.
+ virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0;
+
+ void AsValueInto(trace_event::TracedValue* state) const;
+
+ protected:
+ TimeDomain();
+
+ SequenceManager* sequence_manager() const;
+
+ // Returns the earliest scheduled wake up in the TimeDomain's time.
+ Optional<TimeTicks> NextScheduledRunTime() const;
+
+ size_t NumberOfScheduledWakeUps() const {
+ return delayed_wake_up_queue_.size();
+ }
+
+ // Tells SequenceManager to schedule delayed work, use TimeTicks::Max()
+ // to unschedule. Also cancels any previous requests.
+  // May be overridden to control wake ups manually.
+ virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
+
+ // Tells SequenceManager to schedule immediate work.
+  // May be overridden to control wake ups manually.
+ virtual void RequestDoWork();
+
+ // For implementation-specific tracing.
+ virtual void AsValueIntoInternal(trace_event::TracedValue* state) const;
+ virtual const char* GetName() const = 0;
+
+ private:
+ friend class internal::TaskQueueImpl;
+ friend class internal::SequenceManagerImpl;
+ friend class TestTimeDomain;
+
+ // Called when the TimeDomain is registered.
+ // TODO(kraynov): Pass SequenceManager in the constructor.
+ void OnRegisterWithSequenceManager(
+ internal::SequenceManagerImpl* sequence_manager);
+
+ // Schedule TaskQueue to wake up at certain time, repeating calls with
+ // the same |queue| invalidate previous requests.
+ // Nullopt |wake_up| cancels a previously set wake up for |queue|.
+ // NOTE: |lazy_now| is provided in TimeDomain's time.
+ void SetNextWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now);
+
+  // Removes the TaskQueue from any internal data structures.
+ void UnregisterQueue(internal::TaskQueueImpl* queue);
+
+ // Wake up each TaskQueue where the delay has elapsed.
+ void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+ struct ScheduledDelayedWakeUp {
+ internal::TaskQueueImpl::DelayedWakeUp wake_up;
+ internal::TaskQueueImpl* queue;
+
+ bool operator<=(const ScheduledDelayedWakeUp& other) const {
+ return wake_up <= other.wake_up;
+ }
+
+ void SetHeapHandle(internal::HeapHandle handle) {
+ DCHECK(handle.IsValid());
+ queue->set_heap_handle(handle);
+ }
+
+ void ClearHeapHandle() {
+ DCHECK(queue->heap_handle().IsValid());
+ queue->set_heap_handle(internal::HeapHandle());
+ }
+ };
+
+ internal::SequenceManagerImpl* sequence_manager_; // Not owned.
+ internal::IntrusiveHeap<ScheduledDelayedWakeUp> delayed_wake_up_queue_;
+
+ ThreadChecker main_thread_checker_;
+ DISALLOW_COPY_AND_ASSIGN(TimeDomain);
+};
+
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
diff --git a/base/task/sequence_manager/time_domain_unittest.cc b/base/task/sequence_manager/time_domain_unittest.cc
new file mode 100644
index 0000000000..951314f5a4
--- /dev/null
+++ b/base/task/sequence_manager/time_domain_unittest.cc
@@ -0,0 +1,324 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/time_domain.h"
+
+#include <memory>
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::Mock;
+
+namespace base {
+namespace sequence_manager {
+
+class TaskQueueImplForTest : public internal::TaskQueueImpl {
+ public:
+ TaskQueueImplForTest(internal::SequenceManagerImpl* sequence_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec)
+ : TaskQueueImpl(sequence_manager, time_domain, spec) {}
+ ~TaskQueueImplForTest() {}
+
+ using TaskQueueImpl::SetDelayedWakeUpForTesting;
+};
+
+class TestTimeDomain : public TimeDomain {
+ public:
+ TestTimeDomain() : now_(TimeTicks() + TimeDelta::FromSeconds(1)) {}
+
+ ~TestTimeDomain() override = default;
+
+ using TimeDomain::NextScheduledRunTime;
+ using TimeDomain::SetNextWakeUpForQueue;
+ using TimeDomain::UnregisterQueue;
+ using TimeDomain::WakeUpReadyDelayedQueues;
+
+ LazyNow CreateLazyNow() const override { return LazyNow(now_); }
+ TimeTicks Now() const override { return now_; }
+
+ Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override {
+ return Optional<TimeDelta>();
+ }
+
+ void AsValueIntoInternal(trace_event::TracedValue* state) const override {}
+ const char* GetName() const override { return "Test"; }
+
+ internal::TaskQueueImpl* NextScheduledTaskQueue() const {
+ if (delayed_wake_up_queue_.empty())
+ return nullptr;
+ return delayed_wake_up_queue_.Min().queue;
+ }
+
+ MOCK_METHOD2(SetNextDelayedDoWork,
+ void(LazyNow* lazy_now, TimeTicks run_time));
+
+ void SetNow(TimeTicks now) { now_ = now; }
+
+ private:
+ TimeTicks now_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestTimeDomain);
+};
+
+class TimeDomainTest : public testing::Test {
+ public:
+ void SetUp() final {
+ time_domain_ = WrapUnique(CreateTestTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImplForTest>(
+ nullptr, time_domain_.get(), TaskQueue::Spec("test"));
+ }
+
+ void TearDown() final {
+ if (task_queue_)
+ task_queue_->UnregisterTaskQueue();
+ }
+
+ virtual TestTimeDomain* CreateTestTimeDomain() {
+ return new TestTimeDomain();
+ }
+
+ std::unique_ptr<TestTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImplForTest> task_queue_;
+};
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueue) {
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+ TimeTicks delayed_runtime = time_domain_->Now() + delay;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay, 0});
+
+ EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+ EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+ .Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueueSupersedesPreviousWakeUp) {
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(100);
+ TimeTicks delayed_runtime1 = time_domain_->Now() + delay1;
+ TimeTicks delayed_runtime2 = time_domain_->Now() + delay2;
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime1));
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime1, 0});
+
+ EXPECT_EQ(delayed_runtime1, time_domain_->NextScheduledRunTime());
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // Now schedule a later wake_up, which should replace the previously
+ // requested one.
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime2));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime2, 0});
+
+ EXPECT_EQ(delayed_runtime2, time_domain_->NextScheduledRunTime());
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+ .Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, SetNextDelayedDoWork_OnlyCalledForEarlierTasks) {
+ std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue3 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue4 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ TimeDelta delay1 = TimeDelta::FromMilliseconds(10);
+ TimeDelta delay2 = TimeDelta::FromMilliseconds(20);
+ TimeDelta delay3 = TimeDelta::FromMilliseconds(30);
+ TimeDelta delay4 = TimeDelta::FromMilliseconds(1);
+
+ // SetNextDelayedDoWork should always be called if there are no other
+ // wake-ups.
+ TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, now + delay1));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay1, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // SetNextDelayedDoWork should not be called when scheduling later tasks.
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(0);
+ task_queue2->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay2, 0});
+ task_queue3->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay3, 0});
+
+ // SetNextDelayedDoWork should be called when scheduling earlier tasks.
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, now + delay4));
+ task_queue4->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay4, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(2);
+ task_queue2->UnregisterTaskQueue();
+ task_queue3->UnregisterTaskQueue();
+ task_queue4->UnregisterTaskQueue();
+}
+
+// Unregistering the queue owning the earliest wake-up must re-arm the time
+// domain for the next remaining wake-up; unregistering the last queue must
+// cancel delayed work entirely (TimeTicks::Max()).
+TEST_F(TimeDomainTest, UnregisterQueue) {
+  std::unique_ptr<TaskQueueImplForTest> task_queue2_ =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                            TaskQueue::Spec("test"));
+
+  TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  // |task_queue_| gets the earlier wake-up so it determines the next DoWork.
+  TimeTicks wake_up1 = now + TimeDelta::FromMilliseconds(10);
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, wake_up1)).Times(1);
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{wake_up1, 0});
+  TimeTicks wake_up2 = now + TimeDelta::FromMilliseconds(100);
+  task_queue2_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{wake_up2, 0});
+
+  EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+  testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  // Removing the front queue should re-arm for the remaining queue's wake-up.
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, wake_up2)).Times(1);
+
+  time_domain_->UnregisterQueue(task_queue_.get());
+  task_queue_ = std::unique_ptr<TaskQueueImplForTest>();
+  EXPECT_EQ(task_queue2_.get(), time_domain_->NextScheduledTaskQueue());
+
+  testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  // No queues left: delayed DoWork is cancelled with TimeTicks::Max().
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()))
+      .Times(1);
+
+  time_domain_->UnregisterQueue(task_queue2_.get());
+  EXPECT_FALSE(time_domain_->NextScheduledTaskQueue());
+}
+
+// A not-yet-due wake-up survives WakeUpReadyDelayedQueues; once |now| reaches
+// it, the wake-up is consumed and the delayed DoWork is cancelled.
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueues) {
+  TimeDelta delay = TimeDelta::FromMilliseconds(50);
+  TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now_1(now);
+  TimeTicks delayed_runtime = now + delay;
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, 0});
+
+  EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+  // The wake-up is still in the future, so it must remain scheduled.
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now_1);
+  EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime());
+
+  // Advance to the wake-up time: it is consumed and DoWork is cancelled.
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+  time_domain_->SetNow(delayed_runtime);
+  LazyNow lazy_now_2(time_domain_->CreateLazyNow());
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now_2);
+  ASSERT_FALSE(time_domain_->NextScheduledRunTime());
+}
+
+// Two queues with the same wake-up time: the tie is broken by the wake-up's
+// sequence number, so the earlier-created wake-up (lower number) comes first.
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueuesWithIdenticalRuntimes) {
+  int sequence_num = 0;
+  TimeDelta delay = TimeDelta::FromMilliseconds(50);
+  TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  TimeTicks delayed_runtime = now + delay;
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime));
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+
+  std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                            TaskQueue::Spec("test"));
+
+  // |task_queue2| registers first and thus gets the lower sequence number.
+  task_queue2->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now);
+
+  // The second task queue should wake up first since it has a lower sequence
+  // number.
+  EXPECT_EQ(task_queue2.get(), time_domain_->NextScheduledTaskQueue());
+
+  task_queue2->UnregisterTaskQueue();
+}
+
+// Cancelling the only wake-up (SetDelayedWakeUpForTesting(nullopt)) must
+// cancel the pending delayed DoWork.
+TEST_F(TimeDomainTest, CancelDelayedWork) {
+  TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  TimeTicks run_time = now + TimeDelta::FromMilliseconds(20);
+
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time, 0});
+
+  EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+  // Cancelling the wake-up resets the next DoWork to TimeTicks::Max().
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max()));
+  task_queue_->SetDelayedWakeUpForTesting(nullopt);
+  EXPECT_FALSE(time_domain_->NextScheduledTaskQueue());
+}
+
+// Cancelling the earliest of two wake-ups must re-arm the delayed DoWork for
+// the later queue rather than cancelling it outright.
+TEST_F(TimeDomainTest, CancelDelayedWork_TwoQueues) {
+  std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                            TaskQueue::Spec("test"));
+
+  TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  TimeTicks run_time1 = now + TimeDelta::FromMilliseconds(20);
+  TimeTicks run_time2 = now + TimeDelta::FromMilliseconds(40);
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time1));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time1, 0});
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  // A later wake-up must not re-arm the already earlier DoWork.
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _)).Times(0);
+  task_queue2->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time2, 0});
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue());
+
+  EXPECT_EQ(run_time1, time_domain_->NextScheduledRunTime());
+
+  // Cancelling the front wake-up promotes |task_queue2|'s run time.
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, run_time2));
+  task_queue_->SetDelayedWakeUpForTesting(nullopt);
+  EXPECT_EQ(task_queue2.get(), time_domain_->NextScheduledTaskQueue());
+
+  EXPECT_EQ(run_time2, time_domain_->NextScheduledRunTime());
+
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+  EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, _))
+      .Times(AnyNumber());
+
+  // Tidy up.
+  task_queue2->UnregisterTaskQueue();
+}
+
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/work_queue.cc b/base/task/sequence_manager/work_queue.cc
new file mode 100644
index 0000000000..4d95f4b773
--- /dev/null
+++ b/base/task/sequence_manager/work_queue.cc
@@ -0,0 +1,236 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue.h"
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
+                     const char* name,
+                     QueueType queue_type)
+    : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
+
+// Serializes every queued task into |state| for tracing.
+void WorkQueue::AsValueInto(TimeTicks now,
+                            trace_event::TracedValue* state) const {
+  for (const TaskQueueImpl::Task& task : tasks_) {
+    TaskQueueImpl::TaskAsValueInto(task, now, state);
+  }
+}
+
+WorkQueue::~WorkQueue() {
+  // The queue must have been detached from its WorkQueueSets before
+  // destruction. The streamed operands are only evaluated if the DCHECK
+  // fires, at which point |work_queue_sets_| is known to be non-null.
+  DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
+                            << work_queue_sets_->GetName() << " : " << name_;
+}
+
+// Returns the first task or null if empty. Ignores any fence.
+const TaskQueueImpl::Task* WorkQueue::GetFrontTask() const {
+  if (tasks_.empty())
+    return nullptr;
+  return &tasks_.front();
+}
+
+// Returns the last task or null if empty. Ignores any fence.
+const TaskQueueImpl::Task* WorkQueue::GetBackTask() const {
+  if (tasks_.empty())
+    return nullptr;
+  return &tasks_.back();
+}
+
+// True when a fence is present and the front task (or any future task, if
+// the queue is empty) would not be allowed to run.
+bool WorkQueue::BlockedByFence() const {
+  if (!fence_)
+    return false;
+
+  // If the queue is empty then any future tasks will have a higher enqueue
+  // order and will be blocked. The queue is also blocked if the head is past
+  // the fence.
+  return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
+}
+
+// Reports the front task's enqueue order, unless the queue is empty or the
+// fence hides it, in which case the function returns false.
+bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
+  if (tasks_.empty() || BlockedByFence())
+    return false;
+  // Quick sanity check.
+  DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
+      << task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
+      << name_;
+  *enqueue_order = tasks_.front().enqueue_order();
+  return true;
+}
+
+// Appends |task| and, when the queue transitions empty -> non-empty and is
+// not behind a fence, notifies the WorkQueueSets.
+void WorkQueue::Push(TaskQueueImpl::Task task) {
+  bool was_empty = tasks_.empty();
+#ifndef NDEBUG
+  DCHECK(task.enqueue_order_set());
+#endif
+
+  // Make sure the |enqueue_order()| is monotonically increasing.
+  DCHECK(was_empty || tasks_.rbegin()->enqueue_order() < task.enqueue_order());
+
+  // Amortized O(1).
+  tasks_.push_back(std::move(task));
+
+  if (!was_empty)
+    return;
+
+  // If we hit the fence, pretend to WorkQueueSets that we're empty.
+  if (work_queue_sets_ && !BlockedByFence())
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+// Prepends a non-nestable |task| (used when a task could not run in a nested
+// loop and must be retried first) and updates WorkQueueSets as needed.
+void WorkQueue::PushNonNestableTaskToFront(TaskQueueImpl::Task task) {
+  DCHECK(task.nestable == Nestable::kNonNestable);
+
+  bool was_empty = tasks_.empty();
+  bool was_blocked = BlockedByFence();
+#ifndef NDEBUG
+  DCHECK(task.enqueue_order_set());
+#endif
+
+  if (!was_empty) {
+    // Make sure the |enqueue_order()| is monotonically increasing.
+    DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
+        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
+        << " : " << name_;
+  }
+
+  // Amortized O(1).
+  tasks_.push_front(std::move(task));
+
+  if (!work_queue_sets_)
+    return;
+
+  // Pretend to WorkQueueSets that nothing has changed if we're blocked.
+  if (BlockedByFence())
+    return;
+
+  // Pushing task to front may unblock the fence.
+  if (was_empty || was_blocked) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+  } else {
+    work_queue_sets_->OnFrontTaskChanged(this);
+  }
+}
+
+// Refills the (empty) |tasks_| from the owning TaskQueueImpl's immediate
+// incoming queue, notifying WorkQueueSets if a runnable head appeared.
+void WorkQueue::ReloadEmptyImmediateQueue() {
+  DCHECK(tasks_.empty());
+
+  task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+  if (tasks_.empty())
+    return;
+
+  // If we hit the fence, pretend to WorkQueueSets that we're empty.
+  if (work_queue_sets_ && !BlockedByFence())
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+// Pops and returns the front task, refilling an emptied immediate queue
+// before updating WorkQueueSets so it sees the post-reload state.
+TaskQueueImpl::Task WorkQueue::TakeTaskFromWorkQueue() {
+  DCHECK(work_queue_sets_);
+  DCHECK(!tasks_.empty());
+
+  TaskQueueImpl::Task pending_task = std::move(tasks_.front());
+  tasks_.pop_front();
+  // NB immediate tasks have a different pipeline to delayed ones.
+  if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+    // Short-circuit the queue reload so that OnPopQueue does the right thing.
+    task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+  }
+
+  // OnPopQueue calls GetFrontTaskEnqueueOrder which checks BlockedByFence() so
+  // we don't need to here.
+  work_queue_sets_->OnPopQueue(this);
+  task_queue_->TraceQueueSize();
+  return pending_task;
+}
+
+// Drops leading tasks whose callback is null or cancelled, keeping
+// WorkQueueSets in sync. Returns true if anything was removed.
+bool WorkQueue::RemoveAllCanceledTasksFromFront() {
+  DCHECK(work_queue_sets_);
+  bool task_removed = false;
+  while (!tasks_.empty() &&
+         (!tasks_.front().task || tasks_.front().task.IsCancelled())) {
+    tasks_.pop_front();
+    task_removed = true;
+  }
+  if (task_removed) {
+    // NB immediate tasks have a different pipeline to delayed ones.
+    if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+      // Short-circuit the queue reload so that OnPopQueue does the right thing.
+      task_queue_->ReloadEmptyImmediateQueue(&tasks_);
+    }
+    work_queue_sets_->OnPopQueue(this);
+    task_queue_->TraceQueueSize();
+  }
+  return task_removed;
+}
+
+// Attaches (or with nullptr, detaches) this queue to a WorkQueueSets.
+void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
+  work_queue_sets_ = work_queue_sets;
+}
+
+// Records which priority set within the WorkQueueSets this queue belongs to.
+void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
+  work_queue_set_index_ = work_queue_set_index;
+}
+
+// Installs |fence| and reports whether the queue was blocked beforehand, so
+// callers can tell if the new fence changed the blocked state.
+bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
+  DCHECK_NE(fence, 0u);
+  DCHECK(fence >= fence_ || fence == EnqueueOrder::blocking_fence());
+  bool was_blocked_by_fence = BlockedByFence();
+  fence_ = fence;
+  return was_blocked_by_fence;
+}
+
+void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
+  // Ensure that there is no fence present or a new one blocks queue completely.
+  DCHECK(!fence_ || fence_ == EnqueueOrder::blocking_fence());
+  InsertFenceImpl(fence);
+}
+
+// Installs |fence| and notifies the WorkQueueSets about any visibility
+// change. Returns true if moving the fence forward unblocked tasks.
+bool WorkQueue::InsertFence(EnqueueOrder fence) {
+  bool was_blocked_by_fence = InsertFenceImpl(fence);
+
+  // Not yet assigned to a WorkQueueSets: nothing to notify. (This guard was
+  // previously missing on the OnQueueBlocked() path below, which would have
+  // dereferenced a null |work_queue_sets_|; the other branches here and
+  // RemoveFence() already check it.)
+  if (!work_queue_sets_)
+    return false;
+
+  // Moving the fence forward may unblock some tasks.
+  if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+    return true;
+  }
+  // Fence insertion may have blocked all tasks in this work queue.
+  if (BlockedByFence())
+    work_queue_sets_->OnQueueBlocked(this);
+  return false;
+}
+
+// Clears the fence; if the queue was pretending to be empty, re-announces
+// its real head to WorkQueueSets. Returns true if tasks were unblocked.
+bool WorkQueue::RemoveFence() {
+  bool was_blocked_by_fence = BlockedByFence();
+  fence_ = EnqueueOrder::none();
+  if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+    return true;
+  }
+  return false;
+}
+
+// Orders two non-empty queues by the enqueue order of their front tasks
+// (older task runs first).
+bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
+  DCHECK(!tasks_.empty());
+  DCHECK(!other_queue->tasks_.empty());
+  EnqueueOrder enqueue_order;
+  EnqueueOrder other_enqueue_order;
+  bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
+  bool have_other_task =
+      other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
+  DCHECK(have_task);
+  DCHECK(have_other_task);
+  return enqueue_order < other_enqueue_order;
+}
+
+// Test-only: drops the front task without notifying WorkQueueSets.
+void WorkQueue::PopTaskForTesting() {
+  if (tasks_.empty())
+    return;
+  tasks_.pop_front();
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/work_queue.h b/base/task/sequence_manager/work_queue.h
new file mode 100644
index 0000000000..5197949c50
--- /dev/null
+++ b/base/task/sequence_manager/work_queue.h
@@ -0,0 +1,152 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
+#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
+
+#include "base/base_export.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class WorkQueueSets;
+
+// This class keeps track of immediate and delayed tasks which are due to run
+// now. It interfaces deeply with WorkQueueSets which keeps track of which queue
+// (with a given priority) contains the oldest task.
+//
+// If a fence is inserted, WorkQueue behaves normally up until
+// TakeTaskFromWorkQueue reaches or exceeds the fence. At that point the
+// API subset used by WorkQueueSets pretends the WorkQueue is empty until the
+// fence is removed. This functionality is a primitive intended for use by
+// throttling mechanisms.
+class BASE_EXPORT WorkQueue {
+ public:
+  using QueueType = internal::TaskQueueImpl::WorkQueueType;
+
+  // Note |task_queue| can be null if queue_type is kNonNestable.
+  WorkQueue(TaskQueueImpl* task_queue, const char* name, QueueType queue_type);
+  ~WorkQueue();
+
+  // Associates this work queue with the given work queue sets. This must be
+  // called before any tasks can be inserted into this work queue.
+  void AssignToWorkQueueSets(WorkQueueSets* work_queue_sets);
+
+  // Assigns the current set index.
+  void AssignSetIndex(size_t work_queue_set_index);
+
+  void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+
+  // Returns true if the |tasks_| is empty. This method ignores any fences.
+  bool Empty() const { return tasks_.empty(); }
+
+  // If the |tasks_| isn't empty and a fence hasn't been reached,
+  // |enqueue_order| gets set to the enqueue order of the front task and the
+  // function returns true. Otherwise the function returns false.
+  bool GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const;
+
+  // Returns the first task in this queue or null if the queue is empty. This
+  // method ignores any fences.
+  const TaskQueueImpl::Task* GetFrontTask() const;
+
+  // Returns the last task in this queue or null if the queue is empty. This
+  // method ignores any fences.
+  const TaskQueueImpl::Task* GetBackTask() const;
+
+  // Pushes the task onto the |tasks_| and if a fence hasn't been reached
+  // it informs the WorkQueueSets if the head changed.
+  void Push(TaskQueueImpl::Task task);
+
+  // Pushes the task onto the front of the |tasks_| and if it's before any
+  // fence it informs the WorkQueueSets the head changed. Use with caution this
+  // API can easily lead to task starvation if misused.
+  void PushNonNestableTaskToFront(TaskQueueImpl::Task task);
+
+  // Reloads the empty |tasks_| with
+  // |task_queue_->TakeImmediateIncomingQueue| and if a fence hasn't been
+  // reached it informs the WorkQueueSets if the head changed.
+  void ReloadEmptyImmediateQueue();
+
+  size_t Size() const { return tasks_.size(); }
+
+  // Pulls a task off the |tasks_| and informs the WorkQueueSets. If the
+  // task removed had an enqueue order >= the current fence then WorkQueue
+  // pretends to be empty as far as the WorkQueueSets is concerned.
+  TaskQueueImpl::Task TakeTaskFromWorkQueue();
+
+  // Removes all canceled tasks from the head of the list. Returns true if any
+  // tasks were removed.
+  bool RemoveAllCanceledTasksFromFront();
+
+  const char* name() const { return name_; }
+
+  TaskQueueImpl* task_queue() const { return task_queue_; }
+
+  WorkQueueSets* work_queue_sets() const { return work_queue_sets_; }
+
+  size_t work_queue_set_index() const { return work_queue_set_index_; }
+
+  HeapHandle heap_handle() const { return heap_handle_; }
+
+  void set_heap_handle(HeapHandle handle) { heap_handle_ = handle; }
+
+  QueueType queue_type() const { return queue_type_; }
+
+  // Returns true if the front task in this queue has an older enqueue order
+  // than the front task of |other_queue|. Both queue are assumed to be
+  // non-empty. This method ignores any fences.
+  bool ShouldRunBefore(const WorkQueue* other_queue) const;
+
+  // Submit a fence. When TakeTaskFromWorkQueue encounters a task whose
+  // enqueue_order is >= |fence| then the WorkQueue will start pretending to
+  // be empty.
+  // Inserting a fence may supersede a previous one and unblock some tasks.
+  // Returns true if any tasks were unblocked, returns false otherwise.
+  bool InsertFence(EnqueueOrder fence);
+
+  // Submit a fence without triggering a WorkQueueSets notification.
+  // Caller must ensure that WorkQueueSets are properly updated.
+  // This method should not be called when a fence is already present.
+  void InsertFenceSilently(EnqueueOrder fence);
+
+  // Removes any fences that were added and if WorkQueue was pretending to be
+  // empty, then the real value is reported to WorkQueueSets. Returns true if
+  // any tasks were unblocked.
+  bool RemoveFence();
+
+  // Returns true if any tasks are blocked by the fence. Returns true if the
+  // queue is empty and fence has been set (i.e. future tasks would be blocked).
+  // Otherwise returns false.
+  bool BlockedByFence() const;
+
+  // Test support function. This should not be used in production code.
+  void PopTaskForTesting();
+
+ private:
+  bool InsertFenceImpl(EnqueueOrder fence);
+
+  TaskQueueImpl::TaskDeque tasks_;
+  WorkQueueSets* work_queue_sets_ = nullptr;  // NOT OWNED.
+  TaskQueueImpl* const task_queue_;           // NOT OWNED.
+  size_t work_queue_set_index_ = 0;
+  HeapHandle heap_handle_;
+  const char* const name_;
+  // Tasks with enqueue_order >= |fence_| are blocked; EnqueueOrder::none()
+  // means no fence is installed (see BlockedByFence / RemoveFence).
+  EnqueueOrder fence_;
+  const QueueType queue_type_;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkQueue);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
diff --git a/base/task/sequence_manager/work_queue_sets.cc b/base/task/sequence_manager/work_queue_sets.cc
new file mode 100644
index 0000000000..e56fc82e0b
--- /dev/null
+++ b/base/task/sequence_manager/work_queue_sets.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// Creates one heap per set; |name| is used for debug/DCHECK messages only.
+WorkQueueSets::WorkQueueSets(size_t num_sets, const char* name)
+    : work_queue_heaps_(num_sets), name_(name) {}
+
+WorkQueueSets::~WorkQueueSets() = default;
+
+// Registers |work_queue| with set |set_index|. Only queues with a visible
+// (non-empty, unfenced) front task enter the heap.
+void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
+  DCHECK(!work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  work_queue->AssignToWorkQueueSets(this);
+  work_queue->AssignSetIndex(set_index);
+  if (!has_enqueue_order)
+    return;
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+// Detaches |work_queue| and, if it was in a heap, erases it via its handle.
+void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  work_queue->AssignToWorkQueueSets(nullptr);
+  HeapHandle heap_handle = work_queue->heap_handle();
+  // An invalid handle means the queue was never inserted (empty or blocked).
+  if (!heap_handle.IsValid())
+    return;
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+// Moves |work_queue| to set |set_index| (e.g. on a priority change),
+// relocating its heap entry if it has a visible front task.
+void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  size_t old_set = work_queue->work_queue_set_index();
+  DCHECK_LT(old_set, work_queue_heaps_.size());
+  DCHECK_NE(old_set, set_index);
+  work_queue->AssignSetIndex(set_index);
+  if (!has_enqueue_order)
+    return;
+  work_queue_heaps_[old_set].erase(work_queue->heap_handle());
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+// Re-keys |work_queue|'s heap entry after its front task changed in place.
+void WorkQueueSets::OnFrontTaskChanged(WorkQueue* work_queue) {
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  DCHECK(has_enqueue_order);
+  size_t set = work_queue->work_queue_set_index();
+  work_queue_heaps_[set].ChangeKey(work_queue->heap_handle(),
+                                   {enqueue_order, work_queue});
+}
+
+// Inserts |work_queue| into its set's heap now that it has a visible front
+// task (it was previously empty or fence-blocked and thus absent).
+void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
+  // NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
+  // sync.
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  DCHECK(has_enqueue_order);
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size())
+      << " set_index = " << set_index;
+  // |work_queue| should not be in work_queue_heaps_[set_index].
+  DCHECK(!work_queue->heap_handle().IsValid());
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+// Updates the heap after |work_queue| (the current minimum of its set) had
+// its front task popped: re-key with the new front, or remove if drained.
+void WorkQueueSets::OnPopQueue(WorkQueue* work_queue) {
+  // Assume that |work_queue| contains the lowest enqueue_order.
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
+  DCHECK_EQ(work_queue_heaps_[set_index].Min().value, work_queue)
+      << " set_index = " << set_index;
+  DCHECK(work_queue->heap_handle().IsValid());
+  EnqueueOrder enqueue_order;
+  if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
+    // O(log n)
+    work_queue_heaps_[set_index].ReplaceMin({enqueue_order, work_queue});
+  } else {
+    // O(log n)
+    work_queue_heaps_[set_index].Pop();
+    DCHECK(work_queue_heaps_[set_index].empty() ||
+           work_queue_heaps_[set_index].Min().value != work_queue);
+  }
+}
+
+// Removes |work_queue| from its heap after a fence hid its front task.
+void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  HeapHandle heap_handle = work_queue->heap_handle();
+  // Already absent from the heap (was empty/blocked): nothing to do.
+  if (!heap_handle.IsValid())
+    return;
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+// Returns the queue holding the oldest visible task in |set_index|, or false
+// if the set's heap is empty.
+bool WorkQueueSets::GetOldestQueueInSet(size_t set_index,
+                                        WorkQueue** out_work_queue) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  if (work_queue_heaps_[set_index].empty())
+    return false;
+  *out_work_queue = work_queue_heaps_[set_index].Min().value;
+  DCHECK_EQ(set_index, (*out_work_queue)->work_queue_set_index());
+  DCHECK((*out_work_queue)->heap_handle().IsValid());
+  return true;
+}
+
+// As above, but also reports the oldest task's enqueue order (the heap key).
+bool WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
+    size_t set_index,
+    WorkQueue** out_work_queue,
+    EnqueueOrder* out_enqueue_order) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  if (work_queue_heaps_[set_index].empty())
+    return false;
+  const OldestTaskEnqueueOrder& oldest = work_queue_heaps_[set_index].Min();
+  *out_work_queue = oldest.value;
+  *out_enqueue_order = oldest.key;
+  // NOTE(review): |enqueue_order| exists solely for this consistency DCHECK;
+  // presumably DCHECK still references its arguments when compiled out, so no
+  // unused-variable warning results — verify against the DCHECK macro.
+  EnqueueOrder enqueue_order;
+  DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
+         oldest.key == enqueue_order);
+  return true;
+}
+
+// True when no queue in |set_index| has a visible (runnable) front task.
+bool WorkQueueSets::IsSetEmpty(size_t set_index) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size())
+      << " set_index = " << set_index;
+  return work_queue_heaps_[set_index].empty();
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+// Debug/test-only linear scan: |work_queue| "belongs" either via a heap entry
+// (consistency-checked against its front task) or via its back-pointer alone
+// (when it has no visible front task and is therefore not in any heap).
+bool WorkQueueSets::ContainsWorkQueueForTest(
+    const WorkQueue* work_queue) const {
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+
+  for (const IntrusiveHeap<OldestTaskEnqueueOrder>& heap : work_queue_heaps_) {
+    for (const OldestTaskEnqueueOrder& heap_value_pair : heap) {
+      if (heap_value_pair.value == work_queue) {
+        DCHECK(has_enqueue_order);
+        DCHECK_EQ(heap_value_pair.key, enqueue_order);
+        DCHECK_EQ(this, work_queue->work_queue_sets());
+        return true;
+      }
+    }
+  }
+
+  if (work_queue->work_queue_sets() == this) {
+    DCHECK(!has_enqueue_order);
+    return true;
+  }
+
+  return false;
+}
+#endif
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/work_queue_sets.h b/base/task/sequence_manager/work_queue_sets.h
new file mode 100644
index 0000000000..01db04084c
--- /dev/null
+++ b/base/task/sequence_manager/work_queue_sets.h
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
+#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
+
+#include <map>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/intrusive_heap.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// There is a WorkQueueSet for each scheduler priority and each WorkQueueSet
+// uses an IntrusiveHeap to keep track of which queue in the set has the
+// oldest task (i.e. the one that should be run next if the
+// TaskQueueSelector chooses to run a task a given priority). The reason this
+// works is because the heap is keyed by the enqueue order of each queue's
+// front task, so the minimum element is always the oldest.
+class BASE_EXPORT WorkQueueSets {
+ public:
+  WorkQueueSets(size_t num_sets, const char* name);
+  ~WorkQueueSets();
+
+  // O(log num queues)
+  void AddQueue(WorkQueue* queue, size_t set_index);
+
+  // O(log num queues)
+  void RemoveQueue(WorkQueue* work_queue);
+
+  // O(log num queues)
+  void ChangeSetIndex(WorkQueue* queue, size_t set_index);
+
+  // O(log num queues)
+  void OnFrontTaskChanged(WorkQueue* queue);
+
+  // O(log num queues)
+  void OnTaskPushedToEmptyQueue(WorkQueue* work_queue);
+
+  // If empty it's O(1) amortized, otherwise it's O(log num queues)
+  // Assumes |work_queue| contains the lowest enqueue order in the set.
+  void OnPopQueue(WorkQueue* work_queue);
+
+  // O(log num queues)
+  void OnQueueBlocked(WorkQueue* work_queue);
+
+  // O(1)
+  bool GetOldestQueueInSet(size_t set_index, WorkQueue** out_work_queue) const;
+
+  // O(1)
+  bool GetOldestQueueAndEnqueueOrderInSet(
+      size_t set_index,
+      WorkQueue** out_work_queue,
+      EnqueueOrder* out_enqueue_order) const;
+
+  // O(1)
+  bool IsSetEmpty(size_t set_index) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+  // Note this iterates over everything in |work_queue_heaps_|.
+  // It's intended for use with DCHECKS and for testing
+  bool ContainsWorkQueueForTest(const WorkQueue* queue) const;
+#endif
+
+  const char* GetName() const { return name_; }
+
+ private:
+  // Heap node pairing a WorkQueue with the enqueue order of its front task;
+  // the heap keeps the queue with the oldest (smallest) key at the top.
+  struct OldestTaskEnqueueOrder {
+    EnqueueOrder key;
+    WorkQueue* value;
+
+    bool operator<=(const OldestTaskEnqueueOrder& other) const {
+      return key <= other.key;
+    }
+
+    // IntrusiveHeap callbacks: mirror the node's position into the WorkQueue
+    // so it can later be erased/re-keyed in O(log n) via its handle.
+    void SetHeapHandle(HeapHandle handle) { value->set_heap_handle(handle); }
+
+    void ClearHeapHandle() { value->set_heap_handle(HeapHandle()); }
+  };
+
+  // For each set |work_queue_heaps_| has a queue of WorkQueue ordered by the
+  // oldest task in each WorkQueue.
+  std::vector<IntrusiveHeap<OldestTaskEnqueueOrder>> work_queue_heaps_;
+  const char* const name_;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkQueueSets);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
diff --git a/base/task/sequence_manager/work_queue_sets_unittest.cc b/base/task/sequence_manager/work_queue_sets_unittest.cc
new file mode 100644
index 0000000000..b849eec079
--- /dev/null
+++ b/base/task/sequence_manager/work_queue_sets_unittest.cc
@@ -0,0 +1,328 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue_sets.h"
+
+#include <stddef.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/task/sequence_manager/work_queue.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+
+class TimeDomain;
+
+namespace internal {
+
+class WorkQueueSetsTest : public testing::Test {
+ public:
+ void SetUp() override {
+ work_queue_sets_.reset(new WorkQueueSets(kNumSets, "test"));
+ }
+
+ void TearDown() override {
+ for (std::unique_ptr<WorkQueue>& work_queue : work_queues_) {
+ if (work_queue->work_queue_sets())
+ work_queue_sets_->RemoveQueue(work_queue.get());
+ }
+ }
+
+ protected:
+ enum {
+    kNumSets = 5 // An arbitrary choice.
+ };
+
+ WorkQueue* NewTaskQueue(const char* queue_name) {
+ WorkQueue* queue =
+ new WorkQueue(nullptr, "test", WorkQueue::QueueType::kImmediate);
+ work_queues_.push_back(WrapUnique(queue));
+ work_queue_sets_->AddQueue(queue, TaskQueue::kControlPriority);
+ return queue;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce([] {}), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce([] {}), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ fake_task.nestable = Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::vector<std::unique_ptr<WorkQueue>> work_queues_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+};
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ EXPECT_EQ(set, work_queue->work_queue_set_index());
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_QueueEmpty) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+}
+
+TEST_F(WorkQueueSetsTest, OnTaskPushedToEmptyQueue) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+
+ // Calls OnTaskPushedToEmptyQueue.
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_SingleTaskInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueAndEnqueueOrderInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueAndEnqueueOrderInSet(
+ set, &selected_work_queue, &enqueue_order));
+ EXPECT_EQ(work_queue, selected_work_queue);
+ EXPECT_EQ(10u, enqueue_order);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue2");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(1));
+ queue2->Push(FakeTaskWithEnqueueOrder(3));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 3;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ queue2->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue2);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue_QueueBecomesEmpty) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue3->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue3);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest,
+ GetOldestQueueInSet_MultipleAgesInSetIntegerRollover) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(0x7ffffff1));
+ queue2->Push(FakeTaskWithEnqueueOrder(0x7ffffff0));
+ queue3->Push(FakeTaskWithEnqueueOrder(-0x7ffffff1));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet_RemoveQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+ work_queue_sets_->RemoveQueue(queue3);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex_Complex) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ WorkQueue* queue4 = NewTaskQueue("queue4");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ queue4->Push(FakeTaskWithEnqueueOrder(3));
+ size_t set1 = 1;
+ size_t set2 = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set1);
+ work_queue_sets_->ChangeSetIndex(queue2, set1);
+ work_queue_sets_->ChangeSetIndex(queue3, set2);
+ work_queue_sets_->ChangeSetIndex(queue4, set2);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ work_queue_sets_->ChangeSetIndex(queue4, set1);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_NoWork) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_Work) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_FALSE(work_queue_sets_->IsSetEmpty(set));
+
+ work_queue->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(work_queue);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, BlockQueuesByFence) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(7));
+ queue1->Push(FakeTaskWithEnqueueOrder(8));
+ queue2->Push(FakeTaskWithEnqueueOrder(9));
+
+ size_t set = TaskQueue::kControlPriority;
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue1);
+
+ queue1->InsertFence(EnqueueOrder::blocking_fence());
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue2);
+}
+
+TEST_F(WorkQueueSetsTest, PushNonNestableTaskToFront) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue1->PushNonNestableTaskToFront(FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue1, selected_work_queue);
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/base/task/sequence_manager/work_queue_unittest.cc b/base/task/sequence_manager/work_queue_unittest.cc
new file mode 100644
index 0000000000..a71cebcabc
--- /dev/null
+++ b/base/task/sequence_manager/work_queue_unittest.cc
@@ -0,0 +1,475 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/work_queue.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/task/sequence_manager/task_queue_impl.h"
+#include "base/task/sequence_manager/work_queue_sets.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+void NopTask() {}
+
+struct Cancelable {
+ Cancelable() : weak_ptr_factory(this) {}
+
+ void NopTask() {}
+
+ WeakPtrFactory<Cancelable> weak_ptr_factory;
+};
+
+} // namespace
+
+class WorkQueueTest : public testing::Test {
+ public:
+ void SetUp() override {
+ time_domain_.reset(new RealTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ work_queue_.reset(new WorkQueue(task_queue_.get(), "test",
+ WorkQueue::QueueType::kImmediate));
+ work_queue_sets_.reset(new WorkQueueSets(1, "test"));
+ work_queue_sets_->AddQueue(work_queue_.get(), 0);
+ }
+
+ void TearDown() override { work_queue_sets_->RemoveQueue(work_queue_.get()); }
+
+ protected:
+ TaskQueueImpl::Task FakeCancelableTaskWithEnqueueOrder(
+ int enqueue_order,
+ WeakPtr<Cancelable> weak_ptr) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&Cancelable::NopTask, weak_ptr),
+ FROM_HERE),
+ TimeTicks(), EnqueueOrder(),
+ EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&NopTask), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(BindOnce(&NopTask), FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(enqueue_order));
+ fake_task.nestable = Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::unique_ptr<RealTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImpl> task_queue_;
+ std::unique_ptr<WorkQueue> work_queue_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+ std::unique_ptr<TaskQueueImpl::TaskDeque> incoming_queue_;
+};
+
+TEST_F(WorkQueueTest, Empty) {
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, Empty_IgnoresFences) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrderQueueEmpty) {
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrder) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskQueueEmpty) {
+ EXPECT_EQ(nullptr, work_queue_->GetFrontTask());
+}
+
+TEST_F(WorkQueueTest, GetFrontTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, GetBackTask_Empty) {
+ EXPECT_EQ(nullptr, work_queue_->GetBackTask());
+}
+
+TEST_F(WorkQueueTest, GetBackTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, Push) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+}
+
+TEST_F(WorkQueueTest, PushAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFront) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(3));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontBeforeFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueue) {
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueueAfterFenceHit) {
+ work_queue_->InsertFence(EnqueueOrder::blocking_fence());
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue_HitFence) {
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceBeforeEnqueueing) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueingNonBlocking) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(5)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ // NB in reality a fence will always be greater than any currently enqueued
+ // tasks.
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertNewFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ // Note until TakeTaskFromWorkQueue() is called we don't hit the fence.
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ // Inserting the new fence should temporarily unblock the queue until the new
+ // one is hit.
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(6)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(4ull, enqueue_order);
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, PushWithNonEmptyQueueDoesNotHitFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3));
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->RemoveFence());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceButNoFence) {
+ EXPECT_FALSE(work_queue_->RemoveFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceNothingUnblocked) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->RemoveFence());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFence) {
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePopBecomesEmpty) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePop) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InitiallyEmptyBlockedByFenceNewFenceUnblocks) {
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(3)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFenceNewFenceUnblocks) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(2)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->InsertFence(EnqueueOrder::FromIntForTesting(4)));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueuing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->InsertFence(EnqueueOrder::blocking_fence()));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFront) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ }
+ EXPECT_TRUE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(5ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFrontTasksNotCanceled) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ EXPECT_FALSE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+ }
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base