Diffstat (limited to 'mojo/edk/system')
-rw-r--r--  mojo/edk/system/BUILD.gn | 205
-rw-r--r--  mojo/edk/system/atomic_flag.h | 57
-rw-r--r--  mojo/edk/system/broker.h | 52
-rw-r--r--  mojo/edk/system/broker_host.cc | 153
-rw-r--r--  mojo/edk/system/broker_host.h | 64
-rw-r--r--  mojo/edk/system/broker_messages.h | 80
-rw-r--r--  mojo/edk/system/broker_posix.cc | 125
-rw-r--r--  mojo/edk/system/broker_win.cc | 155
-rw-r--r--  mojo/edk/system/channel.cc | 683
-rw-r--r--  mojo/edk/system/channel.h | 303
-rw-r--r--  mojo/edk/system/channel_posix.cc | 572
-rw-r--r--  mojo/edk/system/channel_unittest.cc | 177
-rw-r--r--  mojo/edk/system/channel_win.cc | 360
-rw-r--r--  mojo/edk/system/configuration.cc | 25
-rw-r--r--  mojo/edk/system/configuration.h | 29
-rw-r--r--  mojo/edk/system/core.cc | 1019
-rw-r--r--  mojo/edk/system/core.h | 297
-rw-r--r--  mojo/edk/system/core_test_base.cc | 272
-rw-r--r--  mojo/edk/system/core_test_base.h | 94
-rw-r--r--  mojo/edk/system/core_unittest.cc | 971
-rw-r--r--  mojo/edk/system/data_pipe_consumer_dispatcher.cc | 562
-rw-r--r--  mojo/edk/system/data_pipe_consumer_dispatcher.h | 123
-rw-r--r--  mojo/edk/system/data_pipe_control_message.cc | 35
-rw-r--r--  mojo/edk/system/data_pipe_control_message.h | 43
-rw-r--r--  mojo/edk/system/data_pipe_producer_dispatcher.cc | 507
-rw-r--r--  mojo/edk/system/data_pipe_producer_dispatcher.h | 123
-rw-r--r--  mojo/edk/system/data_pipe_unittest.cc | 2034
-rw-r--r--  mojo/edk/system/dispatcher.cc | 198
-rw-r--r--  mojo/edk/system/dispatcher.h | 245
-rw-r--r--  mojo/edk/system/handle_signals_state.h | 13
-rw-r--r--  mojo/edk/system/handle_table.cc | 135
-rw-r--r--  mojo/edk/system/handle_table.h | 75
-rw-r--r--  mojo/edk/system/mach_port_relay.cc | 248
-rw-r--r--  mojo/edk/system/mach_port_relay.h | 94
-rw-r--r--  mojo/edk/system/mapping_table.cc | 48
-rw-r--r--  mojo/edk/system/mapping_table.h | 57
-rw-r--r--  mojo/edk/system/message_for_transit.cc | 136
-rw-r--r--  mojo/edk/system/message_for_transit.h | 115
-rw-r--r--  mojo/edk/system/message_pipe_dispatcher.cc | 554
-rw-r--r--  mojo/edk/system/message_pipe_dispatcher.h | 115
-rw-r--r--  mojo/edk/system/message_pipe_perftest.cc | 167
-rw-r--r--  mojo/edk/system/message_pipe_unittest.cc | 699
-rw-r--r--  mojo/edk/system/multiprocess_message_pipe_unittest.cc | 1366
-rw-r--r--  mojo/edk/system/node_channel.cc | 905
-rw-r--r--  mojo/edk/system/node_channel.h | 219
-rw-r--r--  mojo/edk/system/node_controller.cc | 1470
-rw-r--r--  mojo/edk/system/node_controller.h | 378
-rw-r--r--  mojo/edk/system/options_validation.h | 97
-rw-r--r--  mojo/edk/system/options_validation_unittest.cc | 134
-rw-r--r--  mojo/edk/system/platform_handle_dispatcher.cc | 104
-rw-r--r--  mojo/edk/system/platform_handle_dispatcher.h | 61
-rw-r--r--  mojo/edk/system/platform_handle_dispatcher_unittest.cc | 123
-rw-r--r--  mojo/edk/system/platform_wrapper_unittest.cc | 212
-rw-r--r--  mojo/edk/system/ports/BUILD.gn | 46
-rw-r--r--  mojo/edk/system/ports/event.cc | 46
-rw-r--r--  mojo/edk/system/ports/event.h | 111
-rw-r--r--  mojo/edk/system/ports/message.cc | 100
-rw-r--r--  mojo/edk/system/ports/message.h | 93
-rw-r--r--  mojo/edk/system/ports/message_filter.h | 29
-rw-r--r--  mojo/edk/system/ports/message_queue.cc | 87
-rw-r--r--  mojo/edk/system/ports/message_queue.h | 73
-rw-r--r--  mojo/edk/system/ports/name.cc | 26
-rw-r--r--  mojo/edk/system/ports/name.h | 74
-rw-r--r--  mojo/edk/system/ports/node.cc | 1385
-rw-r--r--  mojo/edk/system/ports/node.h | 228
-rw-r--r--  mojo/edk/system/ports/node_delegate.h | 48
-rw-r--r--  mojo/edk/system/ports/port.cc | 24
-rw-r--r--  mojo/edk/system/ports/port.h | 60
-rw-r--r--  mojo/edk/system/ports/port_ref.cc | 36
-rw-r--r--  mojo/edk/system/ports/port_ref.h | 41
-rw-r--r--  mojo/edk/system/ports/ports_unittest.cc | 1478
-rw-r--r--  mojo/edk/system/ports/user_data.h | 25
-rw-r--r--  mojo/edk/system/ports_message.cc | 62
-rw-r--r--  mojo/edk/system/ports_message.h | 69
-rw-r--r--  mojo/edk/system/request_context.cc | 110
-rw-r--r--  mojo/edk/system/request_context.h | 107
-rw-r--r--  mojo/edk/system/shared_buffer_dispatcher.cc | 339
-rw-r--r--  mojo/edk/system/shared_buffer_dispatcher.h | 127
-rw-r--r--  mojo/edk/system/shared_buffer_dispatcher_unittest.cc | 312
-rw-r--r--  mojo/edk/system/shared_buffer_unittest.cc | 318
-rw-r--r--  mojo/edk/system/signals_unittest.cc | 76
-rw-r--r--  mojo/edk/system/system_impl_export.h | 29
-rw-r--r--  mojo/edk/system/test_utils.cc | 76
-rw-r--r--  mojo/edk/system/test_utils.h | 59
-rw-r--r--  mojo/edk/system/watch.cc | 83
-rw-r--r--  mojo/edk/system/watch.h | 124
-rw-r--r--  mojo/edk/system/watcher_dispatcher.cc | 232
-rw-r--r--  mojo/edk/system/watcher_dispatcher.h | 101
-rw-r--r--  mojo/edk/system/watcher_set.cc | 82
-rw-r--r--  mojo/edk/system/watcher_set.h | 71
-rw-r--r--  mojo/edk/system/watcher_unittest.cc | 1637
91 files changed, 25112 insertions, 0 deletions
diff --git a/mojo/edk/system/BUILD.gn b/mojo/edk/system/BUILD.gn
new file mode 100644
index 0000000000..a68cd44ff1
--- /dev/null
+++ b/mojo/edk/system/BUILD.gn
@@ -0,0 +1,205 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/nacl/config.gni")
+import("//testing/test.gni")
+import("../../../mojo/public/tools/bindings/mojom.gni")
+
+if (is_android) {
+ import("//build/config/android/config.gni")
+ import("//build/config/android/rules.gni")
+}
+
+component("system") {
+ output_name = "mojo_system_impl"
+
+ sources = [
+ "atomic_flag.h",
+ "broker.h",
+ "broker_host.cc",
+ "broker_host.h",
+ "broker_posix.cc",
+ "broker_win.cc",
+ "channel.cc",
+ "channel.h",
+ "channel_posix.cc",
+ "channel_win.cc",
+ "configuration.cc",
+ "configuration.h",
+ "core.cc",
+ "core.h",
+ "data_pipe_consumer_dispatcher.cc",
+ "data_pipe_consumer_dispatcher.h",
+ "data_pipe_control_message.cc",
+ "data_pipe_control_message.h",
+ "data_pipe_producer_dispatcher.cc",
+ "data_pipe_producer_dispatcher.h",
+ "dispatcher.cc",
+ "dispatcher.h",
+ "handle_signals_state.h",
+ "handle_table.cc",
+ "handle_table.h",
+ "mapping_table.cc",
+ "mapping_table.h",
+ "message_for_transit.cc",
+ "message_for_transit.h",
+ "message_pipe_dispatcher.cc",
+ "message_pipe_dispatcher.h",
+ "node_channel.cc",
+ "node_channel.h",
+ "node_controller.cc",
+ "node_controller.h",
+ "options_validation.h",
+ "platform_handle_dispatcher.cc",
+ "platform_handle_dispatcher.h",
+ "ports_message.cc",
+ "ports_message.h",
+ "request_context.cc",
+ "request_context.h",
+ "shared_buffer_dispatcher.cc",
+ "shared_buffer_dispatcher.h",
+ "watch.cc",
+ "watch.h",
+ "watcher_dispatcher.cc",
+ "watcher_dispatcher.h",
+ "watcher_set.cc",
+ "watcher_set.h",
+ ]
+
+ defines = [ "MOJO_SYSTEM_IMPL_IMPLEMENTATION" ]
+
+ public_deps = [
+ "//mojo/edk/embedder",
+ "//mojo/edk/embedder:platform",
+ "//mojo/edk/system/ports",
+ "//mojo/public/c/system",
+ "//mojo/public/cpp/system",
+ ]
+
+ deps = [
+ "//base",
+ ]
+
+ if (!is_nacl) {
+ deps += [ "//crypto" ]
+ }
+
+ if (is_win) {
+ cflags = [ "/wd4324" ] # Structure was padded due to __declspec(align()),
+ # which is uninteresting.
+ }
+
+ if (is_mac && !is_ios) {
+ sources += [
+ "mach_port_relay.cc",
+ "mach_port_relay.h",
+ ]
+ }
+
+ if (is_nacl && !is_nacl_nonsfi) {
+ sources -= [
+ "broker_host.cc",
+ "broker_posix.cc",
+ "channel_posix.cc",
+ ]
+ }
+
+ # Use target_os == "chromeos" instead of is_chromeos because we need to
+ # build NaCl targets (i.e. IRT) for ChromeOS the same as the rest of ChromeOS.
+ if (is_android || target_os == "chromeos") {
+ defines += [ "MOJO_EDK_LEGACY_PROTOCOL" ]
+ }
+
+ allow_circular_includes_from = [ "//mojo/edk/embedder" ]
+}
+
+group("tests") {
+ testonly = true
+ deps = [
+ ":mojo_system_unittests",
+ ]
+
+ if (!is_ios) {
+ deps += [ ":mojo_message_pipe_perftests" ]
+ }
+}
+
+source_set("test_utils") {
+ testonly = true
+
+ sources = [
+ "test_utils.cc",
+ "test_utils.h",
+ ]
+
+ public_deps = [
+ "//mojo/public/c/system",
+ "//mojo/public/cpp/system",
+ ]
+
+ deps = [
+ "//base",
+ "//base/test:test_support",
+ "//mojo/edk/test:test_support",
+ "//testing/gtest:gtest",
+ ]
+}
+
+test("mojo_system_unittests") {
+ sources = [
+ "channel_unittest.cc",
+ "core_test_base.cc",
+ "core_test_base.h",
+ "core_unittest.cc",
+ "message_pipe_unittest.cc",
+ "options_validation_unittest.cc",
+ "platform_handle_dispatcher_unittest.cc",
+ "shared_buffer_dispatcher_unittest.cc",
+ "shared_buffer_unittest.cc",
+ "signals_unittest.cc",
+ "watcher_unittest.cc",
+ ]
+
+ if (!is_ios) {
+ sources += [
+ "data_pipe_unittest.cc",
+ "multiprocess_message_pipe_unittest.cc",
+ "platform_wrapper_unittest.cc",
+ ]
+ }
+
+ deps = [
+ ":test_utils",
+ "//base",
+ "//base/test:test_support",
+ "//mojo/edk/embedder:embedder_unittests",
+ "//mojo/edk/system",
+ "//mojo/edk/system/ports:tests",
+ "//mojo/edk/test:run_all_unittests",
+ "//mojo/edk/test:test_support",
+ "//mojo/public/cpp/system",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ allow_circular_includes_from = [ "//mojo/edk/embedder:embedder_unittests" ]
+}
+
+if (!is_ios) {
+ test("mojo_message_pipe_perftests") {
+ sources = [
+ "message_pipe_perftest.cc",
+ ]
+
+ deps = [
+ ":test_utils",
+ "//base",
+ "//base/test:test_support",
+ "//mojo/edk/system",
+ "//mojo/edk/test:run_all_perftests",
+ "//mojo/edk/test:test_support",
+ "//testing/gtest",
+ ]
+ }
+}
diff --git a/mojo/edk/system/atomic_flag.h b/mojo/edk/system/atomic_flag.h
new file mode 100644
index 0000000000..6bdcfaaddd
--- /dev/null
+++ b/mojo/edk/system/atomic_flag.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_ATOMIC_FLAG_H_
+#define MOJO_EDK_SYSTEM_ATOMIC_FLAG_H_
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+
+namespace mojo {
+namespace edk {
+
+// AtomicFlag is a boolean flag that can be set and tested atomically. It is
+// intended for fast-path checks where, in the common case, the governing
+// mutex would otherwise be acquired and released immediately after checking.
+//
+// Example usage:
+// void DoFoo(Bar* bar) {
+// AutoLock l(lock_);
+// queue_.push_back(bar);
+// flag_.Set(true);
+// }
+//
+// void Baz() {
+// if (!flag_) // Assume this is the common case.
+// return;
+//
+// AutoLock l(lock_);
+// ... drain queue_ ...
+// flag_.Set(false);
+// }
+class AtomicFlag {
+ public:
+ AtomicFlag() : flag_(0) {}
+ ~AtomicFlag() {}
+
+ void Set(bool value) {
+ base::subtle::Release_Store(&flag_, value ? 1 : 0);
+ }
+
+ bool Get() const {
+ return base::subtle::Acquire_Load(&flag_) ? true : false;
+ }
+
+ operator const bool() const { return Get(); }
+
+ private:
+ base::subtle::Atomic32 flag_;
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_ATOMIC_FLAG_H_
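To make the memory-ordering guarantee concrete: Release_Store() in Set() pairs with Acquire_Load() in Get(), so data written under the lock before Set(true) is visible to any thread that later observes Get() returning true. A minimal sketch of that pattern, spelled out as compilable code; WorkQueue and its members are illustrative and not part of this change.

    #include <vector>

    #include "base/synchronization/lock.h"
    #include "mojo/edk/system/atomic_flag.h"

    class WorkQueue {
     public:
      void Push(int item) {
        base::AutoLock hold(lock_);
        items_.push_back(item);
        // Release store: the push above is visible to any thread that
        // subsequently observes the flag as set.
        pending_.Set(true);
      }

      void DrainIfPending() {
        if (!pending_)  // Acquire load; the common (empty) case takes no lock.
          return;
        base::AutoLock hold(lock_);
        items_.clear();
        pending_.Set(false);
      }

     private:
      base::Lock lock_;
      std::vector<int> items_;
      mojo::edk::AtomicFlag pending_;
    };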
diff --git a/mojo/edk/system/broker.h b/mojo/edk/system/broker.h
new file mode 100644
index 0000000000..1577972a6d
--- /dev/null
+++ b/mojo/edk/system/broker.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_BROKER_H_
+#define MOJO_EDK_SYSTEM_BROKER_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+
+namespace mojo {
+namespace edk {
+
+class PlatformSharedBuffer;
+
+// The Broker is a channel to the parent process, which allows synchronous IPCs.
+class Broker {
+ public:
+ // Note: This is blocking, and will wait for the first message over
+ // |platform_handle|.
+ explicit Broker(ScopedPlatformHandle platform_handle);
+ ~Broker();
+
+ // Returns the platform handle that should be used to establish a NodeChannel
+ // to the parent process.
+ ScopedPlatformHandle GetParentPlatformHandle();
+
+ // Request a shared buffer from the parent process. Blocks the current thread.
+ scoped_refptr<PlatformSharedBuffer> GetSharedBuffer(size_t num_bytes);
+
+ private:
+ // Handle to the parent process, used for synchronous IPCs.
+ ScopedPlatformHandle sync_channel_;
+
+ // Handle to the parent process, received in the first message over
+ // |sync_channel_|.
+ ScopedPlatformHandle parent_channel_;
+
+ // Lock to only allow one sync message at a time. This avoids having to deal
+ // with message ordering since we can only have one request at a time
+ // in-flight.
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(Broker);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_BROKER_H_
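As a usage illustration, a minimal sketch of the child-process side driven purely by the interface above. How the broker handle reaches the child (normally handle inheritance at launch time) is assumed here, and error handling is elided.

    #include <utility>

    #include "base/memory/ref_counted.h"
    #include "mojo/edk/embedder/platform_shared_buffer.h"
    #include "mojo/edk/embedder/scoped_platform_handle.h"
    #include "mojo/edk/system/broker.h"

    // Sketch: child side of the synchronous broker. |broker_handle| is assumed
    // to have been passed in by the parent process at launch.
    void InitializeChildBroker(mojo::edk::ScopedPlatformHandle broker_handle) {
      // Blocks until the parent's first INIT message arrives.
      mojo::edk::Broker broker(std::move(broker_handle));

      // Handle the child should use to bring up its NodeChannel to the parent.
      mojo::edk::ScopedPlatformHandle node_channel =
          broker.GetParentPlatformHandle();

      // Synchronous request: ask the parent to create shared memory on our
      // behalf (useful when the sandbox forbids creating it directly).
      scoped_refptr<mojo::edk::PlatformSharedBuffer> buffer =
          broker.GetSharedBuffer(64 * 1024);
      // |buffer| may be null if the parent has gone away; callers must cope.
    }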
diff --git a/mojo/edk/system/broker_host.cc b/mojo/edk/system/broker_host.cc
new file mode 100644
index 0000000000..6096034fa2
--- /dev/null
+++ b/mojo/edk/system/broker_host.cc
@@ -0,0 +1,153 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/broker_host.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "mojo/edk/embedder/named_platform_channel_pair.h"
+#include "mojo/edk/embedder/named_platform_handle.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/broker_messages.h"
+
+namespace mojo {
+namespace edk {
+
+BrokerHost::BrokerHost(base::ProcessHandle client_process,
+ ScopedPlatformHandle platform_handle)
+#if defined(OS_WIN)
+ : client_process_(client_process)
+#endif
+{
+ CHECK(platform_handle.is_valid());
+
+ base::MessageLoop::current()->AddDestructionObserver(this);
+
+ channel_ = Channel::Create(this, ConnectionParams(std::move(platform_handle)),
+ base::ThreadTaskRunnerHandle::Get());
+ channel_->Start();
+}
+
+BrokerHost::~BrokerHost() {
+ // We're always destroyed on the creation thread, which is the IO thread.
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+
+ if (channel_)
+ channel_->ShutDown();
+}
+
+bool BrokerHost::PrepareHandlesForClient(PlatformHandleVector* handles) {
+#if defined(OS_WIN)
+ if (!Channel::Message::RewriteHandles(
+ base::GetCurrentProcessHandle(), client_process_, handles)) {
+ // NOTE: We only log an error here. We do not signal a logical error or
+ // prevent any message from being sent. The client should handle unexpected
+ // invalid handles appropriately.
+ DLOG(ERROR) << "Failed to rewrite one or more handles to broker client.";
+ return false;
+ }
+#endif
+ return true;
+}
+
+bool BrokerHost::SendChannel(ScopedPlatformHandle handle) {
+ CHECK(handle.is_valid());
+ CHECK(channel_);
+
+#if defined(OS_WIN)
+ InitData* data;
+ Channel::MessagePtr message =
+ CreateBrokerMessage(BrokerMessageType::INIT, 1, 0, &data);
+ data->pipe_name_length = 0;
+#else
+ Channel::MessagePtr message =
+ CreateBrokerMessage(BrokerMessageType::INIT, 1, nullptr);
+#endif
+ ScopedPlatformHandleVectorPtr handles;
+ handles.reset(new PlatformHandleVector(1));
+ handles->at(0) = handle.release();
+
+ // This may legitimately fail on Windows if the client process is in another
+ // session, e.g., is an elevated process.
+ if (!PrepareHandlesForClient(handles.get()))
+ return false;
+
+ message->SetHandles(std::move(handles));
+ channel_->Write(std::move(message));
+ return true;
+}
+
+#if defined(OS_WIN)
+
+void BrokerHost::SendNamedChannel(const base::StringPiece16& pipe_name) {
+ InitData* data;
+ base::char16* name_data;
+ Channel::MessagePtr message = CreateBrokerMessage(
+ BrokerMessageType::INIT, 0, sizeof(*name_data) * pipe_name.length(),
+ &data, reinterpret_cast<void**>(&name_data));
+ data->pipe_name_length = static_cast<uint32_t>(pipe_name.length());
+ std::copy(pipe_name.begin(), pipe_name.end(), name_data);
+ channel_->Write(std::move(message));
+}
+
+#endif // defined(OS_WIN)
+
+void BrokerHost::OnBufferRequest(uint32_t num_bytes) {
+ scoped_refptr<PlatformSharedBuffer> read_only_buffer;
+ scoped_refptr<PlatformSharedBuffer> buffer =
+ PlatformSharedBuffer::Create(num_bytes);
+ if (buffer)
+ read_only_buffer = buffer->CreateReadOnlyDuplicate();
+ if (!read_only_buffer)
+ buffer = nullptr;
+
+ Channel::MessagePtr message = CreateBrokerMessage(
+ BrokerMessageType::BUFFER_RESPONSE, buffer ? 2 : 0, nullptr);
+ if (buffer) {
+ ScopedPlatformHandleVectorPtr handles;
+ handles.reset(new PlatformHandleVector(2));
+ handles->at(0) = buffer->PassPlatformHandle().release();
+ handles->at(1) = read_only_buffer->PassPlatformHandle().release();
+ PrepareHandlesForClient(handles.get());
+ message->SetHandles(std::move(handles));
+ }
+
+ channel_->Write(std::move(message));
+}
+
+void BrokerHost::OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) {
+ if (payload_size < sizeof(BrokerMessageHeader))
+ return;
+
+ const BrokerMessageHeader* header =
+ static_cast<const BrokerMessageHeader*>(payload);
+ switch (header->type) {
+ case BrokerMessageType::BUFFER_REQUEST:
+ if (payload_size ==
+ sizeof(BrokerMessageHeader) + sizeof(BufferRequestData)) {
+ const BufferRequestData* request =
+ reinterpret_cast<const BufferRequestData*>(header + 1);
+ OnBufferRequest(request->size);
+ }
+ break;
+
+ default:
+ LOG(ERROR) << "Unexpected broker message type: " << header->type;
+ break;
+ }
+}
+
+void BrokerHost::OnChannelError() { delete this; }
+
+void BrokerHost::WillDestroyCurrentMessageLoop() { delete this; }
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/broker_host.h b/mojo/edk/system/broker_host.h
new file mode 100644
index 0000000000..a7995d2b0f
--- /dev/null
+++ b/mojo/edk/system/broker_host.h
@@ -0,0 +1,64 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_BROKER_HOST_H_
+#define MOJO_EDK_SYSTEM_BROKER_HOST_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_piece.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/channel.h"
+
+namespace mojo {
+namespace edk {
+
+// The BrokerHost is a channel to the child process, which services synchronous
+// IPCs.
+class BrokerHost : public Channel::Delegate,
+ public base::MessageLoop::DestructionObserver {
+ public:
+ BrokerHost(base::ProcessHandle client_process, ScopedPlatformHandle handle);
+
+ // Send |handle| to the child, to be used to establish a NodeChannel to us.
+ bool SendChannel(ScopedPlatformHandle handle);
+
+#if defined(OS_WIN)
+ // Sends a named channel to the child. Like above, but for named pipes.
+ void SendNamedChannel(const base::StringPiece16& pipe_name);
+#endif
+
+ private:
+ ~BrokerHost() override;
+
+ bool PrepareHandlesForClient(PlatformHandleVector* handles);
+
+ // Channel::Delegate:
+ void OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) override;
+ void OnChannelError() override;
+
+ // base::MessageLoop::DestructionObserver:
+ void WillDestroyCurrentMessageLoop() override;
+
+ void OnBufferRequest(uint32_t num_bytes);
+
+#if defined(OS_WIN)
+ base::ProcessHandle client_process_;
+#endif
+
+ scoped_refptr<Channel> channel_;
+
+ DISALLOW_COPY_AND_ASSIGN(BrokerHost);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_BROKER_HOST_H_
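For symmetry, a sketch of the parent side. BrokerHost manages its own lifetime: it deletes itself on channel error or when the IO thread's MessageLoop is destroyed, so it is heap-allocated and never explicitly deleted. How the two handles are minted (e.g. one PlatformChannelPair per pipe) is an assumption about the embedder setup; this must run on the IO thread.

    #include <utility>

    #include "base/process/process_handle.h"
    #include "mojo/edk/embedder/scoped_platform_handle.h"
    #include "mojo/edk/system/broker_host.h"

    // Sketch: parent side of the broker, run on the IO thread.
    void StartBrokerForChild(base::ProcessHandle child_process,
                             mojo::edk::ScopedPlatformHandle broker_pipe,
                             mojo::edk::ScopedPlatformHandle node_channel_pipe) {
      // Owns itself; destroyed via OnChannelError() or
      // WillDestroyCurrentMessageLoop().
      mojo::edk::BrokerHost* broker_host =
          new mojo::edk::BrokerHost(child_process, std::move(broker_pipe));

      // Tell the child which handle to use for its NodeChannel back to us.
      broker_host->SendChannel(std::move(node_channel_pipe));
    }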
diff --git a/mojo/edk/system/broker_messages.h b/mojo/edk/system/broker_messages.h
new file mode 100644
index 0000000000..0f0dd9dc42
--- /dev/null
+++ b/mojo/edk/system/broker_messages.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_BROKER_MESSAGES_H_
+#define MOJO_EDK_SYSTEM_BROKER_MESSAGES_H_
+
+#include "mojo/edk/system/channel.h"
+
+namespace mojo {
+namespace edk {
+
+#pragma pack(push, 1)
+
+enum BrokerMessageType : uint32_t {
+ INIT,
+ BUFFER_REQUEST,
+ BUFFER_RESPONSE,
+};
+
+struct BrokerMessageHeader {
+ BrokerMessageType type;
+ uint32_t padding;
+};
+
+static_assert(IsAlignedForChannelMessage(sizeof(BrokerMessageHeader)),
+ "Invalid header size.");
+
+struct BufferRequestData {
+ uint32_t size;
+};
+
+#if defined(OS_WIN)
+struct InitData {
+ // NOTE: InitData in the payload is followed by string16 data with exactly
+ // |pipe_name_length| wide characters (i.e., |pipe_name_length| * 2 bytes).
+ // This applies to Windows only.
+ uint32_t pipe_name_length;
+};
+#endif
+
+#pragma pack(pop)
+
+template <typename T>
+inline Channel::MessagePtr CreateBrokerMessage(
+ BrokerMessageType type,
+ size_t num_handles,
+ size_t extra_data_size,
+ T** out_message_data,
+ void** out_extra_data = nullptr) {
+ const size_t message_size = sizeof(BrokerMessageHeader) +
+ sizeof(**out_message_data) + extra_data_size;
+ Channel::MessagePtr message(new Channel::Message(message_size, num_handles));
+ BrokerMessageHeader* header =
+ reinterpret_cast<BrokerMessageHeader*>(message->mutable_payload());
+ header->type = type;
+ header->padding = 0;
+ *out_message_data = reinterpret_cast<T*>(header + 1);
+ if (out_extra_data)
+ *out_extra_data = *out_message_data + 1;
+ return message;
+}
+
+inline Channel::MessagePtr CreateBrokerMessage(
+ BrokerMessageType type,
+ size_t num_handles,
+ std::nullptr_t** dummy_out_data) {
+ Channel::MessagePtr message(
+ new Channel::Message(sizeof(BrokerMessageHeader), num_handles));
+ BrokerMessageHeader* header =
+ reinterpret_cast<BrokerMessageHeader*>(message->mutable_payload());
+ header->type = type;
+ header->padding = 0;
+ return message;
+}
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_BROKER_MESSAGES_H_
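To make the CreateBrokerMessage() overloads concrete, here is the BUFFER_REQUEST construction, mirroring (modulo naming) Broker::GetSharedBuffer() in the platform implementations later in this change; the wrapper function name is illustrative.

    #include <stdint.h>

    #include "mojo/edk/system/broker_messages.h"

    // Builds a BUFFER_REQUEST: header + fixed-size data, no handles, no extra
    // trailing data.
    mojo::edk::Channel::MessagePtr MakeBufferRequest(uint32_t num_bytes) {
      mojo::edk::BufferRequestData* request = nullptr;
      mojo::edk::Channel::MessagePtr message = mojo::edk::CreateBrokerMessage(
          mojo::edk::BrokerMessageType::BUFFER_REQUEST, 0 /* num_handles */,
          0 /* extra_data_size */, &request);
      request->size = num_bytes;
      // |message| now holds [BrokerMessageHeader][BufferRequestData], 8-byte
      // aligned and ready to write to the sync channel.
      return message;
    }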
diff --git a/mojo/edk/system/broker_posix.cc b/mojo/edk/system/broker_posix.cc
new file mode 100644
index 0000000000..8742f709a7
--- /dev/null
+++ b/mojo/edk/system/broker_posix.cc
@@ -0,0 +1,125 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/broker.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <utility>
+
+#include "base/logging.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/embedder/platform_channel_utils_posix.h"
+#include "mojo/edk/embedder/platform_handle_utils.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/broker_messages.h"
+#include "mojo/edk/system/channel.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+bool WaitForBrokerMessage(PlatformHandle platform_handle,
+ BrokerMessageType expected_type,
+ size_t expected_num_handles,
+ std::deque<PlatformHandle>* incoming_handles) {
+ Channel::MessagePtr message(
+ new Channel::Message(sizeof(BrokerMessageHeader), expected_num_handles));
+ std::deque<PlatformHandle> incoming_platform_handles;
+ ssize_t read_result = PlatformChannelRecvmsg(
+ platform_handle, const_cast<void*>(message->data()),
+ message->data_num_bytes(), &incoming_platform_handles, true /* block */);
+ bool error = false;
+ if (read_result < 0) {
+ PLOG(ERROR) << "Recvmsg error";
+ error = true;
+ } else if (static_cast<size_t>(read_result) != message->data_num_bytes()) {
+ LOG(ERROR) << "Invalid node channel message";
+ error = true;
+ } else if (incoming_platform_handles.size() != expected_num_handles) {
+ LOG(ERROR) << "Received unexpected number of handles";
+ error = true;
+ }
+
+ if (!error) {
+ const BrokerMessageHeader* header =
+ reinterpret_cast<const BrokerMessageHeader*>(message->payload());
+ if (header->type != expected_type) {
+ LOG(ERROR) << "Unexpected message";
+ error = true;
+ }
+ }
+
+ if (error) {
+ CloseAllPlatformHandles(&incoming_platform_handles);
+ } else {
+ if (incoming_handles)
+ incoming_handles->swap(incoming_platform_handles);
+ }
+ return !error;
+}
+
+} // namespace
+
+Broker::Broker(ScopedPlatformHandle platform_handle)
+ : sync_channel_(std::move(platform_handle)) {
+ CHECK(sync_channel_.is_valid());
+
+ // Mark the channel as blocking.
+ int flags = fcntl(sync_channel_.get().handle, F_GETFL);
+ PCHECK(flags != -1);
+ flags = fcntl(sync_channel_.get().handle, F_SETFL, flags & ~O_NONBLOCK);
+ PCHECK(flags != -1);
+
+ // Wait for the first message, which should contain a handle.
+ std::deque<PlatformHandle> incoming_platform_handles;
+ if (WaitForBrokerMessage(sync_channel_.get(), BrokerMessageType::INIT, 1,
+ &incoming_platform_handles)) {
+ parent_channel_ = ScopedPlatformHandle(incoming_platform_handles.front());
+ }
+}
+
+Broker::~Broker() = default;
+
+ScopedPlatformHandle Broker::GetParentPlatformHandle() {
+ return std::move(parent_channel_);
+}
+
+scoped_refptr<PlatformSharedBuffer> Broker::GetSharedBuffer(size_t num_bytes) {
+ base::AutoLock lock(lock_);
+
+ BufferRequestData* buffer_request;
+ Channel::MessagePtr out_message = CreateBrokerMessage(
+ BrokerMessageType::BUFFER_REQUEST, 0, 0, &buffer_request);
+ buffer_request->size = num_bytes;
+ ssize_t write_result = PlatformChannelWrite(
+ sync_channel_.get(), out_message->data(), out_message->data_num_bytes());
+ if (write_result < 0) {
+ PLOG(ERROR) << "Error sending sync broker message";
+ return nullptr;
+ } else if (static_cast<size_t>(write_result) !=
+ out_message->data_num_bytes()) {
+ LOG(ERROR) << "Error sending complete broker message";
+ return nullptr;
+ }
+
+ std::deque<PlatformHandle> incoming_platform_handles;
+ if (WaitForBrokerMessage(sync_channel_.get(),
+ BrokerMessageType::BUFFER_RESPONSE, 2,
+ &incoming_platform_handles)) {
+ ScopedPlatformHandle rw_handle(incoming_platform_handles.front());
+ incoming_platform_handles.pop_front();
+ ScopedPlatformHandle ro_handle(incoming_platform_handles.front());
+ return PlatformSharedBuffer::CreateFromPlatformHandlePair(
+ num_bytes, std::move(rw_handle), std::move(ro_handle));
+ }
+
+ return nullptr;
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/broker_win.cc b/mojo/edk/system/broker_win.cc
new file mode 100644
index 0000000000..063282c146
--- /dev/null
+++ b/mojo/edk/system/broker_win.cc
@@ -0,0 +1,155 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <limits>
+#include <utility>
+
+#include "base/debug/alias.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_piece.h"
+#include "mojo/edk/embedder/named_platform_handle.h"
+#include "mojo/edk/embedder/named_platform_handle_utils.h"
+#include "mojo/edk/embedder/platform_handle.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/broker.h"
+#include "mojo/edk/system/broker_messages.h"
+#include "mojo/edk/system/channel.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+// 256 bytes should be enough for anyone!
+const size_t kMaxBrokerMessageSize = 256;
+
+bool TakeHandlesFromBrokerMessage(Channel::Message* message,
+ size_t num_handles,
+ ScopedPlatformHandle* out_handles) {
+ if (message->num_handles() != num_handles) {
+ DLOG(ERROR) << "Received unexpected number of handles in broker message";
+ return false;
+ }
+
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ DCHECK(handles);
+ DCHECK_EQ(handles->size(), num_handles);
+ DCHECK(out_handles);
+
+ for (size_t i = 0; i < num_handles; ++i)
+ out_handles[i] = ScopedPlatformHandle((*handles)[i]);
+ handles->clear();
+ return true;
+}
+
+Channel::MessagePtr WaitForBrokerMessage(PlatformHandle platform_handle,
+ BrokerMessageType expected_type) {
+ char buffer[kMaxBrokerMessageSize];
+ DWORD bytes_read = 0;
+ BOOL result = ::ReadFile(platform_handle.handle, buffer,
+ kMaxBrokerMessageSize, &bytes_read, nullptr);
+ if (!result) {
+ // The pipe may be broken if the browser side has been closed, e.g. during
+ // browser shutdown. In that case the ReadFile call will fail and we
+ // shouldn't continue waiting.
+ PLOG(ERROR) << "Error reading broker pipe";
+ return nullptr;
+ }
+
+ Channel::MessagePtr message =
+ Channel::Message::Deserialize(buffer, static_cast<size_t>(bytes_read));
+ if (!message || message->payload_size() < sizeof(BrokerMessageHeader)) {
+ LOG(ERROR) << "Invalid broker message";
+
+ base::debug::Alias(&buffer[0]);
+ base::debug::Alias(&bytes_read);
+ base::debug::Alias(message.get());
+ CHECK(false);
+ return nullptr;
+ }
+
+ const BrokerMessageHeader* header =
+ reinterpret_cast<const BrokerMessageHeader*>(message->payload());
+ if (header->type != expected_type) {
+ LOG(ERROR) << "Unexpected broker message type";
+
+ base::debug::Alias(&buffer[0]);
+ base::debug::Alias(&bytes_read);
+ base::debug::Alias(message.get());
+ CHECK(false);
+ return nullptr;
+ }
+
+ return message;
+}
+
+} // namespace
+
+Broker::Broker(ScopedPlatformHandle handle) : sync_channel_(std::move(handle)) {
+ CHECK(sync_channel_.is_valid());
+ Channel::MessagePtr message =
+ WaitForBrokerMessage(sync_channel_.get(), BrokerMessageType::INIT);
+
+ // If we fail to read a message (broken pipe), just return early. The parent
+ // handle will be null and callers must handle this gracefully.
+ if (!message)
+ return;
+
+ if (!TakeHandlesFromBrokerMessage(message.get(), 1, &parent_channel_)) {
+ // If the message has no handles, we expect it to carry a pipe name instead.
+ const BrokerMessageHeader* header =
+ static_cast<const BrokerMessageHeader*>(message->payload());
+ CHECK_GE(message->payload_size(),
+ sizeof(BrokerMessageHeader) + sizeof(InitData));
+ const InitData* data = reinterpret_cast<const InitData*>(header + 1);
+ CHECK_EQ(message->payload_size(),
+ sizeof(BrokerMessageHeader) + sizeof(InitData) +
+ data->pipe_name_length * sizeof(base::char16));
+ const base::char16* name_data =
+ reinterpret_cast<const base::char16*>(data + 1);
+ CHECK(data->pipe_name_length);
+ parent_channel_ = CreateClientHandle(NamedPlatformHandle(
+ base::StringPiece16(name_data, data->pipe_name_length)));
+ }
+}
+
+Broker::~Broker() {}
+
+ScopedPlatformHandle Broker::GetParentPlatformHandle() {
+ return std::move(parent_channel_);
+}
+
+scoped_refptr<PlatformSharedBuffer> Broker::GetSharedBuffer(size_t num_bytes) {
+ base::AutoLock lock(lock_);
+ BufferRequestData* buffer_request;
+ Channel::MessagePtr out_message = CreateBrokerMessage(
+ BrokerMessageType::BUFFER_REQUEST, 0, 0, &buffer_request);
+ buffer_request->size = base::checked_cast<uint32_t>(num_bytes);
+ DWORD bytes_written = 0;
+ BOOL result = ::WriteFile(sync_channel_.get().handle, out_message->data(),
+ static_cast<DWORD>(out_message->data_num_bytes()),
+ &bytes_written, nullptr);
+ if (!result ||
+ static_cast<size_t>(bytes_written) != out_message->data_num_bytes()) {
+ LOG(ERROR) << "Error sending sync broker message";
+ return nullptr;
+ }
+
+ ScopedPlatformHandle handles[2];
+ Channel::MessagePtr response = WaitForBrokerMessage(
+ sync_channel_.get(), BrokerMessageType::BUFFER_RESPONSE);
+ if (response &&
+ TakeHandlesFromBrokerMessage(response.get(), 2, &handles[0])) {
+ return PlatformSharedBuffer::CreateFromPlatformHandlePair(
+ num_bytes, std::move(handles[0]), std::move(handles[1]));
+ }
+
+ return nullptr;
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/channel.cc b/mojo/edk/system/channel.cc
new file mode 100644
index 0000000000..8a44d36024
--- /dev/null
+++ b/mojo/edk/system/channel.cc
@@ -0,0 +1,683 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/channel.h"
+
+#include <stddef.h>
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
+#include "base/process/process_handle.h"
+#include "mojo/edk/embedder/platform_handle.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include "base/mac/mach_logging.h"
+#elif defined(OS_WIN)
+#include "base/win/win_util.h"
+#endif
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+static_assert(
+ IsAlignedForChannelMessage(sizeof(Channel::Message::LegacyHeader)),
+ "Invalid LegacyHeader size.");
+
+static_assert(IsAlignedForChannelMessage(sizeof(Channel::Message::Header)),
+ "Invalid Header size.");
+
+static_assert(sizeof(Channel::Message::LegacyHeader) == 8,
+ "LegacyHeader must be 8 bytes on ChromeOS and Android");
+
+static_assert(offsetof(Channel::Message::LegacyHeader, num_bytes) ==
+ offsetof(Channel::Message::Header, num_bytes),
+ "num_bytes should be at the same offset in both Header structs.");
+static_assert(offsetof(Channel::Message::LegacyHeader, message_type) ==
+ offsetof(Channel::Message::Header, message_type),
+ "message_type should be at the same offset in both Header "
+ "structs.");
+
+} // namespace
+
+const size_t kReadBufferSize = 4096;
+const size_t kMaxUnusedReadBufferCapacity = 4096;
+const size_t kMaxChannelMessageSize = 256 * 1024 * 1024;
+const size_t kMaxAttachedHandles = 128;
+
+Channel::Message::Message(size_t payload_size, size_t max_handles)
+#if defined(MOJO_EDK_LEGACY_PROTOCOL)
+ : Message(payload_size, max_handles, MessageType::NORMAL_LEGACY) {
+}
+#else
+ : Message(payload_size, max_handles, MessageType::NORMAL) {
+}
+#endif
+
+Channel::Message::Message(size_t payload_size,
+ size_t max_handles,
+ MessageType message_type)
+ : max_handles_(max_handles) {
+ DCHECK_LE(max_handles_, kMaxAttachedHandles);
+
+ const bool is_legacy_message = (message_type == MessageType::NORMAL_LEGACY);
+ size_t extra_header_size = 0;
+#if defined(OS_WIN)
+ // On Windows we serialize HANDLEs into the extra header space.
+ extra_header_size = max_handles_ * sizeof(HandleEntry);
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // On OSX, some of the platform handles may be mach ports, which are
+ // serialised into the message buffer. Since there could be a mix of fds and
+ // mach ports, we store the mach ports as an <index, port> pair (of uint32_t),
+ // so that the original ordering of handles can be re-created.
+ if (max_handles) {
+ extra_header_size =
+ sizeof(MachPortsExtraHeader) + (max_handles * sizeof(MachPortsEntry));
+ }
+#endif
+ // Pad extra header data to be aligned to |kChannelMessageAlignment| bytes.
+ if (!IsAlignedForChannelMessage(extra_header_size)) {
+ extra_header_size += kChannelMessageAlignment -
+ (extra_header_size % kChannelMessageAlignment);
+ }
+ DCHECK(IsAlignedForChannelMessage(extra_header_size));
+ const size_t header_size =
+ is_legacy_message ? sizeof(LegacyHeader) : sizeof(Header);
+ DCHECK(extra_header_size == 0 || !is_legacy_message);
+
+ size_ = header_size + extra_header_size + payload_size;
+ data_ = static_cast<char*>(base::AlignedAlloc(size_,
+ kChannelMessageAlignment));
+ // Only zero out the header and not the payload. Since the payload is going to
+ // be memcpy'd, zeroing the payload is unnecessary work and a significant
+ // performance issue when dealing with large messages. Any sanitizer errors
+ // complaining about an uninitialized read in the payload area should be
+ // treated as an error and fixed.
+ memset(data_, 0, header_size + extra_header_size);
+
+ DCHECK_LE(size_, std::numeric_limits<uint32_t>::max());
+ legacy_header()->num_bytes = static_cast<uint32_t>(size_);
+
+ DCHECK_LE(header_size + extra_header_size,
+ std::numeric_limits<uint16_t>::max());
+ legacy_header()->message_type = message_type;
+
+ if (is_legacy_message) {
+ legacy_header()->num_handles = static_cast<uint16_t>(max_handles);
+ } else {
+ header()->num_header_bytes =
+ static_cast<uint16_t>(header_size + extra_header_size);
+ }
+
+ if (max_handles_ > 0) {
+#if defined(OS_WIN)
+ handles_ = reinterpret_cast<HandleEntry*>(mutable_extra_header());
+ // Initialize all handles to invalid values.
+ for (size_t i = 0; i < max_handles_; ++i)
+ handles_[i].handle = base::win::HandleToUint32(INVALID_HANDLE_VALUE);
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ mach_ports_header_ =
+ reinterpret_cast<MachPortsExtraHeader*>(mutable_extra_header());
+ mach_ports_header_->num_ports = 0;
+ // Initialize all handles to invalid values.
+ for (size_t i = 0; i < max_handles_; ++i) {
+ mach_ports_header_->entries[i] =
+ {0, static_cast<uint32_t>(MACH_PORT_NULL)};
+ }
+#endif
+ }
+}
+
+Channel::Message::~Message() {
+ base::AlignedFree(data_);
+}
+
+// static
+Channel::MessagePtr Channel::Message::Deserialize(const void* data,
+ size_t data_num_bytes) {
+ if (data_num_bytes < sizeof(LegacyHeader))
+ return nullptr;
+
+ const LegacyHeader* legacy_header =
+ reinterpret_cast<const LegacyHeader*>(data);
+ if (legacy_header->num_bytes != data_num_bytes) {
+ DLOG(ERROR) << "Decoding invalid message: " << legacy_header->num_bytes
+ << " != " << data_num_bytes;
+ return nullptr;
+ }
+
+ const Header* header = nullptr;
+ if (legacy_header->message_type == MessageType::NORMAL)
+ header = reinterpret_cast<const Header*>(data);
+
+ uint32_t extra_header_size = 0;
+ size_t payload_size = 0;
+ const char* payload = nullptr;
+ if (!header) {
+ payload_size = data_num_bytes - sizeof(LegacyHeader);
+ payload = static_cast<const char*>(data) + sizeof(LegacyHeader);
+ } else {
+ if (header->num_bytes < header->num_header_bytes ||
+ header->num_header_bytes < sizeof(Header)) {
+ DLOG(ERROR) << "Decoding invalid message: " << header->num_bytes << " < "
+ << header->num_header_bytes;
+ return nullptr;
+ }
+ extra_header_size = header->num_header_bytes - sizeof(Header);
+ payload_size = data_num_bytes - header->num_header_bytes;
+ payload = static_cast<const char*>(data) + header->num_header_bytes;
+ }
+
+#if defined(OS_WIN)
+ uint32_t max_handles = extra_header_size / sizeof(HandleEntry);
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ if (extra_header_size > 0 &&
+ extra_header_size < sizeof(MachPortsExtraHeader)) {
+ DLOG(ERROR) << "Decoding invalid message: " << extra_header_size << " < "
+ << sizeof(MachPortsExtraHeader);
+ return nullptr;
+ }
+ uint32_t max_handles =
+ extra_header_size == 0
+ ? 0
+ : (extra_header_size - sizeof(MachPortsExtraHeader)) /
+ sizeof(MachPortsEntry);
+#else
+ const uint32_t max_handles = 0;
+#endif // defined(OS_WIN)
+
+ const uint16_t num_handles =
+ header ? header->num_handles : legacy_header->num_handles;
+ if (num_handles > max_handles || max_handles > kMaxAttachedHandles) {
+ DLOG(ERROR) << "Decoding invalid message: " << num_handles << " > "
+ << max_handles;
+ return nullptr;
+ }
+
+ MessagePtr message(
+ new Message(payload_size, max_handles, legacy_header->message_type));
+ DCHECK_EQ(message->data_num_bytes(), data_num_bytes);
+
+ // Copy all payload bytes.
+ if (payload_size)
+ memcpy(message->mutable_payload(), payload, payload_size);
+
+ if (header) {
+ DCHECK_EQ(message->extra_header_size(), extra_header_size);
+ DCHECK_EQ(message->header()->num_header_bytes, header->num_header_bytes);
+
+ if (message->extra_header_size()) {
+ // Copy extra header bytes.
+ memcpy(message->mutable_extra_header(),
+ static_cast<const char*>(data) + sizeof(Header),
+ message->extra_header_size());
+ }
+ message->header()->num_handles = header->num_handles;
+ } else {
+ message->legacy_header()->num_handles = legacy_header->num_handles;
+ }
+
+#if defined(OS_WIN)
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector(num_handles));
+ for (size_t i = 0; i < num_handles; i++) {
+ (*handles)[i].handle =
+ base::win::Uint32ToHandle(message->handles_[i].handle);
+ }
+ message->SetHandles(std::move(handles));
+#endif
+
+ return message;
+}
+
+const void* Channel::Message::extra_header() const {
+ DCHECK(!is_legacy_message());
+ return data_ + sizeof(Header);
+}
+
+void* Channel::Message::mutable_extra_header() {
+ DCHECK(!is_legacy_message());
+ return data_ + sizeof(Header);
+}
+
+size_t Channel::Message::extra_header_size() const {
+ return header()->num_header_bytes - sizeof(Header);
+}
+
+void* Channel::Message::mutable_payload() {
+ if (is_legacy_message())
+ return static_cast<void*>(legacy_header() + 1);
+ return data_ + header()->num_header_bytes;
+}
+
+const void* Channel::Message::payload() const {
+ if (is_legacy_message())
+ return static_cast<const void*>(legacy_header() + 1);
+ return data_ + header()->num_header_bytes;
+}
+
+size_t Channel::Message::payload_size() const {
+ if (is_legacy_message())
+ return legacy_header()->num_bytes - sizeof(LegacyHeader);
+ return size_ - header()->num_header_bytes;
+}
+
+size_t Channel::Message::num_handles() const {
+ return is_legacy_message() ? legacy_header()->num_handles
+ : header()->num_handles;
+}
+
+bool Channel::Message::has_handles() const {
+ return (is_legacy_message() ? legacy_header()->num_handles
+ : header()->num_handles) > 0;
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+bool Channel::Message::has_mach_ports() const {
+ if (!has_handles())
+ return false;
+
+ for (const auto& handle : (*handle_vector_)) {
+ if (handle.type == PlatformHandle::Type::MACH ||
+ handle.type == PlatformHandle::Type::MACH_NAME) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+bool Channel::Message::is_legacy_message() const {
+ return legacy_header()->message_type == MessageType::NORMAL_LEGACY;
+}
+
+Channel::Message::LegacyHeader* Channel::Message::legacy_header() const {
+ return reinterpret_cast<LegacyHeader*>(data_);
+}
+
+Channel::Message::Header* Channel::Message::header() const {
+ DCHECK(!is_legacy_message());
+ return reinterpret_cast<Header*>(data_);
+}
+
+void Channel::Message::SetHandles(ScopedPlatformHandleVectorPtr new_handles) {
+ if (is_legacy_message()) {
+ // Old semantics for ChromeOS and Android
+ if (legacy_header()->num_handles == 0) {
+ CHECK(!new_handles || new_handles->size() == 0);
+ return;
+ }
+ CHECK(new_handles && new_handles->size() == legacy_header()->num_handles);
+ std::swap(handle_vector_, new_handles);
+ return;
+ }
+
+ if (max_handles_ == 0) {
+ CHECK(!new_handles || new_handles->size() == 0);
+ return;
+ }
+
+ CHECK(new_handles && new_handles->size() <= max_handles_);
+ header()->num_handles = static_cast<uint16_t>(new_handles->size());
+ std::swap(handle_vector_, new_handles);
+#if defined(OS_WIN)
+ memset(handles_, 0, extra_header_size());
+ for (size_t i = 0; i < handle_vector_->size(); i++)
+ handles_[i].handle = base::win::HandleToUint32((*handle_vector_)[i].handle);
+#endif // defined(OS_WIN)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ size_t mach_port_index = 0;
+ if (mach_ports_header_) {
+ for (size_t i = 0; i < max_handles_; ++i) {
+ mach_ports_header_->entries[i] =
+ {0, static_cast<uint32_t>(MACH_PORT_NULL)};
+ }
+ for (size_t i = 0; i < handle_vector_->size(); i++) {
+ if ((*handle_vector_)[i].type == PlatformHandle::Type::MACH ||
+ (*handle_vector_)[i].type == PlatformHandle::Type::MACH_NAME) {
+ mach_port_t port = (*handle_vector_)[i].port;
+ mach_ports_header_->entries[mach_port_index].index = i;
+ mach_ports_header_->entries[mach_port_index].mach_port = port;
+ mach_port_index++;
+ }
+ }
+ mach_ports_header_->num_ports = static_cast<uint16_t>(mach_port_index);
+ }
+#endif
+}
+
+ScopedPlatformHandleVectorPtr Channel::Message::TakeHandles() {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ if (mach_ports_header_) {
+ for (size_t i = 0; i < max_handles_; ++i) {
+ mach_ports_header_->entries[i] =
+ {0, static_cast<uint32_t>(MACH_PORT_NULL)};
+ }
+ mach_ports_header_->num_ports = 0;
+ }
+#endif
+ if (is_legacy_message())
+ legacy_header()->num_handles = 0;
+ else
+ header()->num_handles = 0;
+ return std::move(handle_vector_);
+}
+
+ScopedPlatformHandleVectorPtr Channel::Message::TakeHandlesForTransport() {
+#if defined(OS_WIN)
+ // Not necessary on Windows.
+ NOTREACHED();
+ return nullptr;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ if (handle_vector_) {
+ for (auto it = handle_vector_->begin(); it != handle_vector_->end(); ) {
+ if (it->type == PlatformHandle::Type::MACH ||
+ it->type == PlatformHandle::Type::MACH_NAME) {
+ // For Mach port names, we can just leak them. They're not real
+ // ports anyway. For real ports, they're leaked because this is a child
+ // process and the remote process will take ownership.
+ it = handle_vector_->erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+ return std::move(handle_vector_);
+#else
+ return std::move(handle_vector_);
+#endif
+}
+
+#if defined(OS_WIN)
+// static
+bool Channel::Message::RewriteHandles(base::ProcessHandle from_process,
+ base::ProcessHandle to_process,
+ PlatformHandleVector* handles) {
+ bool success = true;
+ for (size_t i = 0; i < handles->size(); ++i) {
+ if (!(*handles)[i].is_valid()) {
+ DLOG(ERROR) << "Refusing to duplicate invalid handle.";
+ continue;
+ }
+ DCHECK_EQ((*handles)[i].owning_process, from_process);
+ BOOL result = DuplicateHandle(
+ from_process, (*handles)[i].handle, to_process,
+ &(*handles)[i].handle, 0, FALSE,
+ DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE);
+ if (result) {
+ (*handles)[i].owning_process = to_process;
+ } else {
+ success = false;
+
+ // If handle duplication fails, the source handle will already be closed
+ // due to DUPLICATE_CLOSE_SOURCE. Replace the handle in the message with
+ // an invalid handle.
+ (*handles)[i].handle = INVALID_HANDLE_VALUE;
+ (*handles)[i].owning_process = base::GetCurrentProcessHandle();
+ }
+ }
+ return success;
+}
+#endif
+
+// Helper class for managing a Channel's read buffer allocations. This maintains
+// a single contiguous buffer with the layout:
+//
+// [discarded bytes][occupied bytes][unoccupied bytes]
+//
+// The Reserve() method ensures that a certain capacity of unoccupied bytes is
+// available. It does not claim that capacity and only allocates new capacity
+// when strictly necessary.
+//
+// Claim() marks unoccupied bytes as occupied.
+//
+// Discard() marks occupied bytes as discarded, signifying that their contents
+// can be forgotten or overwritten.
+//
+// Realign() moves occupied bytes to the front of the buffer so that those
+// occupied bytes are properly aligned.
+//
+// The most common Channel behavior in practice should result in very few
+// allocations and copies, as memory is claimed and discarded shortly after
+// being reserved, and future reservations will immediately reuse discarded
+// memory.
+class Channel::ReadBuffer {
+ public:
+ ReadBuffer() {
+ size_ = kReadBufferSize;
+ data_ = static_cast<char*>(base::AlignedAlloc(size_,
+ kChannelMessageAlignment));
+ }
+
+ ~ReadBuffer() {
+ DCHECK(data_);
+ base::AlignedFree(data_);
+ }
+
+ const char* occupied_bytes() const { return data_ + num_discarded_bytes_; }
+
+ size_t num_occupied_bytes() const {
+ return num_occupied_bytes_ - num_discarded_bytes_;
+ }
+
+ // Ensures the ReadBuffer has enough contiguous space allocated to hold
+ // |num_bytes| more bytes; returns the address of the first available byte.
+ char* Reserve(size_t num_bytes) {
+ if (num_occupied_bytes_ + num_bytes > size_) {
+ size_ = std::max(size_ * 2, num_occupied_bytes_ + num_bytes);
+ void* new_data = base::AlignedAlloc(size_, kChannelMessageAlignment);
+ memcpy(new_data, data_, num_occupied_bytes_);
+ base::AlignedFree(data_);
+ data_ = static_cast<char*>(new_data);
+ }
+
+ return data_ + num_occupied_bytes_;
+ }
+
+ // Marks the first |num_bytes| unoccupied bytes as occupied.
+ void Claim(size_t num_bytes) {
+ DCHECK_LE(num_occupied_bytes_ + num_bytes, size_);
+ num_occupied_bytes_ += num_bytes;
+ }
+
+ // Marks the first |num_bytes| occupied bytes as discarded. This may result in
+ // shrinkage of the internal buffer, and it is not safe to assume the result
+ // of a previous Reserve() call is still valid after this.
+ void Discard(size_t num_bytes) {
+ DCHECK_LE(num_discarded_bytes_ + num_bytes, num_occupied_bytes_);
+ num_discarded_bytes_ += num_bytes;
+
+ if (num_discarded_bytes_ == num_occupied_bytes_) {
+ // We can just reuse the buffer from the beginning in this common case.
+ num_discarded_bytes_ = 0;
+ num_occupied_bytes_ = 0;
+ }
+
+ if (num_discarded_bytes_ > kMaxUnusedReadBufferCapacity) {
+ // In the uncommon case that we have a lot of discarded data at the
+ // front of the buffer, simply move remaining data to a smaller buffer.
+ size_t num_preserved_bytes = num_occupied_bytes_ - num_discarded_bytes_;
+ size_ = std::max(num_preserved_bytes, kReadBufferSize);
+ char* new_data = static_cast<char*>(
+ base::AlignedAlloc(size_, kChannelMessageAlignment));
+ memcpy(new_data, data_ + num_discarded_bytes_, num_preserved_bytes);
+ base::AlignedFree(data_);
+ data_ = new_data;
+ num_discarded_bytes_ = 0;
+ num_occupied_bytes_ = num_preserved_bytes;
+ }
+
+ if (num_occupied_bytes_ == 0 && size_ > kMaxUnusedReadBufferCapacity) {
+ // Opportunistically shrink the read buffer back down to a small size if
+ // it's grown very large. We only do this if there are no remaining
+ // unconsumed bytes in the buffer, to avoid copies in the most common
+ // cases.
+ size_ = kMaxUnusedReadBufferCapacity;
+ base::AlignedFree(data_);
+ data_ = static_cast<char*>(
+ base::AlignedAlloc(size_, kChannelMessageAlignment));
+ }
+ }
+
+ void Realign() {
+ size_t num_bytes = num_occupied_bytes();
+ memmove(data_, occupied_bytes(), num_bytes);
+ num_discarded_bytes_ = 0;
+ num_occupied_bytes_ = num_bytes;
+ }
+
+ private:
+ char* data_ = nullptr;
+
+ // The total size of the allocated buffer.
+ size_t size_ = 0;
+
+ // The number of discarded bytes at the beginning of the allocated buffer.
+ size_t num_discarded_bytes_ = 0;
+
+ // The total number of occupied bytes, including discarded bytes.
+ size_t num_occupied_bytes_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBuffer);
+};
+
+Channel::Channel(Delegate* delegate)
+ : delegate_(delegate), read_buffer_(new ReadBuffer) {
+}
+
+Channel::~Channel() {
+}
+
+void Channel::ShutDown() {
+ delegate_ = nullptr;
+ ShutDownImpl();
+}
+
+char* Channel::GetReadBuffer(size_t *buffer_capacity) {
+ DCHECK(read_buffer_);
+ size_t required_capacity = *buffer_capacity;
+ if (!required_capacity)
+ required_capacity = kReadBufferSize;
+
+ *buffer_capacity = required_capacity;
+ return read_buffer_->Reserve(required_capacity);
+}
+
+bool Channel::OnReadComplete(size_t bytes_read, size_t *next_read_size_hint) {
+ bool did_dispatch_message = false;
+ read_buffer_->Claim(bytes_read);
+ while (read_buffer_->num_occupied_bytes() >= sizeof(Message::LegacyHeader)) {
+ // Ensure the occupied data is properly aligned. If it isn't, a SIGBUS could
+ // happen on architectures that don't allow misaligned word access (i.e.
+ // anything other than x86). Only re-align when necessary to avoid copies.
+ if (!IsAlignedForChannelMessage(
+ reinterpret_cast<uintptr_t>(read_buffer_->occupied_bytes()))) {
+ read_buffer_->Realign();
+ }
+
+ // We have at least enough data available for a LegacyHeader.
+ const Message::LegacyHeader* legacy_header =
+ reinterpret_cast<const Message::LegacyHeader*>(
+ read_buffer_->occupied_bytes());
+
+ if (legacy_header->num_bytes < sizeof(Message::LegacyHeader) ||
+ legacy_header->num_bytes > kMaxChannelMessageSize) {
+ LOG(ERROR) << "Invalid message size: " << legacy_header->num_bytes;
+ return false;
+ }
+
+ if (read_buffer_->num_occupied_bytes() < legacy_header->num_bytes) {
+ // Not enough data available to read the full message. Hint to the
+ // implementation how many more bytes are needed to complete this message.
+ *next_read_size_hint =
+ legacy_header->num_bytes - read_buffer_->num_occupied_bytes();
+ return true;
+ }
+
+ const Message::Header* header = nullptr;
+ if (legacy_header->message_type != Message::MessageType::NORMAL_LEGACY) {
+ header = reinterpret_cast<const Message::Header*>(legacy_header);
+ }
+
+ size_t extra_header_size = 0;
+ const void* extra_header = nullptr;
+ size_t payload_size = 0;
+ void* payload = nullptr;
+ if (header) {
+ if (header->num_header_bytes < sizeof(Message::Header) ||
+ header->num_header_bytes > header->num_bytes) {
+ LOG(ERROR) << "Invalid message header size: "
+ << header->num_header_bytes;
+ return false;
+ }
+ extra_header_size = header->num_header_bytes - sizeof(Message::Header);
+ extra_header = extra_header_size ? header + 1 : nullptr;
+ payload_size = header->num_bytes - header->num_header_bytes;
+ payload = payload_size
+ ? reinterpret_cast<Message::Header*>(
+ const_cast<char*>(read_buffer_->occupied_bytes()) +
+ header->num_header_bytes)
+ : nullptr;
+ } else {
+ payload_size = legacy_header->num_bytes - sizeof(Message::LegacyHeader);
+ payload = payload_size
+ ? const_cast<Message::LegacyHeader*>(&legacy_header[1])
+ : nullptr;
+ }
+
+ const uint16_t num_handles =
+ header ? header->num_handles : legacy_header->num_handles;
+ ScopedPlatformHandleVectorPtr handles;
+ if (num_handles > 0) {
+ if (!GetReadPlatformHandles(num_handles, extra_header, extra_header_size,
+ &handles)) {
+ return false;
+ }
+
+ if (!handles) {
+ // Not enough handles available for this message.
+ break;
+ }
+ }
+
+ // We've got a complete message! Dispatch it and try another.
+ if (legacy_header->message_type != Message::MessageType::NORMAL_LEGACY &&
+ legacy_header->message_type != Message::MessageType::NORMAL) {
+ if (!OnControlMessage(legacy_header->message_type, payload, payload_size,
+ std::move(handles))) {
+ return false;
+ }
+ did_dispatch_message = true;
+ } else if (delegate_) {
+ delegate_->OnChannelMessage(payload, payload_size, std::move(handles));
+ did_dispatch_message = true;
+ }
+
+ read_buffer_->Discard(legacy_header->num_bytes);
+ }
+
+ *next_read_size_hint = did_dispatch_message ? 0 : kReadBufferSize;
+ return true;
+}
+
+void Channel::OnError() {
+ if (delegate_)
+ delegate_->OnChannelError();
+}
+
+bool Channel::OnControlMessage(Message::MessageType message_type,
+ const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) {
+ return false;
+}
+
+} // namespace edk
+} // namespace mojo
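The GetReadBuffer()/OnReadComplete() contract above is what the platform-specific subclasses (channel_posix.cc and channel_win.cc in this same change) drive from their I/O loops. A rough sketch of that interaction follows; PosixLikeChannel, ReadSomeBytes(), and next_read_size_hint_ are illustrative stand-ins, and the real implementations differ in detail (watchers, completion ports, handle passing).

    // Sketch of a platform subclass's read path driving the base Channel.
    void PosixLikeChannel::OnReadable() {
      size_t buffer_capacity = next_read_size_hint_;  // 0 means "use default".
      char* buffer = GetReadBuffer(&buffer_capacity);  // Reserves capacity.

      ssize_t bytes_read = ReadSomeBytes(buffer, buffer_capacity);
      if (bytes_read <= 0) {
        OnError();  // Peer closed or the read failed.
        return;
      }

      // Claims the bytes, dispatches any complete messages to the delegate,
      // and reports how much to try reading next. A false return means the
      // stream is malformed and the channel must be torn down.
      if (!OnReadComplete(static_cast<size_t>(bytes_read),
                          &next_read_size_hint_)) {
        OnError();
      }
    }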
diff --git a/mojo/edk/system/channel.h b/mojo/edk/system/channel.h
new file mode 100644
index 0000000000..33a510c6f0
--- /dev/null
+++ b/mojo/edk/system/channel.h
@@ -0,0 +1,303 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_CHANNEL_H_
+#define MOJO_EDK_SYSTEM_CHANNEL_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/process/process_handle.h"
+#include "base/task_runner.h"
+#include "mojo/edk/embedder/connection_params.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+
+namespace mojo {
+namespace edk {
+
+const size_t kChannelMessageAlignment = 8;
+
+constexpr bool IsAlignedForChannelMessage(size_t n) {
+ return n % kChannelMessageAlignment == 0;
+}
+
+// Channel provides a thread-safe interface to read and write arbitrary
+// delimited messages over an underlying I/O channel, optionally transferring
+// one or more platform handles in the process.
+class MOJO_SYSTEM_IMPL_EXPORT Channel
+ : public base::RefCountedThreadSafe<Channel> {
+ public:
+ struct Message;
+
+ using MessagePtr = std::unique_ptr<Message>;
+
+ // A message to be written to a channel.
+ struct MOJO_SYSTEM_IMPL_EXPORT Message {
+ enum class MessageType : uint16_t {
+ // An old format normal message, that uses the LegacyHeader.
+ // Only used on Android and ChromeOS.
+ // TODO(jcivelli): remove legacy support when Arc++ has updated to Mojo
+ // with normal versioned messages. crbug.com/695645
+ NORMAL_LEGACY = 0,
+#if defined(OS_MACOSX)
+ // A control message containing handles to echo back.
+ HANDLES_SENT,
+ // A control message containing handles that can now be closed.
+ HANDLES_SENT_ACK,
+#endif
+ // A normal message that uses Header and can contain extra header values.
+ NORMAL,
+ };
+
+#pragma pack(push, 1)
+ // Old message wire format for ChromeOS and Android, used by NORMAL_LEGACY
+ // messages.
+ struct LegacyHeader {
+ // Message size in bytes, including the header.
+ uint32_t num_bytes;
+
+ // Number of attached handles.
+ uint16_t num_handles;
+
+ MessageType message_type;
+ };
+
+ // Header used by NORMAL messages.
+ // To preserve backward compatibility with LegacyHeader, the num_bytes and
+ // message_type fields must be at the same offsets as in LegacyHeader.
+ struct Header {
+ // Message size in bytes, including the header.
+ uint32_t num_bytes;
+
+ // Total size of the header, including extra header data (i.e. HANDLEs on
+ // Windows).
+ uint16_t num_header_bytes;
+
+ MessageType message_type;
+
+ // Number of attached handles. May be less than the reserved handle
+ // storage size in this message on platforms that serialise handles as
+ // data (i.e. HANDLEs on Windows, Mach ports on OSX).
+ uint16_t num_handles;
+
+ char padding[6];
+ };
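+
+    // A rough sketch of the resulting wire layout (sizes follow from the
+    // packed fields above) for a NORMAL message with an 8-byte payload and no
+    // extra header data:
+    //
+    //   offset  0: num_bytes        = 24  (16-byte header + 8-byte payload)
+    //   offset  4: num_header_bytes = 16  (no extra header data)
+    //   offset  6: message_type     = NORMAL
+    //   offset  8: num_handles      = 0
+    //   offset 10: padding[6]
+    //   offset 16: payload bytes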
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ struct MachPortsEntry {
+ // Index of Mach port in the original vector of PlatformHandles.
+ uint16_t index;
+
+ // Mach port name.
+ uint32_t mach_port;
+ static_assert(sizeof(mach_port_t) <= sizeof(uint32_t),
+ "mach_port_t must be no larger than uint32_t");
+ };
+ static_assert(sizeof(MachPortsEntry) == 6,
+ "sizeof(MachPortsEntry) must be 6 bytes");
+
+ // Structure of the extra header field when present on OSX.
+ struct MachPortsExtraHeader {
+ // Actual number of Mach ports encoded in the extra header.
+ uint16_t num_ports;
+
+ // Array of encoded Mach ports. If |num_ports| > 0, |entries[0]| through
+ // to |entries[num_ports-1]| inclusive are valid.
+ MachPortsEntry entries[0];
+ };
+ static_assert(sizeof(MachPortsExtraHeader) == 2,
+ "sizeof(MachPortsExtraHeader) must be 2 bytes");
+#elif defined(OS_WIN)
+ struct HandleEntry {
+      // The Windows HANDLE. HANDLEs are guaranteed to fit inside 32 bits.
+ // See: https://msdn.microsoft.com/en-us/library/aa384203(VS.85).aspx
+ uint32_t handle;
+ };
+ static_assert(sizeof(HandleEntry) == 4,
+ "sizeof(HandleEntry) must be 4 bytes");
+#endif
+#pragma pack(pop)
+
+ // Allocates and owns a buffer for message data with enough capacity for
+ // |payload_size| bytes plus a header, plus |max_handles| platform handles.
+ Message(size_t payload_size, size_t max_handles);
+ Message(size_t payload_size, size_t max_handles, MessageType message_type);
+ ~Message();
+
+ // Constructs a Message from serialized message data.
+ static MessagePtr Deserialize(const void* data, size_t data_num_bytes);
+
+ const void* data() const { return data_; }
+ size_t data_num_bytes() const { return size_; }
+
+ const void* extra_header() const;
+ void* mutable_extra_header();
+ size_t extra_header_size() const;
+
+ void* mutable_payload();
+ const void* payload() const;
+ size_t payload_size() const;
+
+ size_t num_handles() const;
+ bool has_handles() const;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ bool has_mach_ports() const;
+#endif
+
+ bool is_legacy_message() const;
+ LegacyHeader* legacy_header() const;
+ Header* header() const;
+
+ // Note: SetHandles() and TakeHandles() invalidate any previous value of
+ // handles().
+ void SetHandles(ScopedPlatformHandleVectorPtr new_handles);
+ ScopedPlatformHandleVectorPtr TakeHandles();
+    // Version of TakeHandles that returns a vector of platform handles
+    // suitable for transfer over an underlying OS mechanism, e.g. file
+    // descriptors over a Unix domain socket. Any handle that cannot be
+    // transferred this way, such as Mach ports, will be removed.
+ ScopedPlatformHandleVectorPtr TakeHandlesForTransport();
+
+#if defined(OS_WIN)
+ // Prepares the handles in this message for use in a different process.
+ // Upon calling this the handles should belong to |from_process|; after the
+ // call they'll belong to |to_process|. The source handles are always
+ // closed by this call. Returns false iff one or more handles failed
+ // duplication.
+ static bool RewriteHandles(base::ProcessHandle from_process,
+ base::ProcessHandle to_process,
+ PlatformHandleVector* handles);
+#endif
+
+ void SetVersionForTest(uint16_t version_number);
+
+ private:
+ size_t size_ = 0;
+ size_t max_handles_ = 0;
+ char* data_ = nullptr;
+
+ ScopedPlatformHandleVectorPtr handle_vector_;
+
+#if defined(OS_WIN)
+ // On Windows, handles are serialised into the extra header section.
+ HandleEntry* handles_ = nullptr;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // On OSX, handles are serialised into the extra header section.
+ MachPortsExtraHeader* mach_ports_header_ = nullptr;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Message);
+ };
+
+ // Delegate methods are called from the I/O task runner with which the Channel
+ // was created (see Channel::Create).
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+
+ // Notify of a received message. |payload| is not owned and must not be
+ // retained; it will be null if |payload_size| is 0. |handles| are
+ // transferred to the callee.
+ virtual void OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) = 0;
+
+    // Notify that an error has occurred and the Channel will cease operation.
+ virtual void OnChannelError() = 0;
+ };
+
+  // Creates a new Channel around |connection_params|, taking ownership of the
+  // underlying channel handle. All I/O on the handle will be performed on
+  // |io_task_runner|.
+ // Note that ShutDown() MUST be called on the Channel some time before
+ // |delegate| is destroyed.
+ static scoped_refptr<Channel> Create(
+ Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner);
+
+ // Request that the channel be shut down. This should always be called before
+ // releasing the last reference to a Channel to ensure that it's cleaned up
+ // on its I/O task runner's thread.
+ //
+ // Delegate methods will no longer be invoked after this call.
+ void ShutDown();
+
+ // Begin processing I/O events. Delegate methods must only be invoked after
+ // this call.
+ virtual void Start() = 0;
+
+ // Stop processing I/O events.
+ virtual void ShutDownImpl() = 0;
+
+ // Queues an outgoing message on the Channel. This message will either
+ // eventually be written or will fail to write and trigger
+ // Delegate::OnChannelError.
+ virtual void Write(MessagePtr message) = 0;
+
+ // Causes the platform handle to leak when this channel is shut down instead
+ // of closing it.
+ virtual void LeakHandle() = 0;
+
+ protected:
+ explicit Channel(Delegate* delegate);
+ virtual ~Channel();
+
+ // Called by the implementation when it wants somewhere to stick data.
+ // |*buffer_capacity| may be set by the caller to indicate the desired buffer
+ // size. If 0, a sane default size will be used instead.
+ //
+ // Returns the address of a buffer which can be written to, and indicates its
+ // actual capacity in |*buffer_capacity|.
+ char* GetReadBuffer(size_t* buffer_capacity);
+
+ // Called by the implementation when new data is available in the read
+ // buffer. Returns false to indicate an error. Upon success,
+ // |*next_read_size_hint| will be set to a recommended size for the next
+ // read done by the implementation.
+ bool OnReadComplete(size_t bytes_read, size_t* next_read_size_hint);
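+  //
+  // A rough sketch of how an implementation is expected to drive the two
+  // calls above (see ChannelPosix / ChannelWin later in this patch for the
+  // real read loops):
+  //
+  //   size_t next_read_size_hint = 0;
+  //   size_t buffer_capacity = next_read_size_hint;
+  //   char* buffer = GetReadBuffer(&buffer_capacity);
+  //   size_t bytes_read = <read up to buffer_capacity bytes into buffer>;
+  //   if (!OnReadComplete(bytes_read, &next_read_size_hint))
+  //     OnError();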
+
+ // Called by the implementation when something goes horribly wrong. It is NOT
+ // OK to call this synchronously from any public interface methods.
+ void OnError();
+
+ // Retrieves the set of platform handles read for a given message.
+ // |extra_header| and |extra_header_size| correspond to the extra header data.
+  // Depending on the Channel implementation, this data may encode platform
+ // handles, or handles may be stored and managed elsewhere by the
+ // implementation.
+ //
+ // Returns |false| on unrecoverable error (i.e. the Channel should be closed).
+ // Returns |true| otherwise. Note that it is possible on some platforms for an
+ // insufficient number of handles to be available when this call is made, but
+ // this is not necessarily an error condition. In such cases this returns
+ // |true| but |*handles| will also be reset to null.
+ virtual bool GetReadPlatformHandles(
+ size_t num_handles,
+ const void* extra_header,
+ size_t extra_header_size,
+ ScopedPlatformHandleVectorPtr* handles) = 0;
+
+ // Handles a received control message. Returns |true| if the message is
+ // accepted, or |false| otherwise.
+ virtual bool OnControlMessage(Message::MessageType message_type,
+ const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles);
+
+ private:
+ friend class base::RefCountedThreadSafe<Channel>;
+
+ class ReadBuffer;
+
+ Delegate* delegate_;
+ const std::unique_ptr<ReadBuffer> read_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(Channel);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_CHANNEL_H_
diff --git a/mojo/edk/system/channel_posix.cc b/mojo/edk/system/channel_posix.cc
new file mode 100644
index 0000000000..8b4ca7fdf3
--- /dev/null
+++ b/mojo/edk/system/channel_posix.cc
@@ -0,0 +1,572 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/channel.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include <algorithm>
+#include <deque>
+#include <limits>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "mojo/edk/embedder/platform_channel_utils_posix.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+
+#if !defined(OS_NACL)
+#include <sys/uio.h>
+#endif
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+const size_t kMaxBatchReadCapacity = 256 * 1024;
+
+// A view over a Channel::Message object. The write queue uses these since
+// large messages may need to be sent in chunks.
+class MessageView {
+ public:
+ // Owns |message|. |offset| indexes the first unsent byte in the message.
+ MessageView(Channel::MessagePtr message, size_t offset)
+ : message_(std::move(message)),
+ offset_(offset),
+ handles_(message_->TakeHandlesForTransport()) {
+ DCHECK_GT(message_->data_num_bytes(), offset_);
+ }
+
+ MessageView(MessageView&& other) { *this = std::move(other); }
+
+ MessageView& operator=(MessageView&& other) {
+ message_ = std::move(other.message_);
+ offset_ = other.offset_;
+ handles_ = std::move(other.handles_);
+ return *this;
+ }
+
+ ~MessageView() {}
+
+ const void* data() const {
+ return static_cast<const char*>(message_->data()) + offset_;
+ }
+
+ size_t data_num_bytes() const { return message_->data_num_bytes() - offset_; }
+
+ size_t data_offset() const { return offset_; }
+ void advance_data_offset(size_t num_bytes) {
+ DCHECK_GT(message_->data_num_bytes(), offset_ + num_bytes);
+ offset_ += num_bytes;
+ }
+
+ ScopedPlatformHandleVectorPtr TakeHandles() { return std::move(handles_); }
+ Channel::MessagePtr TakeMessage() { return std::move(message_); }
+
+ void SetHandles(ScopedPlatformHandleVectorPtr handles) {
+ handles_ = std::move(handles);
+ }
+
+ private:
+ Channel::MessagePtr message_;
+ size_t offset_;
+ ScopedPlatformHandleVectorPtr handles_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageView);
+};
+
+class ChannelPosix : public Channel,
+ public base::MessageLoop::DestructionObserver,
+ public base::MessageLoopForIO::Watcher {
+ public:
+ ChannelPosix(Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner)
+ : Channel(delegate),
+ self_(this),
+ handle_(connection_params.TakeChannelHandle()),
+ io_task_runner_(io_task_runner)
+#if defined(OS_MACOSX)
+ ,
+ handles_to_close_(new PlatformHandleVector)
+#endif
+ {
+ CHECK(handle_.is_valid());
+ }
+
+ void Start() override {
+ if (io_task_runner_->RunsTasksOnCurrentThread()) {
+ StartOnIOThread();
+ } else {
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ChannelPosix::StartOnIOThread, this));
+ }
+ }
+
+ void ShutDownImpl() override {
+ // Always shut down asynchronously when called through the public interface.
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ChannelPosix::ShutDownOnIOThread, this));
+ }
+
+ void Write(MessagePtr message) override {
+ bool write_error = false;
+ {
+ base::AutoLock lock(write_lock_);
+ if (reject_writes_)
+ return;
+ if (outgoing_messages_.empty()) {
+ if (!WriteNoLock(MessageView(std::move(message), 0)))
+ reject_writes_ = write_error = true;
+ } else {
+ outgoing_messages_.emplace_back(std::move(message), 0);
+ }
+ }
+ if (write_error) {
+ // Do not synchronously invoke OnError(). Write() may have been called by
+ // the delegate and we don't want to re-enter it.
+ io_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&ChannelPosix::OnError, this));
+ }
+ }
+
+ void LeakHandle() override {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ leak_handle_ = true;
+ }
+
+ bool GetReadPlatformHandles(
+ size_t num_handles,
+ const void* extra_header,
+ size_t extra_header_size,
+ ScopedPlatformHandleVectorPtr* handles) override {
+ if (num_handles > std::numeric_limits<uint16_t>::max())
+ return false;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // On OSX, we can have mach ports which are located in the extra header
+ // section.
+ using MachPortsEntry = Channel::Message::MachPortsEntry;
+ using MachPortsExtraHeader = Channel::Message::MachPortsExtraHeader;
+ CHECK(extra_header_size >=
+ sizeof(MachPortsExtraHeader) + num_handles * sizeof(MachPortsEntry));
+ const MachPortsExtraHeader* mach_ports_header =
+ reinterpret_cast<const MachPortsExtraHeader*>(extra_header);
+ size_t num_mach_ports = mach_ports_header->num_ports;
+ CHECK(num_mach_ports <= num_handles);
+ if (incoming_platform_handles_.size() + num_mach_ports < num_handles) {
+ handles->reset();
+ return true;
+ }
+
+ handles->reset(new PlatformHandleVector(num_handles));
+ const MachPortsEntry* mach_ports = mach_ports_header->entries;
+ for (size_t i = 0, mach_port_index = 0; i < num_handles; ++i) {
+ if (mach_port_index < num_mach_ports &&
+ mach_ports[mach_port_index].index == i) {
+ (*handles)->at(i) = PlatformHandle(
+ static_cast<mach_port_t>(mach_ports[mach_port_index].mach_port));
+ CHECK((*handles)->at(i).type == PlatformHandle::Type::MACH);
+ // These are actually just Mach port names until they're resolved from
+ // the remote process.
+ (*handles)->at(i).type = PlatformHandle::Type::MACH_NAME;
+ mach_port_index++;
+ } else {
+ CHECK(!incoming_platform_handles_.empty());
+ (*handles)->at(i) = incoming_platform_handles_.front();
+ incoming_platform_handles_.pop_front();
+ }
+ }
+#else
+ if (incoming_platform_handles_.size() < num_handles) {
+ handles->reset();
+ return true;
+ }
+
+ handles->reset(new PlatformHandleVector(num_handles));
+ for (size_t i = 0; i < num_handles; ++i) {
+ (*handles)->at(i) = incoming_platform_handles_.front();
+ incoming_platform_handles_.pop_front();
+ }
+#endif
+
+ return true;
+ }
+
+ private:
+ ~ChannelPosix() override {
+ DCHECK(!read_watcher_);
+ DCHECK(!write_watcher_);
+ for (auto handle : incoming_platform_handles_)
+ handle.CloseIfNecessary();
+ }
+
+ void StartOnIOThread() {
+ DCHECK(!read_watcher_);
+ DCHECK(!write_watcher_);
+ read_watcher_.reset(
+ new base::MessageLoopForIO::FileDescriptorWatcher(FROM_HERE));
+ base::MessageLoop::current()->AddDestructionObserver(this);
+ if (handle_.get().needs_connection) {
+ base::MessageLoopForIO::current()->WatchFileDescriptor(
+ handle_.get().handle, false /* persistent */,
+ base::MessageLoopForIO::WATCH_READ, read_watcher_.get(), this);
+ } else {
+ write_watcher_.reset(
+ new base::MessageLoopForIO::FileDescriptorWatcher(FROM_HERE));
+ base::MessageLoopForIO::current()->WatchFileDescriptor(
+ handle_.get().handle, true /* persistent */,
+ base::MessageLoopForIO::WATCH_READ, read_watcher_.get(), this);
+ base::AutoLock lock(write_lock_);
+ FlushOutgoingMessagesNoLock();
+ }
+ }
+
+ void WaitForWriteOnIOThread() {
+ base::AutoLock lock(write_lock_);
+ WaitForWriteOnIOThreadNoLock();
+ }
+
+ void WaitForWriteOnIOThreadNoLock() {
+ if (pending_write_)
+ return;
+ if (!write_watcher_)
+ return;
+ if (io_task_runner_->RunsTasksOnCurrentThread()) {
+ pending_write_ = true;
+ base::MessageLoopForIO::current()->WatchFileDescriptor(
+ handle_.get().handle, false /* persistent */,
+ base::MessageLoopForIO::WATCH_WRITE, write_watcher_.get(), this);
+ } else {
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ChannelPosix::WaitForWriteOnIOThread, this));
+ }
+ }
+
+ void ShutDownOnIOThread() {
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+
+ read_watcher_.reset();
+ write_watcher_.reset();
+ if (leak_handle_)
+ ignore_result(handle_.release());
+ handle_.reset();
+#if defined(OS_MACOSX)
+ handles_to_close_.reset();
+#endif
+
+ // May destroy the |this| if it was the last reference.
+ self_ = nullptr;
+ }
+
+ // base::MessageLoop::DestructionObserver:
+ void WillDestroyCurrentMessageLoop() override {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ if (self_)
+ ShutDownOnIOThread();
+ }
+
+ // base::MessageLoopForIO::Watcher:
+ void OnFileCanReadWithoutBlocking(int fd) override {
+ CHECK_EQ(fd, handle_.get().handle);
+ if (handle_.get().needs_connection) {
+#if !defined(OS_NACL)
+ read_watcher_.reset();
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+
+ ScopedPlatformHandle accept_fd;
+ ServerAcceptConnection(handle_.get(), &accept_fd);
+ if (!accept_fd.is_valid()) {
+ OnError();
+ return;
+ }
+ handle_ = std::move(accept_fd);
+ StartOnIOThread();
+#else
+ NOTREACHED();
+#endif
+ return;
+ }
+
+ bool read_error = false;
+ size_t next_read_size = 0;
+ size_t buffer_capacity = 0;
+ size_t total_bytes_read = 0;
+ size_t bytes_read = 0;
+ do {
+ buffer_capacity = next_read_size;
+ char* buffer = GetReadBuffer(&buffer_capacity);
+ DCHECK_GT(buffer_capacity, 0u);
+
+ ssize_t read_result = PlatformChannelRecvmsg(
+ handle_.get(),
+ buffer,
+ buffer_capacity,
+ &incoming_platform_handles_);
+
+ if (read_result > 0) {
+ bytes_read = static_cast<size_t>(read_result);
+ total_bytes_read += bytes_read;
+ if (!OnReadComplete(bytes_read, &next_read_size)) {
+ read_error = true;
+ break;
+ }
+ } else if (read_result == 0 ||
+ (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ read_error = true;
+ break;
+ }
+ } while (bytes_read == buffer_capacity &&
+ total_bytes_read < kMaxBatchReadCapacity &&
+ next_read_size > 0);
+ if (read_error) {
+ // Stop receiving read notifications.
+ read_watcher_.reset();
+
+ OnError();
+ }
+ }
+
+ void OnFileCanWriteWithoutBlocking(int fd) override {
+ bool write_error = false;
+ {
+ base::AutoLock lock(write_lock_);
+ pending_write_ = false;
+ if (!FlushOutgoingMessagesNoLock())
+ reject_writes_ = write_error = true;
+ }
+ if (write_error)
+ OnError();
+ }
+
+ // Attempts to write a message directly to the channel. If the full message
+ // cannot be written, it's queued and a wait is initiated to write the message
+ // ASAP on the I/O thread.
+ bool WriteNoLock(MessageView message_view) {
+ if (handle_.get().needs_connection) {
+ outgoing_messages_.emplace_front(std::move(message_view));
+ return true;
+ }
+ size_t bytes_written = 0;
+ do {
+ message_view.advance_data_offset(bytes_written);
+
+ ssize_t result;
+ ScopedPlatformHandleVectorPtr handles = message_view.TakeHandles();
+ if (handles && handles->size()) {
+ iovec iov = {
+ const_cast<void*>(message_view.data()),
+ message_view.data_num_bytes()
+ };
+ // TODO: Handle lots of handles.
+ result = PlatformChannelSendmsgWithHandles(
+ handle_.get(), &iov, 1, handles->data(), handles->size());
+ if (result >= 0) {
+#if defined(OS_MACOSX)
+ // There is a bug on OSX which makes it dangerous to close
+ // a file descriptor while it is in transit. So instead we
+ // store the file descriptor in a set and send a message to
+ // the recipient, which is queued AFTER the message that
+ // sent the FD. The recipient will reply to the message,
+ // letting us know that it is now safe to close the file
+ // descriptor. For more information, see:
+ // http://crbug.com/298276
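+          // In rough terms, the dance implemented below and in
+          // OnControlMessage() is:
+          //   1. Send the message carrying the FDs (done above).
+          //   2. Queue a HANDLES_SENT control message listing those FDs and
+          //      stash the sent handles in |handles_to_close_|.
+          //   3. The peer echoes the FD list back in a HANDLES_SENT_ACK.
+          //   4. CloseHandles() then closes the matching entries in
+          //      |handles_to_close_|.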
+ std::vector<int> fds;
+ for (auto& handle : *handles)
+ fds.push_back(handle.handle);
+ {
+ base::AutoLock l(handles_to_close_lock_);
+ for (auto& handle : *handles)
+ handles_to_close_->push_back(handle);
+ }
+ MessagePtr fds_message(
+ new Channel::Message(sizeof(fds[0]) * fds.size(), 0,
+ Message::MessageType::HANDLES_SENT));
+ memcpy(fds_message->mutable_payload(), fds.data(),
+ sizeof(fds[0]) * fds.size());
+ outgoing_messages_.emplace_back(std::move(fds_message), 0);
+ handles->clear();
+#else
+ handles.reset();
+#endif // defined(OS_MACOSX)
+ }
+ } else {
+ result = PlatformChannelWrite(handle_.get(), message_view.data(),
+ message_view.data_num_bytes());
+ }
+
+ if (result < 0) {
+ if (errno != EAGAIN && errno != EWOULDBLOCK
+#if defined(OS_MACOSX)
+ // On OS X if sendmsg() is trying to send fds between processes and
+ // there isn't enough room in the output buffer to send the fd
+ // structure over atomically then EMSGSIZE is returned.
+ //
+ // EMSGSIZE presents a problem since the system APIs can only call
+ // us when there's room in the socket buffer and not when there is
+ // "enough" room.
+ //
+ // The current behavior is to return to the event loop when EMSGSIZE
+          // is received and hopefully service another FD. This is however
+ // still technically a busy wait since the event loop will call us
+ // right back until the receiver has read enough data to allow
+ // passing the FD over atomically.
+ && errno != EMSGSIZE
+#endif
+ ) {
+ return false;
+ }
+ message_view.SetHandles(std::move(handles));
+ outgoing_messages_.emplace_front(std::move(message_view));
+ WaitForWriteOnIOThreadNoLock();
+ return true;
+ }
+
+ bytes_written = static_cast<size_t>(result);
+ } while (bytes_written < message_view.data_num_bytes());
+
+ return FlushOutgoingMessagesNoLock();
+ }
+
+ bool FlushOutgoingMessagesNoLock() {
+ std::deque<MessageView> messages;
+ std::swap(outgoing_messages_, messages);
+
+ while (!messages.empty()) {
+ if (!WriteNoLock(std::move(messages.front())))
+ return false;
+
+ messages.pop_front();
+ if (!outgoing_messages_.empty()) {
+        // The message was requeued by WriteNoLock(), so we have to wait for
+        // the pipe to become writable again. Repopulate the message queue and
+        // exit. If sending the message triggered any control messages, they
+        // may be in |outgoing_messages_| in addition to or instead of the
+        // message being sent.
+ std::swap(messages, outgoing_messages_);
+ while (!messages.empty()) {
+ outgoing_messages_.push_front(std::move(messages.back()));
+ messages.pop_back();
+ }
+ return true;
+ }
+ }
+
+ return true;
+ }
+
+#if defined(OS_MACOSX)
+ bool OnControlMessage(Message::MessageType message_type,
+ const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) override {
+ switch (message_type) {
+ case Message::MessageType::HANDLES_SENT: {
+ if (payload_size == 0)
+ break;
+ MessagePtr message(new Channel::Message(
+ payload_size, 0, Message::MessageType::HANDLES_SENT_ACK));
+ memcpy(message->mutable_payload(), payload, payload_size);
+ Write(std::move(message));
+ return true;
+ }
+
+ case Message::MessageType::HANDLES_SENT_ACK: {
+ size_t num_fds = payload_size / sizeof(int);
+ if (num_fds == 0 || payload_size % sizeof(int) != 0)
+ break;
+
+ const int* fds = reinterpret_cast<const int*>(payload);
+ if (!CloseHandles(fds, num_fds))
+ break;
+ return true;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+ }
+
+ // Closes handles referenced by |fds|. Returns false if |num_fds| is 0, or if
+ // |fds| does not match a sequence of handles in |handles_to_close_|.
+ bool CloseHandles(const int* fds, size_t num_fds) {
+ base::AutoLock l(handles_to_close_lock_);
+ if (!num_fds)
+ return false;
+
+ auto start =
+ std::find_if(handles_to_close_->begin(), handles_to_close_->end(),
+ [&fds](const PlatformHandle& handle) {
+ return handle.handle == fds[0];
+ });
+ if (start == handles_to_close_->end())
+ return false;
+
+ auto it = start;
+ size_t i = 0;
+ // The FDs in the message should match a sequence of handles in
+ // |handles_to_close_|.
+ for (; i < num_fds && it != handles_to_close_->end(); i++, ++it) {
+ if (it->handle != fds[i])
+ return false;
+
+ it->CloseIfNecessary();
+ }
+ if (i != num_fds)
+ return false;
+
+ handles_to_close_->erase(start, it);
+ return true;
+ }
+#endif // defined(OS_MACOSX)
+
+ // Keeps the Channel alive at least until explicit shutdown on the IO thread.
+ scoped_refptr<Channel> self_;
+
+ ScopedPlatformHandle handle_;
+ scoped_refptr<base::TaskRunner> io_task_runner_;
+
+ // These watchers must only be accessed on the IO thread.
+ std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> read_watcher_;
+ std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> write_watcher_;
+
+ std::deque<PlatformHandle> incoming_platform_handles_;
+
+ // Protects |pending_write_| and |outgoing_messages_|.
+ base::Lock write_lock_;
+ bool pending_write_ = false;
+ bool reject_writes_ = false;
+ std::deque<MessageView> outgoing_messages_;
+
+ bool leak_handle_ = false;
+
+#if defined(OS_MACOSX)
+ base::Lock handles_to_close_lock_;
+ ScopedPlatformHandleVectorPtr handles_to_close_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ChannelPosix);
+};
+
+} // namespace
+
+// static
+scoped_refptr<Channel> Channel::Create(
+ Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner) {
+ return new ChannelPosix(delegate, std::move(connection_params),
+ io_task_runner);
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/channel_unittest.cc b/mojo/edk/system/channel_unittest.cc
new file mode 100644
index 0000000000..ce2c804d55
--- /dev/null
+++ b/mojo/edk/system/channel_unittest.cc
@@ -0,0 +1,177 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/channel.h"
+#include "base/memory/ptr_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+class TestChannel : public Channel {
+ public:
+ TestChannel(Channel::Delegate* delegate) : Channel(delegate) {}
+
+ char* GetReadBufferTest(size_t* buffer_capacity) {
+ return GetReadBuffer(buffer_capacity);
+ }
+
+ bool OnReadCompleteTest(size_t bytes_read, size_t* next_read_size_hint) {
+ return OnReadComplete(bytes_read, next_read_size_hint);
+ }
+
+ MOCK_METHOD4(GetReadPlatformHandles,
+ bool(size_t num_handles,
+ const void* extra_header,
+ size_t extra_header_size,
+ ScopedPlatformHandleVectorPtr* handles));
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(ShutDownImpl, void());
+ MOCK_METHOD0(LeakHandle, void());
+
+  void Write(MessagePtr message) override {}
+
+ protected:
+ ~TestChannel() override {}
+};
+
+// Not using GMock as I don't think it supports movable types.
+class MockChannelDelegate : public Channel::Delegate {
+ public:
+ MockChannelDelegate() {}
+
+ size_t GetReceivedPayloadSize() const { return payload_size_; }
+
+ const void* GetReceivedPayload() const { return payload_.get(); }
+
+ protected:
+ void OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) override {
+ payload_.reset(new char[payload_size]);
+ memcpy(payload_.get(), payload, payload_size);
+ payload_size_ = payload_size;
+ }
+
+  // Notify that an error has occurred and the Channel will cease operation.
+ void OnChannelError() override {}
+
+ private:
+ size_t payload_size_ = 0;
+ std::unique_ptr<char[]> payload_;
+};
+
+Channel::MessagePtr CreateDefaultMessage(bool legacy_message) {
+ const size_t payload_size = 100;
+ Channel::MessagePtr message = base::MakeUnique<Channel::Message>(
+ payload_size, 0,
+ legacy_message ? Channel::Message::MessageType::NORMAL_LEGACY
+ : Channel::Message::MessageType::NORMAL);
+ char* payload = static_cast<char*>(message->mutable_payload());
+ for (size_t i = 0; i < payload_size; i++) {
+ payload[i] = static_cast<char>(i);
+ }
+ return message;
+}
+
+void TestMemoryEqual(const void* data1,
+ size_t data1_size,
+ const void* data2,
+ size_t data2_size) {
+ ASSERT_EQ(data1_size, data2_size);
+ const unsigned char* data1_char = static_cast<const unsigned char*>(data1);
+ const unsigned char* data2_char = static_cast<const unsigned char*>(data2);
+ for (size_t i = 0; i < data1_size; i++) {
+ // ASSERT so we don't log tons of errors if the data is different.
+ ASSERT_EQ(data1_char[i], data2_char[i]);
+ }
+}
+
+void TestMessagesAreEqual(Channel::Message* message1,
+ Channel::Message* message2,
+ bool legacy_messages) {
+  // If either message is null, this is probably not what you wanted to test.
+ ASSERT_NE(nullptr, message1);
+ ASSERT_NE(nullptr, message2);
+
+ ASSERT_EQ(message1->payload_size(), message2->payload_size());
+ EXPECT_EQ(message1->has_handles(), message2->has_handles());
+
+ TestMemoryEqual(message1->payload(), message1->payload_size(),
+ message2->payload(), message2->payload_size());
+
+ if (legacy_messages)
+ return;
+
+ ASSERT_EQ(message1->extra_header_size(), message2->extra_header_size());
+ TestMemoryEqual(message1->extra_header(), message1->extra_header_size(),
+ message2->extra_header(), message2->extra_header_size());
+}
+
+TEST(ChannelTest, LegacyMessageDeserialization) {
+ Channel::MessagePtr message = CreateDefaultMessage(true /* legacy_message */);
+ Channel::MessagePtr deserialized_message =
+ Channel::Message::Deserialize(message->data(), message->data_num_bytes());
+ TestMessagesAreEqual(message.get(), deserialized_message.get(),
+ true /* legacy_message */);
+}
+
+TEST(ChannelTest, NonLegacyMessageDeserialization) {
+ Channel::MessagePtr message =
+ CreateDefaultMessage(false /* legacy_message */);
+ Channel::MessagePtr deserialized_message =
+ Channel::Message::Deserialize(message->data(), message->data_num_bytes());
+ TestMessagesAreEqual(message.get(), deserialized_message.get(),
+ false /* legacy_message */);
+}
+
+TEST(ChannelTest, OnReadLegacyMessage) {
+ size_t buffer_size = 100 * 1024;
+ Channel::MessagePtr message = CreateDefaultMessage(true /* legacy_message */);
+
+ MockChannelDelegate channel_delegate;
+ scoped_refptr<TestChannel> channel = new TestChannel(&channel_delegate);
+ char* read_buffer = channel->GetReadBufferTest(&buffer_size);
+ ASSERT_LT(message->data_num_bytes(),
+ buffer_size); // Bad test. Increase buffer
+ // size.
+ memcpy(read_buffer, message->data(), message->data_num_bytes());
+
+ size_t next_read_size_hint = 0;
+ EXPECT_TRUE(channel->OnReadCompleteTest(message->data_num_bytes(),
+ &next_read_size_hint));
+
+ TestMemoryEqual(message->payload(), message->payload_size(),
+ channel_delegate.GetReceivedPayload(),
+ channel_delegate.GetReceivedPayloadSize());
+}
+
+TEST(ChannelTest, OnReadNonLegacyMessage) {
+ size_t buffer_size = 100 * 1024;
+ Channel::MessagePtr message =
+ CreateDefaultMessage(false /* legacy_message */);
+
+ MockChannelDelegate channel_delegate;
+ scoped_refptr<TestChannel> channel = new TestChannel(&channel_delegate);
+ char* read_buffer = channel->GetReadBufferTest(&buffer_size);
+ ASSERT_LT(message->data_num_bytes(),
+ buffer_size); // Bad test. Increase buffer
+ // size.
+ memcpy(read_buffer, message->data(), message->data_num_bytes());
+
+ size_t next_read_size_hint = 0;
+ EXPECT_TRUE(channel->OnReadCompleteTest(message->data_num_bytes(),
+ &next_read_size_hint));
+
+ TestMemoryEqual(message->payload(), message->payload_size(),
+ channel_delegate.GetReceivedPayload(),
+ channel_delegate.GetReceivedPayloadSize());
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/channel_win.cc b/mojo/edk/system/channel_win.cc
new file mode 100644
index 0000000000..c15df16bb1
--- /dev/null
+++ b/mojo/edk/system/channel_win.cc
@@ -0,0 +1,360 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/channel.h"
+
+#include <stdint.h>
+#include <windows.h>
+
+#include <algorithm>
+#include <deque>
+#include <limits>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "base/win/win_util.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+// A view over a Channel::Message object. The write queue uses these since
+// large messages may need to be sent in chunks.
+class MessageView {
+ public:
+ // Owns |message|. |offset| indexes the first unsent byte in the message.
+ MessageView(Channel::MessagePtr message, size_t offset)
+ : message_(std::move(message)),
+ offset_(offset) {
+ DCHECK_GT(message_->data_num_bytes(), offset_);
+ }
+
+ MessageView(MessageView&& other) { *this = std::move(other); }
+
+ MessageView& operator=(MessageView&& other) {
+ message_ = std::move(other.message_);
+ offset_ = other.offset_;
+ return *this;
+ }
+
+ ~MessageView() {}
+
+ const void* data() const {
+ return static_cast<const char*>(message_->data()) + offset_;
+ }
+
+ size_t data_num_bytes() const { return message_->data_num_bytes() - offset_; }
+
+ size_t data_offset() const { return offset_; }
+ void advance_data_offset(size_t num_bytes) {
+ DCHECK_GE(message_->data_num_bytes(), offset_ + num_bytes);
+ offset_ += num_bytes;
+ }
+
+ Channel::MessagePtr TakeChannelMessage() { return std::move(message_); }
+
+ private:
+ Channel::MessagePtr message_;
+ size_t offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageView);
+};
+
+class ChannelWin : public Channel,
+ public base::MessageLoop::DestructionObserver,
+ public base::MessageLoopForIO::IOHandler {
+ public:
+ ChannelWin(Delegate* delegate,
+ ScopedPlatformHandle handle,
+ scoped_refptr<base::TaskRunner> io_task_runner)
+ : Channel(delegate),
+ self_(this),
+ handle_(std::move(handle)),
+ io_task_runner_(io_task_runner) {
+ CHECK(handle_.is_valid());
+
+ wait_for_connect_ = handle_.get().needs_connection;
+ }
+
+ void Start() override {
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ChannelWin::StartOnIOThread, this));
+ }
+
+ void ShutDownImpl() override {
+ // Always shut down asynchronously when called through the public interface.
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ChannelWin::ShutDownOnIOThread, this));
+ }
+
+ void Write(MessagePtr message) override {
+ bool write_error = false;
+ {
+ base::AutoLock lock(write_lock_);
+ if (reject_writes_)
+ return;
+
+ bool write_now = !delay_writes_ && outgoing_messages_.empty();
+ outgoing_messages_.emplace_back(std::move(message), 0);
+
+ if (write_now && !WriteNoLock(outgoing_messages_.front()))
+ reject_writes_ = write_error = true;
+ }
+ if (write_error) {
+ // Do not synchronously invoke OnError(). Write() may have been called by
+ // the delegate and we don't want to re-enter it.
+ io_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&ChannelWin::OnError, this));
+ }
+ }
+
+ void LeakHandle() override {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ leak_handle_ = true;
+ }
+
+ bool GetReadPlatformHandles(
+ size_t num_handles,
+ const void* extra_header,
+ size_t extra_header_size,
+ ScopedPlatformHandleVectorPtr* handles) override {
+ if (num_handles > std::numeric_limits<uint16_t>::max())
+ return false;
+ using HandleEntry = Channel::Message::HandleEntry;
+ size_t handles_size = sizeof(HandleEntry) * num_handles;
+ if (handles_size > extra_header_size)
+ return false;
+ DCHECK(extra_header);
+ handles->reset(new PlatformHandleVector(num_handles));
+ const HandleEntry* extra_header_handles =
+ reinterpret_cast<const HandleEntry*>(extra_header);
+ for (size_t i = 0; i < num_handles; i++) {
+ (*handles)->at(i).handle =
+ base::win::Uint32ToHandle(extra_header_handles[i].handle);
+ }
+ return true;
+ }
+
+ private:
+ // May run on any thread.
+ ~ChannelWin() override {}
+
+ void StartOnIOThread() {
+ base::MessageLoop::current()->AddDestructionObserver(this);
+ base::MessageLoopForIO::current()->RegisterIOHandler(
+ handle_.get().handle, this);
+
+ if (wait_for_connect_) {
+ BOOL ok = ConnectNamedPipe(handle_.get().handle,
+ &connect_context_.overlapped);
+ if (ok) {
+ PLOG(ERROR) << "Unexpected success while waiting for pipe connection";
+ OnError();
+ return;
+ }
+
+ const DWORD err = GetLastError();
+ switch (err) {
+ case ERROR_PIPE_CONNECTED:
+ wait_for_connect_ = false;
+ break;
+ case ERROR_IO_PENDING:
+ AddRef();
+ return;
+ case ERROR_NO_DATA:
+ OnError();
+ return;
+ }
+ }
+
+ // Now that we have registered our IOHandler, we can start writing.
+ {
+ base::AutoLock lock(write_lock_);
+ if (delay_writes_) {
+ delay_writes_ = false;
+ WriteNextNoLock();
+ }
+ }
+
+ // Keep this alive in case we synchronously run shutdown.
+ scoped_refptr<ChannelWin> keep_alive(this);
+ ReadMore(0);
+ }
+
+ void ShutDownOnIOThread() {
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+
+ // BUG(crbug.com/583525): This function is expected to be called once, and
+ // |handle_| should be valid at this point.
+ CHECK(handle_.is_valid());
+ CancelIo(handle_.get().handle);
+ if (leak_handle_)
+ ignore_result(handle_.release());
+ handle_.reset();
+
+ // May destroy the |this| if it was the last reference.
+ self_ = nullptr;
+ }
+
+ // base::MessageLoop::DestructionObserver:
+ void WillDestroyCurrentMessageLoop() override {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ if (self_)
+ ShutDownOnIOThread();
+ }
+
+ // base::MessageLoop::IOHandler:
+ void OnIOCompleted(base::MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) override {
+ if (error != ERROR_SUCCESS) {
+ OnError();
+ } else if (context == &connect_context_) {
+ DCHECK(wait_for_connect_);
+ wait_for_connect_ = false;
+ ReadMore(0);
+
+ base::AutoLock lock(write_lock_);
+ if (delay_writes_) {
+ delay_writes_ = false;
+ WriteNextNoLock();
+ }
+ } else if (context == &read_context_) {
+ OnReadDone(static_cast<size_t>(bytes_transfered));
+ } else {
+ CHECK(context == &write_context_);
+ OnWriteDone(static_cast<size_t>(bytes_transfered));
+ }
+ Release(); // Balancing reference taken after ReadFile / WriteFile.
+ }
+
+ void OnReadDone(size_t bytes_read) {
+ if (bytes_read > 0) {
+ size_t next_read_size = 0;
+ if (OnReadComplete(bytes_read, &next_read_size)) {
+ ReadMore(next_read_size);
+ } else {
+ OnError();
+ }
+ } else if (bytes_read == 0) {
+ OnError();
+ }
+ }
+
+ void OnWriteDone(size_t bytes_written) {
+ if (bytes_written == 0)
+ return;
+
+ bool write_error = false;
+ {
+ base::AutoLock lock(write_lock_);
+
+ DCHECK(!outgoing_messages_.empty());
+
+ MessageView& message_view = outgoing_messages_.front();
+ message_view.advance_data_offset(bytes_written);
+ if (message_view.data_num_bytes() == 0) {
+ Channel::MessagePtr message = message_view.TakeChannelMessage();
+ outgoing_messages_.pop_front();
+
+ // Clear any handles so they don't get closed on destruction.
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ if (handles)
+ handles->clear();
+ }
+
+ if (!WriteNextNoLock())
+ reject_writes_ = write_error = true;
+ }
+ if (write_error)
+ OnError();
+ }
+
+ void ReadMore(size_t next_read_size_hint) {
+ size_t buffer_capacity = next_read_size_hint;
+ char* buffer = GetReadBuffer(&buffer_capacity);
+ DCHECK_GT(buffer_capacity, 0u);
+
+ BOOL ok = ReadFile(handle_.get().handle,
+ buffer,
+ static_cast<DWORD>(buffer_capacity),
+ NULL,
+ &read_context_.overlapped);
+
+ if (ok || GetLastError() == ERROR_IO_PENDING) {
+ AddRef(); // Will be balanced in OnIOCompleted
+ } else {
+ OnError();
+ }
+ }
+
+ // Attempts to write a message directly to the channel. If the full message
+ // cannot be written, it's queued and a wait is initiated to write the message
+ // ASAP on the I/O thread.
+ bool WriteNoLock(const MessageView& message_view) {
+ BOOL ok = WriteFile(handle_.get().handle,
+ message_view.data(),
+ static_cast<DWORD>(message_view.data_num_bytes()),
+ NULL,
+ &write_context_.overlapped);
+
+ if (ok || GetLastError() == ERROR_IO_PENDING) {
+ AddRef(); // Will be balanced in OnIOCompleted.
+ return true;
+ }
+ return false;
+ }
+
+ bool WriteNextNoLock() {
+ if (outgoing_messages_.empty())
+ return true;
+ return WriteNoLock(outgoing_messages_.front());
+ }
+
+ // Keeps the Channel alive at least until explicit shutdown on the IO thread.
+ scoped_refptr<Channel> self_;
+
+ ScopedPlatformHandle handle_;
+ scoped_refptr<base::TaskRunner> io_task_runner_;
+
+ base::MessageLoopForIO::IOContext connect_context_;
+ base::MessageLoopForIO::IOContext read_context_;
+ base::MessageLoopForIO::IOContext write_context_;
+
+ // Protects |reject_writes_| and |outgoing_messages_|.
+ base::Lock write_lock_;
+
+ bool delay_writes_ = true;
+
+ bool reject_writes_ = false;
+ std::deque<MessageView> outgoing_messages_;
+
+ bool wait_for_connect_;
+
+ bool leak_handle_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(ChannelWin);
+};
+
+} // namespace
+
+// static
+scoped_refptr<Channel> Channel::Create(
+ Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner) {
+ return new ChannelWin(delegate, connection_params.TakeChannelHandle(),
+ io_task_runner);
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/configuration.cc b/mojo/edk/system/configuration.cc
new file mode 100644
index 0000000000..f5eb2b8f6f
--- /dev/null
+++ b/mojo/edk/system/configuration.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/configuration.h"
+
+namespace mojo {
+namespace edk {
+namespace internal {
+
+// These default values should be synced with the documentation in
+// mojo/edk/embedder/configuration.h.
+Configuration g_configuration = {
+ 1000000, // max_handle_table_size
+ 1000000, // max_mapping_table_sze
+ 4 * 1024 * 1024, // max_message_num_bytes
+ 10000, // max_message_num_handles
+ 256 * 1024 * 1024, // max_data_pipe_capacity_bytes
+ 1024 * 1024, // default_data_pipe_capacity_bytes
+ 16, // data_pipe_buffer_alignment_bytes
+ 1024 * 1024 * 1024}; // max_shared_memory_num_bytes
+
+} // namespace internal
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/configuration.h b/mojo/edk/system/configuration.h
new file mode 100644
index 0000000000..038835ffdd
--- /dev/null
+++ b/mojo/edk/system/configuration.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_CONFIGURATION_H_
+#define MOJO_EDK_SYSTEM_CONFIGURATION_H_
+
+#include "mojo/edk/embedder/configuration.h"
+#include "mojo/edk/system/system_impl_export.h"
+
+namespace mojo {
+namespace edk {
+
+namespace internal {
+MOJO_SYSTEM_IMPL_EXPORT extern Configuration g_configuration;
+} // namespace internal
+
+MOJO_SYSTEM_IMPL_EXPORT inline const Configuration& GetConfiguration() {
+ return internal::g_configuration;
+}
+
+MOJO_SYSTEM_IMPL_EXPORT inline Configuration* GetMutableConfiguration() {
+ return &internal::g_configuration;
+}
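+
+// A minimal usage sketch (the field names come from
+// mojo/edk/embedder/configuration.h, as listed in configuration.cc):
+//
+//   size_t max_bytes = GetConfiguration().max_message_num_bytes;
+//   GetMutableConfiguration()->max_message_num_bytes = 8 * 1024 * 1024;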
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_CONFIGURATION_H_
diff --git a/mojo/edk/system/core.cc b/mojo/edk/system/core.cc
new file mode 100644
index 0000000000..360e8c3012
--- /dev/null
+++ b/mojo/edk/system/core.cc
@@ -0,0 +1,1019 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/core.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/containers/stack_container.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/rand_util.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "mojo/edk/embedder/embedder.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/channel.h"
+#include "mojo/edk/system/configuration.h"
+#include "mojo/edk/system/data_pipe_consumer_dispatcher.h"
+#include "mojo/edk/system/data_pipe_producer_dispatcher.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/message_for_transit.h"
+#include "mojo/edk/system/message_pipe_dispatcher.h"
+#include "mojo/edk/system/platform_handle_dispatcher.h"
+#include "mojo/edk/system/ports/name.h"
+#include "mojo/edk/system/ports/node.h"
+#include "mojo/edk/system/request_context.h"
+#include "mojo/edk/system/shared_buffer_dispatcher.h"
+#include "mojo/edk/system/watcher_dispatcher.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+// This is an unnecessarily large limit that is relatively easy to enforce.
+const uint32_t kMaxHandlesPerMessage = 1024 * 1024;
+
+// TODO(rockot): Maybe we could negotiate a debugging pipe ID for cross-process
+// pipes too; for now we just use a constant. This only affects bootstrap pipes.
+const uint64_t kUnknownPipeIdForDebug = 0x7f7f7f7f7f7f7f7fUL;
+
+MojoResult MojoPlatformHandleToScopedPlatformHandle(
+ const MojoPlatformHandle* platform_handle,
+ ScopedPlatformHandle* out_handle) {
+ if (platform_handle->struct_size != sizeof(MojoPlatformHandle))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (platform_handle->type == MOJO_PLATFORM_HANDLE_TYPE_INVALID) {
+ out_handle->reset();
+ return MOJO_RESULT_OK;
+ }
+
+ PlatformHandle handle;
+ switch (platform_handle->type) {
+#if defined(OS_POSIX)
+ case MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR:
+ handle.handle = static_cast<int>(platform_handle->value);
+ break;
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ case MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT:
+ handle.type = PlatformHandle::Type::MACH;
+ handle.port = static_cast<mach_port_t>(platform_handle->value);
+ break;
+#endif
+
+#if defined(OS_WIN)
+ case MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE:
+ handle.handle = reinterpret_cast<HANDLE>(platform_handle->value);
+ break;
+#endif
+
+ default:
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ out_handle->reset(handle);
+ return MOJO_RESULT_OK;
+}
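+
+// A rough usage sketch of the helper above (POSIX case; |fd| is assumed to be
+// an open file descriptor owned by the caller):
+//
+//   MojoPlatformHandle platform_handle;
+//   platform_handle.struct_size = sizeof(MojoPlatformHandle);
+//   platform_handle.type = MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR;
+//   platform_handle.value = static_cast<uint64_t>(fd);
+//   ScopedPlatformHandle scoped_handle;
+//   MojoResult rv = MojoPlatformHandleToScopedPlatformHandle(&platform_handle,
+//                                                            &scoped_handle);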
+
+MojoResult ScopedPlatformHandleToMojoPlatformHandle(
+ ScopedPlatformHandle handle,
+ MojoPlatformHandle* platform_handle) {
+ if (platform_handle->struct_size != sizeof(MojoPlatformHandle))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (!handle.is_valid()) {
+ platform_handle->type = MOJO_PLATFORM_HANDLE_TYPE_INVALID;
+ return MOJO_RESULT_OK;
+ }
+
+#if defined(OS_POSIX)
+ switch (handle.get().type) {
+ case PlatformHandle::Type::POSIX:
+ platform_handle->type = MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR;
+ platform_handle->value = static_cast<uint64_t>(handle.release().handle);
+ break;
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ case PlatformHandle::Type::MACH:
+ platform_handle->type = MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT;
+ platform_handle->value = static_cast<uint64_t>(handle.release().port);
+ break;
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+ default:
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+#elif defined(OS_WIN)
+ platform_handle->type = MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE;
+ platform_handle->value = reinterpret_cast<uint64_t>(handle.release().handle);
+#endif // defined(OS_WIN)
+
+ return MOJO_RESULT_OK;
+}
+
+} // namespace
+
+Core::Core() {}
+
+Core::~Core() {
+ if (node_controller_ && node_controller_->io_task_runner()) {
+ // If this races with IO thread shutdown the callback will be dropped and
+    // the NodeController will be shut down on this thread anyway, which is
+    // also just fine.
+ scoped_refptr<base::TaskRunner> io_task_runner =
+ node_controller_->io_task_runner();
+ io_task_runner->PostTask(FROM_HERE,
+ base::Bind(&Core::PassNodeControllerToIOThread,
+ base::Passed(&node_controller_)));
+ }
+}
+
+void Core::SetIOTaskRunner(scoped_refptr<base::TaskRunner> io_task_runner) {
+ GetNodeController()->SetIOTaskRunner(io_task_runner);
+}
+
+NodeController* Core::GetNodeController() {
+ base::AutoLock lock(node_controller_lock_);
+ if (!node_controller_)
+ node_controller_.reset(new NodeController(this));
+ return node_controller_.get();
+}
+
+scoped_refptr<Dispatcher> Core::GetDispatcher(MojoHandle handle) {
+ base::AutoLock lock(handles_lock_);
+ return handles_.GetDispatcher(handle);
+}
+
+void Core::SetDefaultProcessErrorCallback(
+ const ProcessErrorCallback& callback) {
+ default_process_error_callback_ = callback;
+}
+
+void Core::AddChild(base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ const std::string& child_token,
+ const ProcessErrorCallback& process_error_callback) {
+ GetNodeController()->ConnectToChild(process_handle,
+ std::move(connection_params), child_token,
+ process_error_callback);
+}
+
+void Core::ChildLaunchFailed(const std::string& child_token) {
+ RequestContext request_context;
+ GetNodeController()->CloseChildPorts(child_token);
+}
+
+ScopedMessagePipeHandle Core::ConnectToPeerProcess(
+ ScopedPlatformHandle pipe_handle,
+ const std::string& peer_token) {
+ RequestContext request_context;
+ ports::PortRef port0, port1;
+ GetNodeController()->node()->CreatePortPair(&port0, &port1);
+ MojoHandle handle = AddDispatcher(new MessagePipeDispatcher(
+ GetNodeController(), port0, kUnknownPipeIdForDebug, 0));
+ ConnectionParams connection_params(std::move(pipe_handle));
+ GetNodeController()->ConnectToPeer(std::move(connection_params), port1,
+ peer_token);
+ return ScopedMessagePipeHandle(MessagePipeHandle(handle));
+}
+
+void Core::ClosePeerConnection(const std::string& peer_token) {
+ GetNodeController()->ClosePeerConnection(peer_token);
+}
+
+void Core::InitChild(ConnectionParams connection_params) {
+ GetNodeController()->ConnectToParent(std::move(connection_params));
+}
+
+void Core::SetMachPortProvider(base::PortProvider* port_provider) {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ GetNodeController()->CreateMachPortRelay(port_provider);
+#endif
+}
+
+MojoHandle Core::AddDispatcher(scoped_refptr<Dispatcher> dispatcher) {
+ base::AutoLock lock(handles_lock_);
+ return handles_.AddDispatcher(dispatcher);
+}
+
+bool Core::AddDispatchersFromTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers,
+ MojoHandle* handles) {
+ bool failed = false;
+ {
+ base::AutoLock lock(handles_lock_);
+ if (!handles_.AddDispatchersFromTransit(dispatchers, handles))
+ failed = true;
+ }
+ if (failed) {
+ for (auto d : dispatchers)
+ d.dispatcher->Close();
+ return false;
+ }
+ return true;
+}
+
+MojoResult Core::CreatePlatformHandleWrapper(
+ ScopedPlatformHandle platform_handle,
+ MojoHandle* wrapper_handle) {
+ MojoHandle h = AddDispatcher(
+ PlatformHandleDispatcher::Create(std::move(platform_handle)));
+ if (h == MOJO_HANDLE_INVALID)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ *wrapper_handle = h;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::PassWrappedPlatformHandle(
+ MojoHandle wrapper_handle,
+ ScopedPlatformHandle* platform_handle) {
+ base::AutoLock lock(handles_lock_);
+ scoped_refptr<Dispatcher> d;
+ MojoResult result = handles_.GetAndRemoveDispatcher(wrapper_handle, &d);
+ if (result != MOJO_RESULT_OK)
+ return result;
+ if (d->GetType() == Dispatcher::Type::PLATFORM_HANDLE) {
+ PlatformHandleDispatcher* phd =
+ static_cast<PlatformHandleDispatcher*>(d.get());
+ *platform_handle = phd->PassPlatformHandle();
+ } else {
+ result = MOJO_RESULT_INVALID_ARGUMENT;
+ }
+ d->Close();
+ return result;
+}
+
+MojoResult Core::CreateSharedBufferWrapper(
+ base::SharedMemoryHandle shared_memory_handle,
+ size_t num_bytes,
+ bool read_only,
+ MojoHandle* mojo_wrapper_handle) {
+ DCHECK(num_bytes);
+ scoped_refptr<PlatformSharedBuffer> platform_buffer =
+ PlatformSharedBuffer::CreateFromSharedMemoryHandle(num_bytes, read_only,
+ shared_memory_handle);
+ if (!platform_buffer)
+ return MOJO_RESULT_UNKNOWN;
+
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ MojoResult result = SharedBufferDispatcher::CreateFromPlatformSharedBuffer(
+ platform_buffer, &dispatcher);
+ if (result != MOJO_RESULT_OK)
+ return result;
+ MojoHandle h = AddDispatcher(dispatcher);
+ if (h == MOJO_HANDLE_INVALID)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ *mojo_wrapper_handle = h;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::PassSharedMemoryHandle(
+ MojoHandle mojo_handle,
+ base::SharedMemoryHandle* shared_memory_handle,
+ size_t* num_bytes,
+ bool* read_only) {
+ if (!shared_memory_handle)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ scoped_refptr<Dispatcher> dispatcher;
+ MojoResult result = MOJO_RESULT_OK;
+ {
+ base::AutoLock lock(handles_lock_);
+ // Get the dispatcher and check it before removing it from the handle table
+ // to ensure that the dispatcher is of the correct type. This ensures we
+ // don't close and remove the wrong type of dispatcher.
+ dispatcher = handles_.GetDispatcher(mojo_handle);
+ if (!dispatcher || dispatcher->GetType() != Dispatcher::Type::SHARED_BUFFER)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ result = handles_.GetAndRemoveDispatcher(mojo_handle, &dispatcher);
+ if (result != MOJO_RESULT_OK)
+ return result;
+ }
+
+ SharedBufferDispatcher* shm_dispatcher =
+ static_cast<SharedBufferDispatcher*>(dispatcher.get());
+ scoped_refptr<PlatformSharedBuffer> platform_shared_buffer =
+ shm_dispatcher->PassPlatformSharedBuffer();
+
+ if (!platform_shared_buffer)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (num_bytes)
+ *num_bytes = platform_shared_buffer->GetNumBytes();
+ if (read_only)
+ *read_only = platform_shared_buffer->IsReadOnly();
+ *shared_memory_handle = platform_shared_buffer->DuplicateSharedMemoryHandle();
+
+ shm_dispatcher->Close();
+ return result;
+}
+
+void Core::RequestShutdown(const base::Closure& callback) {
+ GetNodeController()->RequestShutdown(callback);
+}
+
+ScopedMessagePipeHandle Core::CreateParentMessagePipe(
+ const std::string& token, const std::string& child_token) {
+ RequestContext request_context;
+ ports::PortRef port0, port1;
+ GetNodeController()->node()->CreatePortPair(&port0, &port1);
+ MojoHandle handle = AddDispatcher(
+ new MessagePipeDispatcher(GetNodeController(), port0,
+ kUnknownPipeIdForDebug, 0));
+ GetNodeController()->ReservePort(token, port1, child_token);
+ return ScopedMessagePipeHandle(MessagePipeHandle(handle));
+}
+
+ScopedMessagePipeHandle Core::CreateChildMessagePipe(const std::string& token) {
+ RequestContext request_context;
+ ports::PortRef port0, port1;
+ GetNodeController()->node()->CreatePortPair(&port0, &port1);
+ MojoHandle handle = AddDispatcher(
+ new MessagePipeDispatcher(GetNodeController(), port0,
+ kUnknownPipeIdForDebug, 1));
+ GetNodeController()->MergePortIntoParent(token, port1);
+ return ScopedMessagePipeHandle(MessagePipeHandle(handle));
+}
+
+MojoResult Core::SetProperty(MojoPropertyType type, const void* value) {
+ base::AutoLock locker(property_lock_);
+ switch (type) {
+ case MOJO_PROPERTY_TYPE_SYNC_CALL_ALLOWED:
+ property_sync_call_allowed_ = *static_cast<const bool*>(value);
+ return MOJO_RESULT_OK;
+ default:
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+}
+
+MojoTimeTicks Core::GetTimeTicksNow() {
+ return base::TimeTicks::Now().ToInternalValue();
+}
+
+MojoResult Core::Close(MojoHandle handle) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher;
+ {
+ base::AutoLock lock(handles_lock_);
+ MojoResult rv = handles_.GetAndRemoveDispatcher(handle, &dispatcher);
+ if (rv != MOJO_RESULT_OK)
+ return rv;
+ }
+ dispatcher->Close();
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::QueryHandleSignalsState(
+ MojoHandle handle,
+ MojoHandleSignalsState* signals_state) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handle);
+ if (!dispatcher || !signals_state)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ *signals_state = dispatcher->GetHandleSignalsState();
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::CreateWatcher(MojoWatcherCallback callback,
+ MojoHandle* watcher_handle) {
+ RequestContext request_context;
+ if (!watcher_handle)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ *watcher_handle = AddDispatcher(new WatcherDispatcher(callback));
+ if (*watcher_handle == MOJO_HANDLE_INVALID)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::Watch(MojoHandle watcher_handle,
+ MojoHandle handle,
+ MojoHandleSignals signals,
+ uintptr_t context) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> watcher = GetDispatcher(watcher_handle);
+ if (!watcher || watcher->GetType() != Dispatcher::Type::WATCHER)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handle);
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watcher->WatchDispatcher(dispatcher, signals, context);
+}
+
+MojoResult Core::CancelWatch(MojoHandle watcher_handle, uintptr_t context) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> watcher = GetDispatcher(watcher_handle);
+ if (!watcher || watcher->GetType() != Dispatcher::Type::WATCHER)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watcher->CancelWatch(context);
+}
+
+MojoResult Core::ArmWatcher(MojoHandle watcher_handle,
+ uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> watcher = GetDispatcher(watcher_handle);
+ if (!watcher || watcher->GetType() != Dispatcher::Type::WATCHER)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watcher->Arm(num_ready_contexts, ready_contexts, ready_results,
+ ready_signals_states);
+}
+
+MojoResult Core::AllocMessage(uint32_t num_bytes,
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ MojoAllocMessageFlags flags,
+ MojoMessageHandle* message) {
+ if (!message)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (num_handles == 0) { // Fast path: no handles.
+ std::unique_ptr<MessageForTransit> msg;
+ MojoResult rv = MessageForTransit::Create(&msg, num_bytes, nullptr, 0);
+ if (rv != MOJO_RESULT_OK)
+ return rv;
+
+ *message = reinterpret_cast<MojoMessageHandle>(msg.release());
+ return MOJO_RESULT_OK;
+ }
+
+ if (!handles)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (num_handles > kMaxHandlesPerMessage)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ std::vector<Dispatcher::DispatcherInTransit> dispatchers;
+ {
+ base::AutoLock lock(handles_lock_);
+ MojoResult rv = handles_.BeginTransit(handles, num_handles, &dispatchers);
+ if (rv != MOJO_RESULT_OK) {
+ handles_.CancelTransit(dispatchers);
+ return rv;
+ }
+ }
+ DCHECK_EQ(num_handles, dispatchers.size());
+
+ std::unique_ptr<MessageForTransit> msg;
+ MojoResult rv = MessageForTransit::Create(
+ &msg, num_bytes, dispatchers.data(), num_handles);
+
+ {
+ base::AutoLock lock(handles_lock_);
+ if (rv == MOJO_RESULT_OK) {
+ handles_.CompleteTransitAndClose(dispatchers);
+ *message = reinterpret_cast<MojoMessageHandle>(msg.release());
+ } else {
+ handles_.CancelTransit(dispatchers);
+ }
+ }
+
+ return rv;
+}
+
+MojoResult Core::FreeMessage(MojoMessageHandle message) {
+ if (!message)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ delete reinterpret_cast<MessageForTransit*>(message);
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::GetMessageBuffer(MojoMessageHandle message, void** buffer) {
+ if (!message)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ *buffer = reinterpret_cast<MessageForTransit*>(message)->mutable_bytes();
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::GetProperty(MojoPropertyType type, void* value) {
+ base::AutoLock locker(property_lock_);
+ switch (type) {
+ case MOJO_PROPERTY_TYPE_SYNC_CALL_ALLOWED:
+ *static_cast<bool*>(value) = property_sync_call_allowed_;
+ return MOJO_RESULT_OK;
+ default:
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+}
+
+MojoResult Core::CreateMessagePipe(
+ const MojoCreateMessagePipeOptions* options,
+ MojoHandle* message_pipe_handle0,
+ MojoHandle* message_pipe_handle1) {
+ RequestContext request_context;
+ ports::PortRef port0, port1;
+ GetNodeController()->node()->CreatePortPair(&port0, &port1);
+
+ CHECK(message_pipe_handle0);
+ CHECK(message_pipe_handle1);
+
+ uint64_t pipe_id = base::RandUint64();
+
+ *message_pipe_handle0 = AddDispatcher(
+ new MessagePipeDispatcher(GetNodeController(), port0, pipe_id, 0));
+ if (*message_pipe_handle0 == MOJO_HANDLE_INVALID)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ *message_pipe_handle1 = AddDispatcher(
+ new MessagePipeDispatcher(GetNodeController(), port1, pipe_id, 1));
+  if (*message_pipe_handle1 == MOJO_HANDLE_INVALID) {
+    // Close the first dispatcher only after removing it from the handle
+    // table; it cannot be closed until it has been looked up.
+    scoped_refptr<Dispatcher> unused;
+    {
+      base::AutoLock lock(handles_lock_);
+      handles_.GetAndRemoveDispatcher(*message_pipe_handle0, &unused);
+    }
+    if (unused)
+      unused->Close();
+    return MOJO_RESULT_RESOURCE_EXHAUSTED;
+  }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::WriteMessage(MojoHandle message_pipe_handle,
+ const void* bytes,
+ uint32_t num_bytes,
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ MojoWriteMessageFlags flags) {
+ if (num_bytes && !bytes)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ MojoMessageHandle message;
+ MojoResult rv = AllocMessage(num_bytes, handles, num_handles,
+ MOJO_ALLOC_MESSAGE_FLAG_NONE, &message);
+ if (rv != MOJO_RESULT_OK)
+ return rv;
+
+ if (num_bytes) {
+ void* buffer = nullptr;
+ rv = GetMessageBuffer(message, &buffer);
+ DCHECK_EQ(rv, MOJO_RESULT_OK);
+ memcpy(buffer, bytes, num_bytes);
+ }
+
+ return WriteMessageNew(message_pipe_handle, message, flags);
+}
+
+MojoResult Core::WriteMessageNew(MojoHandle message_pipe_handle,
+ MojoMessageHandle message,
+ MojoWriteMessageFlags flags) {
+ RequestContext request_context;
+ std::unique_ptr<MessageForTransit> message_for_transit(
+ reinterpret_cast<MessageForTransit*>(message));
+ auto dispatcher = GetDispatcher(message_pipe_handle);
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->WriteMessage(std::move(message_for_transit), flags);
+}
+
+MojoResult Core::ReadMessage(MojoHandle message_pipe_handle,
+ void* bytes,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags) {
+ CHECK((!num_handles || !*num_handles || handles) &&
+ (!num_bytes || !*num_bytes || bytes));
+ RequestContext request_context;
+ auto dispatcher = GetDispatcher(message_pipe_handle);
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ std::unique_ptr<MessageForTransit> message;
+ MojoResult rv =
+ dispatcher->ReadMessage(&message, num_bytes, handles, num_handles, flags,
+ false /* ignore_num_bytes */);
+ if (rv != MOJO_RESULT_OK)
+ return rv;
+
+ if (message && message->num_bytes())
+ memcpy(bytes, message->bytes(), message->num_bytes());
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::ReadMessageNew(MojoHandle message_pipe_handle,
+ MojoMessageHandle* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags) {
+ CHECK(message);
+ CHECK(!num_handles || !*num_handles || handles);
+ RequestContext request_context;
+ auto dispatcher = GetDispatcher(message_pipe_handle);
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ std::unique_ptr<MessageForTransit> msg;
+ MojoResult rv =
+ dispatcher->ReadMessage(&msg, num_bytes, handles, num_handles, flags,
+ true /* ignore_num_bytes */);
+ if (rv != MOJO_RESULT_OK)
+ return rv;
+ *message = reinterpret_cast<MojoMessageHandle>(msg.release());
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::FuseMessagePipes(MojoHandle handle0, MojoHandle handle1) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher0;
+ scoped_refptr<Dispatcher> dispatcher1;
+
+ bool valid_handles = true;
+ {
+ base::AutoLock lock(handles_lock_);
+ MojoResult result0 = handles_.GetAndRemoveDispatcher(handle0, &dispatcher0);
+ MojoResult result1 = handles_.GetAndRemoveDispatcher(handle1, &dispatcher1);
+ if (result0 != MOJO_RESULT_OK || result1 != MOJO_RESULT_OK ||
+ dispatcher0->GetType() != Dispatcher::Type::MESSAGE_PIPE ||
+ dispatcher1->GetType() != Dispatcher::Type::MESSAGE_PIPE)
+ valid_handles = false;
+ }
+
+ if (!valid_handles) {
+ if (dispatcher0)
+ dispatcher0->Close();
+ if (dispatcher1)
+ dispatcher1->Close();
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ MessagePipeDispatcher* mpd0 =
+ static_cast<MessagePipeDispatcher*>(dispatcher0.get());
+ MessagePipeDispatcher* mpd1 =
+ static_cast<MessagePipeDispatcher*>(dispatcher1.get());
+
+ if (!mpd0->Fuse(mpd1))
+ return MOJO_RESULT_FAILED_PRECONDITION;
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::NotifyBadMessage(MojoMessageHandle message,
+ const char* error,
+ size_t error_num_bytes) {
+ if (!message)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ const PortsMessage& ports_message =
+ reinterpret_cast<MessageForTransit*>(message)->ports_message();
+ if (ports_message.source_node() == ports::kInvalidNodeName) {
+ DVLOG(1) << "Received invalid message from unknown node.";
+ if (!default_process_error_callback_.is_null())
+ default_process_error_callback_.Run(std::string(error, error_num_bytes));
+ return MOJO_RESULT_OK;
+ }
+
+ GetNodeController()->NotifyBadMessageFrom(
+ ports_message.source_node(), std::string(error, error_num_bytes));
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::CreateDataPipe(
+ const MojoCreateDataPipeOptions* options,
+ MojoHandle* data_pipe_producer_handle,
+ MojoHandle* data_pipe_consumer_handle) {
+ RequestContext request_context;
+ if (options && options->struct_size != sizeof(MojoCreateDataPipeOptions))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ MojoCreateDataPipeOptions create_options;
+ create_options.struct_size = sizeof(MojoCreateDataPipeOptions);
+ create_options.flags = options ? options->flags : 0;
+ create_options.element_num_bytes = options ? options->element_num_bytes : 1;
+ // TODO(rockot): Use Configuration to get default data pipe capacity.
+ create_options.capacity_num_bytes =
+ options && options->capacity_num_bytes ? options->capacity_num_bytes
+ : 64 * 1024;
+
+ // TODO(rockot): Broker through the parent when necessary.
+ scoped_refptr<PlatformSharedBuffer> ring_buffer =
+ GetNodeController()->CreateSharedBuffer(
+ create_options.capacity_num_bytes);
+ if (!ring_buffer)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ ports::PortRef port0, port1;
+ GetNodeController()->node()->CreatePortPair(&port0, &port1);
+
+ CHECK(data_pipe_producer_handle);
+ CHECK(data_pipe_consumer_handle);
+
+ uint64_t pipe_id = base::RandUint64();
+
+ scoped_refptr<Dispatcher> producer = new DataPipeProducerDispatcher(
+ GetNodeController(), port0, ring_buffer, create_options,
+ true /* initialized */, pipe_id);
+ scoped_refptr<Dispatcher> consumer = new DataPipeConsumerDispatcher(
+ GetNodeController(), port1, ring_buffer, create_options,
+ true /* initialized */, pipe_id);
+
+ *data_pipe_producer_handle = AddDispatcher(producer);
+ *data_pipe_consumer_handle = AddDispatcher(consumer);
+ if (*data_pipe_producer_handle == MOJO_HANDLE_INVALID ||
+ *data_pipe_consumer_handle == MOJO_HANDLE_INVALID) {
+ if (*data_pipe_producer_handle != MOJO_HANDLE_INVALID) {
+ scoped_refptr<Dispatcher> unused;
+ base::AutoLock lock(handles_lock_);
+ handles_.GetAndRemoveDispatcher(*data_pipe_producer_handle, &unused);
+ }
+ producer->Close();
+ consumer->Close();
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::WriteData(MojoHandle data_pipe_producer_handle,
+ const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_producer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->WriteData(elements, num_bytes, flags);
+}
+
+MojoResult Core::BeginWriteData(MojoHandle data_pipe_producer_handle,
+ void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_producer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->BeginWriteData(buffer, buffer_num_bytes, flags);
+}
+
+MojoResult Core::EndWriteData(MojoHandle data_pipe_producer_handle,
+ uint32_t num_bytes_written) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_producer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->EndWriteData(num_bytes_written);
+}
+
+MojoResult Core::ReadData(MojoHandle data_pipe_consumer_handle,
+ void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_consumer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->ReadData(elements, num_bytes, flags);
+}
+
+MojoResult Core::BeginReadData(MojoHandle data_pipe_consumer_handle,
+ const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_consumer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->BeginReadData(buffer, buffer_num_bytes, flags);
+}
+
+MojoResult Core::EndReadData(MojoHandle data_pipe_consumer_handle,
+ uint32_t num_bytes_read) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(
+ GetDispatcher(data_pipe_consumer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ return dispatcher->EndReadData(num_bytes_read);
+}
+
+MojoResult Core::CreateSharedBuffer(
+ const MojoCreateSharedBufferOptions* options,
+ uint64_t num_bytes,
+ MojoHandle* shared_buffer_handle) {
+ RequestContext request_context;
+ MojoCreateSharedBufferOptions validated_options = {};
+ MojoResult result = SharedBufferDispatcher::ValidateCreateOptions(
+ options, &validated_options);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ result = SharedBufferDispatcher::Create(
+ validated_options, GetNodeController(), num_bytes, &dispatcher);
+ if (result != MOJO_RESULT_OK) {
+ DCHECK(!dispatcher);
+ return result;
+ }
+
+ *shared_buffer_handle = AddDispatcher(dispatcher);
+ if (*shared_buffer_handle == MOJO_HANDLE_INVALID) {
+ LOG(ERROR) << "Handle table full";
+ dispatcher->Close();
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::DuplicateBufferHandle(
+ MojoHandle buffer_handle,
+ const MojoDuplicateBufferHandleOptions* options,
+ MojoHandle* new_buffer_handle) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ // Don't verify |options| here; that's the dispatcher's job.
+ scoped_refptr<Dispatcher> new_dispatcher;
+ MojoResult result =
+ dispatcher->DuplicateBufferHandle(options, &new_dispatcher);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ *new_buffer_handle = AddDispatcher(new_dispatcher);
+ if (*new_buffer_handle == MOJO_HANDLE_INVALID) {
+ LOG(ERROR) << "Handle table full";
+ dispatcher->Close();
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::MapBuffer(MojoHandle buffer_handle,
+ uint64_t offset,
+ uint64_t num_bytes,
+ void** buffer,
+ MojoMapBufferFlags flags) {
+ RequestContext request_context;
+ scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle));
+ if (!dispatcher)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ std::unique_ptr<PlatformSharedBufferMapping> mapping;
+ MojoResult result = dispatcher->MapBuffer(offset, num_bytes, flags, &mapping);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ DCHECK(mapping);
+ void* address = mapping->GetBase();
+ {
+ base::AutoLock locker(mapping_table_lock_);
+ result = mapping_table_.AddMapping(std::move(mapping));
+ }
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ *buffer = address;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::UnmapBuffer(void* buffer) {
+ RequestContext request_context;
+ base::AutoLock lock(mapping_table_lock_);
+ return mapping_table_.RemoveMapping(buffer);
+}
+
+MojoResult Core::WrapPlatformHandle(const MojoPlatformHandle* platform_handle,
+ MojoHandle* mojo_handle) {
+ ScopedPlatformHandle handle;
+ MojoResult result = MojoPlatformHandleToScopedPlatformHandle(platform_handle,
+ &handle);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ return CreatePlatformHandleWrapper(std::move(handle), mojo_handle);
+}
+
+MojoResult Core::UnwrapPlatformHandle(MojoHandle mojo_handle,
+ MojoPlatformHandle* platform_handle) {
+ ScopedPlatformHandle handle;
+ MojoResult result = PassWrappedPlatformHandle(mojo_handle, &handle);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ return ScopedPlatformHandleToMojoPlatformHandle(std::move(handle),
+ platform_handle);
+}
+
+MojoResult Core::WrapPlatformSharedBufferHandle(
+ const MojoPlatformHandle* platform_handle,
+ size_t size,
+ MojoPlatformSharedBufferHandleFlags flags,
+ MojoHandle* mojo_handle) {
+ DCHECK(size);
+ ScopedPlatformHandle handle;
+ MojoResult result = MojoPlatformHandleToScopedPlatformHandle(platform_handle,
+ &handle);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ bool read_only = flags & MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_READ_ONLY;
+ scoped_refptr<PlatformSharedBuffer> platform_buffer =
+ PlatformSharedBuffer::CreateFromPlatformHandle(size, read_only,
+ std::move(handle));
+ if (!platform_buffer)
+ return MOJO_RESULT_UNKNOWN;
+
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ result = SharedBufferDispatcher::CreateFromPlatformSharedBuffer(
+ platform_buffer, &dispatcher);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ MojoHandle h = AddDispatcher(dispatcher);
+ if (h == MOJO_HANDLE_INVALID) {
+ dispatcher->Close();
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ *mojo_handle = h;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult Core::UnwrapPlatformSharedBufferHandle(
+ MojoHandle mojo_handle,
+ MojoPlatformHandle* platform_handle,
+ size_t* size,
+ MojoPlatformSharedBufferHandleFlags* flags) {
+ scoped_refptr<Dispatcher> dispatcher;
+ MojoResult result = MOJO_RESULT_OK;
+ {
+ base::AutoLock lock(handles_lock_);
+ result = handles_.GetAndRemoveDispatcher(mojo_handle, &dispatcher);
+ if (result != MOJO_RESULT_OK)
+ return result;
+ }
+
+ if (dispatcher->GetType() != Dispatcher::Type::SHARED_BUFFER) {
+ dispatcher->Close();
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ SharedBufferDispatcher* shm_dispatcher =
+ static_cast<SharedBufferDispatcher*>(dispatcher.get());
+ scoped_refptr<PlatformSharedBuffer> platform_shared_buffer =
+ shm_dispatcher->PassPlatformSharedBuffer();
+ CHECK(platform_shared_buffer);
+
+ CHECK(size);
+ *size = platform_shared_buffer->GetNumBytes();
+
+ CHECK(flags);
+ *flags = MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_NONE;
+ if (platform_shared_buffer->IsReadOnly())
+ *flags |= MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_READ_ONLY;
+
+ ScopedPlatformHandle handle = platform_shared_buffer->PassPlatformHandle();
+ return ScopedPlatformHandleToMojoPlatformHandle(std::move(handle),
+ platform_handle);
+}
+
+void Core::GetActiveHandlesForTest(std::vector<MojoHandle>* handles) {
+ base::AutoLock lock(handles_lock_);
+ handles_.GetActiveHandlesForTest(handles);
+}
+
+// static
+void Core::PassNodeControllerToIOThread(
+ std::unique_ptr<NodeController> node_controller) {
+ // It's OK to leak this reference. At this point we know the IO loop is still
+ // running, and we know the NodeController will observe its eventual
+ // destruction. This tells the NodeController to delete itself when that
+ // happens.
+ node_controller.release()->DestroyOnIOThreadShutdown();
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/core.h b/mojo/edk/system/core.h
new file mode 100644
index 0000000000..1f6d865d23
--- /dev/null
+++ b/mojo/edk/system/core.h
@@ -0,0 +1,297 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_CORE_H_
+#define MOJO_EDK_SYSTEM_CORE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/handle_table.h"
+#include "mojo/edk/system/mapping_table.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/public/c/system/buffer.h"
+#include "mojo/public/c/system/data_pipe.h"
+#include "mojo/public/c/system/message_pipe.h"
+#include "mojo/public/c/system/platform_handle.h"
+#include "mojo/public/c/system/types.h"
+#include "mojo/public/c/system/watcher.h"
+#include "mojo/public/cpp/system/message_pipe.h"
+
+namespace base {
+class PortProvider;
+}
+
+namespace mojo {
+namespace edk {
+
+// |Core| is an object that implements the Mojo system calls. All public methods
+// are thread-safe.
+class MOJO_SYSTEM_IMPL_EXPORT Core {
+ public:
+ Core();
+ virtual ~Core();
+
+ // Called exactly once, shortly after construction, and before any other
+ // methods are called on this object.
+ void SetIOTaskRunner(scoped_refptr<base::TaskRunner> io_task_runner);
+
+ // Retrieves the NodeController for the current process.
+ NodeController* GetNodeController();
+
+ scoped_refptr<Dispatcher> GetDispatcher(MojoHandle handle);
+
+ void SetDefaultProcessErrorCallback(const ProcessErrorCallback& callback);
+
+ // Called in the parent process any time a new child is launched.
+ void AddChild(base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ const std::string& child_token,
+ const ProcessErrorCallback& process_error_callback);
+
+ // Called in the parent process when a child process fails to launch.
+ void ChildLaunchFailed(const std::string& child_token);
+
+ // Called to connect to a peer process. This should be called only if there
+ // is no common ancestor for the processes involved within this mojo system.
+ // Both processes must call this function, each passing one end of a platform
+ // channel. This returns one end of a message pipe to each process.
+ ScopedMessagePipeHandle ConnectToPeerProcess(ScopedPlatformHandle pipe_handle,
+ const std::string& peer_token);
+ void ClosePeerConnection(const std::string& peer_token);
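+
+  // Example (a hypothetical usage sketch, not part of the API surface;
+  // |channel| is assumed to be one end of an OS pipe established out of band,
+  // and |core| is this process's Core instance):
+  //
+  //   ScopedMessagePipeHandle pipe =
+  //       core->ConnectToPeerProcess(std::move(channel), "peer-token");
+  //   ...
+  //   core->ClosePeerConnection("peer-token");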
+
+ // Called in a child process exactly once during early initialization.
+ void InitChild(ConnectionParams connection_params);
+
+ // Creates a message pipe endpoint associated with |token|, which a child
+ // holding the token can later locate and connect to.
+ ScopedMessagePipeHandle CreateParentMessagePipe(
+ const std::string& token, const std::string& child_token);
+
+ // Creates a message pipe endpoint and connects it to a pipe the parent has
+ // associated with |token|.
+ ScopedMessagePipeHandle CreateChildMessagePipe(const std::string& token);
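+
+  // Example (a sketch only; the token values and |core| are illustrative):
+  //
+  //   // Parent process, for a child registered earlier via AddChild():
+  //   ScopedMessagePipeHandle parent_end =
+  //       core->CreateParentMessagePipe("pipe-token", child_token);
+  //
+  //   // Child process, given the same "pipe-token" out of band:
+  //   ScopedMessagePipeHandle child_end =
+  //       core->CreateChildMessagePipe("pipe-token");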
+
+ // Sets the mach port provider for this process.
+ void SetMachPortProvider(base::PortProvider* port_provider);
+
+ MojoHandle AddDispatcher(scoped_refptr<Dispatcher> dispatcher);
+
+ // Adds new dispatchers for non-message-pipe handles received in a message.
+ // |dispatchers| and |handles| should be the same size.
+ bool AddDispatchersFromTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers,
+ MojoHandle* handles);
+
+ // See "mojo/edk/embedder/embedder.h" for more information on these functions.
+ MojoResult CreatePlatformHandleWrapper(ScopedPlatformHandle platform_handle,
+ MojoHandle* wrapper_handle);
+
+ MojoResult PassWrappedPlatformHandle(MojoHandle wrapper_handle,
+ ScopedPlatformHandle* platform_handle);
+
+ MojoResult CreateSharedBufferWrapper(
+ base::SharedMemoryHandle shared_memory_handle,
+ size_t num_bytes,
+ bool read_only,
+ MojoHandle* mojo_wrapper_handle);
+
+ MojoResult PassSharedMemoryHandle(
+ MojoHandle mojo_handle,
+ base::SharedMemoryHandle* shared_memory_handle,
+ size_t* num_bytes,
+ bool* read_only);
+
+ // Requests that the EDK tear itself down. |callback| will be called once
+ // the shutdown process is complete. Note that |callback| is always called
+ // asynchronously on the calling thread if said thread is running a message
+ // loop, and the calling thread must continue running a MessageLoop at least
+ // until the callback is called. If there is no running loop, the |callback|
+ // may be called from any thread. Beware!
+ void RequestShutdown(const base::Closure& callback);
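+
+  // Example (a sketch, assuming the calling thread runs a base::MessageLoop
+  // and OnShutdownComplete is a caller-defined void() function):
+  //
+  //   core->RequestShutdown(base::Bind(&OnShutdownComplete));
+  //   // Keep the MessageLoop running until OnShutdownComplete is invoked.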
+
+ MojoResult SetProperty(MojoPropertyType type, const void* value);
+
+ // ---------------------------------------------------------------------------
+
+  // The following methods are essentially the implementations of the Mojo
+  // Core API functions, with the C interface translated to C++ by
+  // "mojo/edk/embedder/entrypoints.cc". The best way to understand the
+  // contract of these methods is to read the header files that define the
+  // corresponding API functions, referenced below.
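+  //
+  // A minimal sketch of that translation (hypothetical; the real forwarding
+  // code lives in entrypoints.cc, which is not part of this file):
+  //
+  //   MojoResult MojoClose(MojoHandle handle) {
+  //     return mojo::edk::internal::g_core->Close(handle);
+  //   }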
+
+ // These methods correspond to the API functions defined in
+ // "mojo/public/c/system/functions.h":
+ MojoTimeTicks GetTimeTicksNow();
+ MojoResult Close(MojoHandle handle);
+ MojoResult QueryHandleSignalsState(MojoHandle handle,
+ MojoHandleSignalsState* signals_state);
+ MojoResult CreateWatcher(MojoWatcherCallback callback,
+ MojoHandle* watcher_handle);
+ MojoResult Watch(MojoHandle watcher_handle,
+ MojoHandle handle,
+ MojoHandleSignals signals,
+ uintptr_t context);
+ MojoResult CancelWatch(MojoHandle watcher_handle, uintptr_t context);
+ MojoResult ArmWatcher(MojoHandle watcher_handle,
+ uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states);
+ MojoResult AllocMessage(uint32_t num_bytes,
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ MojoAllocMessageFlags flags,
+ MojoMessageHandle* message);
+ MojoResult FreeMessage(MojoMessageHandle message);
+ MojoResult GetMessageBuffer(MojoMessageHandle message, void** buffer);
+ MojoResult GetProperty(MojoPropertyType type, void* value);
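+
+  // Example (a minimal sketch of the property accessors above; |core| is
+  // illustrative only):
+  //
+  //   bool allowed = false;
+  //   core->SetProperty(MOJO_PROPERTY_TYPE_SYNC_CALL_ALLOWED, &allowed);
+  //   core->GetProperty(MOJO_PROPERTY_TYPE_SYNC_CALL_ALLOWED, &allowed);
+  //   // |allowed| reads back false; any other MojoPropertyType returns
+  //   // MOJO_RESULT_INVALID_ARGUMENT.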
+
+ // These methods correspond to the API functions defined in
+ // "mojo/public/c/system/message_pipe.h":
+ MojoResult CreateMessagePipe(
+ const MojoCreateMessagePipeOptions* options,
+ MojoHandle* message_pipe_handle0,
+ MojoHandle* message_pipe_handle1);
+ MojoResult WriteMessage(MojoHandle message_pipe_handle,
+ const void* bytes,
+ uint32_t num_bytes,
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ MojoWriteMessageFlags flags);
+ MojoResult WriteMessageNew(MojoHandle message_pipe_handle,
+ MojoMessageHandle message,
+ MojoWriteMessageFlags flags);
+ MojoResult ReadMessage(MojoHandle message_pipe_handle,
+ void* bytes,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags);
+ MojoResult ReadMessageNew(MojoHandle message_pipe_handle,
+ MojoMessageHandle* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags);
+ MojoResult FuseMessagePipes(MojoHandle handle0, MojoHandle handle1);
+ MojoResult NotifyBadMessage(MojoMessageHandle message,
+ const char* error,
+ size_t error_num_bytes);
+
+ // These methods correspond to the API functions defined in
+ // "mojo/public/c/system/data_pipe.h":
+ MojoResult CreateDataPipe(
+ const MojoCreateDataPipeOptions* options,
+ MojoHandle* data_pipe_producer_handle,
+ MojoHandle* data_pipe_consumer_handle);
+ MojoResult WriteData(MojoHandle data_pipe_producer_handle,
+ const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags);
+ MojoResult BeginWriteData(MojoHandle data_pipe_producer_handle,
+ void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags);
+ MojoResult EndWriteData(MojoHandle data_pipe_producer_handle,
+ uint32_t num_bytes_written);
+ MojoResult ReadData(MojoHandle data_pipe_consumer_handle,
+ void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags);
+ MojoResult BeginReadData(MojoHandle data_pipe_consumer_handle,
+ const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags);
+ MojoResult EndReadData(MojoHandle data_pipe_consumer_handle,
+ uint32_t num_bytes_read);
+
+ // These methods correspond to the API functions defined in
+ // "mojo/public/c/system/buffer.h":
+ MojoResult CreateSharedBuffer(
+ const MojoCreateSharedBufferOptions* options,
+ uint64_t num_bytes,
+ MojoHandle* shared_buffer_handle);
+ MojoResult DuplicateBufferHandle(
+ MojoHandle buffer_handle,
+ const MojoDuplicateBufferHandleOptions* options,
+ MojoHandle* new_buffer_handle);
+ MojoResult MapBuffer(MojoHandle buffer_handle,
+ uint64_t offset,
+ uint64_t num_bytes,
+ void** buffer,
+ MojoMapBufferFlags flags);
+ MojoResult UnmapBuffer(void* buffer);
+
+ // These methods correspond to the API functions defined in
+ // "mojo/public/c/system/platform_handle.h".
+ MojoResult WrapPlatformHandle(const MojoPlatformHandle* platform_handle,
+ MojoHandle* mojo_handle);
+ MojoResult UnwrapPlatformHandle(MojoHandle mojo_handle,
+ MojoPlatformHandle* platform_handle);
+ MojoResult WrapPlatformSharedBufferHandle(
+ const MojoPlatformHandle* platform_handle,
+ size_t size,
+ MojoPlatformSharedBufferHandleFlags flags,
+ MojoHandle* mojo_handle);
+ MojoResult UnwrapPlatformSharedBufferHandle(
+ MojoHandle mojo_handle,
+ MojoPlatformHandle* platform_handle,
+ size_t* size,
+ MojoPlatformSharedBufferHandleFlags* flags);
+
+ void GetActiveHandlesForTest(std::vector<MojoHandle>* handles);
+
+ private:
+ // Used to pass ownership of our NodeController over to the IO thread in the
+ // event that we're torn down before said thread.
+ static void PassNodeControllerToIOThread(
+ std::unique_ptr<NodeController> node_controller);
+
+ // Guards node_controller_.
+ //
+ // TODO(rockot): Consider removing this. It's only needed because we
+ // initialize node_controller_ lazily and that may happen on any thread.
+ // Otherwise it's effectively const and shouldn't need to be guarded.
+ //
+ // We can get rid of lazy initialization if we defer Mojo initialization far
+ // enough that zygotes don't do it. The zygote can't create a NodeController.
+ base::Lock node_controller_lock_;
+
+ // This is lazily initialized on first access. Always use GetNodeController()
+ // to access it.
+ std::unique_ptr<NodeController> node_controller_;
+
+ // The default callback to invoke, if any, when a process error is reported
+ // but cannot be associated with a specific process.
+ ProcessErrorCallback default_process_error_callback_;
+
+  base::Lock handles_lock_;  // Protects |handles_|.
+ HandleTable handles_;
+
+ base::Lock mapping_table_lock_; // Protects |mapping_table_|.
+ MappingTable mapping_table_;
+
+  base::Lock property_lock_;  // Protects the properties below.
+ // Properties that can be read using the MojoGetProperty() API.
+ bool property_sync_call_allowed_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(Core);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_CORE_H_
diff --git a/mojo/edk/system/core_test_base.cc b/mojo/edk/system/core_test_base.cc
new file mode 100644
index 0000000000..7751612e9d
--- /dev/null
+++ b/mojo/edk/system/core_test_base.cc
@@ -0,0 +1,272 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/core_test_base.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/system/configuration.h"
+#include "mojo/edk/system/core.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/message_for_transit.h"
+
+namespace mojo {
+namespace edk {
+namespace test {
+
+namespace {
+
+// MockDispatcher --------------------------------------------------------------
+
+class MockDispatcher : public Dispatcher {
+ public:
+ static scoped_refptr<MockDispatcher> Create(
+ CoreTestBase::MockHandleInfo* info) {
+ return make_scoped_refptr(new MockDispatcher(info));
+ }
+
+ // Dispatcher:
+ Type GetType() const override { return Type::UNKNOWN; }
+
+ MojoResult Close() override {
+ info_->IncrementCloseCallCount();
+ return MOJO_RESULT_OK;
+ }
+
+ MojoResult WriteMessage(
+ std::unique_ptr<MessageForTransit> message,
+ MojoWriteMessageFlags /*flags*/) override {
+ info_->IncrementWriteMessageCallCount();
+
+ if (message->num_bytes() > GetConfiguration().max_message_num_bytes)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ if (message->num_handles())
+ return MOJO_RESULT_UNIMPLEMENTED;
+
+ return MOJO_RESULT_OK;
+ }
+
+ MojoResult ReadMessage(std::unique_ptr<MessageForTransit>* message,
+ uint32_t* num_bytes,
+ MojoHandle* handle,
+ uint32_t* num_handles,
+ MojoReadMessageFlags /*flags*/,
+ bool ignore_num_bytes) override {
+ info_->IncrementReadMessageCallCount();
+
+ if (num_handles)
+ *num_handles = 1;
+
+ return MOJO_RESULT_OK;
+ }
+
+ MojoResult WriteData(const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags) override {
+ info_->IncrementWriteDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ MojoResult BeginWriteData(void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags) override {
+ info_->IncrementBeginWriteDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ MojoResult EndWriteData(uint32_t num_bytes_written) override {
+ info_->IncrementEndWriteDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ MojoResult ReadData(void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags) override {
+ info_->IncrementReadDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ MojoResult BeginReadData(const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags) override {
+ info_->IncrementBeginReadDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ MojoResult EndReadData(uint32_t num_bytes_read) override {
+ info_->IncrementEndReadDataCallCount();
+ return MOJO_RESULT_UNIMPLEMENTED;
+ }
+
+ private:
+ explicit MockDispatcher(CoreTestBase::MockHandleInfo* info) : info_(info) {
+ CHECK(info_);
+ info_->IncrementCtorCallCount();
+ }
+
+ ~MockDispatcher() override { info_->IncrementDtorCallCount(); }
+
+ CoreTestBase::MockHandleInfo* const info_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockDispatcher);
+};
+
+} // namespace
+
+// CoreTestBase ----------------------------------------------------------------
+
+CoreTestBase::CoreTestBase() {
+}
+
+CoreTestBase::~CoreTestBase() {
+}
+
+MojoHandle CoreTestBase::CreateMockHandle(CoreTestBase::MockHandleInfo* info) {
+ scoped_refptr<MockDispatcher> dispatcher = MockDispatcher::Create(info);
+ return core()->AddDispatcher(dispatcher);
+}
+
+Core* CoreTestBase::core() {
+ return mojo::edk::internal::g_core;
+}
+
+// CoreTestBase_MockHandleInfo -------------------------------------------------
+
+CoreTestBase_MockHandleInfo::CoreTestBase_MockHandleInfo()
+ : ctor_call_count_(0),
+ dtor_call_count_(0),
+ close_call_count_(0),
+ write_message_call_count_(0),
+ read_message_call_count_(0),
+ write_data_call_count_(0),
+ begin_write_data_call_count_(0),
+ end_write_data_call_count_(0),
+ read_data_call_count_(0),
+ begin_read_data_call_count_(0),
+ end_read_data_call_count_(0) {}
+
+CoreTestBase_MockHandleInfo::~CoreTestBase_MockHandleInfo() {
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetCtorCallCount() const {
+ base::AutoLock locker(lock_);
+ return ctor_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetDtorCallCount() const {
+ base::AutoLock locker(lock_);
+ return dtor_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetCloseCallCount() const {
+ base::AutoLock locker(lock_);
+ return close_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetWriteMessageCallCount() const {
+ base::AutoLock locker(lock_);
+ return write_message_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetReadMessageCallCount() const {
+ base::AutoLock locker(lock_);
+ return read_message_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetWriteDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return write_data_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetBeginWriteDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return begin_write_data_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetEndWriteDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return end_write_data_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetReadDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return read_data_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetBeginReadDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return begin_read_data_call_count_;
+}
+
+unsigned CoreTestBase_MockHandleInfo::GetEndReadDataCallCount() const {
+ base::AutoLock locker(lock_);
+ return end_read_data_call_count_;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementCtorCallCount() {
+ base::AutoLock locker(lock_);
+ ctor_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementDtorCallCount() {
+ base::AutoLock locker(lock_);
+ dtor_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementCloseCallCount() {
+ base::AutoLock locker(lock_);
+ close_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementWriteMessageCallCount() {
+ base::AutoLock locker(lock_);
+ write_message_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementReadMessageCallCount() {
+ base::AutoLock locker(lock_);
+ read_message_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementWriteDataCallCount() {
+ base::AutoLock locker(lock_);
+ write_data_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementBeginWriteDataCallCount() {
+ base::AutoLock locker(lock_);
+ begin_write_data_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementEndWriteDataCallCount() {
+ base::AutoLock locker(lock_);
+ end_write_data_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementReadDataCallCount() {
+ base::AutoLock locker(lock_);
+ read_data_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementBeginReadDataCallCount() {
+ base::AutoLock locker(lock_);
+ begin_read_data_call_count_++;
+}
+
+void CoreTestBase_MockHandleInfo::IncrementEndReadDataCallCount() {
+ base::AutoLock locker(lock_);
+ end_read_data_call_count_++;
+}
+
+} // namespace test
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/core_test_base.h b/mojo/edk/system/core_test_base.h
new file mode 100644
index 0000000000..3d156e32e2
--- /dev/null
+++ b/mojo/edk/system/core_test_base.h
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_CORE_TEST_BASE_H_
+#define MOJO_EDK_SYSTEM_CORE_TEST_BASE_H_
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/public/c/system/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+
+class Core;
+
+namespace test {
+
+class CoreTestBase_MockHandleInfo;
+
+class CoreTestBase : public testing::Test {
+ public:
+ using MockHandleInfo = CoreTestBase_MockHandleInfo;
+
+ CoreTestBase();
+ ~CoreTestBase() override;
+
+ protected:
+ // |info| must remain alive until the returned handle is closed.
+ MojoHandle CreateMockHandle(MockHandleInfo* info);
+
+ Core* core();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CoreTestBase);
+};
+
+class CoreTestBase_MockHandleInfo {
+ public:
+ CoreTestBase_MockHandleInfo();
+ ~CoreTestBase_MockHandleInfo();
+
+ unsigned GetCtorCallCount() const;
+ unsigned GetDtorCallCount() const;
+ unsigned GetCloseCallCount() const;
+ unsigned GetWriteMessageCallCount() const;
+ unsigned GetReadMessageCallCount() const;
+ unsigned GetWriteDataCallCount() const;
+ unsigned GetBeginWriteDataCallCount() const;
+ unsigned GetEndWriteDataCallCount() const;
+ unsigned GetReadDataCallCount() const;
+ unsigned GetBeginReadDataCallCount() const;
+ unsigned GetEndReadDataCallCount() const;
+
+ // For use by |MockDispatcher|:
+ void IncrementCtorCallCount();
+ void IncrementDtorCallCount();
+ void IncrementCloseCallCount();
+ void IncrementWriteMessageCallCount();
+ void IncrementReadMessageCallCount();
+ void IncrementWriteDataCallCount();
+ void IncrementBeginWriteDataCallCount();
+ void IncrementEndWriteDataCallCount();
+ void IncrementReadDataCallCount();
+ void IncrementBeginReadDataCallCount();
+ void IncrementEndReadDataCallCount();
+
+ private:
+ mutable base::Lock lock_; // Protects the following members.
+ unsigned ctor_call_count_;
+ unsigned dtor_call_count_;
+ unsigned close_call_count_;
+ unsigned write_message_call_count_;
+ unsigned read_message_call_count_;
+ unsigned write_data_call_count_;
+ unsigned begin_write_data_call_count_;
+ unsigned end_write_data_call_count_;
+ unsigned read_data_call_count_;
+ unsigned begin_read_data_call_count_;
+ unsigned end_read_data_call_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(CoreTestBase_MockHandleInfo);
+};
+
+} // namespace test
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_CORE_TEST_BASE_H_
diff --git a/mojo/edk/system/core_unittest.cc b/mojo/edk/system/core_unittest.cc
new file mode 100644
index 0000000000..0d60b48a8b
--- /dev/null
+++ b/mojo/edk/system/core_unittest.cc
@@ -0,0 +1,971 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/core.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/bind.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/system/core_test_base.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/public/cpp/system/wait.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+namespace mojo {
+namespace edk {
+namespace {
+
+const MojoHandleSignalsState kEmptyMojoHandleSignalsState = {0u, 0u};
+const MojoHandleSignalsState kFullMojoHandleSignalsState = {~0u, ~0u};
+const MojoHandleSignals kAllSignals = MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+
+using CoreTest = test::CoreTestBase;
+
+TEST_F(CoreTest, GetTimeTicksNow) {
+ const MojoTimeTicks start = core()->GetTimeTicksNow();
+ ASSERT_NE(static_cast<MojoTimeTicks>(0), start)
+ << "GetTimeTicksNow should return nonzero value";
+ test::Sleep(test::DeadlineFromMilliseconds(15));
+ const MojoTimeTicks finish = core()->GetTimeTicksNow();
+ // Allow for some fuzz in sleep.
+ ASSERT_GE((finish - start), static_cast<MojoTimeTicks>(8000))
+ << "Sleeping should result in increasing time ticks";
+}
+
+TEST_F(CoreTest, Basic) {
+ MockHandleInfo info;
+
+ ASSERT_EQ(0u, info.GetCtorCallCount());
+ MojoHandle h = CreateMockHandle(&info);
+ ASSERT_EQ(1u, info.GetCtorCallCount());
+ ASSERT_NE(h, MOJO_HANDLE_INVALID);
+
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h, nullptr, 0, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteMessageCallCount());
+
+ ASSERT_EQ(0u, info.GetReadMessageCallCount());
+ uint32_t num_bytes = 0;
+ ASSERT_EQ(
+ MOJO_RESULT_OK,
+ core()->ReadMessage(h, nullptr, &num_bytes, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetReadMessageCallCount());
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(h, nullptr, nullptr, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(2u, info.GetReadMessageCallCount());
+
+ ASSERT_EQ(0u, info.GetWriteDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ core()->WriteData(h, nullptr, nullptr, MOJO_WRITE_DATA_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteDataCallCount());
+
+ ASSERT_EQ(0u, info.GetBeginWriteDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ core()->BeginWriteData(h, nullptr, nullptr,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetBeginWriteDataCallCount());
+
+ ASSERT_EQ(0u, info.GetEndWriteDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED, core()->EndWriteData(h, 0));
+ ASSERT_EQ(1u, info.GetEndWriteDataCallCount());
+
+ ASSERT_EQ(0u, info.GetReadDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ core()->ReadData(h, nullptr, nullptr, MOJO_READ_DATA_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetReadDataCallCount());
+
+ ASSERT_EQ(0u, info.GetBeginReadDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ core()->BeginReadData(h, nullptr, nullptr,
+ MOJO_READ_DATA_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetBeginReadDataCallCount());
+
+ ASSERT_EQ(0u, info.GetEndReadDataCallCount());
+ ASSERT_EQ(MOJO_RESULT_UNIMPLEMENTED, core()->EndReadData(h, 0));
+ ASSERT_EQ(1u, info.GetEndReadDataCallCount());
+
+ ASSERT_EQ(0u, info.GetDtorCallCount());
+ ASSERT_EQ(0u, info.GetCloseCallCount());
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h));
+ ASSERT_EQ(1u, info.GetCloseCallCount());
+ ASSERT_EQ(1u, info.GetDtorCallCount());
+}
+
+TEST_F(CoreTest, InvalidArguments) {
+ // |Close()|:
+ {
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(MOJO_HANDLE_INVALID));
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(10));
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(1000000000));
+
+ // Test a double-close.
+ MockHandleInfo info;
+ MojoHandle h = CreateMockHandle(&info);
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h));
+ ASSERT_EQ(1u, info.GetCloseCallCount());
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(h));
+ ASSERT_EQ(1u, info.GetCloseCallCount());
+ }
+
+ // |CreateMessagePipe()|: Nothing to check (apart from things that cause
+ // death).
+
+ // |WriteMessage()|:
+ // Only check arguments checked by |Core|, namely |handle|, |handles|, and
+ // |num_handles|.
+ {
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(MOJO_HANDLE_INVALID, nullptr, 0,
+ nullptr, 0, MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ MockHandleInfo info;
+ MojoHandle h = CreateMockHandle(&info);
+ MojoHandle handles[2] = {MOJO_HANDLE_INVALID, MOJO_HANDLE_INVALID};
+
+ // Huge handle count (implausibly big on some systems -- more than can be
+ // stored in a 32-bit address space).
+ // Note: This may return either |MOJO_RESULT_INVALID_ARGUMENT| or
+ // |MOJO_RESULT_RESOURCE_EXHAUSTED|, depending on whether it's plausible or
+ // not.
+ ASSERT_NE(
+ MOJO_RESULT_OK,
+ core()->WriteMessage(h, nullptr, 0, handles,
+ std::numeric_limits<uint32_t>::max(),
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Null |bytes| with non-zero message size.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 1, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Null |handles| with non-zero handle count.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, nullptr, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Huge handle count (plausibly big).
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ core()->WriteMessage(
+ h, nullptr, 0, handles,
+ std::numeric_limits<uint32_t>::max() / sizeof(handles[0]),
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Invalid handle in |handles|.
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, handles, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Two invalid handles in |handles|.
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, handles, 2,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ // Can't send a handle over itself. Note that this will also cause |h| to be
+ // closed.
+ handles[0] = h;
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, handles, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(0u, info.GetWriteMessageCallCount());
+
+ h = CreateMockHandle(&info);
+
+ MockHandleInfo info2;
+
+ // This is "okay", but |MockDispatcher| doesn't implement it.
+ handles[0] = CreateMockHandle(&info2);
+ ASSERT_EQ(
+ MOJO_RESULT_UNIMPLEMENTED,
+ core()->WriteMessage(h, nullptr, 0, handles, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteMessageCallCount());
+
+ // One of the |handles| is still invalid.
+ handles[0] = CreateMockHandle(&info2);
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, handles, 2,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteMessageCallCount());
+
+ // One of the |handles| is the same as |h|. Both handles are closed.
+ handles[0] = CreateMockHandle(&info2);
+ handles[1] = h;
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h, nullptr, 0, handles, 2,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteMessageCallCount());
+
+ h = CreateMockHandle(&info);
+
+ // Can't send a handle twice in the same message.
+ handles[0] = CreateMockHandle(&info2);
+ handles[1] = handles[0];
+ ASSERT_EQ(
+ MOJO_RESULT_BUSY,
+ core()->WriteMessage(h, nullptr, 0, handles, 2,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, info.GetWriteMessageCallCount());
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h));
+ }
+
+ // |ReadMessage()|:
+ // Only check arguments checked by |Core|, namely |handle|, |handles|, and
+ // |num_handles|.
+ {
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->ReadMessage(MOJO_HANDLE_INVALID, nullptr, nullptr, nullptr,
+ nullptr, MOJO_READ_MESSAGE_FLAG_NONE));
+
+ MockHandleInfo info;
+ MojoHandle h = CreateMockHandle(&info);
+
+ // Okay.
+ uint32_t handle_count = 0;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h, nullptr, nullptr, nullptr, &handle_count,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+    // A zero |*num_handles| with null |handles| passes |Core|'s argument
+    // check, so the call reaches the dispatcher.
+ ASSERT_EQ(1u, info.GetReadMessageCallCount());
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h));
+ }
+}
+
+// These test invalid arguments that should cause death if we're being paranoid
+// about checking arguments (which we would want to do if, e.g., we were in a
+// true "kernel" situation, but we might not want to do otherwise for
+// performance reasons). Probably blatant errors like passing in null pointers
+// (for required pointer arguments) will still cause death, but perhaps not
+// predictably.
+TEST_F(CoreTest, InvalidArgumentsDeath) {
+#if defined(OFFICIAL_BUILD)
+ const char kMemoryCheckFailedRegex[] = "";
+#else
+ const char kMemoryCheckFailedRegex[] = "Check failed";
+#endif
+
+ // |CreateMessagePipe()|:
+ {
+ MojoHandle h;
+ ASSERT_DEATH_IF_SUPPORTED(
+ core()->CreateMessagePipe(nullptr, nullptr, nullptr),
+ kMemoryCheckFailedRegex);
+ ASSERT_DEATH_IF_SUPPORTED(
+ core()->CreateMessagePipe(nullptr, &h, nullptr),
+ kMemoryCheckFailedRegex);
+ ASSERT_DEATH_IF_SUPPORTED(
+ core()->CreateMessagePipe(nullptr, nullptr, &h),
+ kMemoryCheckFailedRegex);
+ }
+
+ // |ReadMessage()|:
+ // Only check arguments checked by |Core|, namely |handle|, |handles|, and
+ // |num_handles|.
+ {
+ MockHandleInfo info;
+ MojoHandle h = CreateMockHandle(&info);
+
+ uint32_t handle_count = 1;
+ ASSERT_DEATH_IF_SUPPORTED(
+ core()->ReadMessage(h, nullptr, nullptr, nullptr, &handle_count,
+ MOJO_READ_MESSAGE_FLAG_NONE),
+ kMemoryCheckFailedRegex);
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h));
+ }
+}
+
+TEST_F(CoreTest, MessagePipe) {
+ MojoHandle h[2];
+ MojoHandleSignalsState hss[2];
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->CreateMessagePipe(nullptr, &h[0], &h[1]));
+ // Should get two distinct, valid handles.
+ ASSERT_NE(h[0], MOJO_HANDLE_INVALID);
+ ASSERT_NE(h[1], MOJO_HANDLE_INVALID);
+ ASSERT_NE(h[0], h[1]);
+
+ // Neither should be readable.
+ hss[0] = kEmptyMojoHandleSignalsState;
+ hss[1] = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(h[0], &hss[0]));
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(h[1], &hss[1]));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss[0].satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss[0].satisfiable_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss[1].satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss[1].satisfiable_signals);
+
+ // Try to read anyway.
+ char buffer[1] = {'a'};
+ uint32_t buffer_size = 1;
+ ASSERT_EQ(
+ MOJO_RESULT_SHOULD_WAIT,
+ core()->ReadMessage(h[0], buffer, &buffer_size, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ // Check that it left its inputs alone.
+ ASSERT_EQ('a', buffer[0]);
+ ASSERT_EQ(1u, buffer_size);
+
+ // Write to |h[1]|.
+ buffer[0] = 'b';
+ ASSERT_EQ(
+ MOJO_RESULT_OK,
+ core()->WriteMessage(h[1], buffer, 1, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for |h[0]| to become readable.
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h[0]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss[0]));
+
+ // Read from |h[0]|.
+ // First, get only the size.
+ buffer_size = 0;
+ ASSERT_EQ(
+ MOJO_RESULT_RESOURCE_EXHAUSTED,
+ core()->ReadMessage(h[0], nullptr, &buffer_size, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, buffer_size);
+ // Then actually read it.
+ buffer[0] = 'c';
+ buffer_size = 1;
+ ASSERT_EQ(
+ MOJO_RESULT_OK,
+ core()->ReadMessage(h[0], buffer, &buffer_size, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ('b', buffer[0]);
+ ASSERT_EQ(1u, buffer_size);
+
+ // |h[0]| should no longer be readable.
+ hss[0] = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(h[0], &hss[0]));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss[0].satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss[0].satisfiable_signals);
+
+ // Write to |h[0]|.
+ buffer[0] = 'd';
+ ASSERT_EQ(
+ MOJO_RESULT_OK,
+ core()->WriteMessage(h[0], buffer, 1, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Close |h[0]|.
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h[0]));
+
+ // Wait for |h[1]| to learn about the other end's closure.
+ EXPECT_EQ(
+ MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(h[1]), MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss[1]));
+
+  // Check that |h[1]| is still readable (for the moment), but is no longer
+  // writable and never will be again.
+  EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+            hss[1].satisfied_signals);
+  EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+            hss[1].satisfiable_signals);
+
+ // Discard a message from |h[1]|.
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ core()->ReadMessage(h[1], nullptr, nullptr, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_MAY_DISCARD));
+
+ // |h[1]| is no longer readable (and will never be).
+ hss[1] = kFullMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(h[1], &hss[1]));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss[1].satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss[1].satisfiable_signals);
+
+ // Try writing to |h[1]|.
+ buffer[0] = 'e';
+ ASSERT_EQ(
+ MOJO_RESULT_FAILED_PRECONDITION,
+ core()->WriteMessage(h[1], buffer, 1, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h[1]));
+}
+
+// Tests passing a message pipe handle.
+TEST_F(CoreTest, MessagePipeBasicLocalHandlePassing1) {
+ const char kHello[] = "hello";
+ const uint32_t kHelloSize = static_cast<uint32_t>(sizeof(kHello));
+ const char kWorld[] = "world!!!";
+ const uint32_t kWorldSize = static_cast<uint32_t>(sizeof(kWorld));
+ char buffer[100];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t num_bytes;
+ MojoHandle handles[10];
+ uint32_t num_handles;
+ MojoHandleSignalsState hss;
+ MojoHandle h_received;
+
+ MojoHandle h_passing[2];
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateMessagePipe(nullptr, &h_passing[0], &h_passing[1]));
+
+ // Make sure that |h_passing[]| work properly.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+ ASSERT_EQ(0u, num_handles);
+
+ // Make sure that you can't pass either of the message pipe's handles over
+ // itself.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize,
+ &h_passing[0], 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateMessagePipe(nullptr, &h_passing[0], &h_passing[1]));
+
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize,
+ &h_passing[1], 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateMessagePipe(nullptr, &h_passing[0], &h_passing[1]));
+
+ MojoHandle h_passed[2];
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateMessagePipe(nullptr, &h_passed[0], &h_passed[1]));
+
+ // Make sure that |h_passed[]| work properly.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passed[0], kHello, kHelloSize, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ ASSERT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passed[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passed[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+ ASSERT_EQ(0u, num_handles);
+
+ // Send |h_passed[1]| from |h_passing[0]| to |h_passing[1]|.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kWorld, kWorldSize,
+ &h_passed[1], 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ ASSERT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kWorldSize, num_bytes);
+ ASSERT_STREQ(kWorld, buffer);
+ ASSERT_EQ(1u, num_handles);
+ h_received = handles[0];
+ ASSERT_NE(h_received, MOJO_HANDLE_INVALID);
+ ASSERT_NE(h_received, h_passing[0]);
+ ASSERT_NE(h_received, h_passing[1]);
+ ASSERT_NE(h_received, h_passed[0]);
+
+ // Note: We rely on the Mojo system not re-using handle values very often.
+ ASSERT_NE(h_received, h_passed[1]);
+
+ // |h_passed[1]| should no longer be valid; check that trying to close it
+ // fails. See above note.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(h_passed[1]));
+
+ // Write to |h_passed[0]|. Should receive on |h_received|.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passed[0], kHello, kHelloSize, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ ASSERT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_received),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_received, buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+ ASSERT_EQ(0u, num_handles);
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_passing[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_passing[1]));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_passed[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_received));
+}
+
+TEST_F(CoreTest, DataPipe) {
+ MojoHandle ph, ch; // p is for producer and c is for consumer.
+ MojoHandleSignalsState hss;
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateDataPipe(nullptr, &ph, &ch));
+ // Should get two distinct, valid handles.
+ ASSERT_NE(ph, MOJO_HANDLE_INVALID);
+ ASSERT_NE(ch, MOJO_HANDLE_INVALID);
+ ASSERT_NE(ph, ch);
+
+ // Producer should be never-readable, but already writable.
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(ph, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Consumer should be never-writable, and not yet readable.
+ hss = kFullMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(ch, &hss));
+ EXPECT_EQ(0u, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Write.
+ signed char elements[2] = {'A', 'B'};
+ uint32_t num_bytes = 2u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteData(ph, elements, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ ASSERT_EQ(2u, num_bytes);
+
+ // Wait for the data to arrive at the consumer.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_READABLE, &hss));
+
+ // Consumer should now be readable.
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(ch, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Peek one character.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = 1u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadData(
+ ch, elements, &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE | MOJO_READ_DATA_FLAG_PEEK));
+ ASSERT_EQ('A', elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Read one character.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = 1u;
+ ASSERT_EQ(MOJO_RESULT_OK, core()->ReadData(ch, elements, &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE));
+ ASSERT_EQ('A', elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Two-phase write.
+ void* write_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->BeginWriteData(ph, &write_ptr, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ // We count on the default options providing a decent buffer size.
+ ASSERT_GE(num_bytes, 3u);
+
+ // Trying to do a normal write during a two-phase write should fail.
+ elements[0] = 'X';
+ num_bytes = 1u;
+ ASSERT_EQ(MOJO_RESULT_BUSY,
+ core()->WriteData(ph, elements, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+
+ // Actually write the data, and complete it now.
+ static_cast<char*>(write_ptr)[0] = 'C';
+ static_cast<char*>(write_ptr)[1] = 'D';
+ static_cast<char*>(write_ptr)[2] = 'E';
+ ASSERT_EQ(MOJO_RESULT_OK, core()->EndWriteData(ph, 3u));
+
+ // Wait for the data to arrive at the consumer.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_READABLE, &hss));
+
+ // Query how much data we have.
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadData(ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_QUERY));
+ ASSERT_GE(num_bytes, 1u);
+
+ // Try to query with peek. Should fail.
+ num_bytes = 0;
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->ReadData(ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_QUERY | MOJO_READ_DATA_FLAG_PEEK));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Try to discard ten characters, in all-or-none mode. Should fail.
+ num_bytes = 10;
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE,
+ core()->ReadData(
+ ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_DISCARD | MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+
+ // Try to discard two characters, in peek mode. Should fail.
+ num_bytes = 2;
+ ASSERT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ core()->ReadData(ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_DISCARD | MOJO_READ_DATA_FLAG_PEEK));
+
+ // Discard a character.
+ num_bytes = 1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadData(
+ ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_DISCARD | MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+
+ // Wait until the three remaining bytes are readable at the consumer.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_READABLE, &hss));
+
+ // Try a two-phase read of the remaining three bytes with peek. Should fail.
+ const void* read_ptr = nullptr;
+ num_bytes = 3;
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ core()->BeginReadData(ch, &read_ptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_PEEK));
+
+ // Read the remaining three characters, in two-phase mode (all-or-none).
+ num_bytes = 3;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->BeginReadData(ch, &read_ptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+ // Note: Count on still being able to do the contiguous read here.
+ ASSERT_EQ(3u, num_bytes);
+
+ // Discarding right now should fail.
+ num_bytes = 1;
+ ASSERT_EQ(MOJO_RESULT_BUSY,
+ core()->ReadData(ch, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_DISCARD));
+
+ // Actually check our data and end the two-phase read.
+ ASSERT_EQ('C', static_cast<const char*>(read_ptr)[0]);
+ ASSERT_EQ('D', static_cast<const char*>(read_ptr)[1]);
+ ASSERT_EQ('E', static_cast<const char*>(read_ptr)[2]);
+ ASSERT_EQ(MOJO_RESULT_OK, core()->EndReadData(ch, 3u));
+
+ // Consumer should now be no longer readable.
+ hss = kFullMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(ch, &hss));
+ EXPECT_EQ(0u, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // TODO(vtl): More.
+
+ // Close the producer.
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(ph));
+
+ // Wait for this to get to the consumer.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+
+ // The consumer should now be never-readable.
+ hss = kFullMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, core()->QueryHandleSignalsState(ch, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(ch));
+}
+
+// Tests passing data pipe producer and consumer handles.
+TEST_F(CoreTest, MessagePipeBasicLocalHandlePassing2) {
+ const char kHello[] = "hello";
+ const uint32_t kHelloSize = static_cast<uint32_t>(sizeof(kHello));
+ const char kWorld[] = "world!!!";
+ const uint32_t kWorldSize = static_cast<uint32_t>(sizeof(kWorld));
+ char buffer[100];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t num_bytes;
+ MojoHandle handles[10];
+ uint32_t num_handles;
+ MojoHandleSignalsState hss;
+
+ MojoHandle h_passing[2];
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateMessagePipe(nullptr, &h_passing[0], &h_passing[1]));
+
+ MojoHandle ph, ch;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->CreateDataPipe(nullptr, &ph, &ch));
+
+ // Send |ch| from |h_passing[0]| to |h_passing[1]|.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize, &ch, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ ASSERT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+ ASSERT_EQ(1u, num_handles);
+ MojoHandle ch_received = handles[0];
+ ASSERT_NE(ch_received, MOJO_HANDLE_INVALID);
+ ASSERT_NE(ch_received, h_passing[0]);
+ ASSERT_NE(ch_received, h_passing[1]);
+ ASSERT_NE(ch_received, ph);
+
+ // Note: We rely on the Mojo system not re-using handle values very often.
+ ASSERT_NE(ch_received, ch);
+
+ // |ch| should no longer be valid; check that trying to close it fails. See
+ // above note.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(ch));
+
+ // Write to |ph|. Should receive on |ch_received|.
+ num_bytes = kWorldSize;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteData(ph, kWorld, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_ALL_OR_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(ch_received),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadData(ch_received, buffer, &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE));
+ ASSERT_EQ(kWorldSize, num_bytes);
+ ASSERT_STREQ(kWorld, buffer);
+
+ // Now pass |ph| in the same direction.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kWorld, kWorldSize, &ph, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ ASSERT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kWorldSize, num_bytes);
+ ASSERT_STREQ(kWorld, buffer);
+ ASSERT_EQ(1u, num_handles);
+ MojoHandle ph_received = handles[0];
+ ASSERT_NE(ph_received, MOJO_HANDLE_INVALID);
+ ASSERT_NE(ph_received, h_passing[0]);
+ ASSERT_NE(ph_received, h_passing[1]);
+ ASSERT_NE(ph_received, ch_received);
+
+ // Again, rely on the Mojo system not re-using handle values very often.
+ ASSERT_NE(ph_received, ph);
+
+ // |ph| should no longer be valid; check that trying to close it fails. See
+ // above note.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, core()->Close(ph));
+
+ // Write to |ph_received|. Should receive on |ch_received|.
+ num_bytes = kHelloSize;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteData(ph_received, kHello, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_ALL_OR_NONE));
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(ch_received),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadData(ch_received, buffer, &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+
+ ph = ph_received;
+ ph_received = MOJO_HANDLE_INVALID;
+ ch = ch_received;
+ ch_received = MOJO_HANDLE_INVALID;
+
+ // Make sure that |ph| can't be sent if it's in a two-phase write.
+ void* write_ptr = nullptr;
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->BeginWriteData(ph, &write_ptr, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ ASSERT_GE(num_bytes, 1u);
+ ASSERT_EQ(MOJO_RESULT_BUSY,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize, &ph, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // But |ch| can, even if |ph| is in a two-phase write.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize, &ch, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ch = MOJO_HANDLE_INVALID;
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE));
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kHelloSize, num_bytes);
+ ASSERT_STREQ(kHello, buffer);
+ ASSERT_EQ(1u, num_handles);
+ ch = handles[0];
+ ASSERT_NE(ch, MOJO_HANDLE_INVALID);
+
+ // Complete the two-phase write.
+ static_cast<char*>(write_ptr)[0] = 'x';
+ ASSERT_EQ(MOJO_RESULT_OK, core()->EndWriteData(ph, 1));
+
+ // Wait for |ch| to be readable.
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK,
+ mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Make sure that |ch| can't be sent if it's in a two-phase read.
+ const void* read_ptr = nullptr;
+ num_bytes = 1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->BeginReadData(ch, &read_ptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+ ASSERT_EQ(MOJO_RESULT_BUSY,
+ core()->WriteMessage(h_passing[0], kHello, kHelloSize, &ch, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // But |ph| can, even if |ch| is in a two-phase read.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->WriteMessage(h_passing[0], kWorld, kWorldSize, &ph, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ph = MOJO_HANDLE_INVALID;
+ hss = kEmptyMojoHandleSignalsState;
+ EXPECT_EQ(MOJO_RESULT_OK, mojo::Wait(mojo::Handle(h_passing[1]),
+ MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ num_bytes = kBufferSize;
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ core()->ReadMessage(
+ h_passing[1], buffer, &num_bytes, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(kWorldSize, num_bytes);
+ ASSERT_STREQ(kWorld, buffer);
+ ASSERT_EQ(1u, num_handles);
+ ph = handles[0];
+ ASSERT_NE(ph, MOJO_HANDLE_INVALID);
+
+ // Complete the two-phase read.
+ ASSERT_EQ('x', static_cast<const char*>(read_ptr)[0]);
+ ASSERT_EQ(MOJO_RESULT_OK, core()->EndReadData(ch, 1));
+
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_passing[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(h_passing[1]));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(ph));
+ ASSERT_EQ(MOJO_RESULT_OK, core()->Close(ch));
+}
+
+struct TestAsyncWaiter {
+ TestAsyncWaiter() : result(MOJO_RESULT_UNKNOWN) {}
+
+ void Awake(MojoResult r) { result = r; }
+
+ MojoResult result;
+};
+
+// TODO(vtl): Test |DuplicateBufferHandle()| and |MapBuffer()|.
+
+} // namespace
+} // namespace edk
+} // namespace mojo
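The DataPipe test above walks through the two-phase write and read calls one assertion at a time. For readers skimming the diff, here is a minimal sketch, not part of the patch, of the same round trip condensed into a single helper. It assumes a mojo::edk::Core* such as the one returned by the CoreTest fixture's core(), plus the same headers the test itself uses; the helper name TwoPhaseRoundTrip is made up for illustration.

// Sketch only: the producer reserves buffer space and commits it, then the
// consumer maps the same bytes and releases them.
void TwoPhaseRoundTrip(mojo::edk::Core* core) {
  MojoHandle ph = MOJO_HANDLE_INVALID;
  MojoHandle ch = MOJO_HANDLE_INVALID;
  CHECK_EQ(MOJO_RESULT_OK, core->CreateDataPipe(nullptr, &ph, &ch));

  // Two-phase write: BeginWriteData() exposes a chunk of the ring buffer,
  // EndWriteData() publishes however many bytes were actually filled in.
  void* write_ptr = nullptr;
  uint32_t num_bytes = 0;
  CHECK_EQ(MOJO_RESULT_OK, core->BeginWriteData(ph, &write_ptr, &num_bytes,
                                                MOJO_WRITE_DATA_FLAG_NONE));
  CHECK_GE(num_bytes, 2u);  // The default capacity is far larger than this.
  static_cast<char*>(write_ptr)[0] = 'h';
  static_cast<char*>(write_ptr)[1] = 'i';
  CHECK_EQ(MOJO_RESULT_OK, core->EndWriteData(ph, 2u));

  // Two-phase read: wait for the bytes to arrive, map them, then release.
  CHECK_EQ(MOJO_RESULT_OK,
           mojo::Wait(mojo::Handle(ch), MOJO_HANDLE_SIGNAL_READABLE));
  const void* read_ptr = nullptr;
  num_bytes = 0;
  CHECK_EQ(MOJO_RESULT_OK, core->BeginReadData(ch, &read_ptr, &num_bytes,
                                               MOJO_READ_DATA_FLAG_NONE));
  CHECK_EQ(2u, num_bytes);
  CHECK_EQ('h', static_cast<const char*>(read_ptr)[0]);
  CHECK_EQ('i', static_cast<const char*>(read_ptr)[1]);
  CHECK_EQ(MOJO_RESULT_OK, core->EndReadData(ch, 2u));

  CHECK_EQ(MOJO_RESULT_OK, core->Close(ph));
  CHECK_EQ(MOJO_RESULT_OK, core->Close(ch));
}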
diff --git a/mojo/edk/system/data_pipe_consumer_dispatcher.cc b/mojo/edk/system/data_pipe_consumer_dispatcher.cc
new file mode 100644
index 0000000000..f3387324fc
--- /dev/null
+++ b/mojo/edk/system/data_pipe_consumer_dispatcher.cc
@@ -0,0 +1,562 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/data_pipe_consumer_dispatcher.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/core.h"
+#include "mojo/edk/system/data_pipe_control_message.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/ports_message.h"
+#include "mojo/edk/system/request_context.h"
+#include "mojo/public/c/system/data_pipe.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+const uint8_t kFlagPeerClosed = 0x01;
+
+#pragma pack(push, 1)
+
+struct SerializedState {
+ MojoCreateDataPipeOptions options;
+ uint64_t pipe_id;
+ uint32_t read_offset;
+ uint32_t bytes_available;
+ uint8_t flags;
+ char padding[7];
+};
+
+static_assert(sizeof(SerializedState) % 8 == 0,
+ "Invalid SerializedState size.");
+
+#pragma pack(pop)
+
+} // namespace
+
+// A PortObserver which forwards to a DataPipeConsumerDispatcher. This owns a
+// reference to the dispatcher to ensure it lives as long as the observed port.
+class DataPipeConsumerDispatcher::PortObserverThunk
+ : public NodeController::PortObserver {
+ public:
+ explicit PortObserverThunk(
+ scoped_refptr<DataPipeConsumerDispatcher> dispatcher)
+ : dispatcher_(dispatcher) {}
+
+ private:
+ ~PortObserverThunk() override {}
+
+ // NodeController::PortObserver:
+ void OnPortStatusChanged() override { dispatcher_->OnPortStatusChanged(); }
+
+ scoped_refptr<DataPipeConsumerDispatcher> dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(PortObserverThunk);
+};
+
+DataPipeConsumerDispatcher::DataPipeConsumerDispatcher(
+ NodeController* node_controller,
+ const ports::PortRef& control_port,
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer,
+ const MojoCreateDataPipeOptions& options,
+ bool initialized,
+ uint64_t pipe_id)
+ : options_(options),
+ node_controller_(node_controller),
+ control_port_(control_port),
+ pipe_id_(pipe_id),
+ watchers_(this),
+ shared_ring_buffer_(shared_ring_buffer) {
+ if (initialized) {
+ base::AutoLock lock(lock_);
+ InitializeNoLock();
+ }
+}
+
+Dispatcher::Type DataPipeConsumerDispatcher::GetType() const {
+ return Type::DATA_PIPE_CONSUMER;
+}
+
+MojoResult DataPipeConsumerDispatcher::Close() {
+ base::AutoLock lock(lock_);
+ DVLOG(1) << "Closing data pipe consumer " << pipe_id_;
+ return CloseNoLock();
+}
+
+MojoResult DataPipeConsumerDispatcher::ReadData(void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags) {
+ base::AutoLock lock(lock_);
+
+ if (!shared_ring_buffer_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (in_two_phase_read_)
+ return MOJO_RESULT_BUSY;
+
+ const bool had_new_data = new_data_available_;
+ new_data_available_ = false;
+
+ if ((flags & MOJO_READ_DATA_FLAG_QUERY)) {
+ if ((flags & MOJO_READ_DATA_FLAG_PEEK) ||
+ (flags & MOJO_READ_DATA_FLAG_DISCARD))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ DCHECK(!(flags & MOJO_READ_DATA_FLAG_DISCARD)); // Handled above.
+ DVLOG_IF(2, elements)
+ << "Query mode: ignoring non-null |elements|";
+ *num_bytes = static_cast<uint32_t>(bytes_available_);
+
+ if (had_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ return MOJO_RESULT_OK;
+ }
+
+ bool discard = false;
+ if ((flags & MOJO_READ_DATA_FLAG_DISCARD)) {
+ // These flags are mutually exclusive.
+ if (flags & MOJO_READ_DATA_FLAG_PEEK)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ DVLOG_IF(2, elements)
+ << "Discard mode: ignoring non-null |elements|";
+ discard = true;
+ }
+
+ uint32_t max_num_bytes_to_read = *num_bytes;
+ if (max_num_bytes_to_read % options_.element_num_bytes != 0)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ bool all_or_none = flags & MOJO_READ_DATA_FLAG_ALL_OR_NONE;
+ uint32_t min_num_bytes_to_read =
+ all_or_none ? max_num_bytes_to_read : 0;
+
+ if (min_num_bytes_to_read > bytes_available_) {
+ if (had_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ return peer_closed_ ? MOJO_RESULT_FAILED_PRECONDITION
+ : MOJO_RESULT_OUT_OF_RANGE;
+ }
+
+ uint32_t bytes_to_read = std::min(max_num_bytes_to_read, bytes_available_);
+ if (bytes_to_read == 0) {
+ if (had_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ return peer_closed_ ? MOJO_RESULT_FAILED_PRECONDITION
+ : MOJO_RESULT_SHOULD_WAIT;
+ }
+
+ if (!discard) {
+ uint8_t* data = static_cast<uint8_t*>(ring_buffer_mapping_->GetBase());
+ CHECK(data);
+
+ uint8_t* destination = static_cast<uint8_t*>(elements);
+ CHECK(destination);
+
+ DCHECK_LE(read_offset_, options_.capacity_num_bytes);
+ uint32_t tail_bytes_to_copy =
+ std::min(options_.capacity_num_bytes - read_offset_, bytes_to_read);
+ uint32_t head_bytes_to_copy = bytes_to_read - tail_bytes_to_copy;
+ if (tail_bytes_to_copy > 0)
+ memcpy(destination, data + read_offset_, tail_bytes_to_copy);
+ if (head_bytes_to_copy > 0)
+ memcpy(destination + tail_bytes_to_copy, data, head_bytes_to_copy);
+ }
+ *num_bytes = bytes_to_read;
+
+ bool peek = !!(flags & MOJO_READ_DATA_FLAG_PEEK);
+ if (discard || !peek) {
+ read_offset_ = (read_offset_ + bytes_to_read) % options_.capacity_num_bytes;
+ bytes_available_ -= bytes_to_read;
+
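+ // Temporarily drop |lock_| so the peer notification below isn't sent while
+ // the dispatcher lock is held.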
+ base::AutoUnlock unlock(lock_);
+ NotifyRead(bytes_to_read);
+ }
+
+ // We may have just read the last available data and thus changed the signals
+ // state.
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult DataPipeConsumerDispatcher::BeginReadData(const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags) {
+ base::AutoLock lock(lock_);
+ if (!shared_ring_buffer_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (in_two_phase_read_)
+ return MOJO_RESULT_BUSY;
+
+ // These flags may not be used in two-phase mode.
+ if ((flags & MOJO_READ_DATA_FLAG_DISCARD) ||
+ (flags & MOJO_READ_DATA_FLAG_QUERY) ||
+ (flags & MOJO_READ_DATA_FLAG_PEEK))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ const bool had_new_data = new_data_available_;
+ new_data_available_ = false;
+
+ if (bytes_available_ == 0) {
+ if (had_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ return peer_closed_ ? MOJO_RESULT_FAILED_PRECONDITION
+ : MOJO_RESULT_SHOULD_WAIT;
+ }
+
+ DCHECK_LT(read_offset_, options_.capacity_num_bytes);
+ uint32_t bytes_to_read = std::min(bytes_available_,
+ options_.capacity_num_bytes - read_offset_);
+
+ CHECK(ring_buffer_mapping_);
+ uint8_t* data = static_cast<uint8_t*>(ring_buffer_mapping_->GetBase());
+ CHECK(data);
+
+ in_two_phase_read_ = true;
+ *buffer = data + read_offset_;
+ *buffer_num_bytes = bytes_to_read;
+ two_phase_max_bytes_read_ = bytes_to_read;
+
+ if (had_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult DataPipeConsumerDispatcher::EndReadData(uint32_t num_bytes_read) {
+ base::AutoLock lock(lock_);
+ if (!in_two_phase_read_)
+ return MOJO_RESULT_FAILED_PRECONDITION;
+
+ if (in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ CHECK(shared_ring_buffer_);
+
+ MojoResult rv;
+ if (num_bytes_read > two_phase_max_bytes_read_ ||
+ num_bytes_read % options_.element_num_bytes != 0) {
+ rv = MOJO_RESULT_INVALID_ARGUMENT;
+ } else {
+ rv = MOJO_RESULT_OK;
+ read_offset_ =
+ (read_offset_ + num_bytes_read) % options_.capacity_num_bytes;
+
+ DCHECK_GE(bytes_available_, num_bytes_read);
+ bytes_available_ -= num_bytes_read;
+
+ base::AutoUnlock unlock(lock_);
+ NotifyRead(num_bytes_read);
+ }
+
+ in_two_phase_read_ = false;
+ two_phase_max_bytes_read_ = 0;
+
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+
+ return rv;
+}
+
+HandleSignalsState DataPipeConsumerDispatcher::GetHandleSignalsState() const {
+ base::AutoLock lock(lock_);
+ return GetHandleSignalsStateNoLock();
+}
+
+MojoResult DataPipeConsumerDispatcher::AddWatcherRef(
+ const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock());
+}
+
+MojoResult DataPipeConsumerDispatcher::RemoveWatcherRef(
+ WatcherDispatcher* watcher,
+ uintptr_t context) {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Remove(watcher, context);
+}
+
+void DataPipeConsumerDispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) {
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ *num_bytes = static_cast<uint32_t>(sizeof(SerializedState));
+ *num_ports = 1;
+ *num_handles = 1;
+}
+
+bool DataPipeConsumerDispatcher::EndSerialize(
+ void* destination,
+ ports::PortName* ports,
+ PlatformHandle* platform_handles) {
+ SerializedState* state = static_cast<SerializedState*>(destination);
+ memcpy(&state->options, &options_, sizeof(MojoCreateDataPipeOptions));
+ memset(state->padding, 0, sizeof(state->padding));
+
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ state->pipe_id = pipe_id_;
+ state->read_offset = read_offset_;
+ state->bytes_available = bytes_available_;
+ state->flags = peer_closed_ ? kFlagPeerClosed : 0;
+
+ ports[0] = control_port_.name();
+
+ buffer_handle_for_transit_ = shared_ring_buffer_->DuplicatePlatformHandle();
+ platform_handles[0] = buffer_handle_for_transit_.get();
+
+ return true;
+}
+
+bool DataPipeConsumerDispatcher::BeginTransit() {
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return false;
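+ // A dispatcher in the middle of a two-phase read can't be put in transit.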
+ in_transit_ = !in_two_phase_read_;
+ return in_transit_;
+}
+
+void DataPipeConsumerDispatcher::CompleteTransitAndClose() {
+ node_controller_->SetPortObserver(control_port_, nullptr);
+
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ in_transit_ = false;
+ transferred_ = true;
+ ignore_result(buffer_handle_for_transit_.release());
+ CloseNoLock();
+}
+
+void DataPipeConsumerDispatcher::CancelTransit() {
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ in_transit_ = false;
+ buffer_handle_for_transit_.reset();
+ UpdateSignalsStateNoLock();
+}
+
+// static
+scoped_refptr<DataPipeConsumerDispatcher>
+DataPipeConsumerDispatcher::Deserialize(const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles) {
+ if (num_ports != 1 || num_handles != 1 ||
+ num_bytes != sizeof(SerializedState)) {
+ return nullptr;
+ }
+
+ const SerializedState* state = static_cast<const SerializedState*>(data);
+
+ NodeController* node_controller = internal::g_core->GetNodeController();
+ ports::PortRef port;
+ if (node_controller->node()->GetPort(ports[0], &port) != ports::OK)
+ return nullptr;
+
+ PlatformHandle buffer_handle;
+ std::swap(buffer_handle, handles[0]);
+ scoped_refptr<PlatformSharedBuffer> ring_buffer =
+ PlatformSharedBuffer::CreateFromPlatformHandle(
+ state->options.capacity_num_bytes,
+ false /* read_only */,
+ ScopedPlatformHandle(buffer_handle));
+ if (!ring_buffer) {
+ DLOG(ERROR) << "Failed to deserialize shared buffer handle.";
+ return nullptr;
+ }
+
+ scoped_refptr<DataPipeConsumerDispatcher> dispatcher =
+ new DataPipeConsumerDispatcher(node_controller, port, ring_buffer,
+ state->options, false /* initialized */,
+ state->pipe_id);
+
+ {
+ base::AutoLock lock(dispatcher->lock_);
+ dispatcher->read_offset_ = state->read_offset;
+ dispatcher->bytes_available_ = state->bytes_available;
+ dispatcher->new_data_available_ = state->bytes_available > 0;
+ dispatcher->peer_closed_ = state->flags & kFlagPeerClosed;
+ dispatcher->InitializeNoLock();
+ dispatcher->UpdateSignalsStateNoLock();
+ }
+
+ return dispatcher;
+}
+
+DataPipeConsumerDispatcher::~DataPipeConsumerDispatcher() {
+ DCHECK(is_closed_ && !shared_ring_buffer_ && !ring_buffer_mapping_ &&
+ !in_transit_);
+}
+
+void DataPipeConsumerDispatcher::InitializeNoLock() {
+ lock_.AssertAcquired();
+
+ if (shared_ring_buffer_) {
+ DCHECK(!ring_buffer_mapping_);
+ ring_buffer_mapping_ =
+ shared_ring_buffer_->Map(0, options_.capacity_num_bytes);
+ if (!ring_buffer_mapping_) {
+ DLOG(ERROR) << "Failed to map shared buffer.";
+ shared_ring_buffer_ = nullptr;
+ }
+ }
+
+ base::AutoUnlock unlock(lock_);
+ node_controller_->SetPortObserver(
+ control_port_,
+ make_scoped_refptr(new PortObserverThunk(this)));
+}
+
+MojoResult DataPipeConsumerDispatcher::CloseNoLock() {
+ lock_.AssertAcquired();
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ is_closed_ = true;
+ ring_buffer_mapping_.reset();
+ shared_ring_buffer_ = nullptr;
+
+ watchers_.NotifyClosed();
+ if (!transferred_) {
+ base::AutoUnlock unlock(lock_);
+ node_controller_->ClosePort(control_port_);
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+HandleSignalsState
+DataPipeConsumerDispatcher::GetHandleSignalsStateNoLock() const {
+ lock_.AssertAcquired();
+
+ HandleSignalsState rv;
+ if (shared_ring_buffer_ && bytes_available_) {
+ if (!in_two_phase_read_) {
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ if (new_data_available_)
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE;
+ }
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ } else if (!peer_closed_ && shared_ring_buffer_) {
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ }
+
+ if (shared_ring_buffer_) {
+ if (new_data_available_ || !peer_closed_)
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE;
+ }
+
+ if (peer_closed_)
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+
+ return rv;
+}
+
+void DataPipeConsumerDispatcher::NotifyRead(uint32_t num_bytes) {
+ DVLOG(1) << "Data pipe consumer " << pipe_id_ << " notifying peer: "
+ << num_bytes << " bytes read. [control_port="
+ << control_port_.name() << "]";
+
+ SendDataPipeControlMessage(node_controller_, control_port_,
+ DataPipeCommand::DATA_WAS_READ, num_bytes);
+}
+
+void DataPipeConsumerDispatcher::OnPortStatusChanged() {
+ DCHECK(RequestContext::current());
+
+ base::AutoLock lock(lock_);
+
+ // We stop observing the control port as soon as it's transferred, but this can
+ // race with events which are raised right before that happens. This is fine
+ // to ignore.
+ if (transferred_)
+ return;
+
+ DVLOG(1) << "Control port status changed for data pipe producer " << pipe_id_;
+
+ UpdateSignalsStateNoLock();
+}
+
+void DataPipeConsumerDispatcher::UpdateSignalsStateNoLock() {
+ lock_.AssertAcquired();
+
+ bool was_peer_closed = peer_closed_;
+ size_t previous_bytes_available = bytes_available_;
+
+ ports::PortStatus port_status;
+ int rv = node_controller_->node()->GetStatus(control_port_, &port_status);
+ if (rv != ports::OK || !port_status.receiving_messages) {
+ DVLOG(1) << "Data pipe consumer " << pipe_id_ << " is aware of peer closure"
+ << " [control_port=" << control_port_.name() << "]";
+ peer_closed_ = true;
+ } else if (rv == ports::OK && port_status.has_messages && !in_transit_) {
+ ports::ScopedMessage message;
+ do {
+ int rv = node_controller_->node()->GetMessage(
+ control_port_, &message, nullptr);
+ if (rv != ports::OK)
+ peer_closed_ = true;
+ if (message) {
+ if (message->num_payload_bytes() < sizeof(DataPipeControlMessage)) {
+ peer_closed_ = true;
+ break;
+ }
+
+ const DataPipeControlMessage* m =
+ static_cast<const DataPipeControlMessage*>(
+ message->payload_bytes());
+
+ if (m->command != DataPipeCommand::DATA_WAS_WRITTEN) {
+ DLOG(ERROR) << "Unexpected control message from producer.";
+ peer_closed_ = true;
+ break;
+ }
+
+ if (static_cast<size_t>(bytes_available_) + m->num_bytes >
+ options_.capacity_num_bytes) {
+ DLOG(ERROR) << "Producer claims to have written too many bytes.";
+ peer_closed_ = true;
+ break;
+ }
+
+ DVLOG(1) << "Data pipe consumer " << pipe_id_ << " is aware that "
+ << m->num_bytes << " bytes were written. [control_port="
+ << control_port_.name() << "]";
+
+ bytes_available_ += m->num_bytes;
+ }
+ } while (message);
+ }
+
+ bool has_new_data = bytes_available_ != previous_bytes_available;
+ if (has_new_data)
+ new_data_available_ = true;
+
+ if (peer_closed_ != was_peer_closed || has_new_data)
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+}
+
+} // namespace edk
+} // namespace mojo
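A note on the read path above: ReadData() copies out of the shared ring buffer in at most two memcpy calls, one for the run up to the end of the buffer and one for the portion that wrapped around to the front, then advances read_offset_ modulo the capacity. The following is a minimal standalone sketch of that arithmetic, using a plain std::vector in place of the shared-memory mapping; the names here are illustrative and not from the patch.

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Copies up to |max_bytes| out of |ring| starting at |*read_offset|, wrapping
// at the end of the buffer, and updates the offset and available-byte count.
// Returns the number of bytes copied.
uint32_t RingRead(const std::vector<uint8_t>& ring,
                  uint32_t* read_offset,
                  uint32_t* bytes_available,
                  uint8_t* destination,
                  uint32_t max_bytes) {
  const uint32_t capacity = static_cast<uint32_t>(ring.size());
  const uint32_t bytes_to_read = std::min(max_bytes, *bytes_available);
  // First the run up to the end of the buffer, then whatever wrapped to the
  // front.
  const uint32_t tail = std::min(capacity - *read_offset, bytes_to_read);
  const uint32_t head = bytes_to_read - tail;
  if (tail > 0)
    std::memcpy(destination, ring.data() + *read_offset, tail);
  if (head > 0)
    std::memcpy(destination + tail, ring.data(), head);
  *read_offset = (*read_offset + bytes_to_read) % capacity;
  *bytes_available -= bytes_to_read;
  return bytes_to_read;
}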
diff --git a/mojo/edk/system/data_pipe_consumer_dispatcher.h b/mojo/edk/system/data_pipe_consumer_dispatcher.h
new file mode 100644
index 0000000000..120c7a387f
--- /dev/null
+++ b/mojo/edk/system/data_pipe_consumer_dispatcher.h
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_DATA_PIPE_CONSUMER_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_DATA_PIPE_CONSUMER_DISPATCHER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/ports/port_ref.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/edk/system/watcher_set.h"
+
+namespace mojo {
+namespace edk {
+
+class NodeController;
+
+// This is the Dispatcher implementation for the consumer handle for data
+// pipes created by the Mojo primitive MojoCreateDataPipe(). This class is
+// thread-safe.
+class MOJO_SYSTEM_IMPL_EXPORT DataPipeConsumerDispatcher final
+ : public Dispatcher {
+ public:
+ DataPipeConsumerDispatcher(
+ NodeController* node_controller,
+ const ports::PortRef& control_port,
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer,
+ const MojoCreateDataPipeOptions& options,
+ bool initialized,
+ uint64_t pipe_id);
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ MojoResult ReadData(void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags) override;
+ MojoResult BeginReadData(const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags) override;
+ MojoResult EndReadData(uint32_t num_bytes_read) override;
+ HandleSignalsState GetHandleSignalsState() const override;
+ MojoResult AddWatcherRef(const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) override;
+ MojoResult RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context) override;
+ void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) override;
+ bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) override;
+ bool BeginTransit() override;
+ void CompleteTransitAndClose() override;
+ void CancelTransit() override;
+
+ static scoped_refptr<DataPipeConsumerDispatcher>
+ Deserialize(const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles);
+
+ private:
+ class PortObserverThunk;
+ friend class PortObserverThunk;
+
+ ~DataPipeConsumerDispatcher() override;
+
+ void InitializeNoLock();
+ MojoResult CloseNoLock();
+ HandleSignalsState GetHandleSignalsStateNoLock() const;
+ void NotifyRead(uint32_t num_bytes);
+ void OnPortStatusChanged();
+ void UpdateSignalsStateNoLock();
+
+ const MojoCreateDataPipeOptions options_;
+ NodeController* const node_controller_;
+ const ports::PortRef control_port_;
+ const uint64_t pipe_id_;
+
+ // Guards access to the fields below.
+ mutable base::Lock lock_;
+
+ WatcherSet watchers_;
+
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer_;
+ std::unique_ptr<PlatformSharedBufferMapping> ring_buffer_mapping_;
+ ScopedPlatformHandle buffer_handle_for_transit_;
+
+ bool in_two_phase_read_ = false;
+ uint32_t two_phase_max_bytes_read_ = 0;
+
+ bool in_transit_ = false;
+ bool is_closed_ = false;
+ bool peer_closed_ = false;
+ bool transferred_ = false;
+
+ uint32_t read_offset_ = 0;
+ uint32_t bytes_available_ = 0;
+
+ // Indicates whether any new data is available since the last read attempt.
+ bool new_data_available_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(DataPipeConsumerDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_DATA_PIPE_CONSUMER_DISPATCHER_H_
diff --git a/mojo/edk/system/data_pipe_control_message.cc b/mojo/edk/system/data_pipe_control_message.cc
new file mode 100644
index 0000000000..23873b8290
--- /dev/null
+++ b/mojo/edk/system/data_pipe_control_message.cc
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/data_pipe_control_message.h"
+
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/ports_message.h"
+
+namespace mojo {
+namespace edk {
+
+void SendDataPipeControlMessage(NodeController* node_controller,
+ const ports::PortRef& port,
+ DataPipeCommand command,
+ uint32_t num_bytes) {
+ std::unique_ptr<PortsMessage> message =
+ PortsMessage::NewUserMessage(sizeof(DataPipeControlMessage), 0, 0);
+ CHECK(message);
+
+ DataPipeControlMessage* data =
+ static_cast<DataPipeControlMessage*>(message->mutable_payload_bytes());
+ data->command = command;
+ data->num_bytes = num_bytes;
+
+ int rv = node_controller->SendMessage(port, std::move(message));
+ if (rv != ports::OK && rv != ports::ERROR_PORT_PEER_CLOSED) {
+ DLOG(ERROR) << "Unexpected failure sending data pipe control message: "
+ << rv;
+ }
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/data_pipe_control_message.h b/mojo/edk/system/data_pipe_control_message.h
new file mode 100644
index 0000000000..ec84ea3c55
--- /dev/null
+++ b/mojo/edk/system/data_pipe_control_message.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_DATA_PIPE_CONTROL_MESSAGE_H_
+#define MOJO_EDK_SYSTEM_DATA_PIPE_CONTROL_MESSAGE_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/ports/port_ref.h"
+#include "mojo/public/c/system/macros.h"
+
+namespace mojo {
+namespace edk {
+
+class NodeController;
+
+enum DataPipeCommand : uint32_t {
+ // Signal to the consumer that new data is available.
+ DATA_WAS_WRITTEN,
+
+ // Signal to the producer that data has been consumed.
+ DATA_WAS_READ,
+};
+
+// Message header for messages sent over a data pipe control port.
+struct MOJO_ALIGNAS(8) DataPipeControlMessage {
+ DataPipeCommand command;
+ uint32_t num_bytes;
+};
+
+void SendDataPipeControlMessage(NodeController* node_controller,
+ const ports::PortRef& port,
+ DataPipeCommand command,
+ uint32_t num_bytes);
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_DATA_PIPE_CONTROL_MESSAGE_H_
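For reference, the control-message payload defined above is two uint32_t fields in an 8-byte-aligned struct, so every control message carries exactly eight payload bytes; both dispatchers treat a payload shorter than sizeof(DataPipeControlMessage) as a broken peer. Below is a small sketch of compile-time checks one could use to document that layout; it is illustrative only and not part of the patch.

#include <cstddef>

#include "mojo/edk/system/data_pipe_control_message.h"

static_assert(sizeof(mojo::edk::DataPipeControlMessage) == 8,
              "Control messages are |command| plus |num_bytes|, 8 bytes total.");
static_assert(offsetof(mojo::edk::DataPipeControlMessage, num_bytes) == 4,
              "|num_bytes| immediately follows |command|.");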
diff --git a/mojo/edk/system/data_pipe_producer_dispatcher.cc b/mojo/edk/system/data_pipe_producer_dispatcher.cc
new file mode 100644
index 0000000000..b0102a6d9b
--- /dev/null
+++ b/mojo/edk/system/data_pipe_producer_dispatcher.cc
@@ -0,0 +1,507 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/data_pipe_producer_dispatcher.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/configuration.h"
+#include "mojo/edk/system/core.h"
+#include "mojo/edk/system/data_pipe_control_message.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/ports_message.h"
+#include "mojo/edk/system/request_context.h"
+#include "mojo/public/c/system/data_pipe.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+const uint8_t kFlagPeerClosed = 0x01;
+
+#pragma pack(push, 1)
+
+struct SerializedState {
+ MojoCreateDataPipeOptions options;
+ uint64_t pipe_id;
+ uint32_t write_offset;
+ uint32_t available_capacity;
+ uint8_t flags;
+ char padding[7];
+};
+
+static_assert(sizeof(SerializedState) % 8 == 0,
+ "Invalid SerializedState size.");
+
+#pragma pack(pop)
+
+} // namespace
+
+// A PortObserver which forwards to a DataPipeProducerDispatcher. This owns a
+// reference to the dispatcher to ensure it lives as long as the observed port.
+class DataPipeProducerDispatcher::PortObserverThunk
+ : public NodeController::PortObserver {
+ public:
+ explicit PortObserverThunk(
+ scoped_refptr<DataPipeProducerDispatcher> dispatcher)
+ : dispatcher_(dispatcher) {}
+
+ private:
+ ~PortObserverThunk() override {}
+
+ // NodeController::PortObserver:
+ void OnPortStatusChanged() override { dispatcher_->OnPortStatusChanged(); }
+
+ scoped_refptr<DataPipeProducerDispatcher> dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(PortObserverThunk);
+};
+
+DataPipeProducerDispatcher::DataPipeProducerDispatcher(
+ NodeController* node_controller,
+ const ports::PortRef& control_port,
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer,
+ const MojoCreateDataPipeOptions& options,
+ bool initialized,
+ uint64_t pipe_id)
+ : options_(options),
+ node_controller_(node_controller),
+ control_port_(control_port),
+ pipe_id_(pipe_id),
+ watchers_(this),
+ shared_ring_buffer_(shared_ring_buffer),
+ available_capacity_(options_.capacity_num_bytes) {
+ if (initialized) {
+ base::AutoLock lock(lock_);
+ InitializeNoLock();
+ }
+}
+
+Dispatcher::Type DataPipeProducerDispatcher::GetType() const {
+ return Type::DATA_PIPE_PRODUCER;
+}
+
+MojoResult DataPipeProducerDispatcher::Close() {
+ base::AutoLock lock(lock_);
+ DVLOG(1) << "Closing data pipe producer " << pipe_id_;
+ return CloseNoLock();
+}
+
+MojoResult DataPipeProducerDispatcher::WriteData(const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags) {
+ base::AutoLock lock(lock_);
+ if (!shared_ring_buffer_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (in_two_phase_write_)
+ return MOJO_RESULT_BUSY;
+
+ if (peer_closed_)
+ return MOJO_RESULT_FAILED_PRECONDITION;
+
+ if (*num_bytes % options_.element_num_bytes != 0)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ if (*num_bytes == 0)
+ return MOJO_RESULT_OK; // Nothing to do.
+
+ if ((flags & MOJO_WRITE_DATA_FLAG_ALL_OR_NONE) &&
+ (*num_bytes > available_capacity_)) {
+ // Don't return "should wait" since you can't wait for a specified amount of
+ // data.
+ return MOJO_RESULT_OUT_OF_RANGE;
+ }
+
+ DCHECK_LE(available_capacity_, options_.capacity_num_bytes);
+ uint32_t num_bytes_to_write = std::min(*num_bytes, available_capacity_);
+ if (num_bytes_to_write == 0)
+ return MOJO_RESULT_SHOULD_WAIT;
+
+ *num_bytes = num_bytes_to_write;
+
+ CHECK(ring_buffer_mapping_);
+ uint8_t* data = static_cast<uint8_t*>(ring_buffer_mapping_->GetBase());
+ CHECK(data);
+
+ const uint8_t* source = static_cast<const uint8_t*>(elements);
+ CHECK(source);
+
+ DCHECK_LE(write_offset_, options_.capacity_num_bytes);
+ uint32_t tail_bytes_to_write =
+ std::min(options_.capacity_num_bytes - write_offset_,
+ num_bytes_to_write);
+ uint32_t head_bytes_to_write = num_bytes_to_write - tail_bytes_to_write;
+
+ DCHECK_GT(tail_bytes_to_write, 0u);
+ memcpy(data + write_offset_, source, tail_bytes_to_write);
+ if (head_bytes_to_write > 0)
+ memcpy(data, source + tail_bytes_to_write, head_bytes_to_write);
+
+ DCHECK_LE(num_bytes_to_write, available_capacity_);
+ available_capacity_ -= num_bytes_to_write;
+ write_offset_ = (write_offset_ + num_bytes_to_write) %
+ options_.capacity_num_bytes;
+
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+
+ base::AutoUnlock unlock(lock_);
+ NotifyWrite(num_bytes_to_write);
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult DataPipeProducerDispatcher::BeginWriteData(
+ void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags) {
+ base::AutoLock lock(lock_);
+ if (!shared_ring_buffer_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ // These flags may not be used in two-phase mode.
+ if (flags & MOJO_WRITE_DATA_FLAG_ALL_OR_NONE)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (in_two_phase_write_)
+ return MOJO_RESULT_BUSY;
+ if (peer_closed_)
+ return MOJO_RESULT_FAILED_PRECONDITION;
+
+ if (available_capacity_ == 0) {
+ return peer_closed_ ? MOJO_RESULT_FAILED_PRECONDITION
+ : MOJO_RESULT_SHOULD_WAIT;
+ }
+
+ in_two_phase_write_ = true;
+ *buffer_num_bytes = std::min(options_.capacity_num_bytes - write_offset_,
+ available_capacity_);
+ DCHECK_GT(*buffer_num_bytes, 0u);
+
+ CHECK(ring_buffer_mapping_);
+ uint8_t* data = static_cast<uint8_t*>(ring_buffer_mapping_->GetBase());
+ *buffer = data + write_offset_;
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult DataPipeProducerDispatcher::EndWriteData(
+ uint32_t num_bytes_written) {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (!in_two_phase_write_)
+ return MOJO_RESULT_FAILED_PRECONDITION;
+
+ DCHECK(shared_ring_buffer_);
+ DCHECK(ring_buffer_mapping_);
+
+ // Note: Allow successful completion of the two-phase write even if the other
+ // side has been closed.
+ MojoResult rv = MOJO_RESULT_OK;
+ if (num_bytes_written > available_capacity_ ||
+ num_bytes_written % options_.element_num_bytes != 0 ||
+ write_offset_ + num_bytes_written > options_.capacity_num_bytes) {
+ rv = MOJO_RESULT_INVALID_ARGUMENT;
+ } else {
+ DCHECK_LE(num_bytes_written + write_offset_, options_.capacity_num_bytes);
+ available_capacity_ -= num_bytes_written;
+ write_offset_ = (write_offset_ + num_bytes_written) %
+ options_.capacity_num_bytes;
+
+ base::AutoUnlock unlock(lock_);
+ NotifyWrite(num_bytes_written);
+ }
+
+ in_two_phase_write_ = false;
+
+ // If we're now writable, we *became* writable (since we weren't writable
+ // during the two-phase write), so notify watchers.
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+
+ return rv;
+}
+
+HandleSignalsState DataPipeProducerDispatcher::GetHandleSignalsState() const {
+ base::AutoLock lock(lock_);
+ return GetHandleSignalsStateNoLock();
+}
+
+MojoResult DataPipeProducerDispatcher::AddWatcherRef(
+ const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock());
+}
+
+MojoResult DataPipeProducerDispatcher::RemoveWatcherRef(
+ WatcherDispatcher* watcher,
+ uintptr_t context) {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Remove(watcher, context);
+}
+
+void DataPipeProducerDispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) {
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ *num_bytes = sizeof(SerializedState);
+ *num_ports = 1;
+ *num_handles = 1;
+}
+
+bool DataPipeProducerDispatcher::EndSerialize(
+ void* destination,
+ ports::PortName* ports,
+ PlatformHandle* platform_handles) {
+ SerializedState* state = static_cast<SerializedState*>(destination);
+ memcpy(&state->options, &options_, sizeof(MojoCreateDataPipeOptions));
+ memset(state->padding, 0, sizeof(state->padding));
+
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ state->pipe_id = pipe_id_;
+ state->write_offset = write_offset_;
+ state->available_capacity = available_capacity_;
+ state->flags = peer_closed_ ? kFlagPeerClosed : 0;
+
+ ports[0] = control_port_.name();
+
+ buffer_handle_for_transit_ = shared_ring_buffer_->DuplicatePlatformHandle();
+ platform_handles[0] = buffer_handle_for_transit_.get();
+
+ return true;
+}
+
+bool DataPipeProducerDispatcher::BeginTransit() {
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return false;
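+ // A dispatcher in the middle of a two-phase write can't be put in transit.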
+ in_transit_ = !in_two_phase_write_;
+ return in_transit_;
+}
+
+void DataPipeProducerDispatcher::CompleteTransitAndClose() {
+ node_controller_->SetPortObserver(control_port_, nullptr);
+
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ transferred_ = true;
+ in_transit_ = false;
+ ignore_result(buffer_handle_for_transit_.release());
+ CloseNoLock();
+}
+
+void DataPipeProducerDispatcher::CancelTransit() {
+ base::AutoLock lock(lock_);
+ DCHECK(in_transit_);
+ in_transit_ = false;
+ buffer_handle_for_transit_.reset();
+
+ HandleSignalsState state = GetHandleSignalsStateNoLock();
+ watchers_.NotifyState(state);
+}
+
+// static
+scoped_refptr<DataPipeProducerDispatcher>
+DataPipeProducerDispatcher::Deserialize(const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles) {
+ if (num_ports != 1 || num_handles != 1 ||
+ num_bytes != sizeof(SerializedState)) {
+ return nullptr;
+ }
+
+ const SerializedState* state = static_cast<const SerializedState*>(data);
+
+ NodeController* node_controller = internal::g_core->GetNodeController();
+ ports::PortRef port;
+ if (node_controller->node()->GetPort(ports[0], &port) != ports::OK)
+ return nullptr;
+
+ PlatformHandle buffer_handle;
+ std::swap(buffer_handle, handles[0]);
+ scoped_refptr<PlatformSharedBuffer> ring_buffer =
+ PlatformSharedBuffer::CreateFromPlatformHandle(
+ state->options.capacity_num_bytes,
+ false /* read_only */,
+ ScopedPlatformHandle(buffer_handle));
+ if (!ring_buffer) {
+ DLOG(ERROR) << "Failed to deserialize shared buffer handle.";
+ return nullptr;
+ }
+
+ scoped_refptr<DataPipeProducerDispatcher> dispatcher =
+ new DataPipeProducerDispatcher(node_controller, port, ring_buffer,
+ state->options, false /* initialized */,
+ state->pipe_id);
+
+ {
+ base::AutoLock lock(dispatcher->lock_);
+ dispatcher->write_offset_ = state->write_offset;
+ dispatcher->available_capacity_ = state->available_capacity;
+ dispatcher->peer_closed_ = state->flags & kFlagPeerClosed;
+ dispatcher->InitializeNoLock();
+ dispatcher->UpdateSignalsStateNoLock();
+ }
+
+ return dispatcher;
+}
+
+DataPipeProducerDispatcher::~DataPipeProducerDispatcher() {
+ DCHECK(is_closed_ && !in_transit_ && !shared_ring_buffer_ &&
+ !ring_buffer_mapping_);
+}
+
+void DataPipeProducerDispatcher::InitializeNoLock() {
+ lock_.AssertAcquired();
+
+ if (shared_ring_buffer_) {
+ ring_buffer_mapping_ =
+ shared_ring_buffer_->Map(0, options_.capacity_num_bytes);
+ if (!ring_buffer_mapping_) {
+ DLOG(ERROR) << "Failed to map shared buffer.";
+ shared_ring_buffer_ = nullptr;
+ }
+ }
+
+ base::AutoUnlock unlock(lock_);
+ node_controller_->SetPortObserver(
+ control_port_,
+ make_scoped_refptr(new PortObserverThunk(this)));
+}
+
+MojoResult DataPipeProducerDispatcher::CloseNoLock() {
+ lock_.AssertAcquired();
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ is_closed_ = true;
+ ring_buffer_mapping_.reset();
+ shared_ring_buffer_ = nullptr;
+
+ watchers_.NotifyClosed();
+ if (!transferred_) {
+ base::AutoUnlock unlock(lock_);
+ node_controller_->ClosePort(control_port_);
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+HandleSignalsState DataPipeProducerDispatcher::GetHandleSignalsStateNoLock()
+ const {
+ lock_.AssertAcquired();
+ HandleSignalsState rv;
+ if (!peer_closed_) {
+ if (!in_two_phase_write_ && shared_ring_buffer_ && available_capacity_ > 0)
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
+ } else {
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+ }
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+ return rv;
+}
+
+void DataPipeProducerDispatcher::NotifyWrite(uint32_t num_bytes) {
+ DVLOG(1) << "Data pipe producer " << pipe_id_ << " notifying peer: "
+ << num_bytes << " bytes written. [control_port="
+ << control_port_.name() << "]";
+
+ SendDataPipeControlMessage(node_controller_, control_port_,
+ DataPipeCommand::DATA_WAS_WRITTEN, num_bytes);
+}
+
+void DataPipeProducerDispatcher::OnPortStatusChanged() {
+ DCHECK(RequestContext::current());
+
+ base::AutoLock lock(lock_);
+
+ // We stop observing the control port as soon as it's transferred, but this can
+ // race with events which are raised right before that happens. This is fine
+ // to ignore.
+ if (transferred_)
+ return;
+
+ DVLOG(1) << "Control port status changed for data pipe producer " << pipe_id_;
+
+ UpdateSignalsStateNoLock();
+}
+
+void DataPipeProducerDispatcher::UpdateSignalsStateNoLock() {
+ lock_.AssertAcquired();
+
+ bool was_peer_closed = peer_closed_;
+ size_t previous_capacity = available_capacity_;
+
+ ports::PortStatus port_status;
+ int rv = node_controller_->node()->GetStatus(control_port_, &port_status);
+ if (rv != ports::OK || !port_status.receiving_messages) {
+ DVLOG(1) << "Data pipe producer " << pipe_id_ << " is aware of peer closure"
+ << " [control_port=" << control_port_.name() << "]";
+ peer_closed_ = true;
+ } else if (rv == ports::OK && port_status.has_messages && !in_transit_) {
+ ports::ScopedMessage message;
+ do {
+ int rv = node_controller_->node()->GetMessage(
+ control_port_, &message, nullptr);
+ if (rv != ports::OK)
+ peer_closed_ = true;
+ if (message) {
+ if (message->num_payload_bytes() < sizeof(DataPipeControlMessage)) {
+ peer_closed_ = true;
+ break;
+ }
+
+ const DataPipeControlMessage* m =
+ static_cast<const DataPipeControlMessage*>(
+ message->payload_bytes());
+
+ if (m->command != DataPipeCommand::DATA_WAS_READ) {
+ DLOG(ERROR) << "Unexpected message from consumer.";
+ peer_closed_ = true;
+ break;
+ }
+
+ if (static_cast<size_t>(available_capacity_) + m->num_bytes >
+ options_.capacity_num_bytes) {
+ DLOG(ERROR) << "Consumer claims to have read too many bytes.";
+ break;
+ }
+
+ DVLOG(1) << "Data pipe producer " << pipe_id_ << " is aware that "
+ << m->num_bytes << " bytes were read. [control_port="
+ << control_port_.name() << "]";
+
+ available_capacity_ += m->num_bytes;
+ }
+ } while (message);
+ }
+
+ if (peer_closed_ != was_peer_closed ||
+ available_capacity_ != previous_capacity) {
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ }
+}
+
+} // namespace edk
+} // namespace mojo
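The producer and consumer dispatchers above never share state directly: each keeps its own view of the ring buffer (available_capacity_ on the producer, bytes_available_ on the consumer) and reconciles it through DATA_WAS_WRITTEN and DATA_WAS_READ control messages. The toy model below, illustrative only, shows the accounting and the invariant that once all control messages have been applied the two counters sum to the pipe capacity.

#include <cassert>
#include <cstdint>

// Single-process simulation of the capacity bookkeeping; in the real
// dispatchers the second half of Write() and Read() happens on the other end,
// applied in UpdateSignalsStateNoLock() when the control message arrives.
struct PipeSim {
  uint32_t capacity;
  uint32_t available_capacity;  // Producer-side view.
  uint32_t bytes_available;     // Consumer-side view.

  void Write(uint32_t n) {   // Producer WriteData() + DATA_WAS_WRITTEN.
    assert(n <= available_capacity);
    available_capacity -= n;
    bytes_available += n;    // Consumer applies the control message.
    assert(available_capacity + bytes_available == capacity);
  }
  void Read(uint32_t n) {    // Consumer ReadData() + DATA_WAS_READ.
    assert(n <= bytes_available);
    bytes_available -= n;
    available_capacity += n;  // Producer applies the control message.
    assert(available_capacity + bytes_available == capacity);
  }
};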
diff --git a/mojo/edk/system/data_pipe_producer_dispatcher.h b/mojo/edk/system/data_pipe_producer_dispatcher.h
new file mode 100644
index 0000000000..1eddd5dfa8
--- /dev/null
+++ b/mojo/edk/system/data_pipe_producer_dispatcher.h
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_DATA_PIPE_PRODUCER_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_DATA_PIPE_PRODUCER_DISPATCHER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/ports/port_ref.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/edk/system/watcher_set.h"
+
+namespace mojo {
+namespace edk {
+
+struct DataPipeControlMessage;
+class NodeController;
+
+// This is the Dispatcher implementation for the producer handle for data
+// pipes created by the Mojo primitive MojoCreateDataPipe(). This class is
+// thread-safe.
+class MOJO_SYSTEM_IMPL_EXPORT DataPipeProducerDispatcher final
+ : public Dispatcher {
+ public:
+ DataPipeProducerDispatcher(
+ NodeController* node_controller,
+ const ports::PortRef& port,
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer,
+ const MojoCreateDataPipeOptions& options,
+ bool initialized,
+ uint64_t pipe_id);
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ MojoResult WriteData(const void* elements,
+ uint32_t* num_bytes,
+                       MojoWriteDataFlags flags) override;
+ MojoResult BeginWriteData(void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags) override;
+ MojoResult EndWriteData(uint32_t num_bytes_written) override;
+ HandleSignalsState GetHandleSignalsState() const override;
+ MojoResult AddWatcherRef(const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) override;
+ MojoResult RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context) override;
+ void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) override;
+ bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) override;
+ bool BeginTransit() override;
+ void CompleteTransitAndClose() override;
+ void CancelTransit() override;
+
+ static scoped_refptr<DataPipeProducerDispatcher>
+ Deserialize(const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles);
+
+ private:
+ class PortObserverThunk;
+ friend class PortObserverThunk;
+
+ ~DataPipeProducerDispatcher() override;
+
+ void OnSharedBufferCreated(const scoped_refptr<PlatformSharedBuffer>& buffer);
+ void InitializeNoLock();
+ MojoResult CloseNoLock();
+ HandleSignalsState GetHandleSignalsStateNoLock() const;
+ void NotifyWrite(uint32_t num_bytes);
+ void OnPortStatusChanged();
+ void UpdateSignalsStateNoLock();
+ bool ProcessMessageNoLock(const DataPipeControlMessage& message,
+ ScopedPlatformHandleVectorPtr handles);
+
+ const MojoCreateDataPipeOptions options_;
+ NodeController* const node_controller_;
+ const ports::PortRef control_port_;
+ const uint64_t pipe_id_;
+
+ // Guards access to the fields below.
+ mutable base::Lock lock_;
+
+ WatcherSet watchers_;
+
+ bool buffer_requested_ = false;
+
+ scoped_refptr<PlatformSharedBuffer> shared_ring_buffer_;
+ std::unique_ptr<PlatformSharedBufferMapping> ring_buffer_mapping_;
+ ScopedPlatformHandle buffer_handle_for_transit_;
+
+ bool in_transit_ = false;
+ bool is_closed_ = false;
+ bool peer_closed_ = false;
+ bool transferred_ = false;
+ bool in_two_phase_write_ = false;
+
+ uint32_t write_offset_ = 0;
+ uint32_t available_capacity_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataPipeProducerDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_DATA_PIPE_PRODUCER_DISPATCHER_H_
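For context, here is a minimal sketch of how a producer handle backed by this dispatcher is driven through the public C API, mirroring the unit tests that follow; the function name and option values are illustrative, and error handling is reduced to CHECKs:

    #include "base/logging.h"
    #include "mojo/public/c/system/data_pipe.h"

    void WriteSomeBytes() {
      MojoCreateDataPipeOptions options = {};
      options.struct_size = sizeof(options);
      options.flags = MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE;
      options.element_num_bytes = 1;    // Byte-oriented pipe.
      options.capacity_num_bytes = 64;  // Illustrative capacity.

      MojoHandle producer, consumer;
      CHECK_EQ(MOJO_RESULT_OK,
               MojoCreateDataPipe(&options, &producer, &consumer));

      // One-shot write: |num_bytes| is in/out (requested vs. actually written).
      const char kData[] = "hello";
      uint32_t num_bytes = sizeof(kData);
      CHECK_EQ(MOJO_RESULT_OK, MojoWriteData(producer, kData, &num_bytes,
                                             MOJO_WRITE_DATA_FLAG_NONE));

      // Two-phase write: borrow a region of the ring buffer, fill it, commit.
      void* buffer = nullptr;
      num_bytes = 0;
      CHECK_EQ(MOJO_RESULT_OK,
               MojoBeginWriteData(producer, &buffer, &num_bytes,
                                  MOJO_WRITE_DATA_FLAG_NONE));
      static_cast<char*>(buffer)[0] = '!';
      CHECK_EQ(MOJO_RESULT_OK, MojoEndWriteData(producer, 1));

      CHECK_EQ(MOJO_RESULT_OK, MojoClose(producer));
      CHECK_EQ(MOJO_RESULT_OK, MojoClose(consumer));
    }

The consumer side is symmetric (MojoReadData / MojoBeginReadData / MojoEndReadData), as exercised extensively in data_pipe_unittest.cc below.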
diff --git a/mojo/edk/system/data_pipe_unittest.cc b/mojo/edk/system/data_pipe_unittest.cc
new file mode 100644
index 0000000000..79c1f758fb
--- /dev/null
+++ b/mojo/edk/system/data_pipe_unittest.cc
@@ -0,0 +1,2034 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "mojo/edk/embedder/embedder.h"
+#include "mojo/edk/embedder/platform_channel_pair.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/data_pipe.h"
+#include "mojo/public/c/system/functions.h"
+#include "mojo/public/c/system/message_pipe.h"
+#include "mojo/public/cpp/system/simple_watcher.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+const uint32_t kSizeOfOptions =
+ static_cast<uint32_t>(sizeof(MojoCreateDataPipeOptions));
+
+// In various places, we have to poll (since, e.g., we can't yet wait for a
+// certain amount of data to be available). This is the maximum number of
+// iterations (separated by a short sleep).
+// TODO(vtl): Get rid of this.
+const size_t kMaxPoll = 100;
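The polling idiom bounded by kMaxPoll looks roughly like the sketch below; PollForData is a hypothetical helper for illustration only, and the real loops appear inline in the tests that need them:

    // Polls until at least |min_num_bytes| are available on |consumer|, or
    // gives up after kMaxPoll iterations. Sketch only; not used by the tests.
    bool PollForData(MojoHandle consumer, uint32_t min_num_bytes) {
      for (size_t i = 0; i < kMaxPoll; i++) {
        uint32_t num_bytes = 0;
        // In query mode MojoReadData reports the bytes available without
        // consuming them; the elements pointer may be null.
        if (MojoReadData(consumer, nullptr, &num_bytes,
                         MOJO_READ_DATA_FLAG_QUERY) != MOJO_RESULT_OK) {
          return false;
        }
        if (num_bytes >= min_num_bytes)
          return true;
        test::Sleep(test::EpsilonDeadline());
      }
      return false;
    }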
+
+// Used in Multiprocess test.
+const size_t kMultiprocessCapacity = 37;
+const char kMultiprocessTestData[] = "hello i'm a string that is 36 bytes";
+const int kMultiprocessMaxIter = 5;
+
+// TODO(rockot): There are many uses of ASSERT where EXPECT would be more
+// appropriate. Fix this.
+
+class DataPipeTest : public test::MojoTestBase {
+ public:
+ DataPipeTest() : producer_(MOJO_HANDLE_INVALID),
+ consumer_(MOJO_HANDLE_INVALID) {}
+
+ ~DataPipeTest() override {
+ if (producer_ != MOJO_HANDLE_INVALID)
+ CHECK_EQ(MOJO_RESULT_OK, MojoClose(producer_));
+ if (consumer_ != MOJO_HANDLE_INVALID)
+ CHECK_EQ(MOJO_RESULT_OK, MojoClose(consumer_));
+ }
+
+ MojoResult Create(const MojoCreateDataPipeOptions* options) {
+ return MojoCreateDataPipe(options, &producer_, &consumer_);
+ }
+
+ MojoResult WriteData(const void* elements,
+ uint32_t* num_bytes,
+ bool all_or_none = false) {
+ return MojoWriteData(producer_, elements, num_bytes,
+ all_or_none ? MOJO_WRITE_DATA_FLAG_ALL_OR_NONE
+ : MOJO_WRITE_DATA_FLAG_NONE);
+ }
+
+ MojoResult ReadData(void* elements,
+ uint32_t* num_bytes,
+ bool all_or_none = false,
+ bool peek = false) {
+ MojoReadDataFlags flags = MOJO_READ_DATA_FLAG_NONE;
+ if (all_or_none)
+ flags |= MOJO_READ_DATA_FLAG_ALL_OR_NONE;
+ if (peek)
+ flags |= MOJO_READ_DATA_FLAG_PEEK;
+ return MojoReadData(consumer_, elements, num_bytes, flags);
+ }
+
+ MojoResult QueryData(uint32_t* num_bytes) {
+ return MojoReadData(consumer_, nullptr, num_bytes,
+ MOJO_READ_DATA_FLAG_QUERY);
+ }
+
+ MojoResult DiscardData(uint32_t* num_bytes, bool all_or_none = false) {
+ MojoReadDataFlags flags = MOJO_READ_DATA_FLAG_DISCARD;
+ if (all_or_none)
+ flags |= MOJO_READ_DATA_FLAG_ALL_OR_NONE;
+ return MojoReadData(consumer_, nullptr, num_bytes, flags);
+ }
+
+ MojoResult BeginReadData(const void** elements,
+ uint32_t* num_bytes,
+ bool all_or_none = false) {
+ MojoReadDataFlags flags = MOJO_READ_DATA_FLAG_NONE;
+ if (all_or_none)
+ flags |= MOJO_READ_DATA_FLAG_ALL_OR_NONE;
+ return MojoBeginReadData(consumer_, elements, num_bytes, flags);
+ }
+
+ MojoResult EndReadData(uint32_t num_bytes_read) {
+ return MojoEndReadData(consumer_, num_bytes_read);
+ }
+
+ MojoResult BeginWriteData(void** elements,
+ uint32_t* num_bytes,
+ bool all_or_none = false) {
+    MojoWriteDataFlags flags = MOJO_WRITE_DATA_FLAG_NONE;
+ if (all_or_none)
+ flags |= MOJO_WRITE_DATA_FLAG_ALL_OR_NONE;
+ return MojoBeginWriteData(producer_, elements, num_bytes, flags);
+ }
+
+ MojoResult EndWriteData(uint32_t num_bytes_written) {
+ return MojoEndWriteData(producer_, num_bytes_written);
+ }
+
+ MojoResult CloseProducer() {
+ MojoResult rv = MojoClose(producer_);
+ producer_ = MOJO_HANDLE_INVALID;
+ return rv;
+ }
+
+ MojoResult CloseConsumer() {
+ MojoResult rv = MojoClose(consumer_);
+ consumer_ = MOJO_HANDLE_INVALID;
+ return rv;
+ }
+
+ MojoHandle producer_, consumer_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DataPipeTest);
+};
+
+TEST_F(DataPipeTest, Basic) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ // We can write to a data pipe handle immediately.
+ int32_t elements[10] = {};
+ uint32_t num_bytes = 0;
+
+ num_bytes =
+ static_cast<uint32_t>(arraysize(elements) * sizeof(elements[0]));
+
+ elements[0] = 123;
+ elements[1] = 456;
+ num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(&elements[0], &num_bytes));
+
+ // Now wait for the other side to become readable.
+ MojoHandleSignalsState state;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ state.satisfied_signals);
+
+ elements[0] = -1;
+ elements[1] = -1;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(&elements[0], &num_bytes));
+ ASSERT_EQ(static_cast<uint32_t>(2u * sizeof(elements[0])), num_bytes);
+ ASSERT_EQ(elements[0], 123);
+ ASSERT_EQ(elements[1], 456);
+}
+
+// Tests creation of data pipes with various (valid) options.
+TEST_F(DataPipeTest, CreateAndMaybeTransfer) {
+ MojoCreateDataPipeOptions test_options[] = {
+ // Default options.
+ {},
+ // Trivial element size, non-default capacity.
+ {kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1, // |element_num_bytes|.
+ 1000}, // |capacity_num_bytes|.
+ // Nontrivial element size, non-default capacity.
+ {kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 4, // |element_num_bytes|.
+ 4000}, // |capacity_num_bytes|.
+ // Nontrivial element size, default capacity.
+ {kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 100, // |element_num_bytes|.
+ 0} // |capacity_num_bytes|.
+ };
+ for (size_t i = 0; i < arraysize(test_options); i++) {
+ MojoHandle producer_handle, consumer_handle;
+ MojoCreateDataPipeOptions* options =
+ i ? &test_options[i] : nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateDataPipe(options, &producer_handle, &consumer_handle));
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(producer_handle));
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(consumer_handle));
+ }
+}
+
+TEST_F(DataPipeTest, SimpleReadWrite) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ int32_t elements[10] = {};
+ uint32_t num_bytes = 0;
+
+ // Try reading; nothing there yet.
+ num_bytes =
+ static_cast<uint32_t>(arraysize(elements) * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, ReadData(elements, &num_bytes));
+
+ // Query; nothing there yet.
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Discard; nothing there yet.
+ num_bytes = static_cast<uint32_t>(5u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, DiscardData(&num_bytes));
+
+ // Read with invalid |num_bytes|.
+ num_bytes = sizeof(elements[0]) + 1;
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, ReadData(elements, &num_bytes));
+
+ // Write two elements.
+ elements[0] = 123;
+ elements[1] = 456;
+ num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes));
+ // It should have written everything (even without "all or none").
+ ASSERT_EQ(2u * sizeof(elements[0]), num_bytes);
+
+ // Wait.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Query.
+ // TODO(vtl): It's theoretically possible (though not with the current
+ // implementation/configured limits) that not all the data has arrived yet.
+ // (The theoretically-correct assertion here is that |num_bytes| is |1 * ...|
+ // or |2 * ...|.)
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(2 * sizeof(elements[0]), num_bytes);
+
+ // Read one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes));
+ ASSERT_EQ(1u * sizeof(elements[0]), num_bytes);
+ ASSERT_EQ(123, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Query.
+ // TODO(vtl): See previous TODO. (If we got 2 elements there, however, we
+ // should get 1 here.)
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1 * sizeof(elements[0]), num_bytes);
+
+ // Peek one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, false, true));
+ ASSERT_EQ(1u * sizeof(elements[0]), num_bytes);
+ ASSERT_EQ(456, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Query. Still has 1 element remaining.
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1 * sizeof(elements[0]), num_bytes);
+
+ // Try to read two elements, with "all or none".
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE,
+ ReadData(elements, &num_bytes, true, false));
+ ASSERT_EQ(-1, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Try to read two elements, without "all or none".
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, false, false));
+ ASSERT_EQ(1u * sizeof(elements[0]), num_bytes);
+ ASSERT_EQ(456, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Query.
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+}
+
+// Note: The "basic" waiting tests verify that the "wait states" are correct in
+// various situations; they don't verify that waiters are properly awoken on
+// state changes. (For that, we need to use multiple threads.)
+TEST_F(DataPipeTest, BasicProducerWaiting) {
+  // Note: We take advantage of the fact that, for current implementations,
+  // capacities are strict maximums. This is not guaranteed by the API.
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 2 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ Create(&options);
+ MojoHandleSignalsState hss;
+
+ // Never readable. Already writable.
+ hss = GetSignalsState(producer_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Write two elements.
+ int32_t elements[2] = {123, 456};
+ uint32_t num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+ ASSERT_EQ(static_cast<uint32_t>(2u * sizeof(elements[0])), num_bytes);
+
+ // Wait for data to become available to the consumer.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Peek one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, true, true));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+ ASSERT_EQ(123, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Read one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, true, false));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+ ASSERT_EQ(123, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Try writing, using a two-phase write.
+ void* buffer = nullptr;
+ num_bytes = static_cast<uint32_t>(3u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&buffer, &num_bytes));
+ EXPECT_TRUE(buffer);
+ ASSERT_GE(num_bytes, static_cast<uint32_t>(1u * sizeof(elements[0])));
+
+ static_cast<int32_t*>(buffer)[0] = 789;
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(static_cast<uint32_t>(
+ 1u * sizeof(elements[0]))));
+
+ // Read one element, using a two-phase read.
+ const void* read_buffer = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginReadData(&read_buffer, &num_bytes, false));
+ EXPECT_TRUE(read_buffer);
+ // The two-phase read should be able to read at least one element.
+ ASSERT_GE(num_bytes, static_cast<uint32_t>(1u * sizeof(elements[0])));
+ ASSERT_EQ(456, static_cast<const int32_t*>(read_buffer)[0]);
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(static_cast<uint32_t>(
+ 1u * sizeof(elements[0]))));
+
+ // Write one element.
+ elements[0] = 123;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+
+ // Close the consumer.
+ CloseConsumer();
+
+ // It should now be never-writable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(producer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+TEST_F(DataPipeTest, PeerClosedProducerWaiting) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 2 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Close the consumer.
+ CloseConsumer();
+
+ // It should be signaled.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(producer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+TEST_F(DataPipeTest, PeerClosedConsumerWaiting) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 2 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Close the producer.
+ CloseProducer();
+
+ // It should be signaled.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+TEST_F(DataPipeTest, BasicConsumerWaiting) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Never writable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_WRITABLE, &hss));
+ EXPECT_EQ(0u, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Write two elements.
+ int32_t elements[2] = {123, 456};
+ uint32_t num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+
+ // Wait for readability.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Discard one element.
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, DiscardData(&num_bytes, true));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+
+ // Should still be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Peek one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, true, true));
+ ASSERT_EQ(456, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Should still be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Read one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, true));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+ ASSERT_EQ(456, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Write one element.
+ elements[0] = 789;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+
+ // Waiting should now succeed.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Close the producer.
+ CloseProducer();
+
+ // Should still be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE(hss.satisfied_signals & (MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Wait for the peer closed signal.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Read one element.
+ elements[0] = -1;
+ elements[1] = -1;
+ num_bytes = static_cast<uint32_t>(1u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(elements, &num_bytes, true));
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+ ASSERT_EQ(789, elements[0]);
+ ASSERT_EQ(-1, elements[1]);
+
+ // Should be never-readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+TEST_F(DataPipeTest, ConsumerNewDataReadable) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ EXPECT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ int32_t elements[2] = {123, 456};
+ uint32_t num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ EXPECT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+
+ // The consumer handle should appear to be readable and have new data.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE(GetSignalsState(consumer_).satisfied_signals &
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE);
+
+ // Now try to read a minimum of 6 elements.
+ int32_t read_elements[6];
+ uint32_t num_read_bytes = sizeof(read_elements);
+ EXPECT_EQ(MOJO_RESULT_OUT_OF_RANGE,
+ MojoReadData(consumer_, read_elements, &num_read_bytes,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+
+ // The consumer should still appear to be readable but not with new data.
+ EXPECT_TRUE(GetSignalsState(consumer_).satisfied_signals &
+ MOJO_HANDLE_SIGNAL_READABLE);
+ EXPECT_FALSE(GetSignalsState(consumer_).satisfied_signals &
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE);
+
+ // Write four more elements.
+ EXPECT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+ EXPECT_EQ(MOJO_RESULT_OK, WriteData(elements, &num_bytes, true));
+
+ // The consumer handle should once again appear to be readable.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE));
+
+ // Try again to read a minimum of 6 elements. Should succeed this time.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoReadData(consumer_, read_elements, &num_read_bytes,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+
+ // And now the consumer is unreadable.
+ EXPECT_FALSE(GetSignalsState(consumer_).satisfied_signals &
+ MOJO_HANDLE_SIGNAL_READABLE);
+ EXPECT_FALSE(GetSignalsState(consumer_).satisfied_signals &
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE);
+}
+
+// Test with two-phase APIs and also closing the producer with an active
+// consumer waiter.
+TEST_F(DataPipeTest, ConsumerWaitingTwoPhase) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write two elements.
+ int32_t* elements = nullptr;
+ void* buffer = nullptr;
+ // Request room for three (but we'll only write two).
+ uint32_t num_bytes = static_cast<uint32_t>(3u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&buffer, &num_bytes, false));
+ EXPECT_TRUE(buffer);
+ EXPECT_GE(num_bytes, static_cast<uint32_t>(3u * sizeof(elements[0])));
+ elements = static_cast<int32_t*>(buffer);
+ elements[0] = 123;
+ elements[1] = 456;
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(2u * sizeof(elements[0])));
+
+ // Wait for readability.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Read one element.
+ // Request two in all-or-none mode, but only read one.
+ const void* read_buffer = nullptr;
+ num_bytes = static_cast<uint32_t>(2u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_buffer, &num_bytes, true));
+ EXPECT_TRUE(read_buffer);
+ ASSERT_EQ(static_cast<uint32_t>(2u * sizeof(elements[0])), num_bytes);
+ const int32_t* read_elements = static_cast<const int32_t*>(read_buffer);
+ ASSERT_EQ(123, read_elements[0]);
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(1u * sizeof(elements[0])));
+
+ // Should still be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Read one element.
+ // Request three, but not in all-or-none mode.
+ read_buffer = nullptr;
+ num_bytes = static_cast<uint32_t>(3u * sizeof(elements[0]));
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_buffer, &num_bytes));
+ EXPECT_TRUE(read_buffer);
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(elements[0])), num_bytes);
+ read_elements = static_cast<const int32_t*>(read_buffer);
+ ASSERT_EQ(456, read_elements[0]);
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(1u * sizeof(elements[0])));
+
+ // Close the producer.
+ CloseProducer();
+
+ // Should be never-readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+// Tests that data pipes aren't writable/readable during two-phase writes/reads.
+TEST_F(DataPipeTest, BasicTwoPhaseWaiting) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // It should be writable.
+ hss = GetSignalsState(producer_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ uint32_t num_bytes = static_cast<uint32_t>(1u * sizeof(int32_t));
+ void* write_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_ptr, &num_bytes));
+ EXPECT_TRUE(write_ptr);
+ EXPECT_GE(num_bytes, static_cast<uint32_t>(1u * sizeof(int32_t)));
+
+ // At this point, it shouldn't be writable.
+ hss = GetSignalsState(producer_);
+ ASSERT_EQ(0u, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // It shouldn't be readable yet either (we'll wait later).
+ hss = GetSignalsState(consumer_);
+ ASSERT_EQ(0u, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ static_cast<int32_t*>(write_ptr)[0] = 123;
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(1u * sizeof(int32_t)));
+
+ // It should immediately be writable again.
+ hss = GetSignalsState(producer_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // It should become readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Start another two-phase write and check that it's readable even in the
+ // middle of it.
+ num_bytes = static_cast<uint32_t>(1u * sizeof(int32_t));
+ write_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_ptr, &num_bytes));
+ EXPECT_TRUE(write_ptr);
+ EXPECT_GE(num_bytes, static_cast<uint32_t>(1u * sizeof(int32_t)));
+
+ // It should be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // End the two-phase write without writing anything.
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(0u));
+
+ // Start a two-phase read.
+ num_bytes = static_cast<uint32_t>(1u * sizeof(int32_t));
+ const void* read_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_ptr, &num_bytes));
+ EXPECT_TRUE(read_ptr);
+ ASSERT_EQ(static_cast<uint32_t>(1u * sizeof(int32_t)), num_bytes);
+
+ // At this point, it should still be writable.
+ hss = GetSignalsState(producer_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // But not readable.
+ hss = GetSignalsState(consumer_);
+ ASSERT_EQ(0u, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // End the two-phase read without reading anything.
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(0u));
+
+ // It should be readable again.
+ hss = GetSignalsState(consumer_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+}
+
+void Seq(int32_t start, size_t count, int32_t* out) {
+ for (size_t i = 0; i < count; i++)
+ out[i] = start + static_cast<int32_t>(i);
+}
+
+TEST_F(DataPipeTest, AllOrNone) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 10 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Try writing more than the total capacity of the pipe.
+ uint32_t num_bytes = 20u * sizeof(int32_t);
+ int32_t buffer[100];
+ Seq(0, arraysize(buffer), buffer);
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, WriteData(buffer, &num_bytes, true));
+
+ // Should still be empty.
+ num_bytes = ~0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Write some data.
+ num_bytes = 5u * sizeof(int32_t);
+ Seq(100, arraysize(buffer), buffer);
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(buffer, &num_bytes, true));
+ ASSERT_EQ(5u * sizeof(int32_t), num_bytes);
+
+ // Wait for data.
+ // TODO(vtl): There's no real guarantee that all the data will become
+ // available at once (except that in current implementations, with reasonable
+ // limits, it will). Eventually, we'll be able to wait for a specified amount
+ // of data to become available.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Half full.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(5u * sizeof(int32_t), num_bytes);
+
+ // Try writing more than the available capacity of the pipe, but less than the
+ // total capacity.
+ num_bytes = 6u * sizeof(int32_t);
+ Seq(200, arraysize(buffer), buffer);
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, WriteData(buffer, &num_bytes, true));
+
+ // Try reading too much.
+ num_bytes = 11u * sizeof(int32_t);
+ memset(buffer, 0xab, sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, ReadData(buffer, &num_bytes, true));
+ int32_t expected_buffer[100];
+ memset(expected_buffer, 0xab, sizeof(expected_buffer));
+ ASSERT_EQ(0, memcmp(buffer, expected_buffer, sizeof(buffer)));
+
+ // Try discarding too much.
+ num_bytes = 11u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, DiscardData(&num_bytes, true));
+
+ // Just a little.
+ num_bytes = 2u * sizeof(int32_t);
+ Seq(300, arraysize(buffer), buffer);
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(buffer, &num_bytes, true));
+ ASSERT_EQ(2u * sizeof(int32_t), num_bytes);
+
+ // Just right.
+ num_bytes = 3u * sizeof(int32_t);
+ Seq(400, arraysize(buffer), buffer);
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(buffer, &num_bytes, true));
+ ASSERT_EQ(3u * sizeof(int32_t), num_bytes);
+
+ // TODO(vtl): Hack (see also the TODO above): We can't currently wait for a
+ // specified amount of data to be available, so poll.
+ for (size_t i = 0; i < kMaxPoll; i++) {
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ if (num_bytes >= 10u * sizeof(int32_t))
+ break;
+
+ test::Sleep(test::EpsilonDeadline());
+ }
+ ASSERT_EQ(10u * sizeof(int32_t), num_bytes);
+
+ // Read half.
+ num_bytes = 5u * sizeof(int32_t);
+ memset(buffer, 0xab, sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(buffer, &num_bytes, true));
+ ASSERT_EQ(5u * sizeof(int32_t), num_bytes);
+ memset(expected_buffer, 0xab, sizeof(expected_buffer));
+ Seq(100, 5, expected_buffer);
+ ASSERT_EQ(0, memcmp(buffer, expected_buffer, sizeof(buffer)));
+
+ // Try reading too much again.
+ num_bytes = 6u * sizeof(int32_t);
+ memset(buffer, 0xab, sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, ReadData(buffer, &num_bytes, true));
+ memset(expected_buffer, 0xab, sizeof(expected_buffer));
+ ASSERT_EQ(0, memcmp(buffer, expected_buffer, sizeof(buffer)));
+
+ // Try discarding too much again.
+ num_bytes = 6u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_OUT_OF_RANGE, DiscardData(&num_bytes, true));
+
+ // Discard a little.
+ num_bytes = 2u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_OK, DiscardData(&num_bytes, true));
+ ASSERT_EQ(2u * sizeof(int32_t), num_bytes);
+
+ // Three left.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(3u * sizeof(int32_t), num_bytes);
+
+ // Close the producer, then test producer-closed cases.
+ CloseProducer();
+
+ // Wait.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Try reading too much; "failed precondition" since the producer is closed.
+ num_bytes = 4u * sizeof(int32_t);
+ memset(buffer, 0xab, sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ ReadData(buffer, &num_bytes, true));
+ memset(expected_buffer, 0xab, sizeof(expected_buffer));
+ ASSERT_EQ(0, memcmp(buffer, expected_buffer, sizeof(buffer)));
+
+ // Try discarding too much; "failed precondition" again.
+ num_bytes = 4u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, DiscardData(&num_bytes, true));
+
+ // Read a little.
+ num_bytes = 2u * sizeof(int32_t);
+ memset(buffer, 0xab, sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(buffer, &num_bytes, true));
+ ASSERT_EQ(2u * sizeof(int32_t), num_bytes);
+ memset(expected_buffer, 0xab, sizeof(expected_buffer));
+ Seq(400, 2, expected_buffer);
+ ASSERT_EQ(0, memcmp(buffer, expected_buffer, sizeof(buffer)));
+
+ // Discard the remaining element.
+ num_bytes = 1u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_OK, DiscardData(&num_bytes, true));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+
+ // Empty again.
+ num_bytes = ~0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+}
+
+// Tests that |ProducerWriteData()| and |ConsumerReadData()| write and read,
+// respectively, as much as possible, even if they may have to "wrap around"
+// the internal circular buffer. (Note that the two-phase write and read need
+// not do this.)
+TEST_F(DataPipeTest, WrapAround) {
+ unsigned char test_data[1000];
+ for (size_t i = 0; i < arraysize(test_data); i++)
+ test_data[i] = static_cast<unsigned char>(i);
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 100u // |capacity_num_bytes|.
+ };
+
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write 20 bytes.
+ uint32_t num_bytes = 20u;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(&test_data[0], &num_bytes, true));
+ ASSERT_EQ(20u, num_bytes);
+
+ // Wait for data.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE(hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Read 10 bytes.
+ unsigned char read_buffer[1000] = {0};
+ num_bytes = 10u;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(read_buffer, &num_bytes, true));
+ ASSERT_EQ(10u, num_bytes);
+ ASSERT_EQ(0, memcmp(read_buffer, &test_data[0], 10u));
+
+ // Check that a two-phase write can now only write (at most) 80 bytes. (This
+ // checks an implementation detail; this behavior is not guaranteed.)
+ void* write_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginWriteData(&write_buffer_ptr, &num_bytes, false));
+ EXPECT_TRUE(write_buffer_ptr);
+ ASSERT_EQ(80u, num_bytes);
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(0));
+
+ size_t total_num_bytes = 0;
+ while (total_num_bytes < 90) {
+ // Wait to write.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(producer_, MOJO_HANDLE_SIGNAL_WRITABLE, &hss));
+ ASSERT_EQ(hss.satisfied_signals, MOJO_HANDLE_SIGNAL_WRITABLE);
+ ASSERT_EQ(hss.satisfiable_signals,
+ MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ // Write as much as we can.
+ num_bytes = 100;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WriteData(&test_data[20 + total_num_bytes], &num_bytes, false));
+ total_num_bytes += num_bytes;
+ }
+
+ ASSERT_EQ(90u, total_num_bytes);
+
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(100u, num_bytes);
+
+ // Check that a two-phase read can now only read (at most) 90 bytes. (This
+ // checks an implementation detail; this behavior is not guaranteed.)
+ const void* read_buffer_ptr = nullptr;
+ num_bytes = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_buffer_ptr, &num_bytes, false));
+ EXPECT_TRUE(read_buffer_ptr);
+ ASSERT_EQ(90u, num_bytes);
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(0));
+
+ // Read as much as possible. We should read 100 bytes.
+ num_bytes = static_cast<uint32_t>(arraysize(read_buffer) *
+ sizeof(read_buffer[0]));
+ memset(read_buffer, 0, num_bytes);
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(read_buffer, &num_bytes));
+ ASSERT_EQ(100u, num_bytes);
+ ASSERT_EQ(0, memcmp(read_buffer, &test_data[10], 100u));
+}
+
+// Tests the behavior of writing (simple and two-phase), closing the producer,
+// then reading (simple and two-phase).
+TEST_F(DataPipeTest, WriteCloseProducerRead) {
+ const char kTestData[] = "hello world";
+ const uint32_t kTestDataSize = static_cast<uint32_t>(sizeof(kTestData));
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ // Write some data, so we'll have something to read.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes, false));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Write it again, so we'll have something left over.
+ num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes, false));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Start two-phase write.
+ void* write_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginWriteData(&write_buffer_ptr, &num_bytes, false));
+ EXPECT_TRUE(write_buffer_ptr);
+ EXPECT_GT(num_bytes, 0u);
+
+ // TODO(vtl): (See corresponding TODO in TwoPhaseAllOrNone.)
+ for (size_t i = 0; i < kMaxPoll; i++) {
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ if (num_bytes >= 2u * kTestDataSize)
+ break;
+
+ test::Sleep(test::EpsilonDeadline());
+ }
+ ASSERT_EQ(2u * kTestDataSize, num_bytes);
+
+ // Start two-phase read.
+ const void* read_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginReadData(&read_buffer_ptr, &num_bytes));
+ EXPECT_TRUE(read_buffer_ptr);
+ ASSERT_EQ(2u * kTestDataSize, num_bytes);
+
+ // Close the producer.
+ CloseProducer();
+
+ // The consumer can finish its two-phase read.
+ ASSERT_EQ(0, memcmp(read_buffer_ptr, kTestData, kTestDataSize));
+ ASSERT_EQ(MOJO_RESULT_OK, EndReadData(kTestDataSize));
+
+ // And start another.
+ read_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginReadData(&read_buffer_ptr, &num_bytes));
+ EXPECT_TRUE(read_buffer_ptr);
+ ASSERT_EQ(kTestDataSize, num_bytes);
+}
+
+// Tests the behavior of interrupting a two-phase read and write by closing the
+// consumer.
+TEST_F(DataPipeTest, TwoPhaseWriteReadCloseConsumer) {
+ const char kTestData[] = "hello world";
+ const uint32_t kTestDataSize = static_cast<uint32_t>(sizeof(kTestData));
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write some data, so we'll have something to read.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Start two-phase write.
+ void* write_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_buffer_ptr, &num_bytes));
+ EXPECT_TRUE(write_buffer_ptr);
+ ASSERT_GT(num_bytes, kTestDataSize);
+
+ // Wait for data.
+ // TODO(vtl): (See corresponding TODO in AllOrNone.)
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Start two-phase read.
+ const void* read_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_buffer_ptr, &num_bytes));
+ EXPECT_TRUE(read_buffer_ptr);
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Close the consumer.
+ CloseConsumer();
+
+ // Wait for producer to know that the consumer is closed.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(producer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+
+ // Actually write some data. (Note: Premature freeing of the buffer would
+ // probably only be detected under ASAN or similar.)
+ memcpy(write_buffer_ptr, kTestData, kTestDataSize);
+ // Note: Even though the consumer has been closed, ending the two-phase
+ // write will report success.
+ ASSERT_EQ(MOJO_RESULT_OK, EndWriteData(kTestDataSize));
+
+ // But trying to write should result in failure.
+ num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, WriteData(kTestData, &num_bytes));
+
+ // As will trying to start another two-phase write.
+ write_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ BeginWriteData(&write_buffer_ptr, &num_bytes));
+}
+
+// Tests the behavior of "interrupting" a two-phase write by closing both the
+// producer and the consumer.
+TEST_F(DataPipeTest, TwoPhaseWriteCloseBoth) {
+ const uint32_t kTestDataSize = 15u;
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ // Start two-phase write.
+ void* write_buffer_ptr = nullptr;
+ uint32_t num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_buffer_ptr, &num_bytes));
+ EXPECT_TRUE(write_buffer_ptr);
+ ASSERT_GT(num_bytes, kTestDataSize);
+}
+
+// Tests the behavior of writing, closing the producer, and then reading (with
+// and without data remaining).
+TEST_F(DataPipeTest, WriteCloseProducerReadNoData) {
+ const char kTestData[] = "hello world";
+ const uint32_t kTestDataSize = static_cast<uint32_t>(sizeof(kTestData));
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write some data, so we'll have something to read.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Close the producer.
+ CloseProducer();
+
+ // Wait. (Note that once the consumer knows that the producer is closed, it
+ // must also know about all the data that was sent.)
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Peek that data.
+ char buffer[1000];
+ num_bytes = static_cast<uint32_t>(sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(buffer, &num_bytes, false, true));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+ ASSERT_EQ(0, memcmp(buffer, kTestData, kTestDataSize));
+
+ // Read that data.
+ memset(buffer, 0, 1000);
+ num_bytes = static_cast<uint32_t>(sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(buffer, &num_bytes));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+ ASSERT_EQ(0, memcmp(buffer, kTestData, kTestDataSize));
+
+ // A second read should fail.
+ num_bytes = static_cast<uint32_t>(sizeof(buffer));
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, ReadData(buffer, &num_bytes));
+
+ // A two-phase read should also fail.
+ const void* read_buffer_ptr = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ BeginReadData(&read_buffer_ptr, &num_bytes));
+
+ // Ditto for discard.
+ num_bytes = 10u;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, DiscardData(&num_bytes));
+}
+
+// Tests that during a two-phase read the memory stays valid even if more data
+// comes in.
+TEST_F(DataPipeTest, TwoPhaseReadMemoryStable) {
+ const char kTestData[] = "hello world";
+ const uint32_t kTestDataSize = static_cast<uint32_t>(sizeof(kTestData));
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write some data.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Wait for the data.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Begin a two-phase read.
+ const void* read_buffer_ptr = nullptr;
+ uint32_t read_buffer_size = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_buffer_ptr, &read_buffer_size));
+
+ // Write more data.
+ const char kExtraData[] = "bye world";
+ const uint32_t kExtraDataSize = static_cast<uint32_t>(sizeof(kExtraData));
+ num_bytes = kExtraDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kExtraData, &num_bytes));
+ ASSERT_EQ(kExtraDataSize, num_bytes);
+
+ // Close the producer.
+ CloseProducer();
+
+ // Wait. (Note that once the consumer knows that the producer is closed, it
+ // must also have received the extra data).
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+  // Read the two-phase buffer to check that it's still valid.
+ ASSERT_EQ(0, memcmp(read_buffer_ptr, kTestData, kTestDataSize));
+ EndReadData(read_buffer_size);
+}
+
+// Test that two-phase reads/writes behave correctly when given invalid
+// arguments.
+TEST_F(DataPipeTest, TwoPhaseMoreInvalidArguments) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 10 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // No data.
+ uint32_t num_bytes = 1000u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Try "ending" a two-phase write when one isn't active.
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ EndWriteData(1u * sizeof(int32_t)));
+
+ // Wait a bit, to make sure that if a signal were (incorrectly) sent, it'd
+ // have time to propagate.
+ test::Sleep(test::EpsilonDeadline());
+
+ // Still no data.
+ num_bytes = 1000u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Try ending a two-phase write with an invalid amount (too much).
+ num_bytes = 0u;
+ void* write_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_ptr, &num_bytes));
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ EndWriteData(num_bytes + static_cast<uint32_t>(sizeof(int32_t))));
+
+ // But the two-phase write still ended.
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, EndWriteData(0u));
+
+ // Wait a bit (as above).
+ test::Sleep(test::EpsilonDeadline());
+
+ // Still no data.
+ num_bytes = 1000u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Try ending a two-phase write with an invalid amount (not a multiple of the
+ // element size).
+ num_bytes = 0u;
+ write_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginWriteData(&write_ptr, &num_bytes));
+ EXPECT_GE(num_bytes, 1u);
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, EndWriteData(1u));
+
+ // But the two-phase write still ended.
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, EndWriteData(0u));
+
+ // Wait a bit (as above).
+ test::Sleep(test::EpsilonDeadline());
+
+ // Still no data.
+ num_bytes = 1000u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(0u, num_bytes);
+
+ // Now write some data, so we'll be able to try reading.
+ int32_t element = 123;
+ num_bytes = 1u * sizeof(int32_t);
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(&element, &num_bytes));
+
+ // Wait for data.
+ // TODO(vtl): (See corresponding TODO in AllOrNone.)
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // One element available.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+
+ // Try "ending" a two-phase read when one isn't active.
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION, EndReadData(1u * sizeof(int32_t)));
+
+ // Still one element available.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+
+ // Try ending a two-phase read with an invalid amount (too much).
+ num_bytes = 0u;
+ const void* read_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_ptr, &num_bytes));
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ EndReadData(num_bytes + static_cast<uint32_t>(sizeof(int32_t))));
+
+ // Still one element available.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+
+ // Try ending a two-phase read with an invalid amount (not a multiple of the
+ // element size).
+ num_bytes = 0u;
+ read_ptr = nullptr;
+ ASSERT_EQ(MOJO_RESULT_OK, BeginReadData(&read_ptr, &num_bytes));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+ ASSERT_EQ(123, static_cast<const int32_t*>(read_ptr)[0]);
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT, EndReadData(1u));
+
+ // Still one element available.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK, QueryData(&num_bytes));
+ ASSERT_EQ(1u * sizeof(int32_t), num_bytes);
+}
+
+// Test that a producer can be sent over a MP.
+TEST_F(DataPipeTest, SendProducer) {
+ const char kTestData[] = "hello world";
+ const uint32_t kTestDataSize = static_cast<uint32_t>(sizeof(kTestData));
+
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1u, // |element_num_bytes|.
+ 1000u // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+ MojoHandleSignalsState hss;
+
+ // Write some data.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kTestData, &num_bytes));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Wait for the data.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Check the data.
+ const void* read_buffer = nullptr;
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginReadData(&read_buffer, &num_bytes, false));
+ ASSERT_EQ(0, memcmp(read_buffer, kTestData, kTestDataSize));
+ EndReadData(num_bytes);
+
+ // Now send the producer over a MP so that it's serialized.
+ MojoHandle pipe0, pipe1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateMessagePipe(nullptr, &pipe0, &pipe1));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(pipe0, nullptr, 0, &producer_, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ producer_ = MOJO_HANDLE_INVALID;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ uint32_t num_handles = 1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(pipe1, nullptr, 0, &producer_, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(num_handles, 1u);
+
+ // Write more data.
+ const char kExtraData[] = "bye world";
+ const uint32_t kExtraDataSize = static_cast<uint32_t>(sizeof(kExtraData));
+ num_bytes = kExtraDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kExtraData, &num_bytes));
+ ASSERT_EQ(kExtraDataSize, num_bytes);
+
+ // Wait for it.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ hss.satisfiable_signals);
+
+ // Check the second write.
+ num_bytes = 0u;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ BeginReadData(&read_buffer, &num_bytes, false));
+ ASSERT_EQ(0, memcmp(read_buffer, kExtraData, kExtraDataSize));
+ EndReadData(num_bytes);
+
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(pipe0));
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(pipe1));
+}
+
+// Ensures that if a data pipe consumer whose producer has closed is passed over
+// a message pipe, the deserialized dispatcher is also marked as having a closed
+// peer.
+TEST_F(DataPipeTest, ConsumerWithClosedProducerSent) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ static_cast<uint32_t>(sizeof(int32_t)), // |element_num_bytes|.
+ 1000 * sizeof(int32_t) // |capacity_num_bytes|.
+ };
+
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ // We can write to a data pipe handle immediately.
+ int32_t data = 123;
+ uint32_t num_bytes = sizeof(data);
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(&data, &num_bytes));
+ ASSERT_EQ(MOJO_RESULT_OK, CloseProducer());
+
+ // Now wait for the other side to become readable and to see the peer closed.
+ MojoHandleSignalsState state;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ state.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ state.satisfiable_signals);
+
+ // Now send the consumer over a MP so that it's serialized.
+ MojoHandle pipe0, pipe1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateMessagePipe(nullptr, &pipe0, &pipe1));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(pipe0, nullptr, 0, &consumer_, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ consumer_ = MOJO_HANDLE_INVALID;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1, MOJO_HANDLE_SIGNAL_READABLE, &state));
+ uint32_t num_handles = 1;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(pipe1, nullptr, 0, &consumer_, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(num_handles, 1u);
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumer_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED |
+ MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ state.satisfiable_signals);
+
+ int32_t read_data;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadData(&read_data, &num_bytes));
+ ASSERT_EQ(sizeof(read_data), num_bytes);
+ ASSERT_EQ(data, read_data);
+
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(pipe0));
+ ASSERT_EQ(MOJO_RESULT_OK, MojoClose(pipe1));
+}
+
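+// Writes |num_bytes| bytes from |elements| to |producer|, waiting for the pipe
+// to become writable between attempts. Returns true once all of the data has
+// been written, or false if kMaxPoll attempts elapse first.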
+bool WriteAllData(MojoHandle producer,
+ const void* elements,
+ uint32_t num_bytes) {
+ for (size_t i = 0; i < kMaxPoll; i++) {
+ // Write as much data as we can.
+ uint32_t write_bytes = num_bytes;
+ MojoResult result = MojoWriteData(producer, elements, &write_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE);
+ if (result == MOJO_RESULT_OK) {
+ num_bytes -= write_bytes;
+ elements = static_cast<const uint8_t*>(elements) + write_bytes;
+ if (num_bytes == 0)
+ return true;
+ } else {
+ EXPECT_EQ(MOJO_RESULT_SHOULD_WAIT, result);
+ }
+
+ MojoHandleSignalsState hss = MojoHandleSignalsState();
+ EXPECT_EQ(MOJO_RESULT_OK, test::MojoTestBase::WaitForSignals(
+ producer, MOJO_HANDLE_SIGNAL_WRITABLE, &hss));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+ }
+
+ return false;
+}
+
+// If |expect_empty| is true, expect |consumer| to be empty after reading.
+bool ReadAllData(MojoHandle consumer,
+ void* elements,
+ uint32_t num_bytes,
+ bool expect_empty) {
+ for (size_t i = 0; i < kMaxPoll; i++) {
+ // Read as much data as we can.
+ uint32_t read_bytes = num_bytes;
+ MojoResult result =
+ MojoReadData(consumer, elements, &read_bytes, MOJO_READ_DATA_FLAG_NONE);
+ if (result == MOJO_RESULT_OK) {
+ num_bytes -= read_bytes;
+ elements = static_cast<uint8_t*>(elements) + read_bytes;
+ if (num_bytes == 0) {
+ if (expect_empty) {
+ // Expect no more data.
+ test::Sleep(test::TinyDeadline());
+ MojoReadData(consumer, nullptr, &num_bytes,
+ MOJO_READ_DATA_FLAG_QUERY);
+ EXPECT_EQ(0u, num_bytes);
+ }
+ return true;
+ }
+ } else {
+ EXPECT_EQ(MOJO_RESULT_SHOULD_WAIT, result);
+ }
+
+ MojoHandleSignalsState hss = MojoHandleSignalsState();
+ EXPECT_EQ(MOJO_RESULT_OK, test::MojoTestBase::WaitForSignals(
+ consumer, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+    // The peer could have been closed while we're still waiting for data.
+ EXPECT_TRUE(MOJO_HANDLE_SIGNAL_READABLE & hss.satisfied_signals);
+ EXPECT_TRUE(hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE);
+ EXPECT_TRUE(hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+ }
+
+ return num_bytes == 0;
+}
+
+#if !defined(OS_IOS)
+
+TEST_F(DataPipeTest, Multiprocess) {
+ const uint32_t kTestDataSize =
+ static_cast<uint32_t>(sizeof(kMultiprocessTestData));
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1, // |element_num_bytes|.
+ kMultiprocessCapacity // |capacity_num_bytes|.
+ };
+ ASSERT_EQ(MOJO_RESULT_OK, Create(&options));
+
+ RUN_CHILD_ON_PIPE(MultiprocessClient, server_mp)
+    // Send some data before serializing and sending the data pipe over.
+ // This is the first write so we don't need to use WriteAllData.
+ uint32_t num_bytes = kTestDataSize;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteData(kMultiprocessTestData, &num_bytes,
+ MOJO_WRITE_DATA_FLAG_ALL_OR_NONE));
+ ASSERT_EQ(kTestDataSize, num_bytes);
+
+ // Send child process the data pipe.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(server_mp, nullptr, 0, &consumer_, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Send a bunch of data of varying sizes.
+ uint8_t buffer[100];
+ int seq = 0;
+ for (int i = 0; i < kMultiprocessMaxIter; ++i) {
+ for (uint32_t size = 1; size <= kMultiprocessCapacity; size++) {
+ for (unsigned int j = 0; j < size; ++j)
+ buffer[j] = seq + j;
+ EXPECT_TRUE(WriteAllData(producer_, buffer, size));
+ seq += size;
+ }
+ }
+
+ // Write the test string in again.
+ ASSERT_TRUE(WriteAllData(producer_, kMultiprocessTestData, kTestDataSize));
+
+ // Swap ends.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(server_mp, nullptr, 0, &producer_, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Receive the consumer from the other side.
+ producer_ = MOJO_HANDLE_INVALID;
+ MojoHandleSignalsState hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(server_mp, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ MojoHandle handles[2];
+ uint32_t num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(server_mp, nullptr, 0, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, num_handles);
+ consumer_ = handles[0];
+
+    // Read the test string twice: once for the copy we wrote ourselves, and
+    // once for the copy written by the other end.
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_TRUE(ReadAllData(consumer_, buffer, kTestDataSize, i == 1));
+ EXPECT_EQ(0, memcmp(buffer, kMultiprocessTestData, kTestDataSize));
+ }
+
+ WriteMessage(server_mp, "quit");
+
+ // Don't have to close the consumer here because it will be done for us.
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(MultiprocessClient, DataPipeTest, client_mp) {
+ const uint32_t kTestDataSize =
+ static_cast<uint32_t>(sizeof(kMultiprocessTestData));
+
+ // Receive the data pipe from the other side.
+ MojoHandle consumer = MOJO_HANDLE_INVALID;
+ MojoHandleSignalsState hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(client_mp, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ MojoHandle handles[2];
+ uint32_t num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(client_mp, nullptr, 0, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, num_handles);
+ consumer = handles[0];
+
+ // Read the initial string that was sent.
+ int32_t buffer[100];
+ EXPECT_TRUE(ReadAllData(consumer, buffer, kTestDataSize, false));
+ EXPECT_EQ(0, memcmp(buffer, kMultiprocessTestData, kTestDataSize));
+
+ // Receive the main data and check it is correct.
+ int seq = 0;
+ uint8_t expected_buffer[100];
+ for (int i = 0; i < kMultiprocessMaxIter; ++i) {
+ for (uint32_t size = 1; size <= kMultiprocessCapacity; ++size) {
+ for (unsigned int j = 0; j < size; ++j)
+ expected_buffer[j] = seq + j;
+ EXPECT_TRUE(ReadAllData(consumer, buffer, size, false));
+ EXPECT_EQ(0, memcmp(buffer, expected_buffer, size));
+
+ seq += size;
+ }
+ }
+
+ // Swap ends.
+ ASSERT_EQ(MOJO_RESULT_OK, MojoWriteMessage(client_mp, nullptr, 0, &consumer,
+ 1, MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Receive the producer from the other side.
+ MojoHandle producer = MOJO_HANDLE_INVALID;
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(client_mp, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ num_handles = arraysize(handles);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(client_mp, nullptr, 0, handles, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(1u, num_handles);
+ producer = handles[0];
+
+ // Write the test string one more time.
+ EXPECT_TRUE(WriteAllData(producer, kMultiprocessTestData, kTestDataSize));
+
+ // We swapped ends, so close the producer.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+
+ // Wait to receive a "quit" message before exiting.
+ EXPECT_EQ("quit", ReadMessage(client_mp));
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(WriteAndCloseProducer, DataPipeTest, h) {
+ MojoHandle p;
+ std::string message = ReadMessageWithHandles(h, &p, 1);
+
+ // Write some data to the producer and close it.
+ uint32_t num_bytes = static_cast<uint32_t>(message.size());
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWriteData(p, message.data(), &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ EXPECT_EQ(num_bytes, static_cast<uint32_t>(message.size()));
+
+ // Close the producer before quitting.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(p));
+
+ // Wait for a quit message.
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReadAndCloseConsumer, DataPipeTest, h) {
+ MojoHandle c;
+ std::string expected_message = ReadMessageWithHandles(h, &c, 1);
+
+ // Wait for the consumer to become readable.
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(c, MOJO_HANDLE_SIGNAL_READABLE));
+
+ // Drain the consumer and expect to find the given message.
+ uint32_t num_bytes = static_cast<uint32_t>(expected_message.size());
+ std::vector<char> bytes(expected_message.size());
+ EXPECT_EQ(MOJO_RESULT_OK, MojoReadData(c, bytes.data(), &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE));
+ EXPECT_EQ(num_bytes, static_cast<uint32_t>(bytes.size()));
+
+ std::string message(bytes.data(), bytes.size());
+ EXPECT_EQ(expected_message, message);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+
+ // Wait for a quit message.
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_F(DataPipeTest, SendConsumerAndCloseProducer) {
+ // Create a new data pipe.
+ MojoHandle p, c;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateDataPipe(nullptr, &p ,&c));
+
+ RUN_CHILD_ON_PIPE(WriteAndCloseProducer, producer_client)
+ RUN_CHILD_ON_PIPE(ReadAndCloseConsumer, consumer_client)
+ const std::string kMessage = "Hello, world!";
+ WriteMessageWithHandles(producer_client, kMessage, &p, 1);
+ WriteMessageWithHandles(consumer_client, kMessage, &c, 1);
+
+ WriteMessage(consumer_client, "quit");
+ END_CHILD()
+
+ WriteMessage(producer_client, "quit");
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CreateAndWrite, DataPipeTest, h) {
+ const MojoCreateDataPipeOptions options = {
+ kSizeOfOptions, // |struct_size|.
+ MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE, // |flags|.
+ 1, // |element_num_bytes|.
+ kMultiprocessCapacity // |capacity_num_bytes|.
+ };
+
+ MojoHandle p, c;
+ ASSERT_EQ(MOJO_RESULT_OK, MojoCreateDataPipe(&options, &p, &c));
+
+ const std::string kMessage = "Hello, world!";
+ WriteMessageWithHandles(h, kMessage, &c, 1);
+
+ // Write some data to the producer and close it.
+ uint32_t num_bytes = static_cast<uint32_t>(kMessage.size());
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWriteData(p, kMessage.data(), &num_bytes,
+ MOJO_WRITE_DATA_FLAG_NONE));
+ EXPECT_EQ(num_bytes, static_cast<uint32_t>(kMessage.size()));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(p));
+
+ // Wait for a quit message.
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_F(DataPipeTest, CreateInChild) {
+ RUN_CHILD_ON_PIPE(CreateAndWrite, child)
+ MojoHandle c;
+ std::string expected_message = ReadMessageWithHandles(child, &c, 1);
+
+ // Wait for the consumer to become readable.
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(c, MOJO_HANDLE_SIGNAL_READABLE));
+
+ // Drain the consumer and expect to find the given message.
+ uint32_t num_bytes = static_cast<uint32_t>(expected_message.size());
+ std::vector<char> bytes(expected_message.size());
+ EXPECT_EQ(MOJO_RESULT_OK, MojoReadData(c, bytes.data(), &num_bytes,
+ MOJO_READ_DATA_FLAG_NONE));
+ EXPECT_EQ(num_bytes, static_cast<uint32_t>(bytes.size()));
+
+ std::string message(bytes.data(), bytes.size());
+ EXPECT_EQ(expected_message, message);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+ WriteMessage(child, "quit");
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(DataPipeStatusChangeInTransitClient,
+ DataPipeTest, parent) {
+ // This test verifies that peer closure is detectable through various
+ // mechanisms when it races with handle transfer.
+
+ MojoHandle handles[6];
+ EXPECT_EQ("o_O", ReadMessageWithHandles(parent, handles, 6));
+ MojoHandle* producers = &handles[0];
+ MojoHandle* consumers = &handles[3];
+
+ // Wait on producer 0
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(producers[0], MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ // Wait on consumer 0
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(consumers[0], MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ base::MessageLoop message_loop;
+
+ // Wait on producer 1 and consumer 1 using SimpleWatchers.
+ {
+ base::RunLoop run_loop;
+ int count = 0;
+ auto callback = base::Bind(
+ [] (base::RunLoop* loop, int* count, MojoResult result) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ if (++*count == 2)
+ loop->Quit();
+ },
+ &run_loop, &count);
+ SimpleWatcher producer_watcher(FROM_HERE,
+ SimpleWatcher::ArmingPolicy::AUTOMATIC);
+ SimpleWatcher consumer_watcher(FROM_HERE,
+ SimpleWatcher::ArmingPolicy::AUTOMATIC);
+ producer_watcher.Watch(Handle(producers[1]), MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ callback);
+ consumer_watcher.Watch(Handle(consumers[1]), MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ callback);
+ run_loop.Run();
+ EXPECT_EQ(2, count);
+ }
+
+ // Wait on producer 2 by polling with MojoWriteData.
+ MojoResult result;
+ do {
+ uint32_t num_bytes = 0;
+ result = MojoWriteData(
+ producers[2], nullptr, &num_bytes, MOJO_WRITE_DATA_FLAG_NONE);
+ } while (result == MOJO_RESULT_OK);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+
+ // Wait on consumer 2 by polling with MojoReadData.
+ do {
+ char byte;
+ uint32_t num_bytes = 1;
+ result = MojoReadData(
+ consumers[2], &byte, &num_bytes, MOJO_READ_DATA_FLAG_NONE);
+ } while (result == MOJO_RESULT_SHOULD_WAIT);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+
+ for (size_t i = 0; i < 6; ++i)
+ CloseHandle(handles[i]);
+}
+
+TEST_F(DataPipeTest, StatusChangeInTransit) {
+ MojoHandle producers[6];
+ MojoHandle consumers[6];
+ for (size_t i = 0; i < 6; ++i)
+ CreateDataPipe(&producers[i], &consumers[i], 1);
+
+ RUN_CHILD_ON_PIPE(DataPipeStatusChangeInTransitClient, child)
+ MojoHandle handles[] = { producers[0], producers[1], producers[2],
+ consumers[3], consumers[4], consumers[5] };
+
+ // Send 3 producers and 3 consumers, and let their transfer race with their
+ // peers' closure.
+ WriteMessageWithHandles(child, "o_O", handles, 6);
+
+ for (size_t i = 0; i < 3; ++i)
+ CloseHandle(consumers[i]);
+ for (size_t i = 3; i < 6; ++i)
+ CloseHandle(producers[i]);
+ END_CHILD()
+}
+
+#endif // !defined(OS_IOS)
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/dispatcher.cc b/mojo/edk/system/dispatcher.cc
new file mode 100644
index 0000000000..7cdbe910d9
--- /dev/null
+++ b/mojo/edk/system/dispatcher.cc
@@ -0,0 +1,198 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/dispatcher.h"
+
+#include "base/logging.h"
+#include "mojo/edk/system/configuration.h"
+#include "mojo/edk/system/data_pipe_consumer_dispatcher.h"
+#include "mojo/edk/system/data_pipe_producer_dispatcher.h"
+#include "mojo/edk/system/message_pipe_dispatcher.h"
+#include "mojo/edk/system/platform_handle_dispatcher.h"
+#include "mojo/edk/system/shared_buffer_dispatcher.h"
+
+namespace mojo {
+namespace edk {
+
+Dispatcher::DispatcherInTransit::DispatcherInTransit() {}
+
+Dispatcher::DispatcherInTransit::DispatcherInTransit(
+ const DispatcherInTransit& other) = default;
+
+Dispatcher::DispatcherInTransit::~DispatcherInTransit() {}
+
+MojoResult Dispatcher::WatchDispatcher(scoped_refptr<Dispatcher> dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::CancelWatch(uintptr_t context) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::Arm(uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::WriteMessage(std::unique_ptr<MessageForTransit> message,
+ MojoWriteMessageFlags flags) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::ReadMessage(std::unique_ptr<MessageForTransit>* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags,
+ bool read_any_size) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::DuplicateBufferHandle(
+ const MojoDuplicateBufferHandleOptions* options,
+ scoped_refptr<Dispatcher>* new_dispatcher) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::MapBuffer(
+ uint64_t offset,
+ uint64_t num_bytes,
+ MojoMapBufferFlags flags,
+ std::unique_ptr<PlatformSharedBufferMapping>* mapping) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::ReadData(void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::BeginReadData(const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::EndReadData(uint32_t num_bytes_read) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::WriteData(const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::BeginWriteData(void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::EndWriteData(uint32_t num_bytes_written) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::AddWaitingDispatcher(
+ const scoped_refptr<Dispatcher>& dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::RemoveWaitingDispatcher(
+ const scoped_refptr<Dispatcher>& dispatcher) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::GetReadyDispatchers(uint32_t* count,
+ DispatcherVector* dispatchers,
+ MojoResult* results,
+ uintptr_t* contexts) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+HandleSignalsState Dispatcher::GetHandleSignalsState() const {
+ return HandleSignalsState();
+}
+
+MojoResult Dispatcher::AddWatcherRef(
+ const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+MojoResult Dispatcher::RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+}
+
+void Dispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_platform_handles) {
+ *num_bytes = 0;
+ *num_ports = 0;
+ *num_platform_handles = 0;
+}
+
+bool Dispatcher::EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) {
+ LOG(ERROR) << "Attempting to serialize a non-transferrable dispatcher.";
+ return true;
+}
+
+bool Dispatcher::BeginTransit() { return true; }
+
+void Dispatcher::CompleteTransitAndClose() {}
+
+void Dispatcher::CancelTransit() {}
+
+// static
+scoped_refptr<Dispatcher> Dispatcher::Deserialize(
+ Type type,
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* platform_handles,
+ size_t num_platform_handles) {
+ switch (type) {
+ case Type::MESSAGE_PIPE:
+ return MessagePipeDispatcher::Deserialize(
+ bytes, num_bytes, ports, num_ports, platform_handles,
+ num_platform_handles);
+ case Type::SHARED_BUFFER:
+ return SharedBufferDispatcher::Deserialize(
+ bytes, num_bytes, ports, num_ports, platform_handles,
+ num_platform_handles);
+ case Type::DATA_PIPE_CONSUMER:
+ return DataPipeConsumerDispatcher::Deserialize(
+ bytes, num_bytes, ports, num_ports, platform_handles,
+ num_platform_handles);
+ case Type::DATA_PIPE_PRODUCER:
+ return DataPipeProducerDispatcher::Deserialize(
+ bytes, num_bytes, ports, num_ports, platform_handles,
+ num_platform_handles);
+ case Type::PLATFORM_HANDLE:
+ return PlatformHandleDispatcher::Deserialize(
+ bytes, num_bytes, ports, num_ports, platform_handles,
+ num_platform_handles);
+ default:
+ LOG(ERROR) << "Deserializing invalid dispatcher type.";
+ return nullptr;
+ }
+}
+
+Dispatcher::Dispatcher() {}
+
+Dispatcher::~Dispatcher() {}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/dispatcher.h b/mojo/edk/system/dispatcher.h
new file mode 100644
index 0000000000..db1f1f18d7
--- /dev/null
+++ b/mojo/edk/system/dispatcher.h
@@ -0,0 +1,245 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_DISPATCHER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <ostream>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/platform_handle.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/ports/name.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/edk/system/watch.h"
+#include "mojo/public/c/system/buffer.h"
+#include "mojo/public/c/system/data_pipe.h"
+#include "mojo/public/c/system/message_pipe.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+namespace edk {
+
+class Dispatcher;
+class MessageForTransit;
+
+using DispatcherVector = std::vector<scoped_refptr<Dispatcher>>;
+
+// A |Dispatcher| implements Mojo EDK calls that are associated with a
+// particular MojoHandle.
+class MOJO_SYSTEM_IMPL_EXPORT Dispatcher
+ : public base::RefCountedThreadSafe<Dispatcher> {
+ public:
+ struct DispatcherInTransit {
+ DispatcherInTransit();
+ DispatcherInTransit(const DispatcherInTransit& other);
+ ~DispatcherInTransit();
+
+ scoped_refptr<Dispatcher> dispatcher;
+ MojoHandle local_handle;
+ };
+
+ enum class Type {
+ UNKNOWN = 0,
+ MESSAGE_PIPE,
+ DATA_PIPE_PRODUCER,
+ DATA_PIPE_CONSUMER,
+ SHARED_BUFFER,
+ WATCHER,
+
+ // "Private" types (not exposed via the public interface):
+ PLATFORM_HANDLE = -1,
+ };
+
+ // All Dispatchers must minimally implement these methods.
+
+ virtual Type GetType() const = 0;
+ virtual MojoResult Close() = 0;
+
+ ///////////// Watcher API ////////////////////
+
+ virtual MojoResult WatchDispatcher(scoped_refptr<Dispatcher> dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context);
+ virtual MojoResult CancelWatch(uintptr_t context);
+ virtual MojoResult Arm(uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states);
+
+ ///////////// Message pipe API /////////////
+
+ virtual MojoResult WriteMessage(std::unique_ptr<MessageForTransit> message,
+ MojoWriteMessageFlags flags);
+
+ virtual MojoResult ReadMessage(std::unique_ptr<MessageForTransit>* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags,
+ bool read_any_size);
+
+ ///////////// Shared buffer API /////////////
+
+ // |options| may be null. |new_dispatcher| must not be null, but
+ // |*new_dispatcher| should be null (and will contain the dispatcher for the
+ // new handle on success).
+ virtual MojoResult DuplicateBufferHandle(
+ const MojoDuplicateBufferHandleOptions* options,
+ scoped_refptr<Dispatcher>* new_dispatcher);
+
+ virtual MojoResult MapBuffer(
+ uint64_t offset,
+ uint64_t num_bytes,
+ MojoMapBufferFlags flags,
+ std::unique_ptr<PlatformSharedBufferMapping>* mapping);
+
+ ///////////// Data pipe consumer API /////////////
+
+ virtual MojoResult ReadData(void* elements,
+ uint32_t* num_bytes,
+ MojoReadDataFlags flags);
+
+ virtual MojoResult BeginReadData(const void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoReadDataFlags flags);
+
+ virtual MojoResult EndReadData(uint32_t num_bytes_read);
+
+ ///////////// Data pipe producer API /////////////
+
+ virtual MojoResult WriteData(const void* elements,
+ uint32_t* num_bytes,
+ MojoWriteDataFlags flags);
+
+ virtual MojoResult BeginWriteData(void** buffer,
+ uint32_t* buffer_num_bytes,
+ MojoWriteDataFlags flags);
+
+ virtual MojoResult EndWriteData(uint32_t num_bytes_written);
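+  //
+  // Two-phase write sketch (illustrative only; |producer| and |bytes_written|
+  // are placeholder names):
+  //
+  //   void* buffer = nullptr;
+  //   uint32_t buffer_num_bytes = 0;
+  //   if (producer->BeginWriteData(&buffer, &buffer_num_bytes,
+  //                                MOJO_WRITE_DATA_FLAG_NONE) ==
+  //       MOJO_RESULT_OK) {
+  //     // ...fill at most |buffer_num_bytes| bytes of |buffer|...
+  //     producer->EndWriteData(bytes_written);
+  //   }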
+
+ ///////////// Wait set API /////////////
+
+ // Adds a dispatcher to wait on. When the dispatcher satisfies |signals|, it
+ // will be returned in the next call to |GetReadyDispatchers()|. If
+ // |dispatcher| has been added, it must be removed before adding again,
+ // otherwise |MOJO_RESULT_ALREADY_EXISTS| will be returned.
+ virtual MojoResult AddWaitingDispatcher(
+ const scoped_refptr<Dispatcher>& dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context);
+
+  // Removes a dispatcher from the set being waited on. If |dispatcher| has
+  // not been added, |MOJO_RESULT_NOT_FOUND| will be returned.
+ virtual MojoResult RemoveWaitingDispatcher(
+ const scoped_refptr<Dispatcher>& dispatcher);
+
+ // Returns a set of ready dispatchers. |*count| is the maximum number of
+ // dispatchers to return, and will contain the number of dispatchers returned
+ // in |dispatchers| on completion.
+ virtual MojoResult GetReadyDispatchers(uint32_t* count,
+ DispatcherVector* dispatchers,
+ MojoResult* results,
+ uintptr_t* contexts);
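+  //
+  // In sketch form, a caller holding a dispatcher that implements the wait
+  // set API might do the following (illustrative only; |wait_set| is a
+  // placeholder name):
+  //
+  //   wait_set->AddWaitingDispatcher(dispatcher,
+  //                                  MOJO_HANDLE_SIGNAL_READABLE, context);
+  //   ...
+  //   uint32_t count = 8;
+  //   DispatcherVector ready;
+  //   MojoResult results[8];
+  //   uintptr_t contexts[8];
+  //   wait_set->GetReadyDispatchers(&count, &ready, results, contexts);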
+
+ ///////////// General-purpose API for all handle types /////////
+
+ // Gets the current handle signals state. (The default implementation simply
+ // returns a default-constructed |HandleSignalsState|, i.e., no signals
+ // satisfied or satisfiable.) Note: The state is subject to change from other
+ // threads.
+ virtual HandleSignalsState GetHandleSignalsState() const;
+
+ // Adds a WatcherDispatcher reference to this dispatcher, to be notified of
+ // all subsequent changes to handle state including signal changes or closure.
+ // The reference is associated with a |context| for disambiguation of
+ // removals.
+ virtual MojoResult AddWatcherRef(
+ const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context);
+
+ // Removes a WatcherDispatcher reference from this dispatcher.
+ virtual MojoResult RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context);
+
+ // Informs the caller of the total serialized size (in bytes) and the total
+ // number of platform handles and ports needed to transfer this dispatcher
+ // across a message pipe.
+ //
+  // Must eventually be followed by a call to EndSerialize(). Note that
+ // StartSerialize() and EndSerialize() are always called in sequence, and
+ // only between calls to BeginTransit() and either (but not both)
+ // CompleteTransitAndClose() or CancelTransit().
+ //
+ // For this reason it is IMPERATIVE that the implementation ensure a
+ // consistent serializable state between BeginTransit() and
+ // CompleteTransitAndClose()/CancelTransit().
+ virtual void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_platform_handles);
+
+ // Serializes this dispatcher into |destination|, |ports|, and |handles|.
+ // Returns true iff successful, false otherwise. In either case the dispatcher
+ // will close.
+ //
+ // NOTE: Transit MAY still fail after this call returns. Implementations
+ // should not assume PlatformHandle ownership has transferred until
+ // CompleteTransitAndClose() is called. In other words, if CancelTransit() is
+ // called, the implementation should retain its PlatformHandles in working
+ // condition.
+ virtual bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles);
+
+ // Does whatever is necessary to begin transit of the dispatcher. This
+ // should return |true| if transit is OK, or false if the underlying resource
+ // is deemed busy by the implementation.
+ virtual bool BeginTransit();
+
+ // Does whatever is necessary to complete transit of the dispatcher, including
+ // closure. This is only called upon successfully transmitting an outgoing
+ // message containing this serialized dispatcher.
+ virtual void CompleteTransitAndClose();
+
+ // Does whatever is necessary to cancel transit of the dispatcher. The
+ // dispatcher should remain in a working state and resume normal operation.
+ virtual void CancelTransit();
+
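+  // Taken together, a typical transit of a dispatcher attached to an outgoing
+  // message looks roughly like this (an illustrative sketch only; the storage
+  // buffer names are placeholders):
+  //
+  //   if (dispatcher->BeginTransit()) {
+  //     uint32_t num_bytes, num_ports, num_handles;
+  //     dispatcher->StartSerialize(&num_bytes, &num_ports, &num_handles);
+  //     // ...allocate |num_bytes| of storage, |num_ports| PortNames and
+  //     // |num_handles| PlatformHandles...
+  //     dispatcher->EndSerialize(storage, port_names, platform_handles);
+  //     // On successful transmission of the containing message:
+  //     dispatcher->CompleteTransitAndClose();
+  //     // Otherwise: dispatcher->CancelTransit();
+  //   }
+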
+ // Deserializes a specific dispatcher type from an incoming message.
+ static scoped_refptr<Dispatcher> Deserialize(
+ Type type,
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* platform_handles,
+ size_t num_platform_handles);
+
+ protected:
+ friend class base::RefCountedThreadSafe<Dispatcher>;
+
+ Dispatcher();
+ virtual ~Dispatcher();
+
+ DISALLOW_COPY_AND_ASSIGN(Dispatcher);
+};
+
+// So logging macros and |DCHECK_EQ()|, etc. work.
+MOJO_SYSTEM_IMPL_EXPORT inline std::ostream& operator<<(std::ostream& out,
+ Dispatcher::Type type) {
+ return out << static_cast<int>(type);
+}
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_DISPATCHER_H_
diff --git a/mojo/edk/system/handle_signals_state.h b/mojo/edk/system/handle_signals_state.h
new file mode 100644
index 0000000000..f2412787cb
--- /dev/null
+++ b/mojo/edk/system/handle_signals_state.h
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_HANDLE_SIGNALS_STATE_H_
+#define MOJO_EDK_SYSTEM_HANDLE_SIGNALS_STATE_H_
+
+#include "mojo/public/cpp/system/handle_signals_state.h"
+
+// TODO(rockot): Remove this header and use the C++ system library type
+// directly inside the EDK.
+
+#endif // MOJO_EDK_SYSTEM_HANDLE_SIGNALS_STATE_H_
diff --git a/mojo/edk/system/handle_table.cc b/mojo/edk/system/handle_table.cc
new file mode 100644
index 0000000000..b570793dbe
--- /dev/null
+++ b/mojo/edk/system/handle_table.cc
@@ -0,0 +1,135 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/handle_table.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+namespace mojo {
+namespace edk {
+
+HandleTable::HandleTable() {}
+
+HandleTable::~HandleTable() {}
+
+MojoHandle HandleTable::AddDispatcher(scoped_refptr<Dispatcher> dispatcher) {
+ // Oops, we're out of handles.
+ if (next_available_handle_ == MOJO_HANDLE_INVALID)
+ return MOJO_HANDLE_INVALID;
+
+ MojoHandle handle = next_available_handle_++;
+ auto result =
+ handles_.insert(std::make_pair(handle, Entry(std::move(dispatcher))));
+ DCHECK(result.second);
+
+ return handle;
+}
+
+bool HandleTable::AddDispatchersFromTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers,
+ MojoHandle* handles) {
+ // Oops, we're out of handles.
+ if (next_available_handle_ == MOJO_HANDLE_INVALID)
+ return false;
+
+ DCHECK_LE(dispatchers.size(), std::numeric_limits<uint32_t>::max());
+ // If this insertion would cause handle overflow, we're out of handles.
+ if (next_available_handle_ + dispatchers.size() < next_available_handle_)
+ return false;
+
+ for (size_t i = 0; i < dispatchers.size(); ++i) {
+ MojoHandle handle = next_available_handle_++;
+ auto result = handles_.insert(
+ std::make_pair(handle, Entry(dispatchers[i].dispatcher)));
+ DCHECK(result.second);
+ handles[i] = handle;
+ }
+
+ return true;
+}
+
+scoped_refptr<Dispatcher> HandleTable::GetDispatcher(MojoHandle handle) const {
+ auto it = handles_.find(handle);
+ if (it == handles_.end())
+ return nullptr;
+ return it->second.dispatcher;
+}
+
+MojoResult HandleTable::GetAndRemoveDispatcher(
+ MojoHandle handle,
+ scoped_refptr<Dispatcher>* dispatcher) {
+ auto it = handles_.find(handle);
+ if (it == handles_.end())
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ if (it->second.busy)
+ return MOJO_RESULT_BUSY;
+
+ *dispatcher = std::move(it->second.dispatcher);
+ handles_.erase(it);
+ return MOJO_RESULT_OK;
+}
+
+MojoResult HandleTable::BeginTransit(
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ std::vector<Dispatcher::DispatcherInTransit>* dispatchers) {
+ dispatchers->clear();
+ dispatchers->reserve(num_handles);
+ for (size_t i = 0; i < num_handles; ++i) {
+ auto it = handles_.find(handles[i]);
+ if (it == handles_.end())
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ if (it->second.busy)
+ return MOJO_RESULT_BUSY;
+
+ Dispatcher::DispatcherInTransit d;
+ d.local_handle = handles[i];
+ d.dispatcher = it->second.dispatcher;
+ if (!d.dispatcher->BeginTransit())
+ return MOJO_RESULT_BUSY;
+ it->second.busy = true;
+ dispatchers->push_back(d);
+ }
+ return MOJO_RESULT_OK;
+}
+
+void HandleTable::CompleteTransitAndClose(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers) {
+ for (const auto& dispatcher : dispatchers) {
+ auto it = handles_.find(dispatcher.local_handle);
+ DCHECK(it != handles_.end() && it->second.busy);
+ handles_.erase(it);
+ dispatcher.dispatcher->CompleteTransitAndClose();
+ }
+}
+
+void HandleTable::CancelTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers) {
+ for (const auto& dispatcher : dispatchers) {
+ auto it = handles_.find(dispatcher.local_handle);
+ DCHECK(it != handles_.end() && it->second.busy);
+ it->second.busy = false;
+ dispatcher.dispatcher->CancelTransit();
+ }
+}
+
+void HandleTable::GetActiveHandlesForTest(std::vector<MojoHandle>* handles) {
+ handles->clear();
+ for (const auto& entry : handles_)
+ handles->push_back(entry.first);
+}
+
+HandleTable::Entry::Entry() {}
+
+HandleTable::Entry::Entry(scoped_refptr<Dispatcher> dispatcher)
+ : dispatcher(std::move(dispatcher)) {}
+
+HandleTable::Entry::Entry(const Entry& other) = default;
+
+HandleTable::Entry::~Entry() {}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/handle_table.h b/mojo/edk/system/handle_table.h
new file mode 100644
index 0000000000..882d5405ce
--- /dev/null
+++ b/mojo/edk/system/handle_table.h
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_HANDLE_TABLE_H_
+#define MOJO_EDK_SYSTEM_HANDLE_TABLE_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+namespace edk {
+
+class HandleTable {
+ public:
+ HandleTable();
+ ~HandleTable();
+
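+  // Adds a dispatcher to the table, returning a newly allocated handle for it
+  // or MOJO_HANDLE_INVALID if the handle namespace is exhausted.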
+ MojoHandle AddDispatcher(scoped_refptr<Dispatcher> dispatcher);
+
+ // Inserts multiple dispatchers received from message transit, populating
+ // |handles| with their newly allocated handles. Returns |true| on success.
+ bool AddDispatchersFromTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers,
+ MojoHandle* handles);
+
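+  // Returns the dispatcher for |handle|, or null if no such handle exists.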
+ scoped_refptr<Dispatcher> GetDispatcher(MojoHandle handle) const;
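+
+  // Removes a handle from the table, passing its dispatcher out through
+  // |dispatcher|. Returns MOJO_RESULT_INVALID_ARGUMENT if the handle is
+  // unknown, or MOJO_RESULT_BUSY if it is currently in transit.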
+ MojoResult GetAndRemoveDispatcher(MojoHandle,
+ scoped_refptr<Dispatcher>* dispatcher);
+
+ // Marks handles as busy and populates |dispatchers|. Returns MOJO_RESULT_BUSY
+ // if any of the handles are already in transit; MOJO_RESULT_INVALID_ARGUMENT
+ // if any of the handles are invalid; or MOJO_RESULT_OK if successful.
+ MojoResult BeginTransit(
+ const MojoHandle* handles,
+ uint32_t num_handles,
+ std::vector<Dispatcher::DispatcherInTransit>* dispatchers);
+
+ void CompleteTransitAndClose(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers);
+ void CancelTransit(
+ const std::vector<Dispatcher::DispatcherInTransit>& dispatchers);
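+
+  // A rough sketch of the send path that drives the three methods above
+  // (illustrative only; |table| and |handles| are placeholder names, and
+  // error handling is elided):
+  //
+  //   std::vector<Dispatcher::DispatcherInTransit> in_transit;
+  //   if (table.BeginTransit(handles, num_handles, &in_transit) !=
+  //       MOJO_RESULT_OK) {
+  //     table.CancelTransit(in_transit);  // Unmark any already-busy handles.
+  //     return;
+  //   }
+  //   // ...serialize |in_transit| and transmit the message...
+  //   table.CompleteTransitAndClose(in_transit);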
+
+ void GetActiveHandlesForTest(std::vector<MojoHandle> *handles);
+
+ private:
+ struct Entry {
+ Entry();
+ explicit Entry(scoped_refptr<Dispatcher> dispatcher);
+ Entry(const Entry& other);
+ ~Entry();
+
+ scoped_refptr<Dispatcher> dispatcher;
+ bool busy = false;
+ };
+
+ using HandleMap = base::hash_map<MojoHandle, Entry>;
+
+ HandleMap handles_;
+
+ uint32_t next_available_handle_ = 1;
+
+ DISALLOW_COPY_AND_ASSIGN(HandleTable);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_HANDLE_TABLE_H_
diff --git a/mojo/edk/system/mach_port_relay.cc b/mojo/edk/system/mach_port_relay.cc
new file mode 100644
index 0000000000..f05cf22a9a
--- /dev/null
+++ b/mojo/edk/system/mach_port_relay.cc
@@ -0,0 +1,248 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/mach_port_relay.h"
+
+#include <mach/mach.h>
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/mac/mach_port_util.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+// Errors that can occur in the broker (privileged parent) process.
+// These match tools/metrics/histograms.xml.
+// This enum is append-only.
+enum class BrokerUMAError : int {
+ SUCCESS = 0,
+ // Couldn't get a task port for the process with a given pid.
+ ERROR_TASK_FOR_PID = 1,
+ // Couldn't make a port with receive rights in the destination process.
+ ERROR_MAKE_RECEIVE_PORT = 2,
+ // Couldn't change the attributes of a Mach port.
+ ERROR_SET_ATTRIBUTES = 3,
+ // Couldn't extract a right from the destination.
+ ERROR_EXTRACT_DEST_RIGHT = 4,
+ // Couldn't send a Mach port in a call to mach_msg().
+ ERROR_SEND_MACH_PORT = 5,
+ // Couldn't extract a right from the source.
+ ERROR_EXTRACT_SOURCE_RIGHT = 6,
+ ERROR_MAX
+};
+
+// Errors that can occur in a child process.
+// These match tools/metrics/histograms.xml.
+// This enum is append-only.
+enum class ChildUMAError : int {
+ SUCCESS = 0,
+ // An error occurred while trying to receive a Mach port with mach_msg().
+ ERROR_RECEIVE_MACH_MESSAGE = 1,
+ ERROR_MAX
+};
+
+void ReportBrokerError(BrokerUMAError error) {
+ UMA_HISTOGRAM_ENUMERATION("Mojo.MachPortRelay.BrokerError",
+ static_cast<int>(error),
+ static_cast<int>(BrokerUMAError::ERROR_MAX));
+}
+
+void ReportChildError(ChildUMAError error) {
+ UMA_HISTOGRAM_ENUMERATION("Mojo.MachPortRelay.ChildError",
+ static_cast<int>(error),
+ static_cast<int>(ChildUMAError::ERROR_MAX));
+}
+
+} // namespace
+
+// static
+bool MachPortRelay::ReceivePorts(PlatformHandleVector* handles) {
+ DCHECK(handles);
+
+ for (size_t i = 0; i < handles->size(); i++) {
+ PlatformHandle* handle = handles->data() + i;
+ DCHECK(handle->type != PlatformHandle::Type::MACH);
+ if (handle->type != PlatformHandle::Type::MACH_NAME)
+ continue;
+
+ if (handle->port == MACH_PORT_NULL) {
+ handle->type = PlatformHandle::Type::MACH;
+ continue;
+ }
+
+ base::mac::ScopedMachReceiveRight message_port(handle->port);
+ base::mac::ScopedMachSendRight received_port(
+ base::ReceiveMachPort(message_port.get()));
+ if (received_port.get() == MACH_PORT_NULL) {
+ ReportChildError(ChildUMAError::ERROR_RECEIVE_MACH_MESSAGE);
+ handle->port = MACH_PORT_NULL;
+ LOG(ERROR) << "Error receiving mach port";
+ return false;
+ }
+
+ ReportChildError(ChildUMAError::SUCCESS);
+ handle->port = received_port.release();
+ handle->type = PlatformHandle::Type::MACH;
+ }
+
+ return true;
+}
+
+MachPortRelay::MachPortRelay(base::PortProvider* port_provider)
+ : port_provider_(port_provider) {
+ DCHECK(port_provider);
+ port_provider_->AddObserver(this);
+}
+
+MachPortRelay::~MachPortRelay() {
+ port_provider_->RemoveObserver(this);
+}
+
+bool MachPortRelay::SendPortsToProcess(Channel::Message* message,
+ base::ProcessHandle process) {
+ DCHECK(message);
+ mach_port_t task_port = port_provider_->TaskForPid(process);
+ if (task_port == MACH_PORT_NULL) {
+ // Callers check the port provider for the task port before calling this
+ // function, in order to queue pending messages. Therefore, if this fails,
+ // it should be considered a genuine, bona fide, electrified, six-car error.
+ ReportBrokerError(BrokerUMAError::ERROR_TASK_FOR_PID);
+ return false;
+ }
+
+ size_t num_sent = 0;
+ bool error = false;
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ // Message should have handles, otherwise there's no point in calling this
+ // function.
+ DCHECK(handles);
+ for (size_t i = 0; i < handles->size(); i++) {
+ PlatformHandle* handle = &(*handles)[i];
+ DCHECK(handle->type != PlatformHandle::Type::MACH_NAME);
+ if (handle->type != PlatformHandle::Type::MACH)
+ continue;
+
+ if (handle->port == MACH_PORT_NULL) {
+ handle->type = PlatformHandle::Type::MACH_NAME;
+ num_sent++;
+ continue;
+ }
+
+ mach_port_name_t intermediate_port;
+ base::MachCreateError error_code;
+ intermediate_port = base::CreateIntermediateMachPort(
+ task_port, base::mac::ScopedMachSendRight(handle->port), &error_code);
+ if (intermediate_port == MACH_PORT_NULL) {
+ BrokerUMAError uma_error;
+ switch (error_code) {
+ case base::MachCreateError::ERROR_MAKE_RECEIVE_PORT:
+ uma_error = BrokerUMAError::ERROR_MAKE_RECEIVE_PORT;
+ break;
+ case base::MachCreateError::ERROR_SET_ATTRIBUTES:
+ uma_error = BrokerUMAError::ERROR_SET_ATTRIBUTES;
+ break;
+ case base::MachCreateError::ERROR_EXTRACT_DEST_RIGHT:
+ uma_error = BrokerUMAError::ERROR_EXTRACT_DEST_RIGHT;
+ break;
+ case base::MachCreateError::ERROR_SEND_MACH_PORT:
+ uma_error = BrokerUMAError::ERROR_SEND_MACH_PORT;
+ break;
+ }
+ ReportBrokerError(uma_error);
+ handle->port = MACH_PORT_NULL;
+ error = true;
+ break;
+ }
+
+ ReportBrokerError(BrokerUMAError::SUCCESS);
+ handle->port = intermediate_port;
+ handle->type = PlatformHandle::Type::MACH_NAME;
+ num_sent++;
+ }
+ DCHECK(error || num_sent);
+ message->SetHandles(std::move(handles));
+
+ return !error;
+}
+
+bool MachPortRelay::ExtractPortRights(Channel::Message* message,
+ base::ProcessHandle process) {
+ DCHECK(message);
+
+ mach_port_t task_port = port_provider_->TaskForPid(process);
+ if (task_port == MACH_PORT_NULL) {
+ ReportBrokerError(BrokerUMAError::ERROR_TASK_FOR_PID);
+ return false;
+ }
+
+ size_t num_received = 0;
+ bool error = false;
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ // Message should have handles, otherwise there's no point in calling this
+ // function.
+ DCHECK(handles);
+ for (size_t i = 0; i < handles->size(); i++) {
+ PlatformHandle* handle = handles->data() + i;
+ DCHECK(handle->type != PlatformHandle::Type::MACH);
+ if (handle->type != PlatformHandle::Type::MACH_NAME)
+ continue;
+
+ if (handle->port == MACH_PORT_NULL) {
+ handle->type = PlatformHandle::Type::MACH;
+ num_received++;
+ continue;
+ }
+
+ mach_port_t extracted_right = MACH_PORT_NULL;
+ mach_msg_type_name_t extracted_right_type;
+ kern_return_t kr =
+ mach_port_extract_right(task_port, handle->port,
+ MACH_MSG_TYPE_MOVE_SEND,
+ &extracted_right, &extracted_right_type);
+ if (kr != KERN_SUCCESS) {
+ ReportBrokerError(BrokerUMAError::ERROR_EXTRACT_SOURCE_RIGHT);
+ error = true;
+ break;
+ }
+
+ ReportBrokerError(BrokerUMAError::SUCCESS);
+ DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND),
+ extracted_right_type);
+ handle->port = extracted_right;
+ handle->type = PlatformHandle::Type::MACH;
+ num_received++;
+ }
+ DCHECK(error || num_received);
+ message->SetHandles(std::move(handles));
+
+ return !error;
+}
+
+void MachPortRelay::AddObserver(Observer* observer) {
+ base::AutoLock locker(observers_lock_);
+ bool inserted = observers_.insert(observer).second;
+ DCHECK(inserted);
+}
+
+void MachPortRelay::RemoveObserver(Observer* observer) {
+ base::AutoLock locker(observers_lock_);
+ observers_.erase(observer);
+}
+
+void MachPortRelay::OnReceivedTaskPort(base::ProcessHandle process) {
+ base::AutoLock locker(observers_lock_);
+ for (auto* observer : observers_)
+ observer->OnProcessReady(process);
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/mach_port_relay.h b/mojo/edk/system/mach_port_relay.h
new file mode 100644
index 0000000000..87bc56cf5b
--- /dev/null
+++ b/mojo/edk/system/mach_port_relay.h
@@ -0,0 +1,94 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_MACH_PORT_RELAY_H_
+#define MOJO_EDK_SYSTEM_MACH_PORT_RELAY_H_
+
+#include <set>
+
+#include "base/macros.h"
+#include "base/process/port_provider_mac.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/channel.h"
+
+namespace mojo {
+namespace edk {
+
+// The MachPortRelay is used by a privileged process, usually the root process,
+// to manipulate Mach ports in a child process. Ports can be added to and
+// extracted from a child process that has registered itself with the
+// |base::PortProvider| used by this class.
+class MachPortRelay : public base::PortProvider::Observer {
+ public:
+ class Observer {
+ public:
+ // Called by the MachPortRelay to notify observers that a new process is
+ // ready for Mach ports to be sent/received. There are no guarantees about
+ // the thread this is called on, including the presence of a MessageLoop.
+ // Implementations must not call AddObserver() or RemoveObserver() during
+ // this function, as doing so will deadlock.
+ virtual void OnProcessReady(base::ProcessHandle process) = 0;
+ };
+
+ // Used by a child process to receive Mach ports from a sender (privileged)
+ // process. Each Mach port in |handles| is interpreted as an intermediate Mach
+ // port. It replaces each Mach port with the final Mach port received from the
+ // intermediate port. This method takes ownership of the intermediate Mach
+ // port and gives ownership of the final Mach port to the caller. Any handles
+ // that are not Mach ports will remain unchanged, and the number and ordering
+ // of handles is preserved.
+  // Returns |false| on failure, in which case there is no guarantee about
+  // whether each Mach port is intermediate or final.
+ //
+ // See SendPortsToProcess() for the definition of intermediate and final Mach
+ // ports.
+ static bool ReceivePorts(PlatformHandleVector* handles);
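+  //
+  // In sketch form (illustrative only; |relay|, |message| and |handles| are
+  // placeholder names):
+  //
+  //   // Broker side, before transmitting |message| to |process|:
+  //   relay->SendPortsToProcess(message.get(), process);
+  //
+  //   // Receiving (child) side, on the handles attached to the message:
+  //   MachPortRelay::ReceivePorts(handles.get());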
+
+ explicit MachPortRelay(base::PortProvider* port_provider);
+ ~MachPortRelay() override;
+
+ // Sends the Mach ports attached to |message| to |process|.
+ // For each Mach port attached to |message|, a new Mach port, the intermediate
+ // port, is created in |process|. The message's Mach port is then sent over
+ // this intermediate port and the message is modified to refer to the name of
+ // the intermediate port. The Mach port received over the intermediate port in
+ // the child is referred to as the final Mach port.
+  // Returns |false| on failure, in which case |message| may contain a mix
+  // of actual Mach ports and names.
+ bool SendPortsToProcess(Channel::Message* message,
+ base::ProcessHandle process);
+
+ // Extracts the Mach ports attached to |message| from |process|.
+ // Any Mach ports attached to |message| are names and not actual Mach ports
+ // that are valid in this process. For each of those Mach port names, a send
+ // right is extracted from |process| and the port name is replaced with the
+ // send right.
+  // Returns |false| on failure, in which case |message| may contain a mix
+  // of actual Mach ports and names.
+ bool ExtractPortRights(Channel::Message* message,
+ base::ProcessHandle process);
+
+ // Observer interface.
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+
+ base::PortProvider* port_provider() const { return port_provider_; }
+
+ private:
+ // base::PortProvider::Observer implementation.
+ void OnReceivedTaskPort(base::ProcessHandle process) override;
+
+ base::PortProvider* const port_provider_;
+
+ base::Lock observers_lock_;
+ std::set<Observer*> observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(MachPortRelay);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_MACH_PORT_RELAY_H_
diff --git a/mojo/edk/system/mapping_table.cc b/mojo/edk/system/mapping_table.cc
new file mode 100644
index 0000000000..850944306e
--- /dev/null
+++ b/mojo/edk/system/mapping_table.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/mapping_table.h"
+
+#include "base/logging.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/configuration.h"
+
+namespace mojo {
+namespace edk {
+
+MappingTable::MappingTable() {
+}
+
+MappingTable::~MappingTable() {
+ // This should usually not be reached (the only instance should be owned by
+ // the singleton |Core|, which lives forever), except in tests.
+}
+
+MojoResult MappingTable::AddMapping(
+ std::unique_ptr<PlatformSharedBufferMapping> mapping) {
+ DCHECK(mapping);
+
+ if (address_to_mapping_map_.size() >=
+ GetConfiguration().max_mapping_table_sze)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ void* address = mapping->GetBase();
+ DCHECK(address_to_mapping_map_.find(address) ==
+ address_to_mapping_map_.end());
+ address_to_mapping_map_[address] = mapping.release();
+ return MOJO_RESULT_OK;
+}
+
+MojoResult MappingTable::RemoveMapping(void* address) {
+ AddressToMappingMap::iterator it = address_to_mapping_map_.find(address);
+ if (it == address_to_mapping_map_.end())
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ PlatformSharedBufferMapping* mapping_to_delete = it->second;
+ address_to_mapping_map_.erase(it);
+ delete mapping_to_delete;
+ return MOJO_RESULT_OK;
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/mapping_table.h b/mojo/edk/system/mapping_table.h
new file mode 100644
index 0000000000..00167e3604
--- /dev/null
+++ b/mojo/edk/system/mapping_table.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_MAPPING_TABLE_H_
+#define MOJO_EDK_SYSTEM_MAPPING_TABLE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+
+namespace edk {
+class Core;
+class PlatformSharedBufferMapping;
+
+// Test-only function (defined/used in embedder/test_embedder.cc). Declared here
+// so it can be friended.
+namespace internal {
+bool ShutdownCheckNoLeaks(Core*);
+}
+
+// This class provides the (global) table of memory mappings (owned by |Core|),
+// which maps mapping base addresses to |PlatformSharedBufferMapping|s.
+//
+// This class is NOT thread-safe; locking is left to |Core|.
+class MOJO_SYSTEM_IMPL_EXPORT MappingTable {
+ public:
+ MappingTable();
+ ~MappingTable();
+
+ // Tries to add a mapping. (Takes ownership of the mapping in all cases; on
+ // failure, it will be destroyed.)
+ MojoResult AddMapping(std::unique_ptr<PlatformSharedBufferMapping> mapping);
+ MojoResult RemoveMapping(void* address);
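+
+  // Sketch of intended use (illustrative only; |table| is a placeholder):
+  //
+  //   std::unique_ptr<PlatformSharedBufferMapping> mapping = ...;
+  //   void* base = mapping->GetBase();
+  //   if (table.AddMapping(std::move(mapping)) == MOJO_RESULT_OK) {
+  //     // ...use the mapping...
+  //     table.RemoveMapping(base);  // Unmaps and destroys the mapping.
+  //   }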
+
+ private:
+ friend bool internal::ShutdownCheckNoLeaks(Core*);
+
+ using AddressToMappingMap =
+ base::hash_map<void*, PlatformSharedBufferMapping*>;
+ AddressToMappingMap address_to_mapping_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(MappingTable);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_MAPPING_TABLE_H_
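
MappingTable owns every mapping it is handed and, as noted above, leaves all locking to |Core|. The following is an illustrative sketch of that ownership contract; |MapBufferUnderLock| is a hypothetical helper, and its caller is assumed to already hold Core's lock since MappingTable performs no locking of its own:

// Illustrative sketch only; the helper name is hypothetical.
MojoResult MapBufferUnderLock(MappingTable* table,
                              std::unique_ptr<PlatformSharedBufferMapping> m,
                              void** out_address) {
  void* address = m->GetBase();
  // AddMapping() takes ownership in all cases; |m| is destroyed on failure.
  MojoResult rv = table->AddMapping(std::move(m));
  if (rv != MOJO_RESULT_OK)
    return rv;
  *out_address = address;
  // Later, RemoveMapping(address) deletes the stored mapping.
  return MOJO_RESULT_OK;
}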
diff --git a/mojo/edk/system/message_for_transit.cc b/mojo/edk/system/message_for_transit.cc
new file mode 100644
index 0000000000..26658e161c
--- /dev/null
+++ b/mojo/edk/system/message_for_transit.cc
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/message_for_transit.h"
+
+#include <vector>
+
+#include "mojo/edk/embedder/platform_handle_vector.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+static_assert(sizeof(MessageForTransit::MessageHeader) % 8 == 0,
+ "Invalid MessageHeader size.");
+static_assert(sizeof(MessageForTransit::DispatcherHeader) % 8 == 0,
+ "Invalid DispatcherHeader size.");
+
+} // namespace
+
+MessageForTransit::~MessageForTransit() {}
+
+// static
+MojoResult MessageForTransit::Create(
+ std::unique_ptr<MessageForTransit>* message,
+ uint32_t num_bytes,
+ const Dispatcher::DispatcherInTransit* dispatchers,
+ uint32_t num_dispatchers) {
+ // A structure for retaining information about every Dispatcher that will be
+ // sent with this message.
+ struct DispatcherInfo {
+ uint32_t num_bytes;
+ uint32_t num_ports;
+ uint32_t num_handles;
+ };
+
+ // This is only the base header size. It will grow as we accumulate the
+ // size of serialized state for each dispatcher.
+ size_t header_size = sizeof(MessageHeader) +
+ num_dispatchers * sizeof(DispatcherHeader);
+ size_t num_ports = 0;
+ size_t num_handles = 0;
+
+ std::vector<DispatcherInfo> dispatcher_info(num_dispatchers);
+ for (size_t i = 0; i < num_dispatchers; ++i) {
+ Dispatcher* d = dispatchers[i].dispatcher.get();
+ d->StartSerialize(&dispatcher_info[i].num_bytes,
+ &dispatcher_info[i].num_ports,
+ &dispatcher_info[i].num_handles);
+ header_size += dispatcher_info[i].num_bytes;
+ num_ports += dispatcher_info[i].num_ports;
+ num_handles += dispatcher_info[i].num_handles;
+ }
+
+ // We now have enough information to fully allocate the message storage.
+ std::unique_ptr<PortsMessage> msg = PortsMessage::NewUserMessage(
+ header_size + num_bytes, num_ports, num_handles);
+ if (!msg)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ // Populate the message header with information about serialized dispatchers.
+ //
+ // The front of the message is always a MessageHeader followed by a
+ // DispatcherHeader for each dispatcher to be sent.
+ MessageHeader* header =
+ static_cast<MessageHeader*>(msg->mutable_payload_bytes());
+ DispatcherHeader* dispatcher_headers =
+ reinterpret_cast<DispatcherHeader*>(header + 1);
+
+ // Serialized dispatcher state immediately follows the series of
+ // DispatcherHeaders.
+ char* dispatcher_data =
+ reinterpret_cast<char*>(dispatcher_headers + num_dispatchers);
+
+ header->num_dispatchers = num_dispatchers;
+
+ // |header_size| is the total number of bytes preceding the message payload,
+ // including all dispatcher headers and serialized dispatcher state.
+ DCHECK_LE(header_size, std::numeric_limits<uint32_t>::max());
+ header->header_size = static_cast<uint32_t>(header_size);
+
+ if (num_dispatchers > 0) {
+ ScopedPlatformHandleVectorPtr handles(
+ new PlatformHandleVector(num_handles));
+ size_t port_index = 0;
+ size_t handle_index = 0;
+ bool fail = false;
+ for (size_t i = 0; i < num_dispatchers; ++i) {
+ Dispatcher* d = dispatchers[i].dispatcher.get();
+ DispatcherHeader* dh = &dispatcher_headers[i];
+ const DispatcherInfo& info = dispatcher_info[i];
+
+ // Fill in the header for this dispatcher.
+ dh->type = static_cast<int32_t>(d->GetType());
+ dh->num_bytes = info.num_bytes;
+ dh->num_ports = info.num_ports;
+ dh->num_platform_handles = info.num_handles;
+
+ // Fill in serialized state, ports, and platform handles. We'll cancel
+      // the send if the dispatcher implementation rejects serialization.
+ if (!d->EndSerialize(static_cast<void*>(dispatcher_data),
+ msg->mutable_ports() + port_index,
+ handles->data() + handle_index)) {
+ fail = true;
+ break;
+ }
+
+ dispatcher_data += info.num_bytes;
+ port_index += info.num_ports;
+ handle_index += info.num_handles;
+ }
+
+ if (fail) {
+ // Release any platform handles we've accumulated. Their dispatchers
+ // retain ownership when message creation fails, so these are not actually
+ // leaking.
+ handles->clear();
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ // Take ownership of all the handles and move them into message storage.
+ msg->SetHandles(std::move(handles));
+ }
+
+ message->reset(new MessageForTransit(std::move(msg)));
+ return MOJO_RESULT_OK;
+}
+
+MessageForTransit::MessageForTransit(std::unique_ptr<PortsMessage> message)
+ : message_(std::move(message)) {
+}
+
+} // namespace edk
+} // namespace mojo
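
Create() lays out each outgoing message as a MessageHeader, followed by one DispatcherHeader per attached dispatcher, then each dispatcher's serialized state, and finally the user payload beginning at |header_size|. An illustrative sketch of how a reader can locate each region once the header has been validated (the real validation lives in MessagePipeDispatcher::ReadMessage); |payload| is assumed to point at the start of the PortsMessage payload:

// Sketch only: region offsets within a serialized message payload, mirroring
// the layout produced by MessageForTransit::Create().
const char* base = static_cast<const char*>(payload);
const auto* header =
    reinterpret_cast<const MessageForTransit::MessageHeader*>(base);
const auto* dispatcher_headers =
    reinterpret_cast<const MessageForTransit::DispatcherHeader*>(header + 1);
const char* dispatcher_data =
    reinterpret_cast<const char*>(dispatcher_headers + header->num_dispatchers);
const char* user_payload = base + header->header_size;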
diff --git a/mojo/edk/system/message_for_transit.h b/mojo/edk/system/message_for_transit.h
new file mode 100644
index 0000000000..6103a771e1
--- /dev/null
+++ b/mojo/edk/system/message_for_transit.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_MESSAGE_FOR_TRANSIT_H_
+#define MOJO_EDK_SYSTEM_MESSAGE_FOR_TRANSIT_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/ports_message.h"
+#include "mojo/edk/system/system_impl_export.h"
+
+namespace mojo {
+namespace edk {
+
+// MessageForTransit holds onto a PortsMessage which may be sent via
+// |MojoWriteMessage()| or which may have been received on a pipe endpoint.
+// Instances of this class are exposed to Mojo system API consumers via the
+// opaque pointers used with |MojoCreateMessage()|, |MojoDestroyMessage()|,
+// |MojoWriteMessageNew()|, and |MojoReadMessageNew()|.
+class MOJO_SYSTEM_IMPL_EXPORT MessageForTransit {
+ public:
+#pragma pack(push, 1)
+ // Header attached to every message.
+ struct MessageHeader {
+ // The number of serialized dispatchers included in this header.
+ uint32_t num_dispatchers;
+
+ // Total size of the header, including serialized dispatcher data.
+ uint32_t header_size;
+ };
+
+ // Header for each dispatcher in a message, immediately following the message
+ // header.
+ struct DispatcherHeader {
+    // The type of the dispatcher, corresponding to the Dispatcher::Type enum.
+ int32_t type;
+
+ // The size of the serialized dispatcher, not including this header.
+ uint32_t num_bytes;
+
+ // The number of ports needed to deserialize this dispatcher.
+ uint32_t num_ports;
+
+ // The number of platform handles needed to deserialize this dispatcher.
+ uint32_t num_platform_handles;
+ };
+#pragma pack(pop)
+
+ ~MessageForTransit();
+
+ // A static constructor for building outbound messages.
+ static MojoResult Create(
+ std::unique_ptr<MessageForTransit>* message,
+ uint32_t num_bytes,
+ const Dispatcher::DispatcherInTransit* dispatchers,
+ uint32_t num_dispatchers);
+
+ // A static constructor for wrapping inbound messages.
+ static std::unique_ptr<MessageForTransit> WrapPortsMessage(
+ std::unique_ptr<PortsMessage> message) {
+ return base::WrapUnique(new MessageForTransit(std::move(message)));
+ }
+
+ const void* bytes() const {
+ DCHECK(message_);
+ return static_cast<const void*>(
+ static_cast<const char*>(message_->payload_bytes()) +
+ header()->header_size);
+ }
+
+ void* mutable_bytes() {
+ DCHECK(message_);
+ return static_cast<void*>(
+ static_cast<char*>(message_->mutable_payload_bytes()) +
+ header()->header_size);
+ }
+
+ size_t num_bytes() const {
+ size_t header_size = header()->header_size;
+ DCHECK_GE(message_->num_payload_bytes(), header_size);
+ return message_->num_payload_bytes() - header_size;
+ }
+
+ size_t num_handles() const { return header()->num_dispatchers; }
+
+ const PortsMessage& ports_message() const { return *message_; }
+
+ std::unique_ptr<PortsMessage> TakePortsMessage() {
+ return std::move(message_);
+ }
+
+ private:
+ explicit MessageForTransit(std::unique_ptr<PortsMessage> message);
+
+ const MessageForTransit::MessageHeader* header() const {
+ DCHECK(message_);
+ return static_cast<const MessageForTransit::MessageHeader*>(
+ message_->payload_bytes());
+ }
+
+ std::unique_ptr<PortsMessage> message_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageForTransit);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_MESSAGE_FOR_TRANSIT_H_
diff --git a/mojo/edk/system/message_pipe_dispatcher.cc b/mojo/edk/system/message_pipe_dispatcher.cc
new file mode 100644
index 0000000000..1db56c0dac
--- /dev/null
+++ b/mojo/edk/system/message_pipe_dispatcher.cc
@@ -0,0 +1,554 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/message_pipe_dispatcher.h"
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/system/core.h"
+#include "mojo/edk/system/message_for_transit.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/ports/message_filter.h"
+#include "mojo/edk/system/ports_message.h"
+#include "mojo/edk/system/request_context.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+using DispatcherHeader = MessageForTransit::DispatcherHeader;
+using MessageHeader = MessageForTransit::MessageHeader;
+
+#pragma pack(push, 1)
+
+struct SerializedState {
+ uint64_t pipe_id;
+ int8_t endpoint;
+ char padding[7];
+};
+
+static_assert(sizeof(SerializedState) % 8 == 0,
+ "Invalid SerializedState size.");
+
+#pragma pack(pop)
+
+} // namespace
+
+// A PortObserver which forwards to a MessagePipeDispatcher. This owns a
+// reference to the MPD to ensure it lives as long as the observed port.
+class MessagePipeDispatcher::PortObserverThunk
+ : public NodeController::PortObserver {
+ public:
+ explicit PortObserverThunk(scoped_refptr<MessagePipeDispatcher> dispatcher)
+ : dispatcher_(dispatcher) {}
+
+ private:
+ ~PortObserverThunk() override {}
+
+ // NodeController::PortObserver:
+ void OnPortStatusChanged() override { dispatcher_->OnPortStatusChanged(); }
+
+ scoped_refptr<MessagePipeDispatcher> dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(PortObserverThunk);
+};
+
+// A MessageFilter used by ReadMessage to determine whether a message should
+// actually be consumed yet.
+class ReadMessageFilter : public ports::MessageFilter {
+ public:
+ // Creates a new ReadMessageFilter which captures and potentially modifies
+ // various (unowned) local state within MessagePipeDispatcher::ReadMessage.
+ ReadMessageFilter(bool read_any_size,
+ bool may_discard,
+ uint32_t* num_bytes,
+ uint32_t* num_handles,
+ bool* no_space,
+ bool* invalid_message)
+ : read_any_size_(read_any_size),
+ may_discard_(may_discard),
+ num_bytes_(num_bytes),
+ num_handles_(num_handles),
+ no_space_(no_space),
+ invalid_message_(invalid_message) {}
+
+ ~ReadMessageFilter() override {}
+
+ // ports::MessageFilter:
+ bool Match(const ports::Message& m) override {
+ const PortsMessage& message = static_cast<const PortsMessage&>(m);
+ if (message.num_payload_bytes() < sizeof(MessageHeader)) {
+ *invalid_message_ = true;
+ return true;
+ }
+
+ const MessageHeader* header =
+ static_cast<const MessageHeader*>(message.payload_bytes());
+ if (header->header_size > message.num_payload_bytes()) {
+ *invalid_message_ = true;
+ return true;
+ }
+
+ uint32_t bytes_to_read = 0;
+ uint32_t bytes_available =
+ static_cast<uint32_t>(message.num_payload_bytes()) -
+ header->header_size;
+ if (num_bytes_) {
+ bytes_to_read = std::min(*num_bytes_, bytes_available);
+ *num_bytes_ = bytes_available;
+ }
+
+ uint32_t handles_to_read = 0;
+ uint32_t handles_available = header->num_dispatchers;
+ if (num_handles_) {
+ handles_to_read = std::min(*num_handles_, handles_available);
+ *num_handles_ = handles_available;
+ }
+
+ if (handles_to_read < handles_available ||
+ (!read_any_size_ && bytes_to_read < bytes_available)) {
+ *no_space_ = true;
+ return may_discard_;
+ }
+
+ return true;
+ }
+
+ private:
+ const bool read_any_size_;
+ const bool may_discard_;
+ uint32_t* const num_bytes_;
+ uint32_t* const num_handles_;
+ bool* const no_space_;
+ bool* const invalid_message_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadMessageFilter);
+};
+
+#if DCHECK_IS_ON()
+
+// A MessageFilter which never matches a message. Used to peek at the size of
+// the next available message on a port, for debug logging only.
+class PeekSizeMessageFilter : public ports::MessageFilter {
+ public:
+ PeekSizeMessageFilter() {}
+ ~PeekSizeMessageFilter() override {}
+
+ // ports::MessageFilter:
+ bool Match(const ports::Message& message) override {
+ message_size_ = message.num_payload_bytes();
+ return false;
+ }
+
+ size_t message_size() const { return message_size_; }
+
+ private:
+ size_t message_size_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(PeekSizeMessageFilter);
+};
+
+#endif // DCHECK_IS_ON()
+
+MessagePipeDispatcher::MessagePipeDispatcher(NodeController* node_controller,
+ const ports::PortRef& port,
+ uint64_t pipe_id,
+ int endpoint)
+ : node_controller_(node_controller),
+ port_(port),
+ pipe_id_(pipe_id),
+ endpoint_(endpoint),
+ watchers_(this) {
+ DVLOG(2) << "Creating new MessagePipeDispatcher for port " << port.name()
+ << " [pipe_id=" << pipe_id << "; endpoint=" << endpoint << "]";
+
+ node_controller_->SetPortObserver(
+ port_,
+ make_scoped_refptr(new PortObserverThunk(this)));
+}
+
+bool MessagePipeDispatcher::Fuse(MessagePipeDispatcher* other) {
+ node_controller_->SetPortObserver(port_, nullptr);
+ node_controller_->SetPortObserver(other->port_, nullptr);
+
+ ports::PortRef port0;
+ {
+ base::AutoLock lock(signal_lock_);
+ port0 = port_;
+ port_closed_.Set(true);
+ watchers_.NotifyClosed();
+ }
+
+ ports::PortRef port1;
+ {
+ base::AutoLock lock(other->signal_lock_);
+ port1 = other->port_;
+ other->port_closed_.Set(true);
+ other->watchers_.NotifyClosed();
+ }
+
+ // Both ports are always closed by this call.
+ int rv = node_controller_->MergeLocalPorts(port0, port1);
+ return rv == ports::OK;
+}
+
+Dispatcher::Type MessagePipeDispatcher::GetType() const {
+ return Type::MESSAGE_PIPE;
+}
+
+MojoResult MessagePipeDispatcher::Close() {
+ base::AutoLock lock(signal_lock_);
+ DVLOG(2) << "Closing message pipe " << pipe_id_ << " endpoint " << endpoint_
+ << " [port=" << port_.name() << "]";
+ return CloseNoLock();
+}
+
+MojoResult MessagePipeDispatcher::WriteMessage(
+ std::unique_ptr<MessageForTransit> message,
+ MojoWriteMessageFlags flags) {
+ if (port_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ size_t num_bytes = message->num_bytes();
+ int rv = node_controller_->SendMessage(port_, message->TakePortsMessage());
+
+ DVLOG(4) << "Sent message on pipe " << pipe_id_ << " endpoint " << endpoint_
+ << " [port=" << port_.name() << "; rv=" << rv
+ << "; num_bytes=" << num_bytes << "]";
+
+ if (rv != ports::OK) {
+ if (rv == ports::ERROR_PORT_UNKNOWN ||
+ rv == ports::ERROR_PORT_STATE_UNEXPECTED ||
+ rv == ports::ERROR_PORT_CANNOT_SEND_PEER) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ } else if (rv == ports::ERROR_PORT_PEER_CLOSED) {
+ return MOJO_RESULT_FAILED_PRECONDITION;
+ }
+
+ NOTREACHED();
+ return MOJO_RESULT_UNKNOWN;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult MessagePipeDispatcher::ReadMessage(
+ std::unique_ptr<MessageForTransit>* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags,
+ bool read_any_size) {
+ // We can't read from a port that's closed or in transit!
+ if (port_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ bool no_space = false;
+ bool may_discard = flags & MOJO_READ_MESSAGE_FLAG_MAY_DISCARD;
+ bool invalid_message = false;
+
+ // Grab a message if the provided handles buffer is large enough. If the input
+ // |num_bytes| is provided and |read_any_size| is false, we also ensure
+ // that it specifies a size at least as large as the next available payload.
+ //
+ // If |read_any_size| is true, the input value of |*num_bytes| is ignored.
+ // This flag exists to support both new and old API behavior.
+
+ ports::ScopedMessage ports_message;
+ ReadMessageFilter filter(read_any_size, may_discard, num_bytes, num_handles,
+ &no_space, &invalid_message);
+ int rv = node_controller_->node()->GetMessage(port_, &ports_message, &filter);
+
+ if (invalid_message)
+ return MOJO_RESULT_UNKNOWN;
+
+ if (rv != ports::OK && rv != ports::ERROR_PORT_PEER_CLOSED) {
+ if (rv == ports::ERROR_PORT_UNKNOWN ||
+ rv == ports::ERROR_PORT_STATE_UNEXPECTED)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ NOTREACHED();
+ return MOJO_RESULT_UNKNOWN; // TODO: Add a better error code here?
+ }
+
+ if (no_space) {
+ if (may_discard) {
+ // May have been the last message on the pipe. Need to update signals just
+ // in case.
+ base::AutoLock lock(signal_lock_);
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ }
+ // |*num_handles| (and/or |*num_bytes| if |read_any_size| is false) wasn't
+ // sufficient to hold this message's data. The message will still be in
+ // queue unless MOJO_READ_MESSAGE_FLAG_MAY_DISCARD was set.
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ if (!ports_message) {
+ // No message was available in queue.
+
+ if (rv == ports::OK)
+ return MOJO_RESULT_SHOULD_WAIT;
+
+ // Peer is closed and there are no more messages to read.
+ DCHECK_EQ(rv, ports::ERROR_PORT_PEER_CLOSED);
+ return MOJO_RESULT_FAILED_PRECONDITION;
+ }
+
+ // Alright! We have a message and the caller has provided sufficient storage
+ // in which to receive it.
+
+ {
+ // We need to update anyone watching our signals in case that was the last
+ // available message.
+ base::AutoLock lock(signal_lock_);
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+ }
+
+ std::unique_ptr<PortsMessage> msg(
+ static_cast<PortsMessage*>(ports_message.release()));
+
+ const MessageHeader* header =
+ static_cast<const MessageHeader*>(msg->payload_bytes());
+ const DispatcherHeader* dispatcher_headers =
+ reinterpret_cast<const DispatcherHeader*>(header + 1);
+
+ if (header->num_dispatchers > std::numeric_limits<uint16_t>::max())
+ return MOJO_RESULT_UNKNOWN;
+
+ // Deserialize dispatchers.
+ if (header->num_dispatchers > 0) {
+ CHECK(handles);
+ std::vector<DispatcherInTransit> dispatchers(header->num_dispatchers);
+ size_t data_payload_index = sizeof(MessageHeader) +
+ header->num_dispatchers * sizeof(DispatcherHeader);
+ if (data_payload_index > header->header_size)
+ return MOJO_RESULT_UNKNOWN;
+ const char* dispatcher_data = reinterpret_cast<const char*>(
+ dispatcher_headers + header->num_dispatchers);
+ size_t port_index = 0;
+ size_t platform_handle_index = 0;
+ ScopedPlatformHandleVectorPtr msg_handles = msg->TakeHandles();
+ const size_t num_msg_handles = msg_handles ? msg_handles->size() : 0;
+ for (size_t i = 0; i < header->num_dispatchers; ++i) {
+ const DispatcherHeader& dh = dispatcher_headers[i];
+ Type type = static_cast<Type>(dh.type);
+
+ size_t next_payload_index = data_payload_index + dh.num_bytes;
+ if (msg->num_payload_bytes() < next_payload_index ||
+ next_payload_index < data_payload_index) {
+ return MOJO_RESULT_UNKNOWN;
+ }
+
+ size_t next_port_index = port_index + dh.num_ports;
+ if (msg->num_ports() < next_port_index || next_port_index < port_index)
+ return MOJO_RESULT_UNKNOWN;
+
+ size_t next_platform_handle_index =
+ platform_handle_index + dh.num_platform_handles;
+ if (num_msg_handles < next_platform_handle_index ||
+ next_platform_handle_index < platform_handle_index) {
+ return MOJO_RESULT_UNKNOWN;
+ }
+
+ PlatformHandle* out_handles =
+ num_msg_handles ? msg_handles->data() + platform_handle_index
+ : nullptr;
+ dispatchers[i].dispatcher = Dispatcher::Deserialize(
+ type, dispatcher_data, dh.num_bytes, msg->ports() + port_index,
+ dh.num_ports, out_handles, dh.num_platform_handles);
+ if (!dispatchers[i].dispatcher)
+ return MOJO_RESULT_UNKNOWN;
+
+ dispatcher_data += dh.num_bytes;
+ data_payload_index = next_payload_index;
+ port_index = next_port_index;
+ platform_handle_index = next_platform_handle_index;
+ }
+
+ if (!node_controller_->core()->AddDispatchersFromTransit(dispatchers,
+ handles))
+ return MOJO_RESULT_UNKNOWN;
+ }
+
+ CHECK(msg);
+ *message = MessageForTransit::WrapPortsMessage(std::move(msg));
+ return MOJO_RESULT_OK;
+}
+
+HandleSignalsState
+MessagePipeDispatcher::GetHandleSignalsState() const {
+ base::AutoLock lock(signal_lock_);
+ return GetHandleSignalsStateNoLock();
+}
+
+MojoResult MessagePipeDispatcher::AddWatcherRef(
+ const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) {
+ base::AutoLock lock(signal_lock_);
+ if (port_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock());
+}
+
+MojoResult MessagePipeDispatcher::RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context) {
+ base::AutoLock lock(signal_lock_);
+ if (port_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ return watchers_.Remove(watcher, context);
+}
+
+void MessagePipeDispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) {
+ *num_bytes = static_cast<uint32_t>(sizeof(SerializedState));
+ *num_ports = 1;
+ *num_handles = 0;
+}
+
+bool MessagePipeDispatcher::EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) {
+ SerializedState* state = static_cast<SerializedState*>(destination);
+ state->pipe_id = pipe_id_;
+ state->endpoint = static_cast<int8_t>(endpoint_);
+ memset(state->padding, 0, sizeof(state->padding));
+ ports[0] = port_.name();
+ return true;
+}
+
+bool MessagePipeDispatcher::BeginTransit() {
+ base::AutoLock lock(signal_lock_);
+ if (in_transit_ || port_closed_)
+ return false;
+ in_transit_.Set(true);
+ return in_transit_;
+}
+
+void MessagePipeDispatcher::CompleteTransitAndClose() {
+ node_controller_->SetPortObserver(port_, nullptr);
+
+ base::AutoLock lock(signal_lock_);
+ port_transferred_ = true;
+ in_transit_.Set(false);
+ CloseNoLock();
+}
+
+void MessagePipeDispatcher::CancelTransit() {
+ base::AutoLock lock(signal_lock_);
+ in_transit_.Set(false);
+
+ // Something may have happened while we were waiting for potential transit.
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+}
+
+// static
+scoped_refptr<Dispatcher> MessagePipeDispatcher::Deserialize(
+ const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles) {
+ if (num_ports != 1 || num_handles || num_bytes != sizeof(SerializedState))
+ return nullptr;
+
+ const SerializedState* state = static_cast<const SerializedState*>(data);
+
+ ports::PortRef port;
+ CHECK_EQ(
+ ports::OK,
+ internal::g_core->GetNodeController()->node()->GetPort(ports[0], &port));
+
+ return new MessagePipeDispatcher(internal::g_core->GetNodeController(), port,
+ state->pipe_id, state->endpoint);
+}
+
+MessagePipeDispatcher::~MessagePipeDispatcher() {
+ DCHECK(port_closed_ && !in_transit_);
+}
+
+MojoResult MessagePipeDispatcher::CloseNoLock() {
+ signal_lock_.AssertAcquired();
+ if (port_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ port_closed_.Set(true);
+ watchers_.NotifyClosed();
+
+ if (!port_transferred_) {
+ base::AutoUnlock unlock(signal_lock_);
+ node_controller_->ClosePort(port_);
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+HandleSignalsState MessagePipeDispatcher::GetHandleSignalsStateNoLock() const {
+ HandleSignalsState rv;
+
+ ports::PortStatus port_status;
+ if (node_controller_->node()->GetStatus(port_, &port_status) != ports::OK) {
+ CHECK(in_transit_ || port_transferred_ || port_closed_);
+ return HandleSignalsState();
+ }
+
+ if (port_status.has_messages) {
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ }
+ if (port_status.receiving_messages)
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ if (!port_status.peer_closed) {
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
+ } else {
+ rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+ }
+ rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+ return rv;
+}
+
+void MessagePipeDispatcher::OnPortStatusChanged() {
+ DCHECK(RequestContext::current());
+
+ base::AutoLock lock(signal_lock_);
+
+ // We stop observing our port as soon as it's transferred, but this can race
+ // with events which are raised right before that happens. This is fine to
+ // ignore.
+ if (port_transferred_)
+ return;
+
+#if DCHECK_IS_ON()
+ ports::PortStatus port_status;
+ if (node_controller_->node()->GetStatus(port_, &port_status) == ports::OK) {
+ if (port_status.has_messages) {
+ ports::ScopedMessage unused;
+ PeekSizeMessageFilter filter;
+ node_controller_->node()->GetMessage(port_, &unused, &filter);
+ DVLOG(4) << "New message detected on message pipe " << pipe_id_
+ << " endpoint " << endpoint_ << " [port=" << port_.name()
+ << "; size=" << filter.message_size() << "]";
+ }
+ if (port_status.peer_closed) {
+ DVLOG(2) << "Peer closure detected on message pipe " << pipe_id_
+ << " endpoint " << endpoint_ << " [port=" << port_.name() << "]";
+ }
+ }
+#endif
+
+ watchers_.NotifyState(GetHandleSignalsStateNoLock());
+}
+
+} // namespace edk
+} // namespace mojo
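
ReadMessage() and the ReadMessageFilter above implement the two-call read protocol exercised by the tests later in this change: a call with insufficient storage reports the required sizes via MOJO_RESULT_RESOURCE_EXHAUSTED and, unless MOJO_READ_MESSAGE_FLAG_MAY_DISCARD is set, leaves the message queued so that a retry with larger buffers can consume it. An illustrative sketch against the public C API, assuming |pipe| is a valid handle and the queued message carries no handles:

// Sketch of the two-call read pattern served by ReadMessageFilter.
MojoResult ReadOneMessage(MojoHandle pipe, std::vector<char>* out_bytes) {
  uint32_t num_bytes = 0;
  MojoResult rv = MojoReadMessage(pipe, nullptr, &num_bytes, nullptr, nullptr,
                                  MOJO_READ_MESSAGE_FLAG_NONE);
  if (rv != MOJO_RESULT_RESOURCE_EXHAUSTED)
    return rv;  // OK (empty message), SHOULD_WAIT, FAILED_PRECONDITION, etc.
  // |num_bytes| now holds the payload size; the message is still queued.
  out_bytes->resize(num_bytes);
  return MojoReadMessage(pipe, out_bytes->data(), &num_bytes, nullptr, nullptr,
                         MOJO_READ_MESSAGE_FLAG_NONE);
}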
diff --git a/mojo/edk/system/message_pipe_dispatcher.h b/mojo/edk/system/message_pipe_dispatcher.h
new file mode 100644
index 0000000000..574ad660b0
--- /dev/null
+++ b/mojo/edk/system/message_pipe_dispatcher.h
@@ -0,0 +1,115 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_MESSAGE_PIPE_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_MESSAGE_PIPE_DISPATCHER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <queue>
+
+#include "base/macros.h"
+#include "mojo/edk/system/atomic_flag.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/message_for_transit.h"
+#include "mojo/edk/system/ports/port_ref.h"
+#include "mojo/edk/system/watcher_set.h"
+
+namespace mojo {
+namespace edk {
+
+class NodeController;
+
+class MessagePipeDispatcher : public Dispatcher {
+ public:
+ // Constructs a MessagePipeDispatcher permanently tied to a specific port.
+  // The given |port| must reflect its actual connection state at construction
+  // time; that is, if the port has been initialized with a peer, it must
+  // already be connected.
+  //
+  // A MessagePipeDispatcher may not be transferred while in a disconnected
+  // state, and it can never return to a disconnected state once connected.
+ //
+ // |pipe_id| is a unique identifier which can be used to track pipe endpoints
+ // as they're passed around. |endpoint| is either 0 or 1 and again is only
+ // used for tracking pipes (one side is always 0, the other is always 1.)
+ MessagePipeDispatcher(NodeController* node_controller,
+ const ports::PortRef& port,
+ uint64_t pipe_id,
+ int endpoint);
+
+ // Fuses this pipe with |other|. Returns |true| on success or |false| on
+ // failure. Regardless of the return value, both dispatchers are closed by
+ // this call.
+ bool Fuse(MessagePipeDispatcher* other);
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ MojoResult WriteMessage(std::unique_ptr<MessageForTransit> message,
+ MojoWriteMessageFlags flags) override;
+ MojoResult ReadMessage(std::unique_ptr<MessageForTransit>* message,
+ uint32_t* num_bytes,
+ MojoHandle* handles,
+ uint32_t* num_handles,
+ MojoReadMessageFlags flags,
+ bool read_any_size) override;
+ HandleSignalsState GetHandleSignalsState() const override;
+ MojoResult AddWatcherRef(const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context) override;
+ MojoResult RemoveWatcherRef(WatcherDispatcher* watcher,
+ uintptr_t context) override;
+ void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) override;
+ bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) override;
+ bool BeginTransit() override;
+ void CompleteTransitAndClose() override;
+ void CancelTransit() override;
+
+ static scoped_refptr<Dispatcher> Deserialize(
+ const void* data,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles);
+
+ private:
+ class PortObserverThunk;
+ friend class PortObserverThunk;
+
+ ~MessagePipeDispatcher() override;
+
+ MojoResult CloseNoLock();
+ HandleSignalsState GetHandleSignalsStateNoLock() const;
+ void OnPortStatusChanged();
+
+ // These are safe to access from any thread without locking.
+ NodeController* const node_controller_;
+ const ports::PortRef port_;
+ const uint64_t pipe_id_;
+ const int endpoint_;
+
+ // Guards access to all the fields below.
+ mutable base::Lock signal_lock_;
+
+  // This is not the same as |port_transferred_|. It's only held true between
+ // BeginTransit() and Complete/CancelTransit().
+ AtomicFlag in_transit_;
+
+ bool port_transferred_ = false;
+ AtomicFlag port_closed_;
+ WatcherSet watchers_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePipeDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_MESSAGE_PIPE_DISPATCHER_H_
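
Fuse() is the backend for MojoFuseMessagePipes(), which the FuseMessagePipeTest cases later in this change exercise. A minimal illustrative sketch of the observable behavior, assuming two freshly created pipes:

// Sketch: fusing |b| and |c| makes |a| and |d| direct peers. |b| and |c| are
// always invalidated by the call, whether or not it succeeds.
MojoHandle a, b, c, d;
MojoCreateMessagePipe(nullptr, &a, &b);
MojoCreateMessagePipe(nullptr, &c, &d);
if (MojoFuseMessagePipes(b, c) == MOJO_RESULT_OK) {
  // Messages written to |a| now arrive on |d|, and vice versa.
}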
diff --git a/mojo/edk/system/message_pipe_perftest.cc b/mojo/edk/system/message_pipe_perftest.cc
new file mode 100644
index 0000000000..9866c474de
--- /dev/null
+++ b/mojo/edk/system/message_pipe_perftest.cc
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/perf_time_logger.h"
+#include "base/threading/thread.h"
+#include "mojo/edk/embedder/embedder.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/edk/test/test_utils.h"
+#include "mojo/public/c/system/functions.h"
+#include "mojo/public/cpp/system/message_pipe.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+class MessagePipePerfTest : public test::MojoTestBase {
+ public:
+ MessagePipePerfTest() : message_count_(0), message_size_(0) {}
+
+ void SetUpMeasurement(int message_count, size_t message_size) {
+ message_count_ = message_count;
+ message_size_ = message_size;
+ payload_ = std::string(message_size, '*');
+ read_buffer_.resize(message_size * 2);
+ }
+
+ protected:
+ void WriteWaitThenRead(MojoHandle mp) {
+ CHECK_EQ(MojoWriteMessage(mp, payload_.data(),
+ static_cast<uint32_t>(payload_.size()), nullptr,
+ 0, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ HandleSignalsState hss;
+ CHECK_EQ(WaitForSignals(mp, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer_.size());
+ CHECK_EQ(MojoReadMessage(mp, &read_buffer_[0], &read_buffer_size, nullptr,
+ nullptr, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ CHECK_EQ(read_buffer_size, static_cast<uint32_t>(payload_.size()));
+ }
+
+ void SendQuitMessage(MojoHandle mp) {
+ CHECK_EQ(MojoWriteMessage(mp, "", 0, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ }
+
+ void Measure(MojoHandle mp) {
+    // Do one ping-pong exchange first to ensure the channel is established.
+ WriteWaitThenRead(mp);
+
+ std::string test_name =
+ base::StringPrintf("IPC_Perf_%dx_%u", message_count_,
+ static_cast<unsigned>(message_size_));
+ base::PerfTimeLogger logger(test_name.c_str());
+
+ for (int i = 0; i < message_count_; ++i)
+ WriteWaitThenRead(mp);
+
+ logger.Done();
+ }
+
+ protected:
+ void RunPingPongServer(MojoHandle mp) {
+    // These values match those in ipc_perftests.cc for comparison.
+ const size_t kMsgSize[5] = {12, 144, 1728, 20736, 248832};
+ const int kMessageCount[5] = {50000, 50000, 50000, 12000, 1000};
+
+ for (size_t i = 0; i < 5; i++) {
+ SetUpMeasurement(kMessageCount[i], kMsgSize[i]);
+ Measure(mp);
+ }
+
+ SendQuitMessage(mp);
+ }
+
+ static int RunPingPongClient(MojoHandle mp) {
+ std::string buffer(1000000, '\0');
+ int rv = 0;
+ while (true) {
+ // Wait for our end of the message pipe to be readable.
+ HandleSignalsState hss;
+ MojoResult result = WaitForSignals(mp, MOJO_HANDLE_SIGNAL_READABLE, &hss);
+ if (result != MOJO_RESULT_OK) {
+ rv = result;
+ break;
+ }
+
+ uint32_t read_size = static_cast<uint32_t>(buffer.size());
+ CHECK_EQ(MojoReadMessage(mp, &buffer[0],
+ &read_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+
+ // Empty message indicates quit.
+ if (read_size == 0)
+ break;
+
+ CHECK_EQ(MojoWriteMessage(mp, &buffer[0],
+ read_size,
+ nullptr, 0, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ }
+
+ return rv;
+ }
+
+ private:
+ int message_count_;
+ size_t message_size_;
+ std::string payload_;
+ std::string read_buffer_;
+ std::unique_ptr<base::PerfTimeLogger> perf_logger_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePipePerfTest);
+};
+
+TEST_F(MessagePipePerfTest, PingPong) {
+ MojoHandle server_handle, client_handle;
+ CreateMessagePipe(&server_handle, &client_handle);
+
+ base::Thread client_thread("PingPongClient");
+ client_thread.Start();
+ client_thread.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&RunPingPongClient), client_handle));
+
+ RunPingPongServer(server_handle);
+}
+
+// Echoes every message it receives back to the sender until it receives an
+// empty message, which signals it to quit. Returns 0 on a clean quit, or the
+// failing wait result otherwise.
+DEFINE_TEST_CLIENT_WITH_PIPE(PingPongClient, MessagePipePerfTest, h) {
+ return RunPingPongClient(h);
+}
+
+// Repeatedly sends a message and waits for the child's echo before sending
+// the next one. Once all measurement passes have completed, sends an empty
+// quit message and waits for the child to finish.
+TEST_F(MessagePipePerfTest, MultiprocessPingPong) {
+ RUN_CHILD_ON_PIPE(PingPongClient, h)
+ RunPingPongServer(h);
+ END_CHILD()
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/message_pipe_unittest.cc b/mojo/edk/system/message_pipe_unittest.cc
new file mode 100644
index 0000000000..e6f1ff6437
--- /dev/null
+++ b/mojo/edk/system/message_pipe_unittest.cc
@@ -0,0 +1,699 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <string.h>
+
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/core.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+const MojoHandleSignals kAllSignals = MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED;
+static const char kHelloWorld[] = "hello world";
+
+class MessagePipeTest : public test::MojoTestBase {
+ public:
+ MessagePipeTest() {
+ CHECK_EQ(MOJO_RESULT_OK, MojoCreateMessagePipe(nullptr, &pipe0_, &pipe1_));
+ }
+
+ ~MessagePipeTest() override {
+ if (pipe0_ != MOJO_HANDLE_INVALID)
+ CHECK_EQ(MOJO_RESULT_OK, MojoClose(pipe0_));
+ if (pipe1_ != MOJO_HANDLE_INVALID)
+ CHECK_EQ(MOJO_RESULT_OK, MojoClose(pipe1_));
+ }
+
+ MojoResult WriteMessage(MojoHandle message_pipe_handle,
+ const void* bytes,
+ uint32_t num_bytes) {
+ return MojoWriteMessage(message_pipe_handle, bytes, num_bytes, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE);
+ }
+
+ MojoResult ReadMessage(MojoHandle message_pipe_handle,
+ void* bytes,
+ uint32_t* num_bytes,
+ bool may_discard = false) {
+ return MojoReadMessage(message_pipe_handle, bytes, num_bytes, nullptr, 0,
+ may_discard ? MOJO_READ_MESSAGE_FLAG_MAY_DISCARD :
+ MOJO_READ_MESSAGE_FLAG_NONE);
+ }
+
+ MojoHandle pipe0_, pipe1_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MessagePipeTest);
+};
+
+using FuseMessagePipeTest = test::MojoTestBase;
+
+TEST_F(MessagePipeTest, WriteData) {
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WriteMessage(pipe0_, kHelloWorld, sizeof(kHelloWorld)));
+}
+
+// Tests:
+// - only default flags
+// - reading messages from a port
+// - when there are no/one/two messages available for that port
+// - with buffer size 0 (and null buffer) -- should get size
+// - with too-small buffer -- should get size
+// - also verify that buffers aren't modified when/where they shouldn't be
+// - writing messages to a port
+// - in the obvious scenarios (as above)
+// - to a port that's been closed
+// - writing a message to a port, closing the other (would be the source) port,
+// and reading it
+TEST_F(MessagePipeTest, Basic) {
+ int32_t buffer[2];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t buffer_size;
+
+ // Nothing to read yet on port 0.
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, ReadMessage(pipe0_, buffer, &buffer_size));
+ ASSERT_EQ(kBufferSize, buffer_size);
+ ASSERT_EQ(123, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Ditto for port 1.
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, ReadMessage(pipe1_, buffer, &buffer_size));
+
+ // Write from port 1 (to port 0).
+ buffer[0] = 789012345;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ MojoHandleSignalsState state;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read from port 0.
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe0_, buffer, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(789012345, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Read again from port 0 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, ReadMessage(pipe0_, buffer, &buffer_size));
+
+ // Write two messages from port 0 (to port 1).
+ buffer[0] = 123456789;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe0_, buffer, sizeof(buffer[0])));
+ buffer[0] = 234567890;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe0_, buffer, sizeof(buffer[0])));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read from port 1 with buffer size 0 (should get the size of next message).
+ // Also test that giving a null buffer is okay when the buffer size is 0.
+ buffer_size = 0;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe1_, nullptr, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+
+ // Read from port 1 with buffer size 1 (too small; should get the size of next
+ // message).
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = 1;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe1_, buffer, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(123, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Read from port 1.
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe1_, buffer, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(123456789, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read again from port 1.
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe1_, buffer, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(234567890, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Read again from port 1 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT, ReadMessage(pipe1_, buffer, &buffer_size));
+
+ // Write from port 0 (to port 1).
+ buffer[0] = 345678901;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe0_, buffer, sizeof(buffer[0])));
+
+ // Close port 0.
+ MojoClose(pipe0_);
+ pipe0_ = MOJO_HANDLE_INVALID;
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &state));
+
+ // Try to write from port 1 (to port 0).
+ buffer[0] = 456789012;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ // Read from port 1; should still get message (even though port 0 was closed).
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe1_, buffer, &buffer_size));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(345678901, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Read again from port 1 -- it should be empty (and port 0 is closed).
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ ReadMessage(pipe1_, buffer, &buffer_size));
+}
+
+TEST_F(MessagePipeTest, CloseWithQueuedIncomingMessages) {
+ int32_t buffer[1];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t buffer_size;
+
+ // Write some messages from port 1 (to port 0).
+ for (int32_t i = 0; i < 5; i++) {
+ buffer[0] = i;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe1_, buffer, kBufferSize));
+ }
+
+ MojoHandleSignalsState state;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Port 0 shouldn't be empty.
+ buffer_size = 0;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe0_, nullptr, &buffer_size));
+ ASSERT_EQ(kBufferSize, buffer_size);
+
+ // Close port 0 first, which should have outstanding (incoming) messages.
+ MojoClose(pipe0_);
+ MojoClose(pipe1_);
+ pipe0_ = pipe1_ = MOJO_HANDLE_INVALID;
+}
+
+TEST_F(MessagePipeTest, DiscardMode) {
+ int32_t buffer[2];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t buffer_size;
+
+ // Write from port 1 (to port 0).
+ buffer[0] = 789012345;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ MojoHandleSignalsState state;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read/discard from port 0 (no buffer); get size.
+ buffer_size = 0;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe0_, nullptr, &buffer_size, true));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+
+ // Read again from port 0 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT,
+ ReadMessage(pipe0_, buffer, &buffer_size, true));
+
+ // Write from port 1 (to port 0).
+ buffer[0] = 890123456;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read from port 0 (buffer big enough).
+ buffer[0] = 123;
+ buffer[1] = 456;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe0_, buffer, &buffer_size, true));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+ ASSERT_EQ(890123456, buffer[0]);
+ ASSERT_EQ(456, buffer[1]);
+
+ // Read again from port 0 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT,
+ ReadMessage(pipe0_, buffer, &buffer_size, true));
+
+ // Write from port 1 (to port 0).
+ buffer[0] = 901234567;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Read/discard from port 0 (buffer too small); get size.
+ buffer_size = 1;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe0_, buffer, &buffer_size, true));
+ ASSERT_EQ(static_cast<uint32_t>(sizeof(buffer[0])), buffer_size);
+
+ // Read again from port 0 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT,
+ ReadMessage(pipe0_, buffer, &buffer_size, true));
+
+ // Write from port 1 (to port 0).
+ buffer[0] = 123456789;
+ buffer[1] = 0;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe1_, buffer, sizeof(buffer[0])));
+
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe0_, MOJO_HANDLE_SIGNAL_READABLE, &state));
+
+ // Discard from port 0.
+ buffer_size = 1;
+ ASSERT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ ReadMessage(pipe0_, nullptr, 0, true));
+
+ // Read again from port 0 -- it should be empty.
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_SHOULD_WAIT,
+ ReadMessage(pipe0_, buffer, &buffer_size, true));
+}
+
+TEST_F(MessagePipeTest, BasicWaiting) {
+ MojoHandleSignalsState hss;
+
+ int32_t buffer[1];
+ const uint32_t kBufferSize = static_cast<uint32_t>(sizeof(buffer));
+ uint32_t buffer_size;
+
+ // Always writable (until the other port is closed). Not yet readable. Peer
+ // not closed.
+ hss = GetSignalsState(pipe0_);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ hss = MojoHandleSignalsState();
+
+ // Write from port 0 (to port 1), to make port 1 readable.
+ buffer[0] = 123456789;
+ ASSERT_EQ(MOJO_RESULT_OK, WriteMessage(pipe0_, buffer, kBufferSize));
+
+ // Port 1 should already be readable now.
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+ // ... and still writable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_WRITABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ hss.satisfied_signals);
+ ASSERT_EQ(kAllSignals, hss.satisfiable_signals);
+
+ // Close port 0.
+ MojoClose(pipe0_);
+ pipe0_ = MOJO_HANDLE_INVALID;
+
+ // Port 1 should be signaled with peer closed.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_PEER_CLOSED, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Port 1 should not be writable.
+ hss = MojoHandleSignalsState();
+
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_WRITABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // But it should still be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ hss.satisfiable_signals);
+
+ // Read from port 1.
+ buffer[0] = 0;
+ buffer_size = kBufferSize;
+ ASSERT_EQ(MOJO_RESULT_OK, ReadMessage(pipe1_, buffer, &buffer_size));
+ ASSERT_EQ(123456789, buffer[0]);
+
+ // Now port 1 should no longer be readable.
+ hss = MojoHandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(pipe1_, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+}
+
+TEST_F(MessagePipeTest, InvalidMessageObjects) {
+ // null message
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoFreeMessage(MOJO_MESSAGE_HANDLE_INVALID));
+
+ // null message
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoGetMessageBuffer(MOJO_MESSAGE_HANDLE_INVALID, nullptr));
+
+ // Non-zero num_handles with null handles array.
+ ASSERT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoAllocMessage(0, nullptr, 1, MOJO_ALLOC_MESSAGE_FLAG_NONE,
+ nullptr));
+}
+
+TEST_F(MessagePipeTest, AllocAndFreeMessage) {
+ const std::string kMessage = "Hello, world.";
+ MojoMessageHandle message = MOJO_MESSAGE_HANDLE_INVALID;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoAllocMessage(static_cast<uint32_t>(kMessage.size()), nullptr, 0,
+ MOJO_ALLOC_MESSAGE_FLAG_NONE, &message));
+ ASSERT_NE(MOJO_MESSAGE_HANDLE_INVALID, message);
+ ASSERT_EQ(MOJO_RESULT_OK, MojoFreeMessage(message));
+}
+
+TEST_F(MessagePipeTest, WriteAndReadMessageObject) {
+ const std::string kMessage = "Hello, world.";
+ MojoMessageHandle message = MOJO_MESSAGE_HANDLE_INVALID;
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoAllocMessage(static_cast<uint32_t>(kMessage.size()), nullptr, 0,
+ MOJO_ALLOC_MESSAGE_FLAG_NONE, &message));
+ ASSERT_NE(MOJO_MESSAGE_HANDLE_INVALID, message);
+
+ void* buffer = nullptr;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoGetMessageBuffer(message, &buffer));
+ ASSERT_TRUE(buffer);
+ memcpy(buffer, kMessage.data(), kMessage.size());
+
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessageNew(a, message, MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(b, MOJO_HANDLE_SIGNAL_READABLE));
+ uint32_t num_bytes = 0;
+ uint32_t num_handles = 0;
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoReadMessageNew(b, &message, &num_bytes, nullptr, &num_handles,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ ASSERT_NE(MOJO_MESSAGE_HANDLE_INVALID, message);
+ EXPECT_EQ(static_cast<uint32_t>(kMessage.size()), num_bytes);
+ EXPECT_EQ(0u, num_handles);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoGetMessageBuffer(message, &buffer));
+ ASSERT_TRUE(buffer);
+
+ EXPECT_EQ(0, strncmp(static_cast<const char*>(buffer), kMessage.data(),
+ num_bytes));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFreeMessage(message));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+#if !defined(OS_IOS)
+
+const size_t kPingPongHandlesPerIteration = 50;
+const size_t kPingPongIterations = 500;
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(HandlePingPong, MessagePipeTest, h) {
+ // Waits for a handle to become readable and writes it back to the sender.
+ for (size_t i = 0; i < kPingPongIterations; i++) {
+ MojoHandle handles[kPingPongHandlesPerIteration];
+ ReadMessageWithHandles(h, handles, kPingPongHandlesPerIteration);
+ WriteMessageWithHandles(h, "", handles, kPingPongHandlesPerIteration);
+ }
+
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE));
+ char msg[4];
+ uint32_t num_bytes = 4;
+ EXPECT_EQ(MOJO_RESULT_OK, ReadMessage(h, msg, &num_bytes));
+}
+
+// This test is flaky: http://crbug.com/585784
+TEST_F(MessagePipeTest, DISABLED_DataPipeConsumerHandlePingPong) {
+ MojoHandle p, c[kPingPongHandlesPerIteration];
+ for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i) {
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateDataPipe(nullptr, &p, &c[i]));
+ MojoClose(p);
+ }
+
+ RUN_CHILD_ON_PIPE(HandlePingPong, h)
+ for (size_t i = 0; i < kPingPongIterations; i++) {
+ WriteMessageWithHandles(h, "", c, kPingPongHandlesPerIteration);
+ ReadMessageWithHandles(h, c, kPingPongHandlesPerIteration);
+ }
+ WriteMessage(h, "quit", 4);
+ END_CHILD()
+ for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i)
+ MojoClose(c[i]);
+}
+
+// This test is flaky: http://crbug.com/585784
+TEST_F(MessagePipeTest, DISABLED_DataPipeProducerHandlePingPong) {
+ MojoHandle p[kPingPongHandlesPerIteration], c;
+ for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i) {
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateDataPipe(nullptr, &p[i], &c));
+ MojoClose(c);
+ }
+
+ RUN_CHILD_ON_PIPE(HandlePingPong, h)
+ for (size_t i = 0; i < kPingPongIterations; i++) {
+ WriteMessageWithHandles(h, "", p, kPingPongHandlesPerIteration);
+ ReadMessageWithHandles(h, p, kPingPongHandlesPerIteration);
+ }
+ WriteMessage(h, "quit", 4);
+ END_CHILD()
+ for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i)
+ MojoClose(p[i]);
+}
+
+TEST_F(MessagePipeTest, SharedBufferHandlePingPong) {
+ MojoHandle buffers[kPingPongHandlesPerIteration];
+  for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i)
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateSharedBuffer(nullptr, 1, &buffers[i]));
+
+ RUN_CHILD_ON_PIPE(HandlePingPong, h)
+ for (size_t i = 0; i < kPingPongIterations; i++) {
+ WriteMessageWithHandles(h, "", buffers, kPingPongHandlesPerIteration);
+ ReadMessageWithHandles(h, buffers, kPingPongHandlesPerIteration);
+ }
+ WriteMessage(h, "quit", 4);
+ END_CHILD()
+ for (size_t i = 0; i < kPingPongHandlesPerIteration; ++i)
+ MojoClose(buffers[i]);
+}
+
+#endif // !defined(OS_IOS)
+
+TEST_F(FuseMessagePipeTest, Basic) {
+ // Test that we can fuse pipes and they still work.
+
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFuseMessagePipes(b, c));
+
+ // Handles b and c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ const std::string kTestMessage1 = "Hello, world!";
+ const std::string kTestMessage2 = "Goodbye, world!";
+
+ WriteMessage(a, kTestMessage1);
+ EXPECT_EQ(kTestMessage1, ReadMessage(d));
+
+ WriteMessage(d, kTestMessage2);
+ EXPECT_EQ(kTestMessage2, ReadMessage(a));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(FuseMessagePipeTest, FuseAfterPeerWrite) {
+ // Test that messages written before fusion are eventually delivered.
+
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ const std::string kTestMessage1 = "Hello, world!";
+ const std::string kTestMessage2 = "Goodbye, world!";
+ WriteMessage(a, kTestMessage1);
+ WriteMessage(d, kTestMessage2);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFuseMessagePipes(b, c));
+
+ // Handles b and c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ EXPECT_EQ(kTestMessage1, ReadMessage(d));
+ EXPECT_EQ(kTestMessage2, ReadMessage(a));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(FuseMessagePipeTest, NoFuseAfterWrite) {
+ // Test that a pipe endpoint which has been written to cannot be fused.
+
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ WriteMessage(b, "shouldn't have done that!");
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, MojoFuseMessagePipes(b, c));
+
+ // Handles b and c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(FuseMessagePipeTest, NoFuseSelf) {
+ // Test that a pipe's own endpoints can't be fused together.
+
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, MojoFuseMessagePipes(a, b));
+
+ // Handles a and b should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+}
+
+TEST_F(FuseMessagePipeTest, FuseInvalidArguments) {
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+
+ // Can't fuse an invalid handle.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoFuseMessagePipes(b, c));
+
+ // Handle c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ // Can't fuse a non-message pipe handle.
+ MojoHandle e, f;
+ CreateDataPipe(&e, &f, 16);
+
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoFuseMessagePipes(e, d));
+
+ // Handles d and e should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(d));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(e));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(f));
+}
+
+TEST_F(FuseMessagePipeTest, FuseAfterPeerClosure) {
+ // Test that peer closure prior to fusion can still be detected after fusion.
+
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFuseMessagePipes(b, c));
+
+ // Handles b and c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(d, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(FuseMessagePipeTest, FuseAfterPeerWriteAndClosure) {
+ // Test that peer write and closure prior to fusion still results in both
+ // message arrival and awareness of peer closure.
+
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+
+ const std::string kTestMessage = "ayyy lmao";
+ WriteMessage(a, kTestMessage);
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFuseMessagePipes(b, c));
+
+ // Handles b and c should be closed.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoClose(c));
+
+ EXPECT_EQ(kTestMessage, ReadMessage(d));
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(d, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(MessagePipeTest, ClosePipesStressTest) {
+ // Stress test to exercise https://crbug.com/665869.
+ const size_t kNumPipes = 100000;
+ for (size_t i = 0; i < kNumPipes; ++i) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+ MojoClose(a);
+ MojoClose(b);
+ }
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/multiprocess_message_pipe_unittest.cc b/mojo/edk/system/multiprocess_message_pipe_unittest.cc
new file mode 100644
index 0000000000..37248d1438
--- /dev/null
+++ b/mojo/edk/system/multiprocess_message_pipe_unittest.cc
@@ -0,0 +1,1366 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/containers/hash_tables.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/string_split.h"
+#include "build/build_config.h"
+#include "mojo/edk/embedder/platform_channel_pair.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/test_utils.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/edk/test/test_utils.h"
+#include "mojo/public/c/system/buffer.h"
+#include "mojo/public/c/system/functions.h"
+#include "mojo/public/c/system/types.h"
+#include "mojo/public/cpp/system/simple_watcher.h"
+#include "mojo/public/cpp/system/wait.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+class MultiprocessMessagePipeTest : public test::MojoTestBase {
+ protected:
+ // Convenience class for tests which will control command-driven children.
+ // See the CommandDrivenClient definition below.
+ class CommandDrivenClientController {
+ public:
+ explicit CommandDrivenClientController(MojoHandle h) : h_(h) {}
+
+ void Send(const std::string& command) {
+ WriteMessage(h_, command);
+ EXPECT_EQ("ok", ReadMessage(h_));
+ }
+
+ void SendHandle(const std::string& name, MojoHandle p) {
+ WriteMessageWithHandles(h_, "take:" + name, &p, 1);
+ EXPECT_EQ("ok", ReadMessage(h_));
+ }
+
+ MojoHandle RetrieveHandle(const std::string& name) {
+ WriteMessage(h_, "return:" + name);
+ MojoHandle p;
+ EXPECT_EQ("ok", ReadMessageWithHandles(h_, &p, 1));
+ return p;
+ }
+
+ void Exit() { WriteMessage(h_, "exit"); }
+
+ private:
+ MojoHandle h_;
+ };
+};
+
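+// Variant of MultiprocessMessagePipeTest parameterized on launch type; see the
+// INSTANTIATE_TEST_CASE_P invocation at the bottom of this file for the
+// launch types covered.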
+class MultiprocessMessagePipeTestWithPeerSupport
+ : public MultiprocessMessagePipeTest,
+ public testing::WithParamInterface<test::MojoTestBase::LaunchType> {
+ protected:
+ void SetUp() override {
+ test::MojoTestBase::SetUp();
+ set_launch_type(GetParam());
+ }
+};
+
+// For each message received, sends a reply message with the same contents
+// repeated twice, until the other end is closed or it receives "quitquitquit"
+// (which it doesn't reply to). It'll return the number of messages received,
+// not including any "quitquitquit" message, modulo 100.
+DEFINE_TEST_CLIENT_WITH_PIPE(EchoEcho, MultiprocessMessagePipeTest, h) {
+ const std::string quitquitquit("quitquitquit");
+ int rv = 0;
+ for (;; rv = (rv + 1) % 100) {
+ // Wait for our end of the message pipe to be readable.
+ HandleSignalsState hss;
+ MojoResult result = WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss);
+ if (result != MOJO_RESULT_OK) {
+ // It was closed, probably.
+ CHECK_EQ(result, MOJO_RESULT_FAILED_PRECONDITION);
+ CHECK_EQ(hss.satisfied_signals, MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+ break;
+ } else {
+ CHECK((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ CHECK((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ }
+
+ std::string read_buffer(1000, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(h, &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ VLOG(2) << "Child got: " << read_buffer;
+
+ if (read_buffer == quitquitquit) {
+ VLOG(2) << "Child quitting.";
+ break;
+ }
+
+ std::string write_buffer = read_buffer + read_buffer;
+ CHECK_EQ(MojoWriteMessage(h, write_buffer.data(),
+ static_cast<uint32_t>(write_buffer.size()),
+ nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ }
+
+ return rv;
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, Basic) {
+ RUN_CHILD_ON_PIPE(EchoEcho, h)
+ std::string hello("hello");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, hello.data(),
+ static_cast<uint32_t>(hello.size()), nullptr, 0u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ // The child may or may not have closed its end of the message pipe and died
+ // (and we may or may not know it yet), so our end may or may not appear as
+ // writable.
+ EXPECT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(1000, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(h, &read_buffer[0],
+ &read_buffer_size, nullptr, 0,
+ MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ VLOG(2) << "Parent got: " << read_buffer;
+ ASSERT_EQ(hello + hello, read_buffer);
+
+ std::string quitquitquit("quitquitquit");
+ CHECK_EQ(MojoWriteMessage(h, quitquitquit.data(),
+ static_cast<uint32_t>(quitquitquit.size()),
+ nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ END_CHILD_AND_EXPECT_EXIT_CODE(1 % 100);
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, QueueMessages) {
+ static const size_t kNumMessages = 1001;
+ RUN_CHILD_ON_PIPE(EchoEcho, h)
+ for (size_t i = 0; i < kNumMessages; i++) {
+ std::string write_buffer(i, 'A' + (i % 26));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, write_buffer.data(),
+ static_cast<uint32_t>(write_buffer.size()),
+ nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE));
+ }
+
+ for (size_t i = 0; i < kNumMessages; i++) {
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ // The child may or may not have closed its end of the message pipe and
+ // died (and we may or may not know it yet), so our end may or may not
+ // appear as writable.
+ ASSERT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ ASSERT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(kNumMessages * 2, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ ASSERT_EQ(MojoReadMessage(h, &read_buffer[0],
+ &read_buffer_size, nullptr, 0,
+ MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+
+ ASSERT_EQ(std::string(i * 2, 'A' + (i % 26)), read_buffer);
+ }
+
+ const std::string quitquitquit("quitquitquit");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, quitquitquit.data(),
+ static_cast<uint32_t>(quitquitquit.size()),
+ nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for it to become readable, which should fail (since we sent
+ // "quitquitquit").
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+ END_CHILD_AND_EXPECT_EXIT_CODE(static_cast<int>(kNumMessages % 100));
+}
+
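+// Receives a shared buffer handle along with "go 1", writes "hello" into the
+// mapped buffer, replies "go 2", then waits for "go 3" and verifies that the
+// parent wrote "world!!!" into the same buffer.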
+DEFINE_TEST_CLIENT_WITH_PIPE(CheckSharedBuffer, MultiprocessMessagePipeTest,
+ h) {
+ // Wait for the first message from our parent.
+ HandleSignalsState hss;
+ CHECK_EQ(WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ // In this test, the parent definitely doesn't close its end of the message
+ // pipe before we do.
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ // It should have a shared buffer.
+ std::string read_buffer(100, '\0');
+ uint32_t num_bytes = static_cast<uint32_t>(read_buffer.size());
+ MojoHandle handles[10];
+ uint32_t num_handlers = arraysize(handles); // Maximum number to receive
+ CHECK_EQ(MojoReadMessage(h, &read_buffer[0],
+ &num_bytes, &handles[0],
+ &num_handlers, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(num_bytes);
+ CHECK_EQ(read_buffer, std::string("go 1"));
+ CHECK_EQ(num_handlers, 1u);
+
+ // Make a mapping.
+ void* buffer;
+ CHECK_EQ(MojoMapBuffer(handles[0], 0, 100, &buffer,
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE),
+ MOJO_RESULT_OK);
+
+ // Write some stuff to the shared buffer.
+ static const char kHello[] = "hello";
+ memcpy(buffer, kHello, sizeof(kHello));
+
+ // We should be able to close the dispatcher now.
+ MojoClose(handles[0]);
+
+ // And send a message to signal that we've written stuff.
+ const std::string go2("go 2");
+ CHECK_EQ(MojoWriteMessage(h, go2.data(),
+ static_cast<uint32_t>(go2.size()), nullptr, 0u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+
+ // Now wait for our parent to send us a message.
+ hss = HandleSignalsState();
+ CHECK_EQ(WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ read_buffer = std::string(100, '\0');
+ num_bytes = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(h, &read_buffer[0], &num_bytes,
+ nullptr, 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(num_bytes);
+ CHECK_EQ(read_buffer, std::string("go 3"));
+
+ // It should have written something to the shared buffer.
+ static const char kWorld[] = "world!!!";
+ CHECK_EQ(memcmp(buffer, kWorld, sizeof(kWorld)), 0);
+
+ // And we're done.
+
+ return 0;
+}
+
+TEST_F(MultiprocessMessagePipeTest, SharedBufferPassing) {
+ RUN_CHILD_ON_PIPE(CheckSharedBuffer, h)
+ // Make a shared buffer.
+ MojoCreateSharedBufferOptions options;
+ options.struct_size = sizeof(options);
+ options.flags = MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE;
+
+ MojoHandle shared_buffer;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateSharedBuffer(&options, 100, &shared_buffer));
+
+ // Send the shared buffer.
+ const std::string go1("go 1");
+
+ MojoHandle duplicated_shared_buffer;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoDuplicateBufferHandle(
+ shared_buffer,
+ nullptr,
+ &duplicated_shared_buffer));
+ MojoHandle handles[1];
+ handles[0] = duplicated_shared_buffer;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, &go1[0],
+ static_cast<uint32_t>(go1.size()), &handles[0],
+ arraysize(handles),
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for a message from the child.
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(100, '\0');
+ uint32_t num_bytes = static_cast<uint32_t>(read_buffer.size());
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessage(h, &read_buffer[0],
+ &num_bytes, nullptr, 0,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ read_buffer.resize(num_bytes);
+ ASSERT_EQ(std::string("go 2"), read_buffer);
+
+ // After we get it, the child should have written something to the shared
+ // buffer.
+ static const char kHello[] = "hello";
+ void* buffer;
+ CHECK_EQ(MojoMapBuffer(shared_buffer, 0, 100, &buffer,
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE),
+ MOJO_RESULT_OK);
+ ASSERT_EQ(0, memcmp(buffer, kHello, sizeof(kHello)));
+
+ // Now we'll write some stuff to the shared buffer.
+ static const char kWorld[] = "world!!!";
+ memcpy(buffer, kWorld, sizeof(kWorld));
+
+ // And send a message to signal that we've written stuff.
+ const std::string go3("go 3");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, &go3[0],
+ static_cast<uint32_t>(go3.size()), nullptr, 0u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for |h| to become readable, which should fail.
+ hss = HandleSignalsState();
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+ END_CHILD()
+}
+
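+// Receives a message of the form "hello <n>" carrying <n> wrapped platform
+// file handles and verifies that each wrapped file contains "world".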
+DEFINE_TEST_CLIENT_WITH_PIPE(CheckPlatformHandleFile,
+ MultiprocessMessagePipeTest, h) {
+ HandleSignalsState hss;
+ CHECK_EQ(WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ std::string read_buffer(100, '\0');
+ uint32_t num_bytes = static_cast<uint32_t>(read_buffer.size());
+ MojoHandle handles[255]; // Maximum number to receive.
+ uint32_t num_handlers = arraysize(handles);
+
+ CHECK_EQ(MojoReadMessage(h, &read_buffer[0],
+ &num_bytes, &handles[0],
+ &num_handlers, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+
+ read_buffer.resize(num_bytes);
+ char hello[32];
+ int num_handles = 0;
+ sscanf(read_buffer.c_str(), "%s %d", hello, &num_handles);
+ CHECK_EQ(std::string("hello"), std::string(hello));
+ CHECK_GT(num_handles, 0);
+
+ for (int i = 0; i < num_handles; ++i) {
+ ScopedPlatformHandle h;
+ CHECK_EQ(PassWrappedPlatformHandle(handles[i], &h), MOJO_RESULT_OK);
+ CHECK(h.is_valid());
+ MojoClose(handles[i]);
+
+ base::ScopedFILE fp(test::FILEFromPlatformHandle(std::move(h), "r"));
+ CHECK(fp);
+ std::string fread_buffer(100, '\0');
+ size_t bytes_read =
+ fread(&fread_buffer[0], 1, fread_buffer.size(), fp.get());
+ fread_buffer.resize(bytes_read);
+ CHECK_EQ(fread_buffer, "world");
+ }
+
+ return 0;
+}
+
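+// Parameterized on the number of platform handles to pass in a single message.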
+class MultiprocessMessagePipeTestWithPipeCount
+ : public MultiprocessMessagePipeTest,
+ public testing::WithParamInterface<size_t> {};
+
+TEST_P(MultiprocessMessagePipeTestWithPipeCount, PlatformHandlePassing) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ RUN_CHILD_ON_PIPE(CheckPlatformHandleFile, h)
+ std::vector<MojoHandle> handles;
+
+ size_t pipe_count = GetParam();
+ for (size_t i = 0; i < pipe_count; ++i) {
+ base::FilePath unused;
+ base::ScopedFILE fp(
+ CreateAndOpenTemporaryFileInDir(temp_dir.GetPath(), &unused));
+ const std::string world("world");
+ CHECK_EQ(fwrite(&world[0], 1, world.size(), fp.get()), world.size());
+ fflush(fp.get());
+ rewind(fp.get());
+ MojoHandle handle;
+ ASSERT_EQ(
+ CreatePlatformHandleWrapper(
+ ScopedPlatformHandle(test::PlatformHandleFromFILE(std::move(fp))),
+ &handle),
+ MOJO_RESULT_OK);
+ handles.push_back(handle);
+ }
+
+ char message[128];
+ snprintf(message, sizeof(message), "hello %d",
+ static_cast<int>(pipe_count));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, message,
+ static_cast<uint32_t>(strlen(message)),
+ &handles[0],
+ static_cast<uint32_t>(handles.size()),
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for it to become readable, which should fail.
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfied_signals);
+ ASSERT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, hss.satisfiable_signals);
+ END_CHILD()
+}
+
+// Android multi-process tests do not actually execute the new process, so
+// these tests are flaky on Android.
+#if !defined(OS_ANDROID)
+INSTANTIATE_TEST_CASE_P(PipeCount,
+ MultiprocessMessagePipeTestWithPipeCount,
+ // TODO(rockot): Re-enable the 140-pipe case when
+ // ChannelPosix has support for sending lots of handles.
+ testing::Values(1u, 128u /*, 140u*/));
+#endif
+
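+// Receives a message pipe handle from the parent, reads "hello" from it,
+// writes "world" back, and then closes it.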
+DEFINE_TEST_CLIENT_WITH_PIPE(CheckMessagePipe, MultiprocessMessagePipeTest, h) {
+ // Wait for the first message from our parent.
+ HandleSignalsState hss;
+ CHECK_EQ(WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ // In this test, the parent definitely doesn't close its end of the message
+ // pipe before we do.
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ // It should have a message pipe.
+ MojoHandle handles[10];
+ uint32_t num_handlers = arraysize(handles);
+ CHECK_EQ(MojoReadMessage(h, nullptr,
+ nullptr, &handles[0],
+ &num_handlers, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ CHECK_EQ(num_handlers, 1u);
+
+ // Read data from the received message pipe.
+ CHECK_EQ(WaitForSignals(handles[0], MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ std::string read_buffer(100, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(handles[0], &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ CHECK_EQ(read_buffer, std::string("hello"));
+
+ // Now write some data into the message pipe.
+ std::string write_buffer = "world";
+ CHECK_EQ(MojoWriteMessage(handles[0], write_buffer.data(),
+ static_cast<uint32_t>(write_buffer.size()), nullptr,
+ 0u, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ MojoClose(handles[0]);
+ return 0;
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, MessagePipePassing) {
+ RUN_CHILD_ON_PIPE(CheckMessagePipe, h)
+ MojoCreateSharedBufferOptions options;
+ options.struct_size = sizeof(options);
+ options.flags = MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE;
+
+ MojoHandle mp1, mp2;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateMessagePipe(nullptr, &mp1, &mp2));
+
+ // Write a string into one end of the new message pipe and send the other
+ // end.
+ const std::string hello("hello");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(mp1, &hello[0],
+ static_cast<uint32_t>(hello.size()), nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, nullptr, 0, &mp2, 1,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for a message from the child.
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(mp1, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(100, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(mp1, &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ CHECK_EQ(read_buffer, std::string("world"));
+
+ MojoClose(mp1);
+ END_CHILD()
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, MessagePipeTwoPassing) {
+ RUN_CHILD_ON_PIPE(CheckMessagePipe, h)
+ MojoHandle mp1, mp2;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateMessagePipe(nullptr, &mp2, &mp1));
+
+ // Write a string into one end of the new message pipe and send the other
+ // end.
+ const std::string hello("hello");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(mp1, &hello[0],
+ static_cast<uint32_t>(hello.size()), nullptr, 0u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, nullptr, 0u, &mp2, 1u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for a message from the child.
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(mp1, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(100, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(mp1, &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ CHECK_EQ(read_buffer, std::string("world"));
+ END_CHILD();
+}
+
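+// Despite the name, this behaves just like CheckMessagePipe: it receives a
+// message pipe handle, reads "hello", replies "world", and closes the pipe.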
+DEFINE_TEST_CLIENT_WITH_PIPE(DataPipeConsumer, MultiprocessMessagePipeTest, h) {
+ // Wait for the first message from our parent.
+ HandleSignalsState hss;
+ CHECK_EQ(WaitForSignals(h, MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ // In this test, the parent definitely doesn't close its end of the message
+ // pipe before we do.
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ // It should have a message pipe.
+ MojoHandle handles[10];
+ uint32_t num_handlers = arraysize(handles);
+ CHECK_EQ(MojoReadMessage(h, nullptr,
+ nullptr, &handles[0],
+ &num_handlers, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ CHECK_EQ(num_handlers, 1u);
+
+ // Read data from the received message pipe.
+ CHECK_EQ(WaitForSignals(handles[0], MOJO_HANDLE_SIGNAL_READABLE, &hss),
+ MOJO_RESULT_OK);
+ CHECK_EQ(hss.satisfied_signals,
+ MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE);
+ CHECK_EQ(hss.satisfiable_signals, MOJO_HANDLE_SIGNAL_READABLE |
+ MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+
+ std::string read_buffer(100, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(handles[0], &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ CHECK_EQ(read_buffer, std::string("hello"));
+
+ // Now write some data into the message pipe.
+ std::string write_buffer = "world";
+ CHECK_EQ(MojoWriteMessage(handles[0], write_buffer.data(),
+ static_cast<uint32_t>(write_buffer.size()),
+ nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ MojoClose(handles[0]);
+ return 0;
+}
+
+TEST_F(MultiprocessMessagePipeTest, DataPipeConsumer) {
+ RUN_CHILD_ON_PIPE(DataPipeConsumer, h)
+ MojoCreateSharedBufferOptions options;
+ options.struct_size = sizeof(options);
+ options.flags = MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE;
+
+ MojoHandle mp1, mp2;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoCreateMessagePipe(nullptr, &mp2, &mp1));
+
+ // Write a string into one end of the new message pipe and send the other
+ // end.
+ const std::string hello("hello");
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(mp1, &hello[0],
+ static_cast<uint32_t>(hello.size()), nullptr, 0u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWriteMessage(h, nullptr, 0, &mp2, 1u,
+ MOJO_WRITE_MESSAGE_FLAG_NONE));
+
+ // Wait for a message from the child.
+ HandleSignalsState hss;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(mp1, MOJO_HANDLE_SIGNAL_READABLE, &hss));
+ EXPECT_TRUE((hss.satisfied_signals & MOJO_HANDLE_SIGNAL_READABLE));
+ EXPECT_TRUE((hss.satisfiable_signals & MOJO_HANDLE_SIGNAL_READABLE));
+
+ std::string read_buffer(100, '\0');
+ uint32_t read_buffer_size = static_cast<uint32_t>(read_buffer.size());
+ CHECK_EQ(MojoReadMessage(mp1, &read_buffer[0],
+ &read_buffer_size, nullptr,
+ 0, MOJO_READ_MESSAGE_FLAG_NONE),
+ MOJO_RESULT_OK);
+ read_buffer.resize(read_buffer_size);
+ CHECK_EQ(read_buffer, std::string("world"));
+
+ MojoClose(mp1);
+ END_CHILD();
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, CreateMessagePipe) {
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+ VerifyTransmission(p0, p1, std::string(10 * 1024 * 1024, 'a'));
+ VerifyTransmission(p1, p0, std::string(10 * 1024 * 1024, 'e'));
+
+ CloseHandle(p0);
+ CloseHandle(p1);
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, PassMessagePipeLocal) {
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+ VerifyTransmission(p0, p1, "testing testing");
+ VerifyTransmission(p1, p0, "one two three");
+
+ MojoHandle p2, p3;
+
+ CreateMessagePipe(&p2, &p3);
+ VerifyTransmission(p2, p3, "testing testing");
+ VerifyTransmission(p3, p2, "one two three");
+
+ // Pass p2 over p0 to p1.
+ const std::string message = "ceci n'est pas une pipe";
+ WriteMessageWithHandles(p0, message, &p2, 1);
+ EXPECT_EQ(message, ReadMessageWithHandles(p1, &p2, 1));
+
+ CloseHandle(p0);
+ CloseHandle(p1);
+
+ // Verify that the received handle (now in p2) still works.
+ VerifyTransmission(p2, p3, "Easy come, easy go; will you let me go?");
+ VerifyTransmission(p3, p2, "Bismillah! NO! We will not let you go!");
+
+ CloseHandle(p2);
+ CloseHandle(p3);
+}
+
+// Echoes messages on the primordial channel until "exit".
+DEFINE_TEST_CLIENT_WITH_PIPE(ChannelEchoClient, MultiprocessMessagePipeTest,
+ h) {
+ for (;;) {
+ std::string message = ReadMessage(h);
+ if (message == "exit")
+ break;
+ WriteMessage(h, message);
+ }
+ return 0;
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, MultiprocessChannelPipe) {
+ RUN_CHILD_ON_PIPE(ChannelEchoClient, h)
+ VerifyEcho(h, "in an interstellar burst");
+ VerifyEcho(h, "i am back to save the universe");
+ VerifyEcho(h, std::string(10 * 1024 * 1024, 'o'));
+
+ WriteMessage(h, "exit");
+ END_CHILD()
+}
+
+// Receives a pipe handle from the primordial channel and echoes on it until
+// "exit". Used to test simple pipe transfer across processes via channels.
+DEFINE_TEST_CLIENT_WITH_PIPE(EchoServiceClient, MultiprocessMessagePipeTest,
+ h) {
+ MojoHandle p;
+ ReadMessageWithHandles(h, &p, 1);
+ for (;;) {
+ std::string message = ReadMessage(p);
+ if (message == "exit")
+ break;
+ WriteMessage(p, message);
+ }
+ return 0;
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport,
+ PassMessagePipeCrossProcess) {
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+ RUN_CHILD_ON_PIPE(EchoServiceClient, h)
+
+ // Pass one end of the pipe to the other process.
+ WriteMessageWithHandles(h, "here take this", &p1, 1);
+
+ VerifyEcho(p0, "and you may ask yourself");
+ VerifyEcho(p0, "where does that highway go?");
+ VerifyEcho(p0, std::string(20 * 1024 * 1024, 'i'));
+
+ WriteMessage(p0, "exit");
+ END_CHILD()
+ CloseHandle(p0);
+}
+
+// Receives a pipe handle from the primordial channel and reads new handles
+// from it. Each read handle establishes a new echo channel.
+DEFINE_TEST_CLIENT_WITH_PIPE(EchoServiceFactoryClient,
+ MultiprocessMessagePipeTest, h) {
+ MojoHandle p;
+ ReadMessageWithHandles(h, &p, 1);
+
+ std::vector<Handle> handles(2);
+ handles[0] = Handle(h);
+ handles[1] = Handle(p);
+ std::vector<MojoHandleSignals> signals(2, MOJO_HANDLE_SIGNAL_READABLE);
+ for (;;) {
+ size_t index;
+ CHECK_EQ(
+ mojo::WaitMany(handles.data(), signals.data(), handles.size(), &index),
+ MOJO_RESULT_OK);
+ DCHECK_LE(index, handles.size());
+ if (index == 0) {
+ // If data is available on the first pipe, it should be an exit command.
+ EXPECT_EQ(std::string("exit"), ReadMessage(h));
+ break;
+ } else if (index == 1) {
+ // If data is available on the second pipe, it should be a new handle
+ // requesting echo service.
+ MojoHandle echo_request;
+ ReadMessageWithHandles(p, &echo_request, 1);
+ handles.push_back(Handle(echo_request));
+ signals.push_back(MOJO_HANDLE_SIGNAL_READABLE);
+ } else {
+ // Otherwise it was one of our established echo pipes. Echo!
+ WriteMessage(handles[index].value(), ReadMessage(handles[index].value()));
+ }
+ }
+
+ for (size_t i = 1; i < handles.size(); ++i)
+ CloseHandle(handles[i].value());
+
+ return 0;
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport,
+ PassMoarMessagePipesCrossProcess) {
+ MojoHandle echo_factory_proxy, echo_factory_request;
+ CreateMessagePipe(&echo_factory_proxy, &echo_factory_request);
+
+ MojoHandle echo_proxy_a, echo_request_a;
+ CreateMessagePipe(&echo_proxy_a, &echo_request_a);
+
+ MojoHandle echo_proxy_b, echo_request_b;
+ CreateMessagePipe(&echo_proxy_b, &echo_request_b);
+
+ MojoHandle echo_proxy_c, echo_request_c;
+ CreateMessagePipe(&echo_proxy_c, &echo_request_c);
+
+ RUN_CHILD_ON_PIPE(EchoServiceFactoryClient, h)
+ WriteMessageWithHandles(
+ h, "gief factory naow plz", &echo_factory_request, 1);
+
+ WriteMessageWithHandles(echo_factory_proxy, "give me an echo service plz!",
+ &echo_request_a, 1);
+ WriteMessageWithHandles(echo_factory_proxy, "give me one too!",
+ &echo_request_b, 1);
+
+ VerifyEcho(echo_proxy_a, "i came here for an argument");
+ VerifyEcho(echo_proxy_a, "shut your festering gob");
+ VerifyEcho(echo_proxy_a, "mumble mumble mumble");
+
+ VerifyEcho(echo_proxy_b, "wubalubadubdub");
+ VerifyEcho(echo_proxy_b, "wubalubadubdub");
+
+ WriteMessageWithHandles(echo_factory_proxy, "hook me up also thanks",
+ &echo_request_c, 1);
+
+ VerifyEcho(echo_proxy_a, "the frobinators taste like frobinators");
+ VerifyEcho(echo_proxy_b, "beep bop boop");
+ VerifyEcho(echo_proxy_c, "zzzzzzzzzzzzzzzzzzzzzzzzzz");
+
+ WriteMessage(h, "exit");
+ END_CHILD()
+
+ CloseHandle(echo_factory_proxy);
+ CloseHandle(echo_proxy_a);
+ CloseHandle(echo_proxy_b);
+ CloseHandle(echo_proxy_c);
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport,
+ ChannelPipesWithMultipleChildren) {
+ RUN_CHILD_ON_PIPE(ChannelEchoClient, a)
+ RUN_CHILD_ON_PIPE(ChannelEchoClient, b)
+ VerifyEcho(a, "hello child 0");
+ VerifyEcho(b, "hello child 1");
+
+ WriteMessage(a, "exit");
+ WriteMessage(b, "exit");
+ END_CHILD()
+ END_CHILD()
+}
+
+// Reads a pipe handle from the parent and bounces it back and forth some
+// number of times to create lots of transient proxies.
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(PingPongPipeClient,
+ MultiprocessMessagePipeTest, h) {
+ const size_t kNumBounces = 50;
+ MojoHandle p0, p1;
+ ReadMessageWithHandles(h, &p0, 1);
+ ReadMessageWithHandles(h, &p1, 1);
+ for (size_t i = 0; i < kNumBounces; ++i) {
+ WriteMessageWithHandles(h, "", &p1, 1);
+ ReadMessageWithHandles(h, &p1, 1);
+ }
+ WriteMessageWithHandles(h, "", &p0, 1);
+ WriteMessage(p1, "bye");
+ MojoClose(p1);
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, PingPongPipe) {
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+
+ RUN_CHILD_ON_PIPE(PingPongPipeClient, h)
+ const size_t kNumBounces = 50;
+ WriteMessageWithHandles(h, "", &p0, 1);
+ WriteMessageWithHandles(h, "", &p1, 1);
+ for (size_t i = 0; i < kNumBounces; ++i) {
+ ReadMessageWithHandles(h, &p1, 1);
+ WriteMessageWithHandles(h, "", &p1, 1);
+ }
+ ReadMessageWithHandles(h, &p0, 1);
+ WriteMessage(h, "quit");
+ END_CHILD()
+
+ EXPECT_EQ("bye", ReadMessage(p0));
+
+ // We should still be able to observe peer closure from the other end.
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(p0, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+}
+
+// Parses commands from the parent pipe and does whatever it's asked to do.
+DEFINE_TEST_CLIENT_WITH_PIPE(CommandDrivenClient, MultiprocessMessagePipeTest,
+ h) {
+ base::hash_map<std::string, MojoHandle> named_pipes;
+ for (;;) {
+ MojoHandle p;
+ auto parts = base::SplitString(ReadMessageWithOptionalHandle(h, &p), ":",
+ base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ CHECK(!parts.empty());
+ std::string command = parts[0];
+ if (command == "take") {
+ // Take a pipe.
+ CHECK_EQ(parts.size(), 2u);
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ named_pipes[parts[1]] = p;
+ WriteMessage(h, "ok");
+ } else if (command == "return") {
+ // Return a pipe.
+ CHECK_EQ(parts.size(), 2u);
+ CHECK_EQ(p, MOJO_HANDLE_INVALID);
+ p = named_pipes[parts[1]];
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ named_pipes.erase(parts[1]);
+ WriteMessageWithHandles(h, "ok", &p, 1);
+ } else if (command == "say") {
+ // Say something to a named pipe.
+ CHECK_EQ(parts.size(), 3u);
+ CHECK_EQ(p, MOJO_HANDLE_INVALID);
+ p = named_pipes[parts[1]];
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ CHECK(!parts[2].empty());
+ WriteMessage(p, parts[2]);
+ WriteMessage(h, "ok");
+ } else if (command == "hear") {
+ // Expect to read something from a named pipe.
+ CHECK_EQ(parts.size(), 3u);
+ CHECK_EQ(p, MOJO_HANDLE_INVALID);
+ p = named_pipes[parts[1]];
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ CHECK(!parts[2].empty());
+ CHECK_EQ(parts[2], ReadMessage(p));
+ WriteMessage(h, "ok");
+ } else if (command == "pass") {
+ // Pass one named pipe over another named pipe.
+ CHECK_EQ(parts.size(), 3u);
+ CHECK_EQ(p, MOJO_HANDLE_INVALID);
+ p = named_pipes[parts[1]];
+ MojoHandle carrier = named_pipes[parts[2]];
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ CHECK_NE(carrier, MOJO_HANDLE_INVALID);
+ named_pipes.erase(parts[1]);
+ WriteMessageWithHandles(carrier, "got a pipe for ya", &p, 1);
+ WriteMessage(h, "ok");
+ } else if (command == "catch") {
+ // Expect to receive one named pipe from another named pipe.
+ CHECK_EQ(parts.size(), 3u);
+ CHECK_EQ(p, MOJO_HANDLE_INVALID);
+ MojoHandle carrier = named_pipes[parts[2]];
+ CHECK_NE(carrier, MOJO_HANDLE_INVALID);
+ ReadMessageWithHandles(carrier, &p, 1);
+ CHECK_NE(p, MOJO_HANDLE_INVALID);
+ named_pipes[parts[1]] = p;
+ WriteMessage(h, "ok");
+ } else if (command == "exit") {
+ CHECK_EQ(parts.size(), 1u);
+ break;
+ }
+ }
+
+ for (auto& pipe : named_pipes)
+ CloseHandle(pipe.second);
+
+ return 0;
+}
+
+TEST_F(MultiprocessMessagePipeTest, ChildToChildPipes) {
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h0)
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h1)
+ CommandDrivenClientController a(h0);
+ CommandDrivenClientController b(h1);
+
+ // Create a pipe and pass each end to a different client.
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+ a.SendHandle("x", p0);
+ b.SendHandle("y", p1);
+
+ // Make sure they can talk.
+ a.Send("say:x:hello");
+ b.Send("hear:y:hello");
+
+ b.Send("say:y:i love multiprocess pipes!");
+ a.Send("hear:x:i love multiprocess pipes!");
+
+ a.Exit();
+ b.Exit();
+ END_CHILD()
+ END_CHILD()
+}
+
+TEST_F(MultiprocessMessagePipeTest, MoreChildToChildPipes) {
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h0)
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h1)
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h2)
+ RUN_CHILD_ON_PIPE(CommandDrivenClient, h3)
+ CommandDrivenClientController a(h0), b(h1), c(h2), d(h3);
+
+ // Connect a to b and c to d
+
+ MojoHandle p0, p1;
+
+ CreateMessagePipe(&p0, &p1);
+ a.SendHandle("b_pipe", p0);
+ b.SendHandle("a_pipe", p1);
+
+ MojoHandle p2, p3;
+
+ CreateMessagePipe(&p2, &p3);
+ c.SendHandle("d_pipe", p2);
+ d.SendHandle("c_pipe", p3);
+
+ // Connect b to c via a and d
+ MojoHandle p4, p5;
+ CreateMessagePipe(&p4, &p5);
+ a.SendHandle("d_pipe", p4);
+ d.SendHandle("a_pipe", p5);
+
+ // Have |a| pass its new |d|-pipe to |b|. It will eventually connect
+ // to |c|.
+ a.Send("pass:d_pipe:b_pipe");
+ b.Send("catch:c_pipe:a_pipe");
+
+ // Have |d| pass its new |a|-pipe to |c|. It will now be connected to
+ // |b|.
+ d.Send("pass:a_pipe:c_pipe");
+ c.Send("catch:b_pipe:d_pipe");
+
+ // Make sure b and c can talk.
+ b.Send("say:c_pipe:it's a beautiful day");
+ c.Send("hear:b_pipe:it's a beautiful day");
+
+ // Create x and y and have b and c exchange them.
+ MojoHandle x, y;
+ CreateMessagePipe(&x, &y);
+ b.SendHandle("x", x);
+ c.SendHandle("y", y);
+ b.Send("pass:x:c_pipe");
+ c.Send("pass:y:b_pipe");
+ b.Send("catch:y:c_pipe");
+ c.Send("catch:x:b_pipe");
+
+ // Make sure the pipe still works in both directions.
+ b.Send("say:y:hello");
+ c.Send("hear:x:hello");
+ c.Send("say:x:goodbye");
+ b.Send("hear:y:goodbye");
+
+ // Take both pipes back.
+ y = c.RetrieveHandle("x");
+ x = b.RetrieveHandle("y");
+
+ VerifyTransmission(x, y, "still works");
+ VerifyTransmission(y, x, "in both directions");
+
+ CloseHandle(x);
+ CloseHandle(y);
+
+ a.Exit();
+ b.Exit();
+ c.Exit();
+ d.Exit();
+ END_CHILD()
+ END_CHILD()
+ END_CHILD()
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReceivePipeWithClosedPeer,
+ MultiprocessMessagePipeTest, h) {
+ MojoHandle p;
+ EXPECT_EQ("foo", ReadMessageWithHandles(h, &p, 1));
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(p, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, SendPipeThenClosePeer) {
+ RUN_CHILD_ON_PIPE(ReceivePipeWithClosedPeer, h)
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ // Send |a| and immediately close |b|. The child should observe closure.
+ WriteMessageWithHandles(h, "foo", &a, 1);
+ MojoClose(b);
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(SendOtherChildPipeWithClosedPeer,
+ MultiprocessMessagePipeTest, h) {
+ // Create a new pipe and send one end to the parent, who will connect it to
+ // a client running ReceivePipeWithClosedPeerFromOtherChild.
+ MojoHandle application_proxy, application_request;
+ CreateMessagePipe(&application_proxy, &application_request);
+ WriteMessageWithHandles(h, "c2a plz", &application_request, 1);
+
+ // Create another pipe and send one end to the remote "application".
+ MojoHandle service_proxy, service_request;
+ CreateMessagePipe(&service_proxy, &service_request);
+ WriteMessageWithHandles(application_proxy, "c2s lol", &service_request, 1);
+
+ // Immediately close the service proxy. The "application" should detect this.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(service_proxy));
+
+ // Wait for quit.
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReceivePipeWithClosedPeerFromOtherChild,
+ MultiprocessMessagePipeTest, h) {
+ // Receive a pipe from the parent. This is akin to an "application request".
+ MojoHandle application_client;
+ EXPECT_EQ("c2a", ReadMessageWithHandles(h, &application_client, 1));
+
+ // Receive a pipe from the "application" "client".
+ MojoHandle service_client;
+ EXPECT_EQ("c2s lol",
+ ReadMessageWithHandles(application_client, &service_client, 1));
+
+ // Wait for the service client to signal closure.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(service_client, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(service_client));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(application_client));
+}
+
+#if defined(OS_ANDROID)
+// Android multi-process tests do not actually execute the new process, so
+// this test is flaky on Android.
+#define MAYBE_SendPipeWithClosedPeerBetweenChildren \
+ DISABLED_SendPipeWithClosedPeerBetweenChildren
+#else
+#define MAYBE_SendPipeWithClosedPeerBetweenChildren \
+ SendPipeWithClosedPeerBetweenChildren
+#endif
+TEST_F(MultiprocessMessagePipeTest,
+ MAYBE_SendPipeWithClosedPeerBetweenChildren) {
+ RUN_CHILD_ON_PIPE(SendOtherChildPipeWithClosedPeer, kid_a)
+ RUN_CHILD_ON_PIPE(ReceivePipeWithClosedPeerFromOtherChild, kid_b)
+ // Receive an "application request" from the first child and forward it
+ // to the second child.
+ MojoHandle application_request;
+ EXPECT_EQ("c2a plz",
+ ReadMessageWithHandles(kid_a, &application_request, 1));
+
+ WriteMessageWithHandles(kid_b, "c2a", &application_request, 1);
+ END_CHILD()
+
+ WriteMessage(kid_a, "quit");
+ END_CHILD()
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, SendClosePeerSend) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ // Send |a| over |c|, immediately close |b|, then send |a| back over |d|.
+ WriteMessageWithHandles(c, "foo", &a, 1);
+ EXPECT_EQ("foo", ReadMessageWithHandles(d, &a, 1));
+ WriteMessageWithHandles(d, "bar", &a, 1);
+ EXPECT_EQ("bar", ReadMessageWithHandles(c, &a, 1));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+
+ // We should be able to detect peer closure on |a|.
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(a, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(WriteCloseSendPeerClient,
+ MultiprocessMessagePipeTest, h) {
+ MojoHandle pipe[2];
+ EXPECT_EQ("foo", ReadMessageWithHandles(h, pipe, 2));
+
+ // Write some messages to the first endpoint and then close it.
+ WriteMessage(pipe[0], "baz");
+ WriteMessage(pipe[0], "qux");
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(pipe[0]));
+
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ // Pass the orphaned endpoint over another pipe before passing it back to
+ // the parent, just for some extra proxying goodness.
+ WriteMessageWithHandles(c, "foo", &pipe[1], 1);
+ EXPECT_EQ("foo", ReadMessageWithHandles(d, &pipe[1], 1));
+
+ // And finally pass it back to the parent.
+ WriteMessageWithHandles(h, "bar", &pipe[1], 1);
+
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_P(MultiprocessMessagePipeTestWithPeerSupport, WriteCloseSendPeer) {
+ MojoHandle pipe[2];
+ CreateMessagePipe(&pipe[0], &pipe[1]);
+
+ RUN_CHILD_ON_PIPE(WriteCloseSendPeerClient, h)
+ // Pass the pipe to the child.
+ WriteMessageWithHandles(h, "foo", pipe, 2);
+
+ // Read back an endpoint which should have messages on it.
+ MojoHandle p;
+ EXPECT_EQ("bar", ReadMessageWithHandles(h, &p, 1));
+
+ EXPECT_EQ("baz", ReadMessage(p));
+ EXPECT_EQ("qux", ReadMessage(p));
+
+ // Expect to have peer closure signaled.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(p, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ WriteMessage(h, "quit");
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(MessagePipeStatusChangeInTransitClient,
+ MultiprocessMessagePipeTest, parent) {
+ // This test verifies that peer closure is detectable through various
+ // mechanisms when it races with handle transfer.
+ MojoHandle handles[4];
+ EXPECT_EQ("o_O", ReadMessageWithHandles(parent, handles, 4));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ WaitForSignals(handles[0], MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ base::MessageLoop message_loop;
+
+ // Wait on handle 1 using a SimpleWatcher.
+ {
+ base::RunLoop run_loop;
+ SimpleWatcher watcher(FROM_HERE, SimpleWatcher::ArmingPolicy::AUTOMATIC);
+ watcher.Watch(Handle(handles[1]), MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ base::Bind(
+ [](base::RunLoop* loop, MojoResult result) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ loop->Quit();
+ },
+ &run_loop));
+ run_loop.Run();
+ }
+
+ // Wait on handle 2 by polling with MojoReadMessage.
+ MojoResult result;
+ do {
+ result = MojoReadMessage(handles[2], nullptr, nullptr, nullptr, nullptr,
+ MOJO_READ_MESSAGE_FLAG_NONE);
+ } while (result == MOJO_RESULT_SHOULD_WAIT);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+
+ // Wait on handle 3 by polling with MojoWriteMessage.
+ do {
+ result = MojoWriteMessage(handles[3], nullptr, 0, nullptr, 0,
+ MOJO_WRITE_MESSAGE_FLAG_NONE);
+ } while (result == MOJO_RESULT_OK);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+
+ for (size_t i = 0; i < 4; ++i)
+ CloseHandle(handles[i]);
+}
+
+TEST_F(MultiprocessMessagePipeTest, MessagePipeStatusChangeInTransit) {
+ MojoHandle local_handles[4];
+ MojoHandle sent_handles[4];
+ for (size_t i = 0; i < 4; ++i)
+ CreateMessagePipe(&local_handles[i], &sent_handles[i]);
+
+ RUN_CHILD_ON_PIPE(MessagePipeStatusChangeInTransitClient, child)
+ // Send 4 handles and let their transfer race with their peers' closure.
+ WriteMessageWithHandles(child, "o_O", sent_handles, 4);
+ for (size_t i = 0; i < 4; ++i)
+ CloseHandle(local_handles[i]);
+ END_CHILD()
+}
+
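+// Receives a pipe from the parent, writes a message which the parent will
+// flag as bad via MojoNotifyBadMessage, then waits for "bye".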
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(BadMessageClient, MultiprocessMessagePipeTest,
+ parent) {
+ MojoHandle pipe;
+ EXPECT_EQ("hi", ReadMessageWithHandles(parent, &pipe, 1));
+ WriteMessage(pipe, "derp");
+ EXPECT_EQ("bye", ReadMessage(parent));
+}
+
+void OnProcessError(std::string* out_error, const std::string& error) {
+ *out_error = error;
+}
+
+TEST_F(MultiprocessMessagePipeTest, NotifyBadMessage) {
+ const std::string kFirstErrorMessage = "everything is terrible!";
+ const std::string kSecondErrorMessage = "not the bits you're looking for";
+
+ std::string first_process_error;
+ std::string second_process_error;
+
+ set_process_error_callback(base::Bind(&OnProcessError, &first_process_error));
+ RUN_CHILD_ON_PIPE(BadMessageClient, child1)
+ set_process_error_callback(base::Bind(&OnProcessError,
+ &second_process_error));
+ RUN_CHILD_ON_PIPE(BadMessageClient, child2)
+ MojoHandle a, b, c, d;
+ CreateMessagePipe(&a, &b);
+ CreateMessagePipe(&c, &d);
+ WriteMessageWithHandles(child1, "hi", &b, 1);
+ WriteMessageWithHandles(child2, "hi", &d, 1);
+
+ // Read a message from the pipe we sent to child1 and flag it as bad.
+ ASSERT_EQ(MOJO_RESULT_OK, WaitForSignals(a, MOJO_HANDLE_SIGNAL_READABLE));
+ uint32_t num_bytes = 0;
+ MojoMessageHandle message;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessageNew(a, &message, &num_bytes, nullptr, 0,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoNotifyBadMessage(message, kFirstErrorMessage.data(),
+ kFirstErrorMessage.size()));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFreeMessage(message));
+
+ // Read a message from the pipe we sent to child2 and flag it as bad.
+ ASSERT_EQ(MOJO_RESULT_OK, WaitForSignals(c, MOJO_HANDLE_SIGNAL_READABLE));
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoReadMessageNew(c, &message, &num_bytes, nullptr, 0,
+ MOJO_READ_MESSAGE_FLAG_NONE));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoNotifyBadMessage(message, kSecondErrorMessage.data(),
+ kSecondErrorMessage.size()));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoFreeMessage(message));
+
+ WriteMessage(child2, "bye");
+ END_CHILD();
+
+ WriteMessage(child1, "bye");
+ END_CHILD()
+
+ // The error messages should match the processes which triggered them.
+ EXPECT_NE(std::string::npos, first_process_error.find(kFirstErrorMessage));
+ EXPECT_NE(std::string::npos, second_process_error.find(kSecondErrorMessage));
+}
+
+INSTANTIATE_TEST_CASE_P(
+ ,
+ MultiprocessMessagePipeTestWithPeerSupport,
+ testing::Values(test::MojoTestBase::LaunchType::CHILD,
+ test::MojoTestBase::LaunchType::PEER,
+ test::MojoTestBase::LaunchType::NAMED_CHILD,
+ test::MojoTestBase::LaunchType::NAMED_PEER));
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/node_channel.cc b/mojo/edk/system/node_channel.cc
new file mode 100644
index 0000000000..b0f770d907
--- /dev/null
+++ b/mojo/edk/system/node_channel.cc
@@ -0,0 +1,905 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/node_channel.h"
+
+#include <cstring>
+#include <limits>
+#include <sstream>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "mojo/edk/system/channel.h"
+#include "mojo/edk/system/request_context.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include "mojo/edk/system/mach_port_relay.h"
+#endif
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
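+// Rounds |t| up to the nearest multiple of kChannelMessageAlignment.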
+template <typename T>
+T Align(T t) {
+ const auto k = kChannelMessageAlignment;
+ return t + (k - (t % k)) % k;
+}
+
+// NOTE: Please ONLY append messages to the end of this enum.
+enum class MessageType : uint32_t {
+ ACCEPT_CHILD,
+ ACCEPT_PARENT,
+ ADD_BROKER_CLIENT,
+ BROKER_CLIENT_ADDED,
+ ACCEPT_BROKER_CLIENT,
+ PORTS_MESSAGE,
+ REQUEST_PORT_MERGE,
+ REQUEST_INTRODUCTION,
+ INTRODUCE,
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ RELAY_PORTS_MESSAGE,
+#endif
+ BROADCAST,
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ PORTS_MESSAGE_FROM_RELAY,
+#endif
+ ACCEPT_PEER,
+};
+
+struct Header {
+ MessageType type;
+ uint32_t padding;
+};
+
+static_assert(IsAlignedForChannelMessage(sizeof(Header)),
+ "Invalid header size.");
+
+struct AcceptChildData {
+ ports::NodeName parent_name;
+ ports::NodeName token;
+};
+
+struct AcceptParentData {
+ ports::NodeName token;
+ ports::NodeName child_name;
+};
+
+struct AcceptPeerData {
+ ports::NodeName token;
+ ports::NodeName peer_name;
+ ports::PortName port_name;
+};
+
+// This message may include a process handle on platforms that require it.
+struct AddBrokerClientData {
+ ports::NodeName client_name;
+#if !defined(OS_WIN)
+ uint32_t process_handle;
+ uint32_t padding;
+#endif
+};
+
+#if !defined(OS_WIN)
+static_assert(sizeof(base::ProcessHandle) == sizeof(uint32_t),
+ "Unexpected pid size");
+static_assert(sizeof(AddBrokerClientData) % kChannelMessageAlignment == 0,
+ "Invalid AddBrokerClientData size.");
+#endif
+
+// This data is followed by a platform channel handle to the broker.
+struct BrokerClientAddedData {
+ ports::NodeName client_name;
+};
+
+// This data may be followed by a platform channel handle to the broker. If not,
+// then the parent is the broker and its channel should be used as such.
+struct AcceptBrokerClientData {
+ ports::NodeName broker_name;
+};
+
+// This is followed by arbitrary payload data which is interpreted as a token
+// string for port location.
+struct RequestPortMergeData {
+ ports::PortName connector_port_name;
+};
+
+// Used for both REQUEST_INTRODUCTION and INTRODUCE.
+//
+// For INTRODUCE the message also includes a valid platform handle for a channel
+// the receiver may use to communicate with the named node directly, or an
+// invalid platform handle if the node is unknown to the sender or otherwise
+// cannot be introduced.
+struct IntroductionData {
+ ports::NodeName name;
+};
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+// This struct is followed by the full payload of a message to be relayed.
+struct RelayPortsMessageData {
+ ports::NodeName destination;
+};
+
+// This struct is followed by the full payload of a relayed message.
+struct PortsMessageFromRelayData {
+ ports::NodeName source;
+};
+#endif
+
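+// Allocates a Channel::Message large enough for a Header followed by
+// |payload_size| bytes of payload, fills in the header, and returns a pointer
+// to the payload through |out_data|.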
+template <typename DataType>
+Channel::MessagePtr CreateMessage(MessageType type,
+ size_t payload_size,
+ size_t num_handles,
+ DataType** out_data) {
+ Channel::MessagePtr message(
+ new Channel::Message(sizeof(Header) + payload_size, num_handles));
+ Header* header = reinterpret_cast<Header*>(message->mutable_payload());
+ header->type = type;
+ header->padding = 0;
+ *out_data = reinterpret_cast<DataType*>(&header[1]);
+ return message;
+}
+
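+// Verifies that |bytes| is large enough to hold a Header followed by a
+// DataType payload and, if so, points |*out_data| at that payload.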
+template <typename DataType>
+bool GetMessagePayload(const void* bytes,
+ size_t num_bytes,
+ DataType** out_data) {
+ static_assert(sizeof(DataType) > 0, "DataType must have non-zero size.");
+ if (num_bytes < sizeof(Header) + sizeof(DataType))
+ return false;
+ *out_data = reinterpret_cast<const DataType*>(
+ static_cast<const char*>(bytes) + sizeof(Header));
+ return true;
+}
+
+} // namespace
+
+// static
+scoped_refptr<NodeChannel> NodeChannel::Create(
+ Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner,
+ const ProcessErrorCallback& process_error_callback) {
+#if defined(OS_NACL_SFI)
+ LOG(FATAL) << "Multi-process not yet supported on NaCl-SFI";
+ return nullptr;
+#else
+ return new NodeChannel(delegate, std::move(connection_params), io_task_runner,
+ process_error_callback);
+#endif
+}
+
+// static
+Channel::MessagePtr NodeChannel::CreatePortsMessage(size_t payload_size,
+ void** payload,
+ size_t num_handles) {
+ return CreateMessage(MessageType::PORTS_MESSAGE, payload_size, num_handles,
+ payload);
+}
+
+// static
+void NodeChannel::GetPortsMessageData(Channel::Message* message,
+ void** data,
+ size_t* num_data_bytes) {
+ *data = reinterpret_cast<Header*>(message->mutable_payload()) + 1;
+ *num_data_bytes = message->payload_size() - sizeof(Header);
+}
+
+void NodeChannel::Start() {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ if (relay)
+ relay->AddObserver(this);
+#endif
+
+ base::AutoLock lock(channel_lock_);
+ // ShutDown() may have already been called, in which case |channel_| is null.
+ if (channel_)
+ channel_->Start();
+}
+
+void NodeChannel::ShutDown() {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ if (relay)
+ relay->RemoveObserver(this);
+#endif
+
+ base::AutoLock lock(channel_lock_);
+ if (channel_) {
+ channel_->ShutDown();
+ channel_ = nullptr;
+ }
+}
+
+void NodeChannel::LeakHandleOnShutdown() {
+ base::AutoLock lock(channel_lock_);
+ if (channel_) {
+ channel_->LeakHandle();
+ }
+}
+
+void NodeChannel::NotifyBadMessage(const std::string& error) {
+ if (!process_error_callback_.is_null())
+ process_error_callback_.Run("Received bad user message: " + error);
+}
+
+void NodeChannel::SetRemoteProcessHandle(base::ProcessHandle process_handle) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ base::AutoLock lock(remote_process_handle_lock_);
+ DCHECK_EQ(base::kNullProcessHandle, remote_process_handle_);
+ CHECK_NE(process_handle, base::GetCurrentProcessHandle());
+ remote_process_handle_ = process_handle;
+#if defined(OS_WIN)
+ DCHECK(!scoped_remote_process_handle_.is_valid());
+ scoped_remote_process_handle_.reset(PlatformHandle(process_handle));
+#endif
+}
+
+bool NodeChannel::HasRemoteProcessHandle() {
+ base::AutoLock lock(remote_process_handle_lock_);
+ return remote_process_handle_ != base::kNullProcessHandle;
+}
+
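+// On Windows this returns a duplicate which the caller owns; elsewhere it
+// returns the raw handle value. May return base::kNullProcessHandle if no
+// remote process handle has been set.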
+base::ProcessHandle NodeChannel::CopyRemoteProcessHandle() {
+ base::AutoLock lock(remote_process_handle_lock_);
+#if defined(OS_WIN)
+ if (remote_process_handle_ != base::kNullProcessHandle) {
+ // Privileged nodes use this to pass their children's process handles to the
+ // broker on launch.
+ HANDLE handle = remote_process_handle_;
+ BOOL result = DuplicateHandle(
+ base::GetCurrentProcessHandle(), remote_process_handle_,
+ base::GetCurrentProcessHandle(), &handle, 0, FALSE,
+ DUPLICATE_SAME_ACCESS);
+ DPCHECK(result);
+ return handle;
+ }
+ return base::kNullProcessHandle;
+#else
+ return remote_process_handle_;
+#endif
+}
+
+void NodeChannel::SetRemoteNodeName(const ports::NodeName& name) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+ remote_node_name_ = name;
+}
+
+void NodeChannel::AcceptChild(const ports::NodeName& parent_name,
+ const ports::NodeName& token) {
+ AcceptChildData* data;
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::ACCEPT_CHILD, sizeof(AcceptChildData), 0, &data);
+ data->parent_name = parent_name;
+ data->token = token;
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::AcceptParent(const ports::NodeName& token,
+ const ports::NodeName& child_name) {
+ AcceptParentData* data;
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::ACCEPT_PARENT, sizeof(AcceptParentData), 0, &data);
+ data->token = token;
+ data->child_name = child_name;
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::AcceptPeer(const ports::NodeName& sender_name,
+ const ports::NodeName& token,
+ const ports::PortName& port_name) {
+ AcceptPeerData* data;
+ Channel::MessagePtr message =
+ CreateMessage(MessageType::ACCEPT_PEER, sizeof(AcceptPeerData), 0, &data);
+ data->token = token;
+ data->peer_name = sender_name;
+ data->port_name = port_name;
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::AddBrokerClient(const ports::NodeName& client_name,
+ base::ProcessHandle process_handle) {
+ AddBrokerClientData* data;
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector());
+#if defined(OS_WIN)
+ handles->push_back(PlatformHandle(process_handle));
+#endif
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::ADD_BROKER_CLIENT, sizeof(AddBrokerClientData),
+ handles->size(), &data);
+ message->SetHandles(std::move(handles));
+ data->client_name = client_name;
+#if !defined(OS_WIN)
+ data->process_handle = process_handle;
+ data->padding = 0;
+#endif
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::BrokerClientAdded(const ports::NodeName& client_name,
+ ScopedPlatformHandle broker_channel) {
+ BrokerClientAddedData* data;
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector());
+ if (broker_channel.is_valid())
+ handles->push_back(broker_channel.release());
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::BROKER_CLIENT_ADDED, sizeof(BrokerClientAddedData),
+ handles->size(), &data);
+ message->SetHandles(std::move(handles));
+ data->client_name = client_name;
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::AcceptBrokerClient(const ports::NodeName& broker_name,
+ ScopedPlatformHandle broker_channel) {
+ AcceptBrokerClientData* data;
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector());
+ if (broker_channel.is_valid())
+ handles->push_back(broker_channel.release());
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::ACCEPT_BROKER_CLIENT, sizeof(AcceptBrokerClientData),
+ handles->size(), &data);
+ message->SetHandles(std::move(handles));
+ data->broker_name = broker_name;
+ WriteChannelMessage(std::move(message));
+}
+
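+// Forwards an already-serialized ports message to the remote node unchanged.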
+void NodeChannel::PortsMessage(Channel::MessagePtr message) {
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::RequestPortMerge(const ports::PortName& connector_port_name,
+ const std::string& token) {
+ RequestPortMergeData* data;
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::REQUEST_PORT_MERGE,
+ sizeof(RequestPortMergeData) + token.size(), 0, &data);
+ data->connector_port_name = connector_port_name;
+ memcpy(data + 1, token.data(), token.size());
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::RequestIntroduction(const ports::NodeName& name) {
+ IntroductionData* data;
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::REQUEST_INTRODUCTION, sizeof(IntroductionData), 0, &data);
+ data->name = name;
+ WriteChannelMessage(std::move(message));
+}
+
+void NodeChannel::Introduce(const ports::NodeName& name,
+ ScopedPlatformHandle channel_handle) {
+ IntroductionData* data;
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector());
+ if (channel_handle.is_valid())
+ handles->push_back(channel_handle.release());
+ Channel::MessagePtr message = CreateMessage(
+ MessageType::INTRODUCE, sizeof(IntroductionData), handles->size(), &data);
+ message->SetHandles(std::move(handles));
+ data->name = name;
+ WriteChannelMessage(std::move(message));
+}
+
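+// Wraps |message| in a BROADCAST envelope. Broadcast messages must never carry
+// handles (see the DCHECK below and NodeController::BroadcastMessage).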
+void NodeChannel::Broadcast(Channel::MessagePtr message) {
+ DCHECK(!message->has_handles());
+ void* data;
+ Channel::MessagePtr broadcast_message = CreateMessage(
+ MessageType::BROADCAST, message->data_num_bytes(), 0, &data);
+ memcpy(data, message->data(), message->data_num_bytes());
+ WriteChannelMessage(std::move(broadcast_message));
+}
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+void NodeChannel::RelayPortsMessage(const ports::NodeName& destination,
+ Channel::MessagePtr message) {
+#if defined(OS_WIN)
+ DCHECK(message->has_handles());
+
+ // Note that this is only used on Windows, and on Windows all platform
+ // handles are included in the message data. We blindly copy all the data
+ // here and the relay node (the parent) will duplicate handles as needed.
+ size_t num_bytes = sizeof(RelayPortsMessageData) + message->data_num_bytes();
+ RelayPortsMessageData* data;
+ Channel::MessagePtr relay_message = CreateMessage(
+ MessageType::RELAY_PORTS_MESSAGE, num_bytes, 0, &data);
+ data->destination = destination;
+ memcpy(data + 1, message->data(), message->data_num_bytes());
+
+ // When the handles are duplicated in the parent, the source handles will
+ // be closed. If the parent never receives this message then these handles
+ // will leak, but that means something else has probably broken and the
+ // sending process won't likely be around much longer.
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ handles->clear();
+
+#else
+ DCHECK(message->has_mach_ports());
+
+ // On OSX, the handles are extracted from the relayed message and attached to
+ // the wrapper. The broker then takes the handles attached to the wrapper and
+ // moves them back to the relayed message. This is necessary because the
+ // message may contain fds which need to be attached to the outer message so
+ // that they can be transferred to the broker.
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ size_t num_bytes = sizeof(RelayPortsMessageData) + message->data_num_bytes();
+ RelayPortsMessageData* data;
+ Channel::MessagePtr relay_message = CreateMessage(
+ MessageType::RELAY_PORTS_MESSAGE, num_bytes, handles->size(), &data);
+ data->destination = destination;
+ memcpy(data + 1, message->data(), message->data_num_bytes());
+ relay_message->SetHandles(std::move(handles));
+#endif // defined(OS_WIN)
+
+ WriteChannelMessage(std::move(relay_message));
+}
+
+void NodeChannel::PortsMessageFromRelay(const ports::NodeName& source,
+ Channel::MessagePtr message) {
+ size_t num_bytes = sizeof(PortsMessageFromRelayData) +
+ message->payload_size();
+ PortsMessageFromRelayData* data;
+ Channel::MessagePtr relayed_message = CreateMessage(
+ MessageType::PORTS_MESSAGE_FROM_RELAY, num_bytes, message->num_handles(),
+ &data);
+ data->source = source;
+ if (message->payload_size())
+ memcpy(data + 1, message->payload(), message->payload_size());
+ relayed_message->SetHandles(message->TakeHandles());
+ WriteChannelMessage(std::move(relayed_message));
+}
+#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+
+NodeChannel::NodeChannel(Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner,
+ const ProcessErrorCallback& process_error_callback)
+ : delegate_(delegate),
+ io_task_runner_(io_task_runner),
+ process_error_callback_(process_error_callback)
+#if !defined(OS_NACL_SFI)
+ ,
+ channel_(
+ Channel::Create(this, std::move(connection_params), io_task_runner_))
+#endif
+{
+}
+
+NodeChannel::~NodeChannel() {
+ ShutDown();
+}
+
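+// Channel::Delegate implementation. Validates each incoming control message
+// and dispatches it to the corresponding Delegate callback; malformed messages
+// cause the delegate to be notified of a channel error instead.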
+void NodeChannel::OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ RequestContext request_context(RequestContext::Source::SYSTEM);
+
+ // Ensure this NodeChannel stays alive through the extent of this method. The
+ // delegate may have the only other reference to this object and it may choose
+ // to drop it here in response to, e.g., a malformed message.
+ scoped_refptr<NodeChannel> keepalive = this;
+
+#if defined(OS_WIN)
+ // If we receive handles from a known process, rewrite them to our own
+ // process. This can occur when a privileged node receives handles directly
+ // from a privileged descendant.
+ {
+ base::AutoLock lock(remote_process_handle_lock_);
+ if (handles && remote_process_handle_ != base::kNullProcessHandle) {
+ // Note that we explicitly mark the handles as being owned by the sending
+ // process before rewriting them, in order to accommodate RewriteHandles'
+ // internal sanity checks.
+ for (auto& handle : *handles)
+ handle.owning_process = remote_process_handle_;
+ if (!Channel::Message::RewriteHandles(remote_process_handle_,
+ base::GetCurrentProcessHandle(),
+ handles.get())) {
+ DLOG(ERROR) << "Received one or more invalid handles.";
+ }
+ } else if (handles) {
+      // Handles received from an unknown process must already be owned by us.
+ for (auto& handle : *handles)
+ handle.owning_process = base::GetCurrentProcessHandle();
+ }
+ }
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // If we're not the root, receive any mach ports from the message. If we're
+ // the root, the only message containing mach ports should be a
+ // RELAY_PORTS_MESSAGE.
+ {
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ if (handles && !relay) {
+ if (!MachPortRelay::ReceivePorts(handles.get())) {
+ LOG(ERROR) << "Error receiving mach ports.";
+ }
+ }
+ }
+#endif // defined(OS_WIN)
+
+
+ if (payload_size <= sizeof(Header)) {
+ delegate_->OnChannelError(remote_node_name_, this);
+ return;
+ }
+
+ const Header* header = static_cast<const Header*>(payload);
+ switch (header->type) {
+ case MessageType::ACCEPT_CHILD: {
+ const AcceptChildData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ delegate_->OnAcceptChild(remote_node_name_, data->parent_name,
+ data->token);
+ return;
+ }
+ break;
+ }
+
+ case MessageType::ACCEPT_PARENT: {
+ const AcceptParentData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ delegate_->OnAcceptParent(remote_node_name_, data->token,
+ data->child_name);
+ return;
+ }
+ break;
+ }
+
+ case MessageType::ADD_BROKER_CLIENT: {
+ const AddBrokerClientData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ ScopedPlatformHandle process_handle;
+#if defined(OS_WIN)
+ if (!handles || handles->size() != 1) {
+ DLOG(ERROR) << "Dropping invalid AddBrokerClient message.";
+ break;
+ }
+ process_handle = ScopedPlatformHandle(handles->at(0));
+ handles->clear();
+ delegate_->OnAddBrokerClient(remote_node_name_, data->client_name,
+ process_handle.release().handle);
+#else
+ if (handles && handles->size() != 0) {
+ DLOG(ERROR) << "Dropping invalid AddBrokerClient message.";
+ break;
+ }
+ delegate_->OnAddBrokerClient(remote_node_name_, data->client_name,
+ data->process_handle);
+#endif
+ return;
+ }
+ break;
+ }
+
+ case MessageType::BROKER_CLIENT_ADDED: {
+ const BrokerClientAddedData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ ScopedPlatformHandle broker_channel;
+ if (!handles || handles->size() != 1) {
+ DLOG(ERROR) << "Dropping invalid BrokerClientAdded message.";
+ break;
+ }
+ broker_channel = ScopedPlatformHandle(handles->at(0));
+ handles->clear();
+ delegate_->OnBrokerClientAdded(remote_node_name_, data->client_name,
+ std::move(broker_channel));
+ return;
+ }
+ break;
+ }
+
+ case MessageType::ACCEPT_BROKER_CLIENT: {
+ const AcceptBrokerClientData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ ScopedPlatformHandle broker_channel;
+ if (handles && handles->size() > 1) {
+ DLOG(ERROR) << "Dropping invalid AcceptBrokerClient message.";
+ break;
+ }
+ if (handles && handles->size() == 1) {
+ broker_channel = ScopedPlatformHandle(handles->at(0));
+ handles->clear();
+ }
+ delegate_->OnAcceptBrokerClient(remote_node_name_, data->broker_name,
+ std::move(broker_channel));
+ return;
+ }
+ break;
+ }
+
+ case MessageType::PORTS_MESSAGE: {
+ size_t num_handles = handles ? handles->size() : 0;
+ Channel::MessagePtr message(
+ new Channel::Message(payload_size, num_handles));
+ message->SetHandles(std::move(handles));
+ memcpy(message->mutable_payload(), payload, payload_size);
+ delegate_->OnPortsMessage(remote_node_name_, std::move(message));
+ return;
+ }
+
+ case MessageType::REQUEST_PORT_MERGE: {
+ const RequestPortMergeData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ // Don't accept an empty token.
+ size_t token_size = payload_size - sizeof(*data) - sizeof(Header);
+ if (token_size == 0)
+ break;
+ std::string token(reinterpret_cast<const char*>(data + 1), token_size);
+ delegate_->OnRequestPortMerge(remote_node_name_,
+ data->connector_port_name, token);
+ return;
+ }
+ break;
+ }
+
+ case MessageType::REQUEST_INTRODUCTION: {
+ const IntroductionData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ delegate_->OnRequestIntroduction(remote_node_name_, data->name);
+ return;
+ }
+ break;
+ }
+
+ case MessageType::INTRODUCE: {
+ const IntroductionData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ if (handles && handles->size() > 1) {
+ DLOG(ERROR) << "Dropping invalid introduction message.";
+ break;
+ }
+ ScopedPlatformHandle channel_handle;
+ if (handles && handles->size() == 1) {
+ channel_handle = ScopedPlatformHandle(handles->at(0));
+ handles->clear();
+ }
+ delegate_->OnIntroduce(remote_node_name_, data->name,
+ std::move(channel_handle));
+ return;
+ }
+ break;
+ }
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ case MessageType::RELAY_PORTS_MESSAGE: {
+ base::ProcessHandle from_process;
+ {
+ base::AutoLock lock(remote_process_handle_lock_);
+ from_process = remote_process_handle_;
+ }
+ const RelayPortsMessageData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ // Don't try to relay an empty message.
+ if (payload_size <= sizeof(Header) + sizeof(RelayPortsMessageData))
+ break;
+
+ const void* message_start = data + 1;
+ Channel::MessagePtr message = Channel::Message::Deserialize(
+ message_start, payload_size - sizeof(Header) - sizeof(*data));
+ if (!message) {
+ DLOG(ERROR) << "Dropping invalid relay message.";
+ break;
+ }
+ #if defined(OS_MACOSX) && !defined(OS_IOS)
+ message->SetHandles(std::move(handles));
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ if (!relay) {
+ LOG(ERROR) << "Receiving mach ports without a port relay from "
+ << remote_node_name_ << ". Dropping message.";
+ break;
+ }
+ {
+ base::AutoLock lock(pending_mach_messages_lock_);
+ if (relay->port_provider()->TaskForPid(from_process) ==
+ MACH_PORT_NULL) {
+ pending_relay_messages_.push(
+ std::make_pair(data->destination, std::move(message)));
+ break;
+ }
+ }
+ #endif
+ delegate_->OnRelayPortsMessage(remote_node_name_, from_process,
+ data->destination, std::move(message));
+ return;
+ }
+ break;
+ }
+#endif
+
+ case MessageType::BROADCAST: {
+ if (payload_size <= sizeof(Header))
+ break;
+ const void* data = static_cast<const void*>(
+ reinterpret_cast<const Header*>(payload) + 1);
+ Channel::MessagePtr message =
+ Channel::Message::Deserialize(data, payload_size - sizeof(Header));
+ if (!message || message->has_handles()) {
+ DLOG(ERROR) << "Dropping invalid broadcast message.";
+ break;
+ }
+ delegate_->OnBroadcast(remote_node_name_, std::move(message));
+ return;
+ }
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ case MessageType::PORTS_MESSAGE_FROM_RELAY:
+ const PortsMessageFromRelayData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ size_t num_bytes = payload_size - sizeof(*data);
+ if (num_bytes < sizeof(Header))
+ break;
+ num_bytes -= sizeof(Header);
+
+ size_t num_handles = handles ? handles->size() : 0;
+ Channel::MessagePtr message(
+ new Channel::Message(num_bytes, num_handles));
+ message->SetHandles(std::move(handles));
+ if (num_bytes)
+ memcpy(message->mutable_payload(), data + 1, num_bytes);
+ delegate_->OnPortsMessageFromRelay(
+ remote_node_name_, data->source, std::move(message));
+ return;
+ }
+ break;
+
+#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+
+ case MessageType::ACCEPT_PEER: {
+ const AcceptPeerData* data;
+ if (GetMessagePayload(payload, payload_size, &data)) {
+ delegate_->OnAcceptPeer(remote_node_name_, data->token, data->peer_name,
+ data->port_name);
+ return;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ DLOG(ERROR) << "Received invalid message. Closing channel.";
+ delegate_->OnChannelError(remote_node_name_, this);
+}
+
+void NodeChannel::OnChannelError() {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ RequestContext request_context(RequestContext::Source::SYSTEM);
+
+ ShutDown();
+  // |delegate_->OnChannelError()| may cause |this| to be destroyed while it
+  // runs, which would leave |remote_node_name_| dangling. Make a copy of the
+  // name so it remains valid for the duration of the call.
+ ports::NodeName node_name = remote_node_name_;
+ delegate_->OnChannelError(node_name, this);
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+void NodeChannel::OnProcessReady(base::ProcessHandle process) {
+ io_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &NodeChannel::ProcessPendingMessagesWithMachPorts, this));
+}
+
+void NodeChannel::ProcessPendingMessagesWithMachPorts() {
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ DCHECK(relay);
+
+ base::ProcessHandle remote_process_handle;
+ {
+ base::AutoLock lock(remote_process_handle_lock_);
+ remote_process_handle = remote_process_handle_;
+ }
+ PendingMessageQueue pending_writes;
+ PendingRelayMessageQueue pending_relays;
+ {
+ base::AutoLock lock(pending_mach_messages_lock_);
+ pending_writes.swap(pending_write_messages_);
+ pending_relays.swap(pending_relay_messages_);
+ }
+
+ while (!pending_writes.empty()) {
+ Channel::MessagePtr message = std::move(pending_writes.front());
+ pending_writes.pop();
+ if (!relay->SendPortsToProcess(message.get(), remote_process_handle)) {
+ LOG(ERROR) << "Error on sending mach ports. Remote process is likely "
+ << "gone. Dropping message.";
+ return;
+ }
+
+ base::AutoLock lock(channel_lock_);
+ if (!channel_) {
+ DLOG(ERROR) << "Dropping message on closed channel.";
+ break;
+ } else {
+ channel_->Write(std::move(message));
+ }
+ }
+
+ // Ensure this NodeChannel stays alive while flushing relay messages.
+ scoped_refptr<NodeChannel> keepalive = this;
+
+ while (!pending_relays.empty()) {
+ ports::NodeName destination = pending_relays.front().first;
+ Channel::MessagePtr message = std::move(pending_relays.front().second);
+ pending_relays.pop();
+ delegate_->OnRelayPortsMessage(remote_node_name_, remote_process_handle,
+ destination, std::move(message));
+ }
+}
+#endif
+
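+// Performs any platform-specific handle transfer required for |message| (see
+// the comments below) and then writes it to the underlying channel, dropping
+// it if the channel has already been shut down.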
+void NodeChannel::WriteChannelMessage(Channel::MessagePtr message) {
+#if defined(OS_WIN)
+ // Map handles to the destination process. Note: only messages from a
+ // privileged node should contain handles on Windows. If an unprivileged
+ // node needs to send handles, it should do so via RelayPortsMessage which
+ // stashes the handles in the message in such a way that they go undetected
+  // here (they'll be unpacked and duplicated by a privileged parent).
+
+ if (message->has_handles()) {
+ base::ProcessHandle remote_process_handle;
+ {
+ base::AutoLock lock(remote_process_handle_lock_);
+ remote_process_handle = remote_process_handle_;
+ }
+
+ // Rewrite outgoing handles if we have a handle to the destination process.
+ if (remote_process_handle != base::kNullProcessHandle) {
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ if (!Channel::Message::RewriteHandles(base::GetCurrentProcessHandle(),
+ remote_process_handle,
+ handles.get())) {
+ DLOG(ERROR) << "Failed to duplicate one or more outgoing handles.";
+ }
+ message->SetHandles(std::move(handles));
+ }
+ }
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // On OSX, we need to transfer mach ports to the destination process before
+ // transferring the message itself.
+ if (message->has_mach_ports()) {
+ MachPortRelay* relay = delegate_->GetMachPortRelay();
+ if (relay) {
+ base::ProcessHandle remote_process_handle;
+ {
+ base::AutoLock lock(remote_process_handle_lock_);
+ // Expect that the receiving node is a child.
+ DCHECK(remote_process_handle_ != base::kNullProcessHandle);
+ remote_process_handle = remote_process_handle_;
+ }
+ {
+ base::AutoLock lock(pending_mach_messages_lock_);
+ if (relay->port_provider()->TaskForPid(remote_process_handle) ==
+ MACH_PORT_NULL) {
+ // It is also possible for TaskForPid() to return MACH_PORT_NULL when
+ // the process has started, then died. In that case, the queued
+ // message will never be processed. But that's fine since we're about
+ // to die anyway.
+ pending_write_messages_.push(std::move(message));
+ return;
+ }
+ }
+
+ if (!relay->SendPortsToProcess(message.get(), remote_process_handle)) {
+ LOG(ERROR) << "Error on sending mach ports. Remote process is likely "
+ << "gone. Dropping message.";
+ return;
+ }
+ }
+ }
+#endif
+
+ base::AutoLock lock(channel_lock_);
+ if (!channel_)
+ DLOG(ERROR) << "Dropping message on closed channel.";
+ else
+ channel_->Write(std::move(message));
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/node_channel.h b/mojo/edk/system/node_channel.h
new file mode 100644
index 0000000000..95dc3410eb
--- /dev/null
+++ b/mojo/edk/system/node_channel.h
@@ -0,0 +1,219 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_NODE_CHANNEL_H_
+#define MOJO_EDK_SYSTEM_NODE_CHANNEL_H_
+
+#include <queue>
+#include <unordered_map>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "build/build_config.h"
+#include "mojo/edk/embedder/connection_params.h"
+#include "mojo/edk/embedder/embedder.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/channel.h"
+#include "mojo/edk/system/ports/name.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include "mojo/edk/system/mach_port_relay.h"
+#endif
+
+namespace mojo {
+namespace edk {
+
+// Wraps a Channel to send and receive Node control messages.
+class NodeChannel : public base::RefCountedThreadSafe<NodeChannel>,
+ public Channel::Delegate
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ , public MachPortRelay::Observer
+#endif
+ {
+ public:
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+ virtual void OnAcceptChild(const ports::NodeName& from_node,
+ const ports::NodeName& parent_name,
+ const ports::NodeName& token) = 0;
+ virtual void OnAcceptParent(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& child_name) = 0;
+ virtual void OnAddBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ base::ProcessHandle process_handle) = 0;
+ virtual void OnBrokerClientAdded(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ ScopedPlatformHandle broker_channel) = 0;
+ virtual void OnAcceptBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& broker_name,
+ ScopedPlatformHandle broker_channel) = 0;
+ virtual void OnPortsMessage(const ports::NodeName& from_node,
+ Channel::MessagePtr message) = 0;
+ virtual void OnRequestPortMerge(const ports::NodeName& from_node,
+ const ports::PortName& connector_port_name,
+ const std::string& token) = 0;
+ virtual void OnRequestIntroduction(const ports::NodeName& from_node,
+ const ports::NodeName& name) = 0;
+ virtual void OnIntroduce(const ports::NodeName& from_node,
+ const ports::NodeName& name,
+ ScopedPlatformHandle channel_handle) = 0;
+ virtual void OnBroadcast(const ports::NodeName& from_node,
+ Channel::MessagePtr message) = 0;
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ virtual void OnRelayPortsMessage(const ports::NodeName& from_node,
+ base::ProcessHandle from_process,
+ const ports::NodeName& destination,
+ Channel::MessagePtr message) = 0;
+ virtual void OnPortsMessageFromRelay(const ports::NodeName& from_node,
+ const ports::NodeName& source_node,
+ Channel::MessagePtr message) = 0;
+#endif
+ virtual void OnAcceptPeer(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& peer_name,
+ const ports::PortName& port_name) = 0;
+ virtual void OnChannelError(const ports::NodeName& node,
+ NodeChannel* channel) = 0;
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ virtual MachPortRelay* GetMachPortRelay() = 0;
+#endif
+ };
+
+ static scoped_refptr<NodeChannel> Create(
+ Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner,
+ const ProcessErrorCallback& process_error_callback);
+
+ static Channel::MessagePtr CreatePortsMessage(size_t payload_size,
+ void** payload,
+ size_t num_handles);
+
+ static void GetPortsMessageData(Channel::Message* message, void** data,
+ size_t* num_data_bytes);
+
+ // Start receiving messages.
+ void Start();
+
+ // Permanently stop the channel from sending or receiving messages.
+ void ShutDown();
+
+ // Leaks the pipe handle instead of closing it on shutdown.
+ void LeakHandleOnShutdown();
+
+ // Invokes the bad message callback for this channel, if any.
+ void NotifyBadMessage(const std::string& error);
+
+ // Note: On Windows, we take ownership of the remote process handle.
+ void SetRemoteProcessHandle(base::ProcessHandle process_handle);
+ bool HasRemoteProcessHandle();
+ // Note: The returned |ProcessHandle| is owned by the caller and should be
+ // freed if necessary.
+ base::ProcessHandle CopyRemoteProcessHandle();
+
+  // Used for context in Delegate calls (via |from_node| arguments).
+ void SetRemoteNodeName(const ports::NodeName& name);
+
+ void AcceptChild(const ports::NodeName& parent_name,
+ const ports::NodeName& token);
+ void AcceptParent(const ports::NodeName& token,
+ const ports::NodeName& child_name);
+ void AcceptPeer(const ports::NodeName& sender_name,
+ const ports::NodeName& token,
+ const ports::PortName& port_name);
+ void AddBrokerClient(const ports::NodeName& client_name,
+ base::ProcessHandle process_handle);
+ void BrokerClientAdded(const ports::NodeName& client_name,
+ ScopedPlatformHandle broker_channel);
+ void AcceptBrokerClient(const ports::NodeName& broker_name,
+ ScopedPlatformHandle broker_channel);
+ void PortsMessage(Channel::MessagePtr message);
+ void RequestPortMerge(const ports::PortName& connector_port_name,
+ const std::string& token);
+ void RequestIntroduction(const ports::NodeName& name);
+ void Introduce(const ports::NodeName& name,
+ ScopedPlatformHandle channel_handle);
+ void Broadcast(Channel::MessagePtr message);
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ // Relay the message to the specified node via this channel. This is used to
+  // pass Windows handles between two processes that do not have permission to
+ // duplicate handles into the other's address space. The relay process is
+ // assumed to have that permission.
+ void RelayPortsMessage(const ports::NodeName& destination,
+ Channel::MessagePtr message);
+
+ // Sends a message to its destination from a relay. This is interpreted by the
+ // receiver similarly to PortsMessage, but the original source node is
+ // provided as additional message metadata from the (trusted) relay node.
+ void PortsMessageFromRelay(const ports::NodeName& source,
+ Channel::MessagePtr message);
+#endif
+
+ private:
+ friend class base::RefCountedThreadSafe<NodeChannel>;
+
+ using PendingMessageQueue = std::queue<Channel::MessagePtr>;
+ using PendingRelayMessageQueue =
+ std::queue<std::pair<ports::NodeName, Channel::MessagePtr>>;
+
+ NodeChannel(Delegate* delegate,
+ ConnectionParams connection_params,
+ scoped_refptr<base::TaskRunner> io_task_runner,
+ const ProcessErrorCallback& process_error_callback);
+ ~NodeChannel() override;
+
+ // Channel::Delegate:
+ void OnChannelMessage(const void* payload,
+ size_t payload_size,
+ ScopedPlatformHandleVectorPtr handles) override;
+ void OnChannelError() override;
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // MachPortRelay::Observer:
+ void OnProcessReady(base::ProcessHandle process) override;
+
+ void ProcessPendingMessagesWithMachPorts();
+#endif
+
+ void WriteChannelMessage(Channel::MessagePtr message);
+
+ Delegate* const delegate_;
+ const scoped_refptr<base::TaskRunner> io_task_runner_;
+ const ProcessErrorCallback process_error_callback_;
+
+ base::Lock channel_lock_;
+ scoped_refptr<Channel> channel_;
+
+ // Must only be accessed from |io_task_runner_|'s thread.
+ ports::NodeName remote_node_name_;
+
+ base::Lock remote_process_handle_lock_;
+ base::ProcessHandle remote_process_handle_ = base::kNullProcessHandle;
+#if defined(OS_WIN)
+ ScopedPlatformHandle scoped_remote_process_handle_;
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ base::Lock pending_mach_messages_lock_;
+ PendingMessageQueue pending_write_messages_;
+ PendingRelayMessageQueue pending_relay_messages_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(NodeChannel);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_NODE_CHANNEL_H_
diff --git a/mojo/edk/system/node_controller.cc b/mojo/edk/system/node_controller.cc
new file mode 100644
index 0000000000..73b16b14ae
--- /dev/null
+++ b/mojo/edk/system/node_controller.cc
@@ -0,0 +1,1470 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/node_controller.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/time/time.h"
+#include "base/timer/elapsed_timer.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/embedder/named_platform_channel_pair.h"
+#include "mojo/edk/embedder/named_platform_handle.h"
+#include "mojo/edk/embedder/platform_channel_pair.h"
+#include "mojo/edk/system/broker.h"
+#include "mojo/edk/system/broker_host.h"
+#include "mojo/edk/system/core.h"
+#include "mojo/edk/system/ports_message.h"
+#include "mojo/edk/system/request_context.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include "mojo/edk/system/mach_port_relay.h"
+#endif
+
+#if !defined(OS_NACL)
+#include "crypto/random.h"
+#endif
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+#if defined(OS_NACL)
+template <typename T>
+void GenerateRandomName(T* out) { base::RandBytes(out, sizeof(T)); }
+#else
+template <typename T>
+void GenerateRandomName(T* out) { crypto::RandBytes(out, sizeof(T)); }
+#endif
+
+ports::NodeName GetRandomNodeName() {
+ ports::NodeName name;
+ GenerateRandomName(&name);
+ return name;
+}
+
+void RecordPeerCount(size_t count) {
+ DCHECK_LE(count, static_cast<size_t>(std::numeric_limits<int32_t>::max()));
+
+ // 8k is the maximum number of file descriptors allowed in Chrome.
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Mojo.System.Node.ConnectedPeers",
+ static_cast<int32_t>(count),
+ 1 /* min */,
+ 8000 /* max */,
+ 50 /* bucket count */);
+}
+
+void RecordPendingChildCount(size_t count) {
+ DCHECK_LE(count, static_cast<size_t>(std::numeric_limits<int32_t>::max()));
+
+ // 8k is the maximum number of file descriptors allowed in Chrome.
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Mojo.System.Node.PendingChildren",
+ static_cast<int32_t>(count),
+ 1 /* min */,
+ 8000 /* max */,
+ 50 /* bucket count */);
+}
+
+bool ParsePortsMessage(Channel::Message* message,
+ void** data,
+ size_t* num_data_bytes,
+ size_t* num_header_bytes,
+ size_t* num_payload_bytes,
+ size_t* num_ports_bytes) {
+ DCHECK(data && num_data_bytes && num_header_bytes && num_payload_bytes &&
+ num_ports_bytes);
+
+ NodeChannel::GetPortsMessageData(message, data, num_data_bytes);
+ if (!*num_data_bytes)
+ return false;
+
+ if (!ports::Message::Parse(*data, *num_data_bytes, num_header_bytes,
+ num_payload_bytes, num_ports_bytes)) {
+ return false;
+ }
+
+ return true;
+}
+
+// Used by NodeController to watch for shutdown. Since no IO can happen once
+// the IO thread is killed, the NodeController can cleanly drop all its peers
+// at that time.
+class ThreadDestructionObserver :
+ public base::MessageLoop::DestructionObserver {
+ public:
+ static void Create(scoped_refptr<base::TaskRunner> task_runner,
+ const base::Closure& callback) {
+ if (task_runner->RunsTasksOnCurrentThread()) {
+ // Owns itself.
+ new ThreadDestructionObserver(callback);
+ } else {
+ task_runner->PostTask(FROM_HERE,
+ base::Bind(&Create, task_runner, callback));
+ }
+ }
+
+ private:
+ explicit ThreadDestructionObserver(const base::Closure& callback)
+ : callback_(callback) {
+ base::MessageLoop::current()->AddDestructionObserver(this);
+ }
+
+ ~ThreadDestructionObserver() override {
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+ }
+
+ // base::MessageLoop::DestructionObserver:
+ void WillDestroyCurrentMessageLoop() override {
+ callback_.Run();
+ delete this;
+ }
+
+ const base::Closure callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadDestructionObserver);
+};
+
+} // namespace
+
+NodeController::~NodeController() {}
+
+NodeController::NodeController(Core* core)
+ : core_(core),
+ name_(GetRandomNodeName()),
+ node_(new ports::Node(name_, this)) {
+ DVLOG(1) << "Initializing node " << name_;
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+void NodeController::CreateMachPortRelay(
+ base::PortProvider* port_provider) {
+ base::AutoLock lock(mach_port_relay_lock_);
+ DCHECK(!mach_port_relay_);
+ mach_port_relay_.reset(new MachPortRelay(port_provider));
+}
+#endif
+
+void NodeController::SetIOTaskRunner(
+ scoped_refptr<base::TaskRunner> task_runner) {
+ io_task_runner_ = task_runner;
+ ThreadDestructionObserver::Create(
+ io_task_runner_,
+ base::Bind(&NodeController::DropAllPeers, base::Unretained(this)));
+}
+
+void NodeController::ConnectToChild(
+ base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ const std::string& child_token,
+ const ProcessErrorCallback& process_error_callback) {
+ // Generate the temporary remote node name here so that it can be associated
+ // with the embedder's child_token. If an error occurs in the child process
+ // after it is launched, but before any reserved ports are connected, this can
+ // be used to clean up any dangling ports.
+ ports::NodeName node_name;
+ GenerateRandomName(&node_name);
+
+ {
+ base::AutoLock lock(reserved_ports_lock_);
+ bool inserted = pending_child_tokens_.insert(
+ std::make_pair(node_name, child_token)).second;
+ DCHECK(inserted);
+ }
+
+#if defined(OS_WIN)
+ // On Windows, we need to duplicate the process handle because we have no
+ // control over its lifetime and it may become invalid by the time the posted
+ // task runs.
+ HANDLE dup_handle = INVALID_HANDLE_VALUE;
+ BOOL ok = ::DuplicateHandle(
+ base::GetCurrentProcessHandle(), process_handle,
+ base::GetCurrentProcessHandle(), &dup_handle,
+ 0, FALSE, DUPLICATE_SAME_ACCESS);
+ DPCHECK(ok);
+ process_handle = dup_handle;
+#endif
+
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&NodeController::ConnectToChildOnIOThread,
+ base::Unretained(this), process_handle,
+ base::Passed(&connection_params), node_name,
+ process_error_callback));
+}
+
+void NodeController::CloseChildPorts(const std::string& child_token) {
+ std::vector<ports::PortRef> ports_to_close;
+ {
+ std::vector<std::string> port_tokens;
+ base::AutoLock lock(reserved_ports_lock_);
+ for (const auto& port : reserved_ports_) {
+ if (port.second.child_token == child_token) {
+ DVLOG(1) << "Closing reserved port " << port.second.port.name();
+ ports_to_close.push_back(port.second.port);
+ port_tokens.push_back(port.first);
+ }
+ }
+
+ for (const auto& token : port_tokens)
+ reserved_ports_.erase(token);
+ }
+
+ for (const auto& port : ports_to_close)
+ node_->ClosePort(port);
+
+ // Ensure local port closure messages are processed.
+ AcceptIncomingMessages();
+}
+
+void NodeController::ClosePeerConnection(const std::string& peer_token) {
+ io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&NodeController::ClosePeerConnectionOnIOThread,
+ base::Unretained(this), peer_token));
+}
+
+void NodeController::ConnectToParent(ConnectionParams connection_params) {
+#if !defined(OS_MACOSX) && !defined(OS_NACL_SFI)
+ // Use the bootstrap channel for the broker and receive the node's channel
+ // synchronously as the first message from the broker.
+ base::ElapsedTimer timer;
+ broker_.reset(new Broker(connection_params.TakeChannelHandle()));
+ ScopedPlatformHandle platform_handle = broker_->GetParentPlatformHandle();
+ UMA_HISTOGRAM_TIMES("Mojo.System.GetParentPlatformHandleSyncTime",
+ timer.Elapsed());
+
+ if (!platform_handle.is_valid()) {
+ // Most likely the browser side of the channel has already been closed and
+ // the broker was unable to negotiate a NodeChannel pipe. In this case we
+    // can cancel the parent connection.
+ DVLOG(1) << "Cannot connect to invalid parent channel.";
+ CancelPendingPortMerges();
+ return;
+ }
+ connection_params = ConnectionParams(std::move(platform_handle));
+#endif
+
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&NodeController::ConnectToParentOnIOThread,
+ base::Unretained(this), base::Passed(&connection_params)));
+}
+
+void NodeController::ConnectToPeer(ConnectionParams connection_params,
+ const ports::PortRef& port,
+ const std::string& peer_token) {
+ ports::NodeName node_name;
+ GenerateRandomName(&node_name);
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&NodeController::ConnectToPeerOnIOThread,
+ base::Unretained(this), base::Passed(&connection_params),
+ node_name, port, peer_token));
+}
+
+void NodeController::SetPortObserver(const ports::PortRef& port,
+ scoped_refptr<PortObserver> observer) {
+ node_->SetUserData(port, std::move(observer));
+}
+
+void NodeController::ClosePort(const ports::PortRef& port) {
+ SetPortObserver(port, nullptr);
+ int rv = node_->ClosePort(port);
+ DCHECK_EQ(rv, ports::OK) << " Failed to close port: " << port.name();
+
+ AcceptIncomingMessages();
+}
+
+int NodeController::SendMessage(const ports::PortRef& port,
+ std::unique_ptr<PortsMessage> message) {
+ ports::ScopedMessage ports_message(message.release());
+ int rv = node_->SendMessage(port, std::move(ports_message));
+
+ AcceptIncomingMessages();
+ return rv;
+}
+
+void NodeController::ReservePort(const std::string& token,
+ const ports::PortRef& port,
+ const std::string& child_token) {
+ DVLOG(2) << "Reserving port " << port.name() << "@" << name_ << " for token "
+ << token;
+
+ base::AutoLock lock(reserved_ports_lock_);
+ auto result = reserved_ports_.insert(
+ std::make_pair(token, ReservedPort{port, child_token}));
+ DCHECK(result.second);
+}
+
+void NodeController::MergePortIntoParent(const std::string& token,
+ const ports::PortRef& port) {
+ bool was_merged = false;
+ {
+ // This request may be coming from within the process that reserved the
+ // "parent" side (e.g. for Chrome single-process mode), so if this token is
+ // reserved locally, merge locally instead.
+ base::AutoLock lock(reserved_ports_lock_);
+ auto it = reserved_ports_.find(token);
+ if (it != reserved_ports_.end()) {
+ node_->MergePorts(port, name_, it->second.port.name());
+ reserved_ports_.erase(it);
+ was_merged = true;
+ }
+ }
+ if (was_merged) {
+ AcceptIncomingMessages();
+ return;
+ }
+
+ scoped_refptr<NodeChannel> parent;
+ bool reject_merge = false;
+ {
+ // Hold |pending_port_merges_lock_| while getting |parent|. Otherwise,
+ // there is a race where the parent can be set, and |pending_port_merges_|
+ // be processed between retrieving |parent| and adding the merge to
+ // |pending_port_merges_|.
+ base::AutoLock lock(pending_port_merges_lock_);
+ parent = GetParentChannel();
+ if (reject_pending_merges_) {
+ reject_merge = true;
+ } else if (!parent) {
+ pending_port_merges_.push_back(std::make_pair(token, port));
+ return;
+ }
+ }
+ if (reject_merge) {
+ node_->ClosePort(port);
+ DVLOG(2) << "Rejecting port merge for token " << token
+ << " due to closed parent channel.";
+ AcceptIncomingMessages();
+ return;
+ }
+
+ parent->RequestPortMerge(port.name(), token);
+}
+
+int NodeController::MergeLocalPorts(const ports::PortRef& port0,
+ const ports::PortRef& port1) {
+ int rv = node_->MergeLocalPorts(port0, port1);
+ AcceptIncomingMessages();
+ return rv;
+}
+
+scoped_refptr<PlatformSharedBuffer> NodeController::CreateSharedBuffer(
+ size_t num_bytes) {
+#if !defined(OS_MACOSX) && !defined(OS_NACL_SFI)
+ // Shared buffer creation failure is fatal, so always use the broker when we
+ // have one. This does mean that a non-root process that has children will use
+ // the broker for shared buffer creation even though that process is
+ // privileged.
+ if (broker_) {
+ return broker_->GetSharedBuffer(num_bytes);
+ }
+#endif
+ return PlatformSharedBuffer::Create(num_bytes);
+}
+
+void NodeController::RequestShutdown(const base::Closure& callback) {
+ {
+ base::AutoLock lock(shutdown_lock_);
+ shutdown_callback_ = callback;
+ shutdown_callback_flag_.Set(true);
+ }
+
+ AttemptShutdownIfRequested();
+}
+
+void NodeController::NotifyBadMessageFrom(const ports::NodeName& source_node,
+ const std::string& error) {
+ scoped_refptr<NodeChannel> peer = GetPeerChannel(source_node);
+ if (peer)
+ peer->NotifyBadMessage(error);
+}
+
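+// Runs on the IO thread. Except on Mac and NaCl, a BrokerHost is created for
+// the new child and handed the client end of a fresh channel pair, while the
+// server end backs this node's NodeChannel to the child. The child is tracked
+// under a temporary random name until its AcceptParent reply arrives.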
+void NodeController::ConnectToChildOnIOThread(
+ base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ ports::NodeName token,
+ const ProcessErrorCallback& process_error_callback) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+#if !defined(OS_MACOSX) && !defined(OS_NACL)
+ PlatformChannelPair node_channel;
+ ScopedPlatformHandle server_handle = node_channel.PassServerHandle();
+ // BrokerHost owns itself.
+ BrokerHost* broker_host =
+ new BrokerHost(process_handle, connection_params.TakeChannelHandle());
+ bool channel_ok = broker_host->SendChannel(node_channel.PassClientHandle());
+
+#if defined(OS_WIN)
+ if (!channel_ok) {
+ // On Windows the above operation may fail if the channel is crossing a
+ // session boundary. In that case we fall back to a named pipe.
+ NamedPlatformChannelPair named_channel;
+ server_handle = named_channel.PassServerHandle();
+ broker_host->SendNamedChannel(named_channel.handle().name);
+ }
+#else
+ CHECK(channel_ok);
+#endif // defined(OS_WIN)
+
+ scoped_refptr<NodeChannel> channel =
+ NodeChannel::Create(this, ConnectionParams(std::move(server_handle)),
+ io_task_runner_, process_error_callback);
+
+#else // !defined(OS_MACOSX) && !defined(OS_NACL)
+ scoped_refptr<NodeChannel> channel =
+ NodeChannel::Create(this, std::move(connection_params), io_task_runner_,
+ process_error_callback);
+#endif // !defined(OS_MACOSX) && !defined(OS_NACL)
+
+ // We set up the child channel with a temporary name so it can be identified
+ // as a pending child if it writes any messages to the channel. We may start
+ // receiving messages from it (though we shouldn't) as soon as Start() is
+ // called below.
+
+ pending_children_.insert(std::make_pair(token, channel));
+ RecordPendingChildCount(pending_children_.size());
+
+ channel->SetRemoteNodeName(token);
+ channel->SetRemoteProcessHandle(process_handle);
+ channel->Start();
+
+ channel->AcceptChild(name_, token);
+}
+
+void NodeController::ConnectToParentOnIOThread(
+ ConnectionParams connection_params) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ {
+ base::AutoLock lock(parent_lock_);
+ DCHECK(parent_name_ == ports::kInvalidNodeName);
+
+ // At this point we don't know the parent's name, so we can't yet insert it
+ // into our |peers_| map. That will happen as soon as we receive an
+ // AcceptChild message from them.
+ bootstrap_parent_channel_ =
+ NodeChannel::Create(this, std::move(connection_params), io_task_runner_,
+ ProcessErrorCallback());
+ // Prevent the parent pipe handle from being closed on shutdown. Pipe
+ // closure is used by the parent to detect the child process has exited.
+ // Relying on message pipes to be closed is not enough because the parent
+ // may see the message pipe closure before the child is dead, causing the
+ // child process to be unexpectedly SIGKILL'd.
+ bootstrap_parent_channel_->LeakHandleOnShutdown();
+ }
+ bootstrap_parent_channel_->Start();
+}
+
+void NodeController::ConnectToPeerOnIOThread(ConnectionParams connection_params,
+ ports::NodeName token,
+ ports::PortRef port,
+ const std::string& peer_token) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ scoped_refptr<NodeChannel> channel = NodeChannel::Create(
+ this, std::move(connection_params), io_task_runner_, {});
+ peer_connections_.insert(
+ {token, PeerConnection{channel, port, peer_token}});
+ peers_by_token_.insert({peer_token, token});
+
+ channel->SetRemoteNodeName(token);
+ channel->Start();
+
+ channel->AcceptPeer(name_, token, port.name());
+}
+
+void NodeController::ClosePeerConnectionOnIOThread(
+ const std::string& peer_token) {
+ RequestContext request_context(RequestContext::Source::SYSTEM);
+ auto peer = peers_by_token_.find(peer_token);
+ // The connection may already be closed.
+ if (peer == peers_by_token_.end())
+ return;
+
+  // DropPeer() below will invalidate |peer|, so copy the node name out first.
+ ports::NodeName name = peer->second;
+ DropPeer(name, nullptr);
+}
+
+scoped_refptr<NodeChannel> NodeController::GetPeerChannel(
+ const ports::NodeName& name) {
+ base::AutoLock lock(peers_lock_);
+ auto it = peers_.find(name);
+ if (it == peers_.end())
+ return nullptr;
+ return it->second;
+}
+
+scoped_refptr<NodeChannel> NodeController::GetParentChannel() {
+ ports::NodeName parent_name;
+ {
+ base::AutoLock lock(parent_lock_);
+ parent_name = parent_name_;
+ }
+ return GetPeerChannel(parent_name);
+}
+
+scoped_refptr<NodeChannel> NodeController::GetBrokerChannel() {
+ ports::NodeName broker_name;
+ {
+ base::AutoLock lock(broker_lock_);
+ broker_name = broker_name_;
+ }
+ return GetPeerChannel(broker_name);
+}
+
+void NodeController::AddPeer(const ports::NodeName& name,
+ scoped_refptr<NodeChannel> channel,
+ bool start_channel) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ DCHECK(name != ports::kInvalidNodeName);
+ DCHECK(channel);
+
+ channel->SetRemoteNodeName(name);
+
+ OutgoingMessageQueue pending_messages;
+ {
+ base::AutoLock lock(peers_lock_);
+ if (peers_.find(name) != peers_.end()) {
+ // This can happen normally if two nodes race to be introduced to each
+ // other. The losing pipe will be silently closed and introduction should
+ // not be affected.
+ DVLOG(1) << "Ignoring duplicate peer name " << name;
+ return;
+ }
+
+ auto result = peers_.insert(std::make_pair(name, channel));
+ DCHECK(result.second);
+
+ DVLOG(2) << "Accepting new peer " << name << " on node " << name_;
+
+ RecordPeerCount(peers_.size());
+
+ auto it = pending_peer_messages_.find(name);
+ if (it != pending_peer_messages_.end()) {
+ std::swap(pending_messages, it->second);
+ pending_peer_messages_.erase(it);
+ }
+ }
+
+ if (start_channel)
+ channel->Start();
+
+ // Flush any queued message we need to deliver to this node.
+ while (!pending_messages.empty()) {
+ channel->PortsMessage(std::move(pending_messages.front()));
+ pending_messages.pop();
+ }
+}
+
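+// Removes all state associated with |name|: the peer channel itself, queued
+// outgoing messages, any reserved ports tied to the corresponding child token,
+// and any peer connection record. The ports::Node is then notified via
+// LostConnectionToNode().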
+void NodeController::DropPeer(const ports::NodeName& name,
+ NodeChannel* channel) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ {
+ base::AutoLock lock(peers_lock_);
+ auto it = peers_.find(name);
+
+ if (it != peers_.end()) {
+ ports::NodeName peer = it->first;
+ peers_.erase(it);
+ DVLOG(1) << "Dropped peer " << peer;
+ }
+
+ pending_peer_messages_.erase(name);
+ pending_children_.erase(name);
+
+ RecordPeerCount(peers_.size());
+ RecordPendingChildCount(pending_children_.size());
+ }
+
+ std::vector<ports::PortRef> ports_to_close;
+ {
+ // Clean up any reserved ports.
+ base::AutoLock lock(reserved_ports_lock_);
+ auto it = pending_child_tokens_.find(name);
+ if (it != pending_child_tokens_.end()) {
+ const std::string& child_token = it->second;
+
+ std::vector<std::string> port_tokens;
+ for (const auto& port : reserved_ports_) {
+ if (port.second.child_token == child_token) {
+ DVLOG(1) << "Closing reserved port: " << port.second.port.name();
+ ports_to_close.push_back(port.second.port);
+ port_tokens.push_back(port.first);
+ }
+ }
+
+ // We have to erase reserved ports in a two-step manner because the usual
+ // manner of using the returned iterator from map::erase isn't technically
+ // valid in C++11 (although it is in C++14).
+ for (const auto& token : port_tokens)
+ reserved_ports_.erase(token);
+
+ pending_child_tokens_.erase(it);
+ }
+ }
+
+ bool is_parent;
+ {
+ base::AutoLock lock(parent_lock_);
+ is_parent = (name == parent_name_ || channel == bootstrap_parent_channel_);
+ }
+
+ // If the error comes from the parent channel, we also need to cancel any
+ // port merge requests, so that errors can be propagated to the message
+ // pipes.
+ if (is_parent)
+ CancelPendingPortMerges();
+
+ auto peer = peer_connections_.find(name);
+ if (peer != peer_connections_.end()) {
+ peers_by_token_.erase(peer->second.peer_token);
+ ports_to_close.push_back(peer->second.local_port);
+ peer_connections_.erase(peer);
+ }
+
+ for (const auto& port : ports_to_close)
+ node_->ClosePort(port);
+
+ node_->LostConnectionToNode(name);
+
+ AcceptIncomingMessages();
+}
+
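+// Routes an outgoing ports message to the node named |name|. Messages carrying
+// Windows handles or Mach ports may be detoured through the broker, and
+// messages destined for unknown peers are queued until the broker introduces
+// us to them.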
+void NodeController::SendPeerMessage(const ports::NodeName& name,
+ ports::ScopedMessage message) {
+ Channel::MessagePtr channel_message =
+ static_cast<PortsMessage*>(message.get())->TakeChannelMessage();
+
+ scoped_refptr<NodeChannel> peer = GetPeerChannel(name);
+#if defined(OS_WIN)
+ if (channel_message->has_handles()) {
+    // If we're sending a message with handles and we aren't the destination
+ // node's parent or broker (i.e. we don't know its process handle), ask
+ // the broker to relay for us.
+ scoped_refptr<NodeChannel> broker = GetBrokerChannel();
+ if (!peer || !peer->HasRemoteProcessHandle()) {
+ if (broker) {
+ broker->RelayPortsMessage(name, std::move(channel_message));
+ } else {
+ base::AutoLock lock(broker_lock_);
+ pending_relay_messages_[name].emplace(std::move(channel_message));
+ }
+ return;
+ }
+ }
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ if (channel_message->has_mach_ports()) {
+ // Messages containing Mach ports are always routed through the broker, even
+ // if the broker process is the intended recipient.
+ bool use_broker = false;
+ {
+ base::AutoLock lock(parent_lock_);
+ use_broker = (bootstrap_parent_channel_ ||
+ parent_name_ != ports::kInvalidNodeName);
+ }
+ if (use_broker) {
+ scoped_refptr<NodeChannel> broker = GetBrokerChannel();
+ if (broker) {
+ broker->RelayPortsMessage(name, std::move(channel_message));
+ } else {
+ base::AutoLock lock(broker_lock_);
+ pending_relay_messages_[name].emplace(std::move(channel_message));
+ }
+ return;
+ }
+ }
+#endif // defined(OS_WIN)
+
+ if (peer) {
+ peer->PortsMessage(std::move(channel_message));
+ return;
+ }
+
+ // If we don't know who the peer is and we are the broker, we can only assume
+ // the peer is invalid, i.e., it's either a junk name or has already been
+ // disconnected.
+ scoped_refptr<NodeChannel> broker = GetBrokerChannel();
+ if (!broker) {
+ DVLOG(1) << "Dropping message for unknown peer: " << name;
+ return;
+ }
+
+ // If we aren't the broker, assume we just need to be introduced and queue
+ // until that can be either confirmed or denied by the broker.
+ bool needs_introduction = false;
+ {
+ base::AutoLock lock(peers_lock_);
+ auto& queue = pending_peer_messages_[name];
+ needs_introduction = queue.empty();
+ queue.emplace(std::move(channel_message));
+ }
+ if (needs_introduction)
+ broker->RequestIntroduction(name);
+}
+
+void NodeController::AcceptIncomingMessages() {
+  // This is an impractically large value which should never be reached in
+ // practice. See the CHECK below for usage.
+ constexpr size_t kMaxAcceptedMessages = 1000000;
+
+ size_t num_messages_accepted = 0;
+ while (incoming_messages_flag_) {
+ // TODO: We may need to be more careful to avoid starving the rest of the
+ // thread here. Revisit this if it turns out to be a problem. One
+ // alternative would be to schedule a task to continue pumping messages
+ // after flushing once.
+
+ messages_lock_.Acquire();
+ if (incoming_messages_.empty()) {
+ messages_lock_.Release();
+ break;
+ }
+
+ // libstdc++'s deque creates an internal buffer on construction, even when
+ // the size is 0. So avoid creating it until it is necessary.
+ std::queue<ports::ScopedMessage> messages;
+ std::swap(messages, incoming_messages_);
+ incoming_messages_flag_.Set(false);
+ messages_lock_.Release();
+
+ num_messages_accepted += messages.size();
+ while (!messages.empty()) {
+ node_->AcceptMessage(std::move(messages.front()));
+ messages.pop();
+ }
+
+ // This is effectively a safeguard against potential bugs which might lead
+ // to runaway message cycles. If any such cycles arise, we'll start seeing
+ // crash reports from this location.
+ CHECK_LE(num_messages_accepted, kMaxAcceptedMessages);
+ }
+
+ if (num_messages_accepted >= 4) {
+ // Note: We avoid logging this histogram for the vast majority of cases.
+ // See https://crbug.com/685763 for more context.
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Mojo.System.MessagesAcceptedPerEvent",
+ static_cast<int32_t>(num_messages_accepted),
+ 1 /* min */,
+ 500 /* max */,
+ 50 /* bucket count */);
+ }
+
+ AttemptShutdownIfRequested();
+}
+
+void NodeController::ProcessIncomingMessages() {
+ RequestContext request_context(RequestContext::Source::SYSTEM);
+
+ {
+ base::AutoLock lock(messages_lock_);
+    // Allow a new incoming-message-processing task to be posted. This can't be
+    // done after AcceptIncomingMessages(), otherwise a message might be
+    // missed. Doing it here means at most two tasks can exist at the same
+    // time: the one currently running, and one pending in the task runner.
+ incoming_messages_task_posted_ = false;
+ }
+
+ AcceptIncomingMessages();
+}
+
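+// Invoked when the IO thread's message loop is being destroyed (via the
+// ThreadDestructionObserver installed in SetIOTaskRunner). Shuts down every
+// known channel, including the bootstrap parent channel and pending children.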
+void NodeController::DropAllPeers() {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ std::vector<scoped_refptr<NodeChannel>> all_peers;
+ {
+ base::AutoLock lock(parent_lock_);
+ if (bootstrap_parent_channel_) {
+      // |bootstrap_parent_channel_| isn't null'd here because we rely on its
+ // existence to determine whether or not this is the root node. Once
+ // bootstrap_parent_channel_->ShutDown() has been called,
+ // |bootstrap_parent_channel_| is essentially a dead object and it doesn't
+ // matter if it's deleted now or when |this| is deleted.
+ // Note: |bootstrap_parent_channel_| is only modified on the IO thread.
+ all_peers.push_back(bootstrap_parent_channel_);
+ }
+ }
+
+ {
+ base::AutoLock lock(peers_lock_);
+ for (const auto& peer : peers_)
+ all_peers.push_back(peer.second);
+ for (const auto& peer : pending_children_)
+ all_peers.push_back(peer.second);
+ peers_.clear();
+ pending_children_.clear();
+ pending_peer_messages_.clear();
+ peer_connections_.clear();
+ }
+
+ for (const auto& peer : all_peers)
+ peer->ShutDown();
+
+ if (destroy_on_io_thread_shutdown_)
+ delete this;
+}
+
+void NodeController::GenerateRandomPortName(ports::PortName* port_name) {
+ GenerateRandomName(port_name);
+}
+
+void NodeController::AllocMessage(size_t num_header_bytes,
+ ports::ScopedMessage* message) {
+ message->reset(new PortsMessage(num_header_bytes, 0, 0, nullptr));
+}
+
+void NodeController::ForwardMessage(const ports::NodeName& node,
+ ports::ScopedMessage message) {
+ DCHECK(message);
+ bool schedule_pump_task = false;
+ if (node == name_) {
+ // NOTE: We need to avoid re-entering the Node instance within
+ // ForwardMessage. Because ForwardMessage is only ever called
+ // (synchronously) in response to Node's ClosePort, SendMessage, or
+ // AcceptMessage, we flush the queue after calling any of those methods.
+ base::AutoLock lock(messages_lock_);
+ // |io_task_runner_| may be null in tests or processes that don't require
+ // multi-process Mojo.
+ schedule_pump_task = incoming_messages_.empty() && io_task_runner_ &&
+ !incoming_messages_task_posted_;
+ incoming_messages_task_posted_ |= schedule_pump_task;
+ incoming_messages_.emplace(std::move(message));
+ incoming_messages_flag_.Set(true);
+ } else {
+ SendPeerMessage(node, std::move(message));
+ }
+
+ if (schedule_pump_task) {
+ // Normally, the queue is processed after the action that added the local
+ // message is done (i.e. SendMessage, ClosePort, etc). However, it's also
+ // possible for a local message to be added as a result of a remote message,
+ // and OnChannelMessage() doesn't process this queue (although
+ // OnPortsMessage() does). There may also be other code paths, now or added
+ // in the future, which cause local messages to be added but don't process
+ // this message queue.
+ //
+ // Instead of adding a call to AcceptIncomingMessages() on every possible
+ // code path, post a task to the IO thread to process the queue. If the
+ // current call stack processes the queue, this may end up doing nothing.
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&NodeController::ProcessIncomingMessages,
+ base::Unretained(this)));
+ }
+}
+
+void NodeController::BroadcastMessage(ports::ScopedMessage message) {
+ CHECK_EQ(message->num_ports(), 0u);
+ Channel::MessagePtr channel_message =
+ static_cast<PortsMessage*>(message.get())->TakeChannelMessage();
+ CHECK(!channel_message->has_handles());
+
+ scoped_refptr<NodeChannel> broker = GetBrokerChannel();
+ if (broker)
+ broker->Broadcast(std::move(channel_message));
+ else
+ OnBroadcast(name_, std::move(channel_message));
+}
+
+void NodeController::PortStatusChanged(const ports::PortRef& port) {
+ scoped_refptr<ports::UserData> user_data;
+ node_->GetUserData(port, &user_data);
+
+ PortObserver* observer = static_cast<PortObserver*>(user_data.get());
+ if (observer) {
+ observer->OnPortStatusChanged();
+ } else {
+ DVLOG(2) << "Ignoring status change for " << port.name() << " because it "
+ << "doesn't have an observer.";
+ }
+}
+
+void NodeController::OnAcceptChild(const ports::NodeName& from_node,
+ const ports::NodeName& parent_name,
+ const ports::NodeName& token) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ scoped_refptr<NodeChannel> parent;
+ {
+ base::AutoLock lock(parent_lock_);
+ if (bootstrap_parent_channel_ && parent_name_ == ports::kInvalidNodeName) {
+ parent_name_ = parent_name;
+ parent = bootstrap_parent_channel_;
+ }
+ }
+
+ if (!parent) {
+ DLOG(ERROR) << "Unexpected AcceptChild message from " << from_node;
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ parent->SetRemoteNodeName(parent_name);
+ parent->AcceptParent(token, name_);
+
+ // NOTE: The child does not actually add its parent as a peer until
+ // receiving an AcceptBrokerClient message from the broker. The parent
+ // will request that said message be sent upon receiving AcceptParent.
+
+ DVLOG(1) << "Child " << name_ << " accepting parent " << parent_name;
+}
+
+void NodeController::OnAcceptParent(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& child_name) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ auto it = pending_children_.find(from_node);
+ if (it == pending_children_.end() || token != from_node) {
+ DLOG(ERROR) << "Received unexpected AcceptParent message from "
+ << from_node;
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ {
+ base::AutoLock lock(reserved_ports_lock_);
+ auto it = pending_child_tokens_.find(from_node);
+ if (it != pending_child_tokens_.end()) {
+ std::string token = std::move(it->second);
+ pending_child_tokens_.erase(it);
+ pending_child_tokens_[child_name] = std::move(token);
+ }
+ }
+
+ scoped_refptr<NodeChannel> channel = it->second;
+ pending_children_.erase(it);
+
+ DCHECK(channel);
+
+ DVLOG(1) << "Parent " << name_ << " accepted child " << child_name;
+
+ AddPeer(child_name, channel, false /* start_channel */);
+
+ // TODO(rockot/amistry): We could simplify child initialization if we could
+ // synchronously get a new async broker channel from the broker. For now we do
+ // it asynchronously since it's only used to facilitate handle passing, not
+ // handle creation.
+ scoped_refptr<NodeChannel> broker = GetBrokerChannel();
+ if (broker) {
+ // Inform the broker of this new child.
+ broker->AddBrokerClient(child_name, channel->CopyRemoteProcessHandle());
+ } else {
+ // If we have no broker, either we need to wait for one, or we *are* the
+ // broker.
+ scoped_refptr<NodeChannel> parent = GetParentChannel();
+ if (!parent) {
+ base::AutoLock lock(parent_lock_);
+ parent = bootstrap_parent_channel_;
+ }
+
+ if (!parent) {
+ // Yes, we're the broker. We can initialize the child directly.
+ channel->AcceptBrokerClient(name_, ScopedPlatformHandle());
+ } else {
+ // We aren't the broker, so wait for a broker connection.
+ base::AutoLock lock(broker_lock_);
+ pending_broker_clients_.push(child_name);
+ }
+ }
+}
+
+void NodeController::OnAddBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ base::ProcessHandle process_handle) {
+#if defined(OS_WIN)
+ // Scoped handle to avoid leaks on error.
+ ScopedPlatformHandle scoped_process_handle =
+ ScopedPlatformHandle(PlatformHandle(process_handle));
+#endif
+ scoped_refptr<NodeChannel> sender = GetPeerChannel(from_node);
+ if (!sender) {
+ DLOG(ERROR) << "Ignoring AddBrokerClient from unknown sender.";
+ return;
+ }
+
+ if (GetPeerChannel(client_name)) {
+ DLOG(ERROR) << "Ignoring AddBrokerClient for known client.";
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ PlatformChannelPair broker_channel;
+ ConnectionParams connection_params(broker_channel.PassServerHandle());
+ scoped_refptr<NodeChannel> client =
+ NodeChannel::Create(this, std::move(connection_params), io_task_runner_,
+ ProcessErrorCallback());
+
+#if defined(OS_WIN)
+ // The broker must have a working handle to the client process in order to
+ // properly copy other handles to and from the client.
+ if (!scoped_process_handle.is_valid()) {
+ DLOG(ERROR) << "Broker rejecting client with invalid process handle.";
+ return;
+ }
+ client->SetRemoteProcessHandle(scoped_process_handle.release().handle);
+#else
+ client->SetRemoteProcessHandle(process_handle);
+#endif
+
+ AddPeer(client_name, client, true /* start_channel */);
+
+ DVLOG(1) << "Broker " << name_ << " accepting client " << client_name
+ << " from peer " << from_node;
+
+ sender->BrokerClientAdded(client_name, broker_channel.PassClientHandle());
+}
+
+void NodeController::OnBrokerClientAdded(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ ScopedPlatformHandle broker_channel) {
+ scoped_refptr<NodeChannel> client = GetPeerChannel(client_name);
+ if (!client) {
+ DLOG(ERROR) << "BrokerClientAdded for unknown child " << client_name;
+ return;
+ }
+
+ // This should have come from our own broker.
+ if (GetBrokerChannel() != GetPeerChannel(from_node)) {
+ DLOG(ERROR) << "BrokerClientAdded from non-broker node " << from_node;
+ return;
+ }
+
+ DVLOG(1) << "Child " << client_name << " accepted by broker " << from_node;
+
+ client->AcceptBrokerClient(from_node, std::move(broker_channel));
+}
+
+void NodeController::OnAcceptBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& broker_name,
+ ScopedPlatformHandle broker_channel) {
+ // This node should already have a parent in bootstrap mode.
+ ports::NodeName parent_name;
+ scoped_refptr<NodeChannel> parent;
+ {
+ base::AutoLock lock(parent_lock_);
+ parent_name = parent_name_;
+ parent = bootstrap_parent_channel_;
+ bootstrap_parent_channel_ = nullptr;
+ }
+ DCHECK(parent_name == from_node);
+ DCHECK(parent);
+
+ std::queue<ports::NodeName> pending_broker_clients;
+ std::unordered_map<ports::NodeName, OutgoingMessageQueue>
+ pending_relay_messages;
+ {
+ base::AutoLock lock(broker_lock_);
+ broker_name_ = broker_name;
+ std::swap(pending_broker_clients, pending_broker_clients_);
+ std::swap(pending_relay_messages, pending_relay_messages_);
+ }
+ DCHECK(broker_name != ports::kInvalidNodeName);
+
+ // It's now possible to add both the broker and the parent as peers.
+ // Note that the broker and parent may be the same node.
+ scoped_refptr<NodeChannel> broker;
+ if (broker_name == parent_name) {
+ DCHECK(!broker_channel.is_valid());
+ broker = parent;
+ } else {
+ DCHECK(broker_channel.is_valid());
+ broker =
+ NodeChannel::Create(this, ConnectionParams(std::move(broker_channel)),
+ io_task_runner_, ProcessErrorCallback());
+ AddPeer(broker_name, broker, true /* start_channel */);
+ }
+
+ AddPeer(parent_name, parent, false /* start_channel */);
+
+ {
+ // Complete any port merge requests we have waiting for the parent.
+ base::AutoLock lock(pending_port_merges_lock_);
+ for (const auto& request : pending_port_merges_)
+ parent->RequestPortMerge(request.second.name(), request.first);
+ pending_port_merges_.clear();
+ }
+
+ // Feed the broker any pending children of our own.
+ while (!pending_broker_clients.empty()) {
+ const ports::NodeName& child_name = pending_broker_clients.front();
+ auto it = pending_children_.find(child_name);
+ DCHECK(it != pending_children_.end());
+ broker->AddBrokerClient(child_name, it->second->CopyRemoteProcessHandle());
+ pending_broker_clients.pop();
+ }
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ // Have the broker relay any messages we have waiting.
+ for (auto& entry : pending_relay_messages) {
+ const ports::NodeName& destination = entry.first;
+ auto& message_queue = entry.second;
+ while (!message_queue.empty()) {
+ broker->RelayPortsMessage(destination, std::move(message_queue.front()));
+ message_queue.pop();
+ }
+ }
+#endif
+
+ DVLOG(1) << "Child " << name_ << " accepted by broker " << broker_name;
+}
+
+void NodeController::OnPortsMessage(const ports::NodeName& from_node,
+ Channel::MessagePtr channel_message) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ void* data;
+ size_t num_data_bytes, num_header_bytes, num_payload_bytes, num_ports_bytes;
+ if (!ParsePortsMessage(channel_message.get(), &data, &num_data_bytes,
+ &num_header_bytes, &num_payload_bytes,
+ &num_ports_bytes)) {
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ CHECK(channel_message);
+ std::unique_ptr<PortsMessage> ports_message(
+ new PortsMessage(num_header_bytes,
+ num_payload_bytes,
+ num_ports_bytes,
+ std::move(channel_message)));
+ ports_message->set_source_node(from_node);
+ node_->AcceptMessage(ports::ScopedMessage(ports_message.release()));
+ AcceptIncomingMessages();
+}
+
+void NodeController::OnRequestPortMerge(
+ const ports::NodeName& from_node,
+ const ports::PortName& connector_port_name,
+ const std::string& token) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ DVLOG(2) << "Node " << name_ << " received RequestPortMerge for token "
+ << token << " and port " << connector_port_name << "@" << from_node;
+
+ ports::PortRef local_port;
+ {
+ base::AutoLock lock(reserved_ports_lock_);
+ auto it = reserved_ports_.find(token);
+ if (it == reserved_ports_.end()) {
+ DVLOG(1) << "Ignoring request to connect to port for unknown token "
+ << token;
+ return;
+ }
+ local_port = it->second.port;
+ reserved_ports_.erase(it);
+ }
+
+ int rv = node_->MergePorts(local_port, from_node, connector_port_name);
+ if (rv != ports::OK)
+ DLOG(ERROR) << "MergePorts failed: " << rv;
+
+ AcceptIncomingMessages();
+}
+
+void NodeController::OnRequestIntroduction(const ports::NodeName& from_node,
+ const ports::NodeName& name) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ scoped_refptr<NodeChannel> requestor = GetPeerChannel(from_node);
+ if (from_node == name || name == ports::kInvalidNodeName || !requestor) {
+ DLOG(ERROR) << "Rejecting invalid OnRequestIntroduction message from "
+ << from_node;
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ scoped_refptr<NodeChannel> new_friend = GetPeerChannel(name);
+ if (!new_friend) {
+ // We don't know who they're talking about!
+ requestor->Introduce(name, ScopedPlatformHandle());
+ } else {
+ PlatformChannelPair new_channel;
+ requestor->Introduce(name, new_channel.PassServerHandle());
+ new_friend->Introduce(from_node, new_channel.PassClientHandle());
+ }
+}
+
+void NodeController::OnIntroduce(const ports::NodeName& from_node,
+ const ports::NodeName& name,
+ ScopedPlatformHandle channel_handle) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ if (!channel_handle.is_valid()) {
+ node_->LostConnectionToNode(name);
+
+ DVLOG(1) << "Could not be introduced to peer " << name;
+ base::AutoLock lock(peers_lock_);
+ pending_peer_messages_.erase(name);
+ return;
+ }
+
+ scoped_refptr<NodeChannel> channel =
+ NodeChannel::Create(this, ConnectionParams(std::move(channel_handle)),
+ io_task_runner_, ProcessErrorCallback());
+
+ DVLOG(1) << "Adding new peer " << name << " via parent introduction.";
+ AddPeer(name, channel, true /* start_channel */);
+}
+
+void NodeController::OnBroadcast(const ports::NodeName& from_node,
+ Channel::MessagePtr message) {
+ DCHECK(!message->has_handles());
+
+ void* data;
+ size_t num_data_bytes, num_header_bytes, num_payload_bytes, num_ports_bytes;
+ if (!ParsePortsMessage(message.get(), &data, &num_data_bytes,
+ &num_header_bytes, &num_payload_bytes,
+ &num_ports_bytes)) {
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ // Broadcast messages must not contain ports.
+ if (num_ports_bytes > 0) {
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ base::AutoLock lock(peers_lock_);
+ for (auto& iter : peers_) {
+ // Copy and send the message to each known peer.
+ Channel::MessagePtr peer_message(
+ new Channel::Message(message->payload_size(), 0));
+ memcpy(peer_message->mutable_payload(), message->payload(),
+ message->payload_size());
+ iter.second->PortsMessage(std::move(peer_message));
+ }
+}
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+void NodeController::OnRelayPortsMessage(const ports::NodeName& from_node,
+ base::ProcessHandle from_process,
+ const ports::NodeName& destination,
+ Channel::MessagePtr message) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ if (GetBrokerChannel()) {
+ // Only the broker should be asked to relay a message.
+ LOG(ERROR) << "Non-broker refusing to relay message.";
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ // The parent should always know which process this came from.
+ DCHECK(from_process != base::kNullProcessHandle);
+
+#if defined(OS_WIN)
+ // Rewrite the handles to this (the parent) process. If the message is
+ // destined for another child process, the handles will be rewritten to that
+ // process before going out (see NodeChannel::WriteChannelMessage).
+ //
+ // TODO: We could avoid double-duplication.
+ //
+ // Note that we explicitly mark the handles as being owned by the sending
+ // process before rewriting them, in order to accommodate RewriteHandles'
+ // internal sanity checks.
+ ScopedPlatformHandleVectorPtr handles = message->TakeHandles();
+ for (size_t i = 0; i < handles->size(); ++i)
+ (*handles)[i].owning_process = from_process;
+ if (!Channel::Message::RewriteHandles(from_process,
+ base::GetCurrentProcessHandle(),
+ handles.get())) {
+ DLOG(ERROR) << "Failed to relay one or more handles.";
+ }
+ message->SetHandles(std::move(handles));
+#else
+ MachPortRelay* relay = GetMachPortRelay();
+ if (!relay) {
+ LOG(ERROR) << "Receiving Mach ports without a port relay from "
+ << from_node << ". Dropping message.";
+ return;
+ }
+ if (!relay->ExtractPortRights(message.get(), from_process)) {
+ // NodeChannel should ensure that MachPortRelay is ready for the remote
+ // process. At this point, if the port extraction failed, either something
+    // went wrong in the Mach port handling, or the remote process died.
+ LOG(ERROR) << "Error on receiving Mach ports " << from_node
+ << ". Dropping message.";
+ return;
+ }
+#endif // defined(OS_WIN)
+
+ if (destination == name_) {
+ // Great, we can deliver this message locally.
+ OnPortsMessage(from_node, std::move(message));
+ return;
+ }
+
+ scoped_refptr<NodeChannel> peer = GetPeerChannel(destination);
+ if (peer)
+ peer->PortsMessageFromRelay(from_node, std::move(message));
+ else
+ DLOG(ERROR) << "Dropping relay message for unknown node " << destination;
+}
+
+void NodeController::OnPortsMessageFromRelay(const ports::NodeName& from_node,
+ const ports::NodeName& source_node,
+ Channel::MessagePtr message) {
+ if (GetPeerChannel(from_node) != GetBrokerChannel()) {
+ LOG(ERROR) << "Refusing relayed message from non-broker node.";
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ OnPortsMessage(source_node, std::move(message));
+}
+#endif
+
+void NodeController::OnAcceptPeer(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& peer_name,
+ const ports::PortName& port_name) {
+ DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
+
+ auto it = peer_connections_.find(from_node);
+ if (it == peer_connections_.end()) {
+ DLOG(ERROR) << "Received unexpected AcceptPeer message from " << from_node;
+ DropPeer(from_node, nullptr);
+ return;
+ }
+
+ scoped_refptr<NodeChannel> channel = std::move(it->second.channel);
+ ports::PortRef local_port = it->second.local_port;
+ std::string peer_token = std::move(it->second.peer_token);
+ peer_connections_.erase(it);
+ DCHECK(channel);
+
+ // If the peer connection is a self connection (which is used in tests),
+ // drop the channel to it and skip straight to merging the ports.
+ if (name_ == peer_name) {
+ peers_by_token_.erase(peer_token);
+ } else {
+ peers_by_token_[peer_token] = peer_name;
+ peer_connections_.insert(
+ {peer_name, PeerConnection{nullptr, local_port, peer_token}});
+ DVLOG(1) << "Node " << name_ << " accepted peer " << peer_name;
+
+ AddPeer(peer_name, channel, false /* start_channel */);
+ }
+
+ // We need to choose one side to initiate the port merge. It doesn't matter
+ // who does it as long as they don't both try. Simple solution: pick the one
+ // with the "smaller" port name.
+ if (local_port.name() < port_name) {
+ node()->MergePorts(local_port, peer_name, port_name);
+ }
+}
+
+void NodeController::OnChannelError(const ports::NodeName& from_node,
+ NodeChannel* channel) {
+ if (io_task_runner_->RunsTasksOnCurrentThread()) {
+ DropPeer(from_node, channel);
+ // DropPeer may have caused local port closures, so be sure to process any
+ // pending local messages.
+ AcceptIncomingMessages();
+ } else {
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&NodeController::OnChannelError, base::Unretained(this),
+ from_node, channel));
+ }
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+MachPortRelay* NodeController::GetMachPortRelay() {
+ {
+ base::AutoLock lock(parent_lock_);
+ // Return null if we're not the root.
+ if (bootstrap_parent_channel_ || parent_name_ != ports::kInvalidNodeName)
+ return nullptr;
+ }
+
+ base::AutoLock lock(mach_port_relay_lock_);
+ return mach_port_relay_.get();
+}
+#endif
+
+void NodeController::CancelPendingPortMerges() {
+ std::vector<ports::PortRef> ports_to_close;
+
+ {
+ base::AutoLock lock(pending_port_merges_lock_);
+ reject_pending_merges_ = true;
+ for (const auto& port : pending_port_merges_)
+ ports_to_close.push_back(port.second);
+ pending_port_merges_.clear();
+ }
+
+ for (const auto& port : ports_to_close)
+ node_->ClosePort(port);
+}
+
+void NodeController::DestroyOnIOThreadShutdown() {
+ destroy_on_io_thread_shutdown_ = true;
+}
+
+void NodeController::AttemptShutdownIfRequested() {
+ if (!shutdown_callback_flag_)
+ return;
+
+ base::Closure callback;
+ {
+ base::AutoLock lock(shutdown_lock_);
+ if (shutdown_callback_.is_null())
+ return;
+ if (!node_->CanShutdownCleanly(
+ ports::Node::ShutdownPolicy::ALLOW_LOCAL_PORTS)) {
+ DVLOG(2) << "Unable to cleanly shut down node " << name_;
+ return;
+ }
+
+ callback = shutdown_callback_;
+ shutdown_callback_.Reset();
+ shutdown_callback_flag_.Set(false);
+ }
+
+ DCHECK(!callback.is_null());
+
+ callback.Run();
+}
+
+NodeController::PeerConnection::PeerConnection() = default;
+
+NodeController::PeerConnection::PeerConnection(
+ const PeerConnection& other) = default;
+
+NodeController::PeerConnection::PeerConnection(
+ PeerConnection&& other) = default;
+
+NodeController::PeerConnection::PeerConnection(
+ scoped_refptr<NodeChannel> channel,
+ const ports::PortRef& local_port,
+ const std::string& peer_token)
+ : channel(std::move(channel)),
+ local_port(local_port),
+ peer_token(peer_token) {}
+
+NodeController::PeerConnection::~PeerConnection() = default;
+
+NodeController::PeerConnection& NodeController::PeerConnection::
+operator=(const PeerConnection& other) = default;
+
+NodeController::PeerConnection& NodeController::PeerConnection::
+operator=(PeerConnection&& other) = default;
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/node_controller.h b/mojo/edk/system/node_controller.h
new file mode 100644
index 0000000000..46a2d61208
--- /dev/null
+++ b/mojo/edk/system/node_controller.h
@@ -0,0 +1,378 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_NODE_CONTROLLER_H_
+#define MOJO_EDK_SYSTEM_NODE_CONTROLLER_H_
+
+#include <memory>
+#include <queue>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/atomic_flag.h"
+#include "mojo/edk/system/node_channel.h"
+#include "mojo/edk/system/ports/name.h"
+#include "mojo/edk/system/ports/node.h"
+#include "mojo/edk/system/ports/node_delegate.h"
+
+namespace base {
+class PortProvider;
+}
+
+namespace mojo {
+namespace edk {
+
+class Broker;
+class Core;
+class MachPortRelay;
+class PortsMessage;
+
+// The owner of ports::Node which facilitates core EDK implementation. All
+// public interface methods are safe to call from any thread.
+class NodeController : public ports::NodeDelegate,
+ public NodeChannel::Delegate {
+ public:
+ class PortObserver : public ports::UserData {
+ public:
+ virtual void OnPortStatusChanged() = 0;
+
+ protected:
+ ~PortObserver() override {}
+ };
+
+  // |core| owns and outlives us.
+ explicit NodeController(Core* core);
+ ~NodeController() override;
+
+ const ports::NodeName& name() const { return name_; }
+ Core* core() const { return core_; }
+ ports::Node* node() const { return node_.get(); }
+ scoped_refptr<base::TaskRunner> io_task_runner() const {
+ return io_task_runner_;
+ }
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // Create the relay used to transfer mach ports between processes.
+ void CreateMachPortRelay(base::PortProvider* port_provider);
+#endif
+
+ // Called exactly once, shortly after construction, and before any other
+ // methods are called on this object.
+ void SetIOTaskRunner(scoped_refptr<base::TaskRunner> io_task_runner);
+
+ // Connects this node to a child node. This node will initiate a handshake.
+ void ConnectToChild(base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ const std::string& child_token,
+ const ProcessErrorCallback& process_error_callback);
+
+  // Closes all reserved ports associated with the child process
+ // |child_token|.
+ void CloseChildPorts(const std::string& child_token);
+
+  // Closes a connection to a peer associated with |peer_token|.
+ void ClosePeerConnection(const std::string& peer_token);
+
+ // Connects this node to a parent node. The parent node will initiate a
+ // handshake.
+ void ConnectToParent(ConnectionParams connection_params);
+
+ // Connects this node to a peer node. On success, |port| will be merged with
+ // the corresponding port in the peer node.
+ void ConnectToPeer(ConnectionParams connection_params,
+ const ports::PortRef& port,
+ const std::string& peer_token);
+
+ // Sets a port's observer. If |observer| is null the port's current observer
+ // is removed.
+ void SetPortObserver(const ports::PortRef& port,
+ scoped_refptr<PortObserver> observer);
+
+ // Closes a port. Use this in lieu of calling Node::ClosePort() directly, as
+ // it ensures the port's observer has also been removed.
+ void ClosePort(const ports::PortRef& port);
+
+ // Sends a message on a port to its peer.
+ int SendMessage(const ports::PortRef& port_ref,
+ std::unique_ptr<PortsMessage> message);
+
+ // Reserves a local port |port| associated with |token|. A peer holding a copy
+ // of |token| can merge one of its own ports into this one.
+ void ReservePort(const std::string& token, const ports::PortRef& port,
+ const std::string& child_token);
+
+ // Merges a local port |port| into a port reserved by |token| in the parent.
+ void MergePortIntoParent(const std::string& token,
+ const ports::PortRef& port);
+
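+  // Illustrative pairing (the token and port names here are hypothetical):
+  // the parent reserves a port via
+  // ReservePort("pipe-token", parent_port, child_token), and the child later
+  // calls MergePortIntoParent("pipe-token", child_port); the two ports are
+  // then fused into a single message pipe.
+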
+ // Merges two local ports together.
+ int MergeLocalPorts(const ports::PortRef& port0, const ports::PortRef& port1);
+
+ // Creates a new shared buffer for use in the current process.
+ scoped_refptr<PlatformSharedBuffer> CreateSharedBuffer(size_t num_bytes);
+
+ // Request that the Node be shut down cleanly. This may take an arbitrarily
+ // long time to complete, at which point |callback| will be called.
+ //
+ // Note that while it is safe to continue using the NodeController's public
+ // interface after requesting shutdown, you do so at your own risk and there
+ // is NO guarantee that new messages will be sent or ports will complete
+ // transfer.
+ void RequestShutdown(const base::Closure& callback);
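+  //
+  // Illustrative usage (the object and callback names are hypothetical):
+  //
+  //   node_controller->RequestShutdown(base::Bind(&OnEdkShutdownComplete));
+  //   // OnEdkShutdownComplete() runs once the Node can shut down cleanly.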
+
+ // Notifies the NodeController that we received a bad message from the given
+ // node.
+ void NotifyBadMessageFrom(const ports::NodeName& source_node,
+ const std::string& error);
+
+ private:
+ friend Core;
+
+ using NodeMap = std::unordered_map<ports::NodeName,
+ scoped_refptr<NodeChannel>>;
+ using OutgoingMessageQueue = std::queue<Channel::MessagePtr>;
+
+ struct ReservedPort {
+ ports::PortRef port;
+ const std::string child_token;
+ };
+
+ struct PeerConnection {
+ PeerConnection();
+ PeerConnection(const PeerConnection& other);
+ PeerConnection(PeerConnection&& other);
+ PeerConnection(scoped_refptr<NodeChannel> channel,
+ const ports::PortRef& local_port,
+ const std::string& peer_token);
+ ~PeerConnection();
+
+ PeerConnection& operator=(const PeerConnection& other);
+ PeerConnection& operator=(PeerConnection&& other);
+
+
+ scoped_refptr<NodeChannel> channel;
+ ports::PortRef local_port;
+ std::string peer_token;
+ };
+
+ void ConnectToChildOnIOThread(
+ base::ProcessHandle process_handle,
+ ConnectionParams connection_params,
+ ports::NodeName token,
+ const ProcessErrorCallback& process_error_callback);
+ void ConnectToParentOnIOThread(ConnectionParams connection_params);
+
+ void ConnectToPeerOnIOThread(ConnectionParams connection_params,
+ ports::NodeName token,
+ ports::PortRef port,
+ const std::string& peer_token);
+ void ClosePeerConnectionOnIOThread(const std::string& node_name);
+
+ scoped_refptr<NodeChannel> GetPeerChannel(const ports::NodeName& name);
+ scoped_refptr<NodeChannel> GetParentChannel();
+ scoped_refptr<NodeChannel> GetBrokerChannel();
+
+ void AddPeer(const ports::NodeName& name,
+ scoped_refptr<NodeChannel> channel,
+ bool start_channel);
+ void DropPeer(const ports::NodeName& name, NodeChannel* channel);
+ void SendPeerMessage(const ports::NodeName& name,
+ ports::ScopedMessage message);
+ void AcceptIncomingMessages();
+ void ProcessIncomingMessages();
+ void DropAllPeers();
+
+ // ports::NodeDelegate:
+ void GenerateRandomPortName(ports::PortName* port_name) override;
+ void AllocMessage(size_t num_header_bytes,
+ ports::ScopedMessage* message) override;
+ void ForwardMessage(const ports::NodeName& node,
+ ports::ScopedMessage message) override;
+ void BroadcastMessage(ports::ScopedMessage message) override;
+ void PortStatusChanged(const ports::PortRef& port) override;
+
+ // NodeChannel::Delegate:
+ void OnAcceptChild(const ports::NodeName& from_node,
+ const ports::NodeName& parent_name,
+ const ports::NodeName& token) override;
+ void OnAcceptParent(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& child_name) override;
+ void OnAddBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ base::ProcessHandle process_handle) override;
+ void OnBrokerClientAdded(const ports::NodeName& from_node,
+ const ports::NodeName& client_name,
+ ScopedPlatformHandle broker_channel) override;
+ void OnAcceptBrokerClient(const ports::NodeName& from_node,
+ const ports::NodeName& broker_name,
+ ScopedPlatformHandle broker_channel) override;
+ void OnPortsMessage(const ports::NodeName& from_node,
+ Channel::MessagePtr message) override;
+ void OnRequestPortMerge(const ports::NodeName& from_node,
+ const ports::PortName& connector_port_name,
+ const std::string& token) override;
+ void OnRequestIntroduction(const ports::NodeName& from_node,
+ const ports::NodeName& name) override;
+ void OnIntroduce(const ports::NodeName& from_node,
+ const ports::NodeName& name,
+ ScopedPlatformHandle channel_handle) override;
+ void OnBroadcast(const ports::NodeName& from_node,
+ Channel::MessagePtr message) override;
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ void OnRelayPortsMessage(const ports::NodeName& from_node,
+ base::ProcessHandle from_process,
+ const ports::NodeName& destination,
+ Channel::MessagePtr message) override;
+ void OnPortsMessageFromRelay(const ports::NodeName& from_node,
+ const ports::NodeName& source_node,
+ Channel::MessagePtr message) override;
+#endif
+ void OnAcceptPeer(const ports::NodeName& from_node,
+ const ports::NodeName& token,
+ const ports::NodeName& peer_name,
+ const ports::PortName& port_name) override;
+ void OnChannelError(const ports::NodeName& from_node,
+ NodeChannel* channel) override;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ MachPortRelay* GetMachPortRelay() override;
+#endif
+
+ // Cancels all pending port merges. These are merges which are supposed to
+ // be requested from the parent ASAP, and they may be cancelled if the
+ // connection to the parent is broken or never established.
+ void CancelPendingPortMerges();
+
+ // Marks this NodeController for destruction when the IO thread shuts down.
+ // This is used in case Core is torn down before the IO thread. Must only be
+ // called on the IO thread.
+ void DestroyOnIOThreadShutdown();
+
+ // If there is a registered shutdown callback (meaning shutdown has been
+  // requested), this checks the Node's status to see if clean shutdown is
+ // possible. If so, shutdown is performed and the shutdown callback is run.
+ void AttemptShutdownIfRequested();
+
+ // These are safe to access from any thread as long as the Node is alive.
+ Core* const core_;
+ const ports::NodeName name_;
+ const std::unique_ptr<ports::Node> node_;
+ scoped_refptr<base::TaskRunner> io_task_runner_;
+
+ // Guards |peers_| and |pending_peer_messages_|.
+ base::Lock peers_lock_;
+
+ // Channels to known peers, including parent and children, if any.
+ NodeMap peers_;
+
+ // Outgoing message queues for peers we've heard of but can't yet talk to.
+ std::unordered_map<ports::NodeName, OutgoingMessageQueue>
+ pending_peer_messages_;
+
+ // Guards |reserved_ports_| and |pending_child_tokens_|.
+ base::Lock reserved_ports_lock_;
+
+ // Ports reserved by token. Key is the port token.
+ base::hash_map<std::string, ReservedPort> reserved_ports_;
+ // TODO(amistry): This _really_ needs to be a bimap. Unfortunately, we don't
+ // have one yet :(
+ std::unordered_map<ports::NodeName, std::string> pending_child_tokens_;
+
+ // Guards |pending_port_merges_| and |reject_pending_merges_|.
+ base::Lock pending_port_merges_lock_;
+
+ // A set of port merge requests awaiting parent connection.
+ std::vector<std::pair<std::string, ports::PortRef>> pending_port_merges_;
+
+ // Indicates that new merge requests should be rejected because the parent has
+ // disconnected.
+ bool reject_pending_merges_ = false;
+
+ // Guards |parent_name_| and |bootstrap_parent_channel_|.
+ base::Lock parent_lock_;
+
+ // The name of our parent node, if any.
+ ports::NodeName parent_name_;
+
+ // A temporary reference to the parent channel before we know their name.
+ scoped_refptr<NodeChannel> bootstrap_parent_channel_;
+
+ // Guards |broker_name_|, |pending_broker_clients_|, and
+ // |pending_relay_messages_|.
+ base::Lock broker_lock_;
+
+ // The name of our broker node, if any.
+ ports::NodeName broker_name_;
+
+ // A queue of pending child names waiting to be connected to a broker.
+ std::queue<ports::NodeName> pending_broker_clients_;
+
+ // Messages waiting to be relayed by the broker once it's known.
+ std::unordered_map<ports::NodeName, OutgoingMessageQueue>
+ pending_relay_messages_;
+
+ // Guards |incoming_messages_| and |incoming_messages_task_posted_|.
+ base::Lock messages_lock_;
+ std::queue<ports::ScopedMessage> incoming_messages_;
+ // Ensures that there is only one incoming messages task posted to the IO
+ // thread.
+ bool incoming_messages_task_posted_ = false;
+ // Flag to fast-path checking |incoming_messages_|.
+ AtomicFlag incoming_messages_flag_;
+
+ // Guards |shutdown_callback_|.
+ base::Lock shutdown_lock_;
+
+ // Set by RequestShutdown(). If this is non-null, the controller will
+ // begin polling the Node to see if clean shutdown is possible any time the
+ // Node's state is modified by the controller.
+ base::Closure shutdown_callback_;
+ // Flag to fast-path checking |shutdown_callback_|.
+ AtomicFlag shutdown_callback_flag_;
+
+ // All other fields below must only be accessed on the I/O thread, i.e., the
+ // thread on which core_->io_task_runner() runs tasks.
+
+ // Channels to children during handshake.
+ NodeMap pending_children_;
+
+ using PeerNodeMap =
+ std::unordered_map<ports::NodeName, PeerConnection>;
+ PeerNodeMap peer_connections_;
+
+ // Maps from peer token to node name, pending or not.
+ std::unordered_map<std::string, ports::NodeName> peers_by_token_;
+
+ // Indicates whether this object should delete itself on IO thread shutdown.
+ // Must only be accessed from the IO thread.
+ bool destroy_on_io_thread_shutdown_ = false;
+
+#if !defined(OS_MACOSX) && !defined(OS_NACL_SFI)
+ // Broker for sync shared buffer creation in children.
+ std::unique_ptr<Broker> broker_;
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ base::Lock mach_port_relay_lock_;
+ // Relay for transferring mach ports to/from children.
+ std::unique_ptr<MachPortRelay> mach_port_relay_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(NodeController);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_NODE_CONTROLLER_H_
diff --git a/mojo/edk/system/options_validation.h b/mojo/edk/system/options_validation.h
new file mode 100644
index 0000000000..e1b337d5f7
--- /dev/null
+++ b/mojo/edk/system/options_validation.h
@@ -0,0 +1,97 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Functions to help with verifying various |Mojo...Options| structs from the
+// (public, C) API. These are "extensible" structs, which all have |struct_size|
+// as their first member. All fields (other than |struct_size|) are optional,
+// but any |flags| specified must be known to the system (otherwise, an error of
+// |MOJO_RESULT_UNIMPLEMENTED| should be returned).
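+//
+// For illustration (a hypothetical options struct, not defined anywhere in
+// this codebase):
+//
+//   struct MOJO_ALIGNAS(8) MojoFooOptions {
+//     uint32_t struct_size;  // Set by the caller to sizeof(MojoFooOptions).
+//     MojoFooOptionsFlags flags;
+//   };
+//
+// Callers may pass an older, smaller version of such a struct; |struct_size|
+// tells the reader which trailing members are absent.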
+
+#ifndef MOJO_EDK_SYSTEM_OPTIONS_VALIDATION_H_
+#define MOJO_EDK_SYSTEM_OPTIONS_VALIDATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+namespace edk {
+
+template <class Options>
+class UserOptionsReader {
+ public:
+  // Constructor from a |const Options*| (which it checks -- this constructor
+ // has side effects!).
+ // Note: We initialize |options_reader_| without checking, since we do a check
+ // in |GetSizeForReader()|.
+ explicit UserOptionsReader(const Options* options) {
+ CHECK(options && IsAligned<MOJO_ALIGNOF(Options)>(options));
+ options_ = GetSizeForReader(options) == 0 ? nullptr : options;
+ static_assert(offsetof(Options, struct_size) == 0,
+ "struct_size not first member of Options");
+ // TODO(vtl): Enable when MSVC supports this (C++11 extended sizeof):
+ // static_assert(sizeof(Options::struct_size) == sizeof(uint32_t),
+ // "Options::struct_size not a uint32_t");
+ // (Or maybe assert that its type is uint32_t?)
+ }
+
+ bool is_valid() const { return !!options_; }
+
+ const Options& options() const {
+ DCHECK(is_valid());
+ return *options_;
+ }
+
+ // Checks that the given (variable-size) |options| passed to the constructor
+ // (plausibly) has a member at the given offset with the given size. You
+ // probably want to use |OPTIONS_STRUCT_HAS_MEMBER()| instead.
+ bool HasMember(size_t offset, size_t size) const {
+ DCHECK(is_valid());
+ // We assume that |offset| and |size| are reasonable, since they should come
+ // from |offsetof(Options, some_member)| and |sizeof(Options::some_member)|,
+ // respectively.
+ return options().struct_size >= offset + size;
+ }
+
+ private:
+ static inline size_t GetSizeForReader(const Options* options) {
+ uint32_t struct_size = *reinterpret_cast<const uint32_t*>(options);
+ if (struct_size < sizeof(uint32_t))
+ return 0;
+
+ return std::min(static_cast<size_t>(struct_size), sizeof(Options));
+ }
+
+ template <size_t alignment>
+ static bool IsAligned(const void* pointer) {
+ return reinterpret_cast<uintptr_t>(pointer) % alignment == 0;
+ }
+
+ const Options* options_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserOptionsReader);
+};
+
+// Macro to invoke |UserOptionsReader<Options>::HasMember()| parametrized by
+// member name instead of offset and size.
+//
+// (We can't just give |HasMember()| a member pointer template argument instead,
+// since there's no good/strictly-correct way to get an offset from that.)
+//
+// TODO(vtl): With C++11, use |sizeof(Options::member)| instead of (the
+// contortion below). We might also be able to pull out the type |Options| from
+// |reader| (using |decltype|) instead of requiring a parameter.
+#define OPTIONS_STRUCT_HAS_MEMBER(Options, member, reader) \
+ reader.HasMember(offsetof(Options, member), sizeof(reader.options().member))
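+
+// Illustrative caller (the options type and validation routine below are
+// hypothetical, not part of this file):
+//
+//   MojoResult ValidateFooOptions(const MojoFooOptions* in_options) {
+//     UserOptionsReader<MojoFooOptions> reader(in_options);
+//     if (!reader.is_valid())
+//       return MOJO_RESULT_INVALID_ARGUMENT;
+//     if (OPTIONS_STRUCT_HAS_MEMBER(MojoFooOptions, flags, reader) &&
+//         reader.options().flags != 0)
+//       return MOJO_RESULT_UNIMPLEMENTED;  // Unknown flags were specified.
+//     return MOJO_RESULT_OK;
+//   }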
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_OPTIONS_VALIDATION_H_
diff --git a/mojo/edk/system/options_validation_unittest.cc b/mojo/edk/system/options_validation_unittest.cc
new file mode 100644
index 0000000000..a01a92cfb1
--- /dev/null
+++ b/mojo/edk/system/options_validation_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/options_validation.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "mojo/public/c/system/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+// Declare a test options struct just as we do in actual public headers.
+
+using TestOptionsFlags = uint32_t;
+
+static_assert(MOJO_ALIGNOF(int64_t) == 8, "int64_t has weird alignment");
+struct MOJO_ALIGNAS(8) TestOptions {
+ uint32_t struct_size;
+ TestOptionsFlags flags;
+ uint32_t member1;
+ uint32_t member2;
+};
+static_assert(sizeof(TestOptions) == 16, "TestOptions has wrong size");
+
+const uint32_t kSizeOfTestOptions = static_cast<uint32_t>(sizeof(TestOptions));
+
+TEST(OptionsValidationTest, Valid) {
+ {
+ const TestOptions kOptions = {kSizeOfTestOptions};
+ UserOptionsReader<TestOptions> reader(&kOptions);
+ EXPECT_TRUE(reader.is_valid());
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, flags, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member1, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member2, reader));
+ }
+ {
+ const TestOptions kOptions = {static_cast<uint32_t>(
+ offsetof(TestOptions, struct_size) + sizeof(uint32_t))};
+ UserOptionsReader<TestOptions> reader(&kOptions);
+ EXPECT_TRUE(reader.is_valid());
+ EXPECT_FALSE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, flags, reader));
+ EXPECT_FALSE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member1, reader));
+ EXPECT_FALSE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member2, reader));
+ }
+
+ {
+ const TestOptions kOptions = {
+ static_cast<uint32_t>(offsetof(TestOptions, flags) + sizeof(uint32_t))};
+ UserOptionsReader<TestOptions> reader(&kOptions);
+ EXPECT_TRUE(reader.is_valid());
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, flags, reader));
+ EXPECT_FALSE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member1, reader));
+ EXPECT_FALSE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member2, reader));
+ }
+ {
+ MOJO_ALIGNAS(8) char buf[sizeof(TestOptions) + 100] = {};
+ TestOptions* options = reinterpret_cast<TestOptions*>(buf);
+ options->struct_size = kSizeOfTestOptions + 1;
+ UserOptionsReader<TestOptions> reader(options);
+ EXPECT_TRUE(reader.is_valid());
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, flags, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member1, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member2, reader));
+ }
+ {
+ MOJO_ALIGNAS(8) char buf[sizeof(TestOptions) + 100] = {};
+ TestOptions* options = reinterpret_cast<TestOptions*>(buf);
+ options->struct_size = kSizeOfTestOptions + 4;
+ UserOptionsReader<TestOptions> reader(options);
+ EXPECT_TRUE(reader.is_valid());
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, flags, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member1, reader));
+ EXPECT_TRUE(OPTIONS_STRUCT_HAS_MEMBER(TestOptions, member2, reader));
+ }
+}
+
+TEST(OptionsValidationTest, Invalid) {
+ // Size too small:
+ for (size_t i = 0; i < sizeof(uint32_t); i++) {
+ TestOptions options = {static_cast<uint32_t>(i)};
+ UserOptionsReader<TestOptions> reader(&options);
+ EXPECT_FALSE(reader.is_valid()) << i;
+ }
+}
+
+// These test invalid arguments that should cause death if we're being paranoid
+// about checking arguments (which we would want to do if, e.g., we were in a
+// true "kernel" situation, but we might not want to do otherwise for
+// performance reasons). Probably blatant errors like passing in null pointers
+// (for required pointer arguments) will still cause death, but perhaps not
+// predictably.
+TEST(OptionsValidationTest, InvalidDeath) {
+#if defined(OFFICIAL_BUILD)
+ const char kMemoryCheckFailedRegex[] = "";
+#else
+ const char kMemoryCheckFailedRegex[] = "Check failed";
+#endif
+
+ // Null:
+ EXPECT_DEATH_IF_SUPPORTED(
+ { UserOptionsReader<TestOptions> reader((nullptr)); },
+ kMemoryCheckFailedRegex);
+
+ // Unaligned:
+ EXPECT_DEATH_IF_SUPPORTED(
+ {
+ UserOptionsReader<TestOptions> reader(
+ reinterpret_cast<const TestOptions*>(1));
+ },
+ kMemoryCheckFailedRegex);
+ // Note: The current implementation checks the size only after checking the
+ // alignment versus that required for the |uint32_t| size, so it won't die in
+ // the expected way if you pass, e.g., 4. So we have to manufacture a valid
+ // pointer at an offset of alignment 4.
+ EXPECT_DEATH_IF_SUPPORTED(
+ {
+ uint32_t buffer[100] = {};
+ TestOptions* options = (reinterpret_cast<uintptr_t>(buffer) % 8 == 0)
+ ? reinterpret_cast<TestOptions*>(&buffer[1])
+ : reinterpret_cast<TestOptions*>(&buffer[0]);
+ options->struct_size = static_cast<uint32_t>(sizeof(TestOptions));
+ UserOptionsReader<TestOptions> reader(options);
+ },
+ kMemoryCheckFailedRegex);
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/platform_handle_dispatcher.cc b/mojo/edk/system/platform_handle_dispatcher.cc
new file mode 100644
index 0000000000..3e708c2517
--- /dev/null
+++ b/mojo/edk/system/platform_handle_dispatcher.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/platform_handle_dispatcher.h"
+
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+
+namespace mojo {
+namespace edk {
+
+// static
+scoped_refptr<PlatformHandleDispatcher> PlatformHandleDispatcher::Create(
+ ScopedPlatformHandle platform_handle) {
+ return new PlatformHandleDispatcher(std::move(platform_handle));
+}
+
+ScopedPlatformHandle PlatformHandleDispatcher::PassPlatformHandle() {
+ return std::move(platform_handle_);
+}
+
+Dispatcher::Type PlatformHandleDispatcher::GetType() const {
+ return Type::PLATFORM_HANDLE;
+}
+
+MojoResult PlatformHandleDispatcher::Close() {
+ base::AutoLock lock(lock_);
+ if (is_closed_ || in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ is_closed_ = true;
+ platform_handle_.reset();
+ return MOJO_RESULT_OK;
+}
+
+void PlatformHandleDispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) {
+ *num_bytes = 0;
+ *num_ports = 0;
+ *num_handles = 1;
+}
+
+bool PlatformHandleDispatcher::EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) {
+ base::AutoLock lock(lock_);
+ if (is_closed_)
+ return false;
+ handles[0] = platform_handle_.get();
+ return true;
+}
+
+bool PlatformHandleDispatcher::BeginTransit() {
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return false;
+ in_transit_ = !is_closed_;
+ return in_transit_;
+}
+
+void PlatformHandleDispatcher::CompleteTransitAndClose() {
+ base::AutoLock lock(lock_);
+
+ in_transit_ = false;
+ is_closed_ = true;
+
+ // The system has taken ownership of our handle.
+ ignore_result(platform_handle_.release());
+}
+
+void PlatformHandleDispatcher::CancelTransit() {
+ base::AutoLock lock(lock_);
+ in_transit_ = false;
+}
+
+// static
+scoped_refptr<PlatformHandleDispatcher> PlatformHandleDispatcher::Deserialize(
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles) {
+ if (num_bytes || num_ports || num_handles != 1)
+ return nullptr;
+
+ PlatformHandle handle;
+ std::swap(handle, handles[0]);
+
+ return PlatformHandleDispatcher::Create(ScopedPlatformHandle(handle));
+}
+
+PlatformHandleDispatcher::PlatformHandleDispatcher(
+ ScopedPlatformHandle platform_handle)
+ : platform_handle_(std::move(platform_handle)) {}
+
+PlatformHandleDispatcher::~PlatformHandleDispatcher() {
+ DCHECK(is_closed_ && !in_transit_);
+ DCHECK(!platform_handle_.is_valid());
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/platform_handle_dispatcher.h b/mojo/edk/system/platform_handle_dispatcher.h
new file mode 100644
index 0000000000..a36c7a0e22
--- /dev/null
+++ b/mojo/edk/system/platform_handle_dispatcher.h
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PLATFORM_HANDLE_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_PLATFORM_HANDLE_DISPATCHER_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/system_impl_export.h"
+
+namespace mojo {
+namespace edk {
+
+class MOJO_SYSTEM_IMPL_EXPORT PlatformHandleDispatcher : public Dispatcher {
+ public:
+ static scoped_refptr<PlatformHandleDispatcher> Create(
+ ScopedPlatformHandle platform_handle);
+
+ ScopedPlatformHandle PassPlatformHandle();
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_handles) override;
+ bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) override;
+ bool BeginTransit() override;
+ void CompleteTransitAndClose() override;
+ void CancelTransit() override;
+
+ static scoped_refptr<PlatformHandleDispatcher> Deserialize(
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* handles,
+ size_t num_handles);
+
+ private:
+ PlatformHandleDispatcher(ScopedPlatformHandle platform_handle);
+ ~PlatformHandleDispatcher() override;
+
+ base::Lock lock_;
+ bool in_transit_ = false;
+ bool is_closed_ = false;
+ ScopedPlatformHandle platform_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformHandleDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PLATFORM_HANDLE_DISPATCHER_H_
diff --git a/mojo/edk/system/platform_handle_dispatcher_unittest.cc b/mojo/edk/system/platform_handle_dispatcher_unittest.cc
new file mode 100644
index 0000000000..7a942622b0
--- /dev/null
+++ b/mojo/edk/system/platform_handle_dispatcher_unittest.cc
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/platform_handle_dispatcher.h"
+
+#include <stdio.h>
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/test/test_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+TEST(PlatformHandleDispatcherTest, Basic) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ static const char kHelloWorld[] = "hello world";
+
+ base::FilePath unused;
+ base::ScopedFILE fp(
+ CreateAndOpenTemporaryFileInDir(temp_dir.GetPath(), &unused));
+ ASSERT_TRUE(fp);
+ EXPECT_EQ(sizeof(kHelloWorld),
+ fwrite(kHelloWorld, 1, sizeof(kHelloWorld), fp.get()));
+
+ ScopedPlatformHandle h(test::PlatformHandleFromFILE(std::move(fp)));
+ EXPECT_FALSE(fp);
+ ASSERT_TRUE(h.is_valid());
+
+ scoped_refptr<PlatformHandleDispatcher> dispatcher =
+ PlatformHandleDispatcher::Create(std::move(h));
+ EXPECT_FALSE(h.is_valid());
+ EXPECT_EQ(Dispatcher::Type::PLATFORM_HANDLE, dispatcher->GetType());
+
+ h = dispatcher->PassPlatformHandle();
+ EXPECT_TRUE(h.is_valid());
+
+ fp = test::FILEFromPlatformHandle(std::move(h), "rb");
+ EXPECT_FALSE(h.is_valid());
+ EXPECT_TRUE(fp);
+
+ rewind(fp.get());
+ char read_buffer[1000] = {};
+ EXPECT_EQ(sizeof(kHelloWorld),
+ fread(read_buffer, 1, sizeof(read_buffer), fp.get()));
+ EXPECT_STREQ(kHelloWorld, read_buffer);
+
+ // Try getting the handle again. (It should fail cleanly.)
+ h = dispatcher->PassPlatformHandle();
+ EXPECT_FALSE(h.is_valid());
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->Close());
+}
+
+TEST(PlatformHandleDispatcherTest, Serialization) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ static const char kFooBar[] = "foo bar";
+
+ base::FilePath unused;
+ base::ScopedFILE fp(
+ CreateAndOpenTemporaryFileInDir(temp_dir.GetPath(), &unused));
+ EXPECT_EQ(sizeof(kFooBar), fwrite(kFooBar, 1, sizeof(kFooBar), fp.get()));
+
+ scoped_refptr<PlatformHandleDispatcher> dispatcher =
+ PlatformHandleDispatcher::Create(
+ test::PlatformHandleFromFILE(std::move(fp)));
+
+ uint32_t num_bytes = 0;
+ uint32_t num_ports = 0;
+ uint32_t num_handles = 0;
+ EXPECT_TRUE(dispatcher->BeginTransit());
+ dispatcher->StartSerialize(&num_bytes, &num_ports, &num_handles);
+
+ EXPECT_EQ(0u, num_bytes);
+ EXPECT_EQ(0u, num_ports);
+ EXPECT_EQ(1u, num_handles);
+
+ ScopedPlatformHandleVectorPtr handles(new PlatformHandleVector(1));
+ EXPECT_TRUE(dispatcher->EndSerialize(nullptr, nullptr, handles->data()));
+ dispatcher->CompleteTransitAndClose();
+
+ EXPECT_TRUE(handles->at(0).is_valid());
+
+ ScopedPlatformHandle handle = dispatcher->PassPlatformHandle();
+ EXPECT_FALSE(handle.is_valid());
+
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, dispatcher->Close());
+
+ dispatcher = static_cast<PlatformHandleDispatcher*>(
+ Dispatcher::Deserialize(Dispatcher::Type::PLATFORM_HANDLE, nullptr,
+ num_bytes, nullptr, num_ports, handles->data(),
+ 1).get());
+
+ EXPECT_FALSE(handles->at(0).is_valid());
+ EXPECT_TRUE(dispatcher->GetType() == Dispatcher::Type::PLATFORM_HANDLE);
+
+ fp = test::FILEFromPlatformHandle(dispatcher->PassPlatformHandle(), "rb");
+ EXPECT_TRUE(fp);
+
+ rewind(fp.get());
+ char read_buffer[1000] = {};
+ EXPECT_EQ(sizeof(kFooBar),
+ fread(read_buffer, 1, sizeof(read_buffer), fp.get()));
+ EXPECT_STREQ(kFooBar, read_buffer);
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->Close());
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/platform_wrapper_unittest.cc b/mojo/edk/system/platform_wrapper_unittest.cc
new file mode 100644
index 0000000000..f29d62b04f
--- /dev/null
+++ b/mojo/edk/system/platform_wrapper_unittest.cc
@@ -0,0 +1,212 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/platform_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#if defined(OS_POSIX)
+#define SIMPLE_PLATFORM_HANDLE_TYPE MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR
+#elif defined(OS_WIN)
+#define SIMPLE_PLATFORM_HANDLE_TYPE MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#define SHARED_BUFFER_PLATFORM_HANDLE_TYPE MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT
+#else
+#define SHARED_BUFFER_PLATFORM_HANDLE_TYPE SIMPLE_PLATFORM_HANDLE_TYPE
+#endif
+
+uint64_t PlatformHandleValueFromPlatformFile(base::PlatformFile file) {
+#if defined(OS_WIN)
+ return reinterpret_cast<uint64_t>(file);
+#else
+ return static_cast<uint64_t>(file);
+#endif
+}
+
+base::PlatformFile PlatformFileFromPlatformHandleValue(uint64_t value) {
+#if defined(OS_WIN)
+ return reinterpret_cast<base::PlatformFile>(value);
+#else
+ return static_cast<base::PlatformFile>(value);
+#endif
+}
+
+namespace mojo {
+namespace edk {
+namespace {
+
+using PlatformWrapperTest = test::MojoTestBase;
+
+TEST_F(PlatformWrapperTest, WrapPlatformHandle) {
+ // Create a temporary file and write a message to it.
+ base::FilePath temp_file_path;
+ ASSERT_TRUE(base::CreateTemporaryFile(&temp_file_path));
+ const std::string kMessage = "Hello, world!";
+ EXPECT_EQ(base::WriteFile(temp_file_path, kMessage.data(),
+ static_cast<int>(kMessage.size())),
+ static_cast<int>(kMessage.size()));
+
+ RUN_CHILD_ON_PIPE(ReadPlatformFile, h)
+ // Open the temporary file for reading, wrap its handle, and send it to
+ // the child along with the expected message to be read.
+ base::File file(temp_file_path,
+ base::File::FLAG_OPEN | base::File::FLAG_READ);
+ EXPECT_TRUE(file.IsValid());
+
+ MojoHandle wrapped_handle;
+ MojoPlatformHandle os_file;
+ os_file.struct_size = sizeof(MojoPlatformHandle);
+ os_file.type = SIMPLE_PLATFORM_HANDLE_TYPE;
+ os_file.value =
+ PlatformHandleValueFromPlatformFile(file.TakePlatformFile());
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWrapPlatformHandle(&os_file, &wrapped_handle));
+
+ WriteMessageWithHandles(h, kMessage, &wrapped_handle, 1);
+ END_CHILD()
+
+ base::DeleteFile(temp_file_path, false);
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReadPlatformFile, PlatformWrapperTest, h) {
+ // Read a message and a wrapped file handle; unwrap the handle.
+ MojoHandle wrapped_handle;
+ std::string message = ReadMessageWithHandles(h, &wrapped_handle, 1);
+
+ MojoPlatformHandle platform_handle;
+ platform_handle.struct_size = sizeof(MojoPlatformHandle);
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoUnwrapPlatformHandle(wrapped_handle, &platform_handle));
+ EXPECT_EQ(SIMPLE_PLATFORM_HANDLE_TYPE, platform_handle.type);
+ base::File file(PlatformFileFromPlatformHandleValue(platform_handle.value));
+
+ // Expect to read the same message from the file.
+ std::vector<char> data(message.size());
+ EXPECT_EQ(file.ReadAtCurrentPos(data.data(), static_cast<int>(data.size())),
+ static_cast<int>(data.size()));
+ EXPECT_TRUE(std::equal(message.begin(), message.end(), data.begin()));
+}
+
+TEST_F(PlatformWrapperTest, WrapPlatformSharedBufferHandle) {
+ // Allocate a new platform shared buffer and write a message into it.
+ const std::string kMessage = "Hello, world!";
+ base::SharedMemory buffer;
+ buffer.CreateAndMapAnonymous(kMessage.size());
+ CHECK(buffer.memory());
+ memcpy(buffer.memory(), kMessage.data(), kMessage.size());
+
+ RUN_CHILD_ON_PIPE(ReadPlatformSharedBuffer, h)
+ // Wrap the shared memory handle and send it to the child along with the
+ // expected message.
+ base::SharedMemoryHandle memory_handle =
+ base::SharedMemory::DuplicateHandle(buffer.handle());
+ MojoPlatformHandle os_buffer;
+ os_buffer.struct_size = sizeof(MojoPlatformHandle);
+ os_buffer.type = SHARED_BUFFER_PLATFORM_HANDLE_TYPE;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ os_buffer.value = static_cast<uint64_t>(memory_handle.GetMemoryObject());
+#elif defined(OS_POSIX)
+ os_buffer.value = static_cast<uint64_t>(memory_handle.fd);
+#elif defined(OS_WIN)
+ os_buffer.value = reinterpret_cast<uint64_t>(memory_handle.GetHandle());
+#endif
+
+ MojoHandle wrapped_handle;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoWrapPlatformSharedBufferHandle(
+ &os_buffer, kMessage.size(),
+ MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_NONE,
+ &wrapped_handle));
+ WriteMessageWithHandles(h, kMessage, &wrapped_handle, 1);
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReadPlatformSharedBuffer, PlatformWrapperTest,
+ h) {
+ // Read a message and a wrapped shared buffer handle.
+ MojoHandle wrapped_handle;
+ std::string message = ReadMessageWithHandles(h, &wrapped_handle, 1);
+
+  // Check the message in the buffer.
+ ExpectBufferContents(wrapped_handle, 0, message);
+
+ // Now unwrap the buffer and verify that the base::SharedMemoryHandle also
+ // works as expected.
+ MojoPlatformHandle os_buffer;
+ os_buffer.struct_size = sizeof(MojoPlatformHandle);
+ size_t size;
+ MojoPlatformSharedBufferHandleFlags flags;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ MojoUnwrapPlatformSharedBufferHandle(wrapped_handle, &os_buffer,
+ &size, &flags));
+  // MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_NONE is zero, so check the
+  // READ_ONLY bit explicitly.
+  bool read_only = flags & MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_READ_ONLY;
+ EXPECT_FALSE(read_only);
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ ASSERT_EQ(MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT, os_buffer.type);
+ base::SharedMemoryHandle memory_handle(
+ static_cast<mach_port_t>(os_buffer.value), size,
+ base::GetCurrentProcId());
+#elif defined(OS_POSIX)
+ ASSERT_EQ(MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR, os_buffer.type);
+ base::SharedMemoryHandle memory_handle(static_cast<int>(os_buffer.value),
+ false);
+#elif defined(OS_WIN)
+ ASSERT_EQ(MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE, os_buffer.type);
+ base::SharedMemoryHandle memory_handle(
+ reinterpret_cast<HANDLE>(os_buffer.value), base::GetCurrentProcId());
+#endif
+
+ base::SharedMemory memory(memory_handle, read_only);
+ memory.Map(message.size());
+ ASSERT_TRUE(memory.memory());
+
+ EXPECT_TRUE(std::equal(message.begin(), message.end(),
+ static_cast<const char*>(memory.memory())));
+}
+
+TEST_F(PlatformWrapperTest, InvalidHandle) {
+ // Wrap an invalid platform handle and expect to unwrap the same.
+
+ MojoHandle wrapped_handle;
+ MojoPlatformHandle invalid_handle;
+ invalid_handle.struct_size = sizeof(MojoPlatformHandle);
+ invalid_handle.type = MOJO_PLATFORM_HANDLE_TYPE_INVALID;
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWrapPlatformHandle(&invalid_handle, &wrapped_handle));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoUnwrapPlatformHandle(wrapped_handle, &invalid_handle));
+ EXPECT_EQ(MOJO_PLATFORM_HANDLE_TYPE_INVALID, invalid_handle.type);
+}
+
+TEST_F(PlatformWrapperTest, InvalidArgument) {
+ // Try to wrap an invalid MojoPlatformHandle struct and expect an error.
+ MojoHandle wrapped_handle;
+ MojoPlatformHandle platform_handle;
+ platform_handle.struct_size = 0;
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoWrapPlatformHandle(&platform_handle, &wrapped_handle));
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
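Note for readers skimming the tests above: the wrap/unwrap round trip they exercise reduces to the sketch below. It is illustrative only; it assumes an initialized EDK and an already-open POSIX file descriptor |fd|, and it elides the message-pipe transfer that the multiprocess tests perform.

    // Describe the OS handle to Mojo.
    MojoPlatformHandle platform_handle;
    platform_handle.struct_size = sizeof(MojoPlatformHandle);
    platform_handle.type = MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR;
    platform_handle.value = static_cast<uint64_t>(fd);  // |fd| is assumed.

    // Wrap it in a Mojo handle that can be sent over a message pipe.
    // On success, ownership of |fd| moves into |wrapped_handle|.
    MojoHandle wrapped_handle;
    MojoResult result = MojoWrapPlatformHandle(&platform_handle, &wrapped_handle);
    // result == MOJO_RESULT_OK on success.

    // ... transfer |wrapped_handle|, then on the receiving side ...
    MojoPlatformHandle unwrapped;
    unwrapped.struct_size = sizeof(MojoPlatformHandle);
    result = MojoUnwrapPlatformHandle(wrapped_handle, &unwrapped);
    // unwrapped.type and unwrapped.value again describe the original OS handle.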
diff --git a/mojo/edk/system/ports/BUILD.gn b/mojo/edk/system/ports/BUILD.gn
new file mode 100644
index 0000000000..5c82761982
--- /dev/null
+++ b/mojo/edk/system/ports/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//testing/test.gni")
+
+source_set("ports") {
+ sources = [
+ "event.cc",
+ "event.h",
+ "message.cc",
+ "message.h",
+ "message_filter.h",
+ "message_queue.cc",
+ "message_queue.h",
+ "name.cc",
+ "name.h",
+ "node.cc",
+ "node.h",
+ "node_delegate.h",
+ "port.cc",
+ "port.h",
+ "port_ref.cc",
+ "port_ref.h",
+ "user_data.h",
+ ]
+
+ public_deps = [
+ "//base",
+ ]
+}
+
+source_set("tests") {
+ testonly = true
+
+ sources = [
+ "ports_unittest.cc",
+ ]
+
+ deps = [
+ ":ports",
+ "//base",
+ "//base/test:test_support",
+ "//testing/gtest",
+ ]
+}
diff --git a/mojo/edk/system/ports/event.cc b/mojo/edk/system/ports/event.cc
new file mode 100644
index 0000000000..2e2208641f
--- /dev/null
+++ b/mojo/edk/system/ports/event.cc
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/event.h"
+
+#include <string.h>
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+namespace {
+
+const size_t kPortsMessageAlignment = 8;
+
+static_assert(sizeof(PortDescriptor) % kPortsMessageAlignment == 0,
+ "Invalid PortDescriptor size.");
+
+static_assert(sizeof(EventHeader) % kPortsMessageAlignment == 0,
+ "Invalid EventHeader size.");
+
+static_assert(sizeof(UserEventData) % kPortsMessageAlignment == 0,
+ "Invalid UserEventData size.");
+
+static_assert(sizeof(ObserveProxyEventData) % kPortsMessageAlignment == 0,
+ "Invalid ObserveProxyEventData size.");
+
+static_assert(sizeof(ObserveProxyAckEventData) % kPortsMessageAlignment == 0,
+ "Invalid ObserveProxyAckEventData size.");
+
+static_assert(sizeof(ObserveClosureEventData) % kPortsMessageAlignment == 0,
+ "Invalid ObserveClosureEventData size.");
+
+static_assert(sizeof(MergePortEventData) % kPortsMessageAlignment == 0,
+ "Invalid MergePortEventData size.");
+
+} // namespace
+
+PortDescriptor::PortDescriptor() {
+ memset(padding, 0, sizeof(padding));
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/event.h b/mojo/edk/system/ports/event.h
new file mode 100644
index 0000000000..a66dfc1186
--- /dev/null
+++ b/mojo/edk/system/ports/event.h
@@ -0,0 +1,111 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_EVENT_H_
+#define MOJO_EDK_SYSTEM_PORTS_EVENT_H_
+
+#include <stdint.h>
+
+#include "mojo/edk/system/ports/message.h"
+#include "mojo/edk/system/ports/name.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+#pragma pack(push, 1)
+
+// TODO: Add static assertions of alignment.
+
+struct PortDescriptor {
+ PortDescriptor();
+
+ NodeName peer_node_name;
+ PortName peer_port_name;
+ NodeName referring_node_name;
+ PortName referring_port_name;
+ uint64_t next_sequence_num_to_send;
+ uint64_t next_sequence_num_to_receive;
+ uint64_t last_sequence_num_to_receive;
+ bool peer_closed;
+ char padding[7];
+};
+
+enum struct EventType : uint32_t {
+ kUser,
+ kPortAccepted,
+ kObserveProxy,
+ kObserveProxyAck,
+ kObserveClosure,
+ kMergePort,
+};
+
+struct EventHeader {
+ EventType type;
+ uint32_t padding;
+ PortName port_name;
+};
+
+struct UserEventData {
+ uint64_t sequence_num;
+ uint32_t num_ports;
+ uint32_t padding;
+};
+
+struct ObserveProxyEventData {
+ NodeName proxy_node_name;
+ PortName proxy_port_name;
+ NodeName proxy_to_node_name;
+ PortName proxy_to_port_name;
+};
+
+struct ObserveProxyAckEventData {
+ uint64_t last_sequence_num;
+};
+
+struct ObserveClosureEventData {
+ uint64_t last_sequence_num;
+};
+
+struct MergePortEventData {
+ PortName new_port_name;
+ PortDescriptor new_port_descriptor;
+};
+
+#pragma pack(pop)
+
+inline const EventHeader* GetEventHeader(const Message& message) {
+ return static_cast<const EventHeader*>(message.header_bytes());
+}
+
+inline EventHeader* GetMutableEventHeader(Message* message) {
+ return static_cast<EventHeader*>(message->mutable_header_bytes());
+}
+
+template <typename EventData>
+inline const EventData* GetEventData(const Message& message) {
+ return reinterpret_cast<const EventData*>(
+ reinterpret_cast<const char*>(GetEventHeader(message) + 1));
+}
+
+template <typename EventData>
+inline EventData* GetMutableEventData(Message* message) {
+ return reinterpret_cast<EventData*>(
+ reinterpret_cast<char*>(GetMutableEventHeader(message) + 1));
+}
+
+inline const PortDescriptor* GetPortDescriptors(const UserEventData* event) {
+ return reinterpret_cast<const PortDescriptor*>(
+ reinterpret_cast<const char*>(event + 1));
+}
+
+inline PortDescriptor* GetMutablePortDescriptors(UserEventData* event) {
+ return reinterpret_cast<PortDescriptor*>(reinterpret_cast<char*>(event + 1));
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_EVENT_H_
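The accessor templates above rely on the serialized layout that Message::Parse() (message.cc, below) validates for user events. A small sketch of the size arithmetic, where |num_ports| stands in for UserEventData::num_ports; the helper names are invented for illustration only.

    #include <stddef.h>

    #include "mojo/edk/system/ports/event.h"

    // Layout of a serialized user event:
    //   [EventHeader][UserEventData][PortDescriptor * num_ports]  <- header bytes
    //   [PortName * num_ports]                                    <- ports bytes
    //   [payload]                                                 <- payload bytes
    size_t UserEventHeaderBytes(size_t num_ports) {
      return sizeof(mojo::edk::ports::EventHeader) +
             sizeof(mojo::edk::ports::UserEventData) +
             num_ports * sizeof(mojo::edk::ports::PortDescriptor);
    }

    size_t UserEventPortsBytes(size_t num_ports) {
      return num_ports * sizeof(mojo::edk::ports::PortName);
    }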
diff --git a/mojo/edk/system/ports/message.cc b/mojo/edk/system/ports/message.cc
new file mode 100644
index 0000000000..5d3c000a3a
--- /dev/null
+++ b/mojo/edk/system/ports/message.cc
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "mojo/edk/system/ports/event.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+// static
+bool Message::Parse(const void* bytes,
+ size_t num_bytes,
+ size_t* num_header_bytes,
+ size_t* num_payload_bytes,
+ size_t* num_ports_bytes) {
+ if (num_bytes < sizeof(EventHeader))
+ return false;
+ const EventHeader* header = static_cast<const EventHeader*>(bytes);
+ switch (header->type) {
+ case EventType::kUser:
+ // See below.
+ break;
+ case EventType::kPortAccepted:
+ *num_header_bytes = sizeof(EventHeader);
+ break;
+ case EventType::kObserveProxy:
+ *num_header_bytes = sizeof(EventHeader) + sizeof(ObserveProxyEventData);
+ break;
+ case EventType::kObserveProxyAck:
+ *num_header_bytes =
+ sizeof(EventHeader) + sizeof(ObserveProxyAckEventData);
+ break;
+ case EventType::kObserveClosure:
+ *num_header_bytes = sizeof(EventHeader) + sizeof(ObserveClosureEventData);
+ break;
+ case EventType::kMergePort:
+ *num_header_bytes = sizeof(EventHeader) + sizeof(MergePortEventData);
+ break;
+ default:
+ return false;
+ }
+
+ if (header->type == EventType::kUser) {
+ if (num_bytes < sizeof(EventHeader) + sizeof(UserEventData))
+ return false;
+ const UserEventData* event_data =
+ reinterpret_cast<const UserEventData*>(
+ reinterpret_cast<const char*>(header + 1));
+ if (event_data->num_ports > std::numeric_limits<uint16_t>::max())
+ return false;
+ *num_header_bytes = sizeof(EventHeader) +
+ sizeof(UserEventData) +
+ event_data->num_ports * sizeof(PortDescriptor);
+ *num_ports_bytes = event_data->num_ports * sizeof(PortName);
+ if (num_bytes < *num_header_bytes + *num_ports_bytes)
+ return false;
+ *num_payload_bytes = num_bytes - *num_header_bytes - *num_ports_bytes;
+ } else {
+ if (*num_header_bytes != num_bytes)
+ return false;
+ *num_payload_bytes = 0;
+ *num_ports_bytes = 0;
+ }
+
+ return true;
+}
+
+Message::Message(size_t num_payload_bytes, size_t num_ports)
+ : Message(sizeof(EventHeader) + sizeof(UserEventData) +
+ num_ports * sizeof(PortDescriptor),
+ num_payload_bytes, num_ports * sizeof(PortName)) {
+ num_ports_ = num_ports;
+}
+
+Message::Message(size_t num_header_bytes,
+ size_t num_payload_bytes,
+ size_t num_ports_bytes)
+ : start_(nullptr),
+ num_header_bytes_(num_header_bytes),
+ num_ports_bytes_(num_ports_bytes),
+ num_payload_bytes_(num_payload_bytes) {
+}
+
+void Message::InitializeUserMessageHeader(void* start) {
+ start_ = static_cast<char*>(start);
+ memset(start_, 0, num_header_bytes_);
+ GetMutableEventHeader(this)->type = EventType::kUser;
+ GetMutableEventData<UserEventData>(this)->num_ports =
+ static_cast<uint32_t>(num_ports_);
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/message.h b/mojo/edk/system/ports/message.h
new file mode 100644
index 0000000000..95fa04676c
--- /dev/null
+++ b/mojo/edk/system/ports/message.h
@@ -0,0 +1,93 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_MESSAGE_H_
+#define MOJO_EDK_SYSTEM_PORTS_MESSAGE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "mojo/edk/system/ports/name.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+// A message consists of a header (array of bytes), payload (array of bytes)
+// and an array of ports. The header is used by the Node implementation.
+//
+// This class is designed to be subclassed, and the subclass is responsible for
+// providing the underlying storage. The header size will be aligned, and it
+// should be followed in memory by the array of ports and finally the payload.
+//
+// NOTE: This class does not manage the lifetime of the ports it references.
+class Message {
+ public:
+ virtual ~Message() {}
+
+ // Inspect the message at |bytes| and return the size of each section. Returns
+ // |false| if the message is malformed and |true| otherwise.
+ static bool Parse(const void* bytes,
+ size_t num_bytes,
+ size_t* num_header_bytes,
+ size_t* num_payload_bytes,
+ size_t* num_ports_bytes);
+
+ void* mutable_header_bytes() { return start_; }
+ const void* header_bytes() const { return start_; }
+ size_t num_header_bytes() const { return num_header_bytes_; }
+
+ void* mutable_payload_bytes() {
+ return start_ + num_header_bytes_ + num_ports_bytes_;
+ }
+ const void* payload_bytes() const {
+ return const_cast<Message*>(this)->mutable_payload_bytes();
+ }
+ size_t num_payload_bytes() const { return num_payload_bytes_; }
+
+ PortName* mutable_ports() {
+ return reinterpret_cast<PortName*>(start_ + num_header_bytes_);
+ }
+ const PortName* ports() const {
+ return const_cast<Message*>(this)->mutable_ports();
+ }
+ size_t num_ports_bytes() const { return num_ports_bytes_; }
+ size_t num_ports() const { return num_ports_bytes_ / sizeof(PortName); }
+
+ protected:
+ // Constructs a new Message base for a user message.
+ //
+ // Note: You MUST call InitializeUserMessageHeader() before this Message is
+ // ready for transmission.
+ Message(size_t num_payload_bytes, size_t num_ports);
+
+ // Constructs a new Message base for an internal message. Do NOT call
+ // InitializeUserMessageHeader() when using this constructor.
+ Message(size_t num_header_bytes,
+ size_t num_payload_bytes,
+ size_t num_ports_bytes);
+
+ Message(const Message& other) = delete;
+ void operator=(const Message& other) = delete;
+
+ // Initializes the header in a newly allocated message buffer to carry a
+ // user message.
+ void InitializeUserMessageHeader(void* start);
+
+ // Note: storage is [header][ports][payload].
+ char* start_ = nullptr;
+ size_t num_ports_ = 0;
+ size_t num_header_bytes_ = 0;
+ size_t num_ports_bytes_ = 0;
+ size_t num_payload_bytes_ = 0;
+};
+
+using ScopedMessage = std::unique_ptr<Message>;
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_MESSAGE_H_
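Because Message leaves storage to its subclasses, a minimal subclass looks roughly like the sketch below. TestMessage is hypothetical and not part of this patch; it simply backs the [header][ports][payload] region with a std::vector (with ports, the buffer would also need num_ports_bytes()).

    #include <vector>

    #include "mojo/edk/system/ports/message.h"

    class TestMessage : public mojo::edk::ports::Message {
     public:
      explicit TestMessage(size_t num_payload_bytes)
          : Message(num_payload_bytes, /* num_ports */ 0),
            buffer_(num_header_bytes() + num_payload_bytes) {
        // The base constructor records the section sizes; the subclass supplies
        // contiguous storage and initializes the user message header within it.
        InitializeUserMessageHeader(buffer_.data());
      }

     private:
      std::vector<char> buffer_;
    };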
diff --git a/mojo/edk/system/ports/message_filter.h b/mojo/edk/system/ports/message_filter.h
new file mode 100644
index 0000000000..bf8fa21966
--- /dev/null
+++ b/mojo/edk/system/ports/message_filter.h
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_
+#define MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+class Message;
+
+// An interface which can be implemented to filter port messages according to
+// arbitrary policy.
+class MessageFilter {
+ public:
+ virtual ~MessageFilter() {}
+
+  // Returns true if |message| should be accepted by whoever is applying this
+  // filter. See MessageQueue::GetNextMessage(), for example.
+ virtual bool Match(const Message& message) = 0;
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_
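As an illustration of the interface, here is a hypothetical filter (not part of this patch) that only matches user messages up to a given sequence number. Only user events reach filters through MessageQueue, so reading UserEventData here is safe.

    #include <stdint.h>

    #include "mojo/edk/system/ports/event.h"
    #include "mojo/edk/system/ports/message_filter.h"

    class MaxSequenceNumFilter : public mojo::edk::ports::MessageFilter {
     public:
      explicit MaxSequenceNumFilter(uint64_t max_sequence_num)
          : max_sequence_num_(max_sequence_num) {}

      bool Match(const mojo::edk::ports::Message& message) override {
        return mojo::edk::ports::GetEventData<mojo::edk::ports::UserEventData>(
                   message)->sequence_num <= max_sequence_num_;
      }

     private:
      const uint64_t max_sequence_num_;
    };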
diff --git a/mojo/edk/system/ports/message_queue.cc b/mojo/edk/system/ports/message_queue.cc
new file mode 100644
index 0000000000..defb1b6c75
--- /dev/null
+++ b/mojo/edk/system/ports/message_queue.cc
@@ -0,0 +1,87 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/message_queue.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "mojo/edk/system/ports/event.h"
+#include "mojo/edk/system/ports/message_filter.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+inline uint64_t GetSequenceNum(const ScopedMessage& message) {
+ return GetEventData<UserEventData>(*message)->sequence_num;
+}
+
+// Used by std::{push,pop}_heap functions
+inline bool operator<(const ScopedMessage& a, const ScopedMessage& b) {
+ return GetSequenceNum(a) > GetSequenceNum(b);
+}
+
+MessageQueue::MessageQueue() : MessageQueue(kInitialSequenceNum) {}
+
+MessageQueue::MessageQueue(uint64_t next_sequence_num)
+ : next_sequence_num_(next_sequence_num) {
+ // The message queue is blocked waiting for a message with sequence number
+ // equal to |next_sequence_num|.
+}
+
+MessageQueue::~MessageQueue() {
+#if DCHECK_IS_ON()
+ size_t num_leaked_ports = 0;
+ for (const auto& message : heap_)
+ num_leaked_ports += message->num_ports();
+ DVLOG_IF(1, num_leaked_ports > 0)
+ << "Leaking " << num_leaked_ports << " ports in unreceived messages";
+#endif
+}
+
+bool MessageQueue::HasNextMessage() const {
+ return !heap_.empty() && GetSequenceNum(heap_[0]) == next_sequence_num_;
+}
+
+void MessageQueue::GetNextMessage(ScopedMessage* message,
+ MessageFilter* filter) {
+ if (!HasNextMessage() || (filter && !filter->Match(*heap_[0].get()))) {
+ message->reset();
+ return;
+ }
+
+ std::pop_heap(heap_.begin(), heap_.end());
+ *message = std::move(heap_.back());
+ heap_.pop_back();
+
+ next_sequence_num_++;
+}
+
+void MessageQueue::AcceptMessage(ScopedMessage message,
+ bool* has_next_message) {
+ DCHECK(GetEventHeader(*message)->type == EventType::kUser);
+
+ // TODO: Handle sequence number roll-over.
+
+ heap_.emplace_back(std::move(message));
+ std::push_heap(heap_.begin(), heap_.end());
+
+ if (!signalable_) {
+ *has_next_message = false;
+ } else {
+ *has_next_message = (GetSequenceNum(heap_[0]) == next_sequence_num_);
+ }
+}
+
+void MessageQueue::GetReferencedPorts(std::deque<PortName>* port_names) {
+ for (const auto& message : heap_) {
+ for (size_t i = 0; i < message->num_ports(); ++i)
+ port_names->push_back(message->ports()[i]);
+ }
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/message_queue.h b/mojo/edk/system/ports/message_queue.h
new file mode 100644
index 0000000000..d9a47ed0a1
--- /dev/null
+++ b/mojo/edk/system/ports/message_queue.h
@@ -0,0 +1,73 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_MESSAGE_QUEUE_H_
+#define MOJO_EDK_SYSTEM_PORTS_MESSAGE_QUEUE_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <functional>
+#include <limits>
+#include <vector>
+
+#include "base/macros.h"
+#include "mojo/edk/system/ports/message.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+const uint64_t kInitialSequenceNum = 1;
+const uint64_t kInvalidSequenceNum = std::numeric_limits<uint64_t>::max();
+
+class MessageFilter;
+
+// An incoming message queue for a port. MessageQueue keeps track of the next
+// expected sequence number and can indicate whether the next sequential
+// message is available. Thus the queue enforces message ordering for the
+// consumer without enforcing it for the producer (see AcceptMessage() below).
+class MessageQueue {
+ public:
+ explicit MessageQueue();
+ explicit MessageQueue(uint64_t next_sequence_num);
+ ~MessageQueue();
+
+ void set_signalable(bool value) { signalable_ = value; }
+
+ uint64_t next_sequence_num() const { return next_sequence_num_; }
+
+ bool HasNextMessage() const;
+
+ // Gives ownership of the message. If |filter| is non-null, the next message
+ // will only be retrieved if the filter successfully matches it.
+ void GetNextMessage(ScopedMessage* message, MessageFilter* filter);
+
+ // Takes ownership of the message. Note: Messages are ordered, so while we
+ // have added a message to the queue, we may still be waiting on a message
+ // ahead of this one before we can let any of the messages be returned by
+ // GetNextMessage.
+ //
+  // Furthermore, |has_next_message| acts like an edge trigger: once it has
+  // been reported as true, it is reported as false again until GetNextMessage
+  // has been called enough times to return a null message.
+ //
+ void AcceptMessage(ScopedMessage message, bool* has_next_message);
+
+ // Returns all of the ports referenced by messages in this message queue.
+ void GetReferencedPorts(std::deque<PortName>* ports);
+
+ private:
+ std::vector<ScopedMessage> heap_;
+ uint64_t next_sequence_num_;
+ bool signalable_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageQueue);
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_MESSAGE_QUEUE_H_
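A usage sketch of the ordering behavior described above. MakeUserMessage() is a hypothetical helper returning a ScopedMessage whose UserEventData::sequence_num is the given value (for example, built on a Message subclass like the one sketched after message.h); in production the Node assigns sequence numbers when sending.

    void ExampleUsage() {
      using mojo::edk::ports::MessageQueue;
      using mojo::edk::ports::ScopedMessage;

      MessageQueue queue;  // Waits for kInitialSequenceNum (1) first.

      bool has_next = false;
      queue.AcceptMessage(MakeUserMessage(2), &has_next);  // Out of order: false.
      queue.AcceptMessage(MakeUserMessage(1), &has_next);  // Gap filled: true.

      // Drain in sequence order; a null message signals a gap or an empty queue.
      ScopedMessage message;
      for (;;) {
        queue.GetNextMessage(&message, /* filter */ nullptr);
        if (!message)
          break;
        // Messages come out here as 1, then 2.
      }
    }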
diff --git a/mojo/edk/system/ports/name.cc b/mojo/edk/system/ports/name.cc
new file mode 100644
index 0000000000..ea17698f97
--- /dev/null
+++ b/mojo/edk/system/ports/name.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/name.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+extern const PortName kInvalidPortName = {0, 0};
+
+extern const NodeName kInvalidNodeName = {0, 0};
+
+std::ostream& operator<<(std::ostream& stream, const Name& name) {
+ std::ios::fmtflags flags(stream.flags());
+ stream << std::hex << std::uppercase << name.v1;
+ if (name.v2 != 0)
+ stream << '.' << name.v2;
+ stream.flags(flags);
+ return stream;
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/name.h b/mojo/edk/system/ports/name.h
new file mode 100644
index 0000000000..72e41b92ab
--- /dev/null
+++ b/mojo/edk/system/ports/name.h
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_NAME_H_
+#define MOJO_EDK_SYSTEM_PORTS_NAME_H_
+
+#include <stdint.h>
+
+#include <ostream>
+#include <tuple>
+
+#include "base/hash.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+struct Name {
+ Name(uint64_t v1, uint64_t v2) : v1(v1), v2(v2) {}
+ uint64_t v1, v2;
+};
+
+inline bool operator==(const Name& a, const Name& b) {
+ return a.v1 == b.v1 && a.v2 == b.v2;
+}
+
+inline bool operator!=(const Name& a, const Name& b) {
+ return !(a == b);
+}
+
+inline bool operator<(const Name& a, const Name& b) {
+ return std::tie(a.v1, a.v2) < std::tie(b.v1, b.v2);
+}
+
+std::ostream& operator<<(std::ostream& stream, const Name& name);
+
+struct PortName : Name {
+ PortName() : Name(0, 0) {}
+ PortName(uint64_t v1, uint64_t v2) : Name(v1, v2) {}
+};
+
+extern const PortName kInvalidPortName;
+
+struct NodeName : Name {
+ NodeName() : Name(0, 0) {}
+ NodeName(uint64_t v1, uint64_t v2) : Name(v1, v2) {}
+};
+
+extern const NodeName kInvalidNodeName;
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+namespace std {
+
+template <>
+struct hash<mojo::edk::ports::PortName> {
+ std::size_t operator()(const mojo::edk::ports::PortName& name) const {
+ return base::HashInts64(name.v1, name.v2);
+ }
+};
+
+template <>
+struct hash<mojo::edk::ports::NodeName> {
+ std::size_t operator()(const mojo::edk::ports::NodeName& name) const {
+ return base::HashInts64(name.v1, name.v2);
+ }
+};
+
+} // namespace std
+
+#endif // MOJO_EDK_SYSTEM_PORTS_NAME_H_
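The std::hash specializations above are what let names key unordered containers (the Node port map, for instance, is keyed by PortName). A minimal sketch:

    #include <unordered_map>

    #include "base/logging.h"
    #include "mojo/edk/system/ports/name.h"

    void ExampleUsage() {
      using mojo::edk::ports::NodeName;
      using mojo::edk::ports::PortName;

      // operator==() plus the hash specializations make names usable as keys.
      std::unordered_map<PortName, NodeName> peer_nodes;
      peer_nodes[PortName(1, 2)] = NodeName(3, 4);

      // operator<<() prints the uppercase hex form, e.g. "ABC" or "ABC.1".
      LOG(INFO) << PortName(0xABC, 0) << " -> " << peer_nodes[PortName(1, 2)];
    }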
diff --git a/mojo/edk/system/ports/node.cc b/mojo/edk/system/ports/node.cc
new file mode 100644
index 0000000000..f9a3feb805
--- /dev/null
+++ b/mojo/edk/system/ports/node.cc
@@ -0,0 +1,1385 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/node.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/system/ports/node_delegate.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+namespace {
+
+int DebugError(const char* message, int error_code) {
+ CHECK(false) << "Oops: " << message;
+ return error_code;
+}
+
+#define OOPS(x) DebugError(#x, x)
+
+bool CanAcceptMoreMessages(const Port* port) {
+ // Have we already doled out the last message (i.e., do we expect to NOT
+ // receive further messages)?
+ uint64_t next_sequence_num = port->message_queue.next_sequence_num();
+ if (port->state == Port::kClosed)
+ return false;
+ if (port->peer_closed || port->remove_proxy_on_last_message) {
+ if (port->last_sequence_num_to_receive == next_sequence_num - 1)
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+class Node::LockedPort {
+ public:
+ explicit LockedPort(Port* port) : port_(port) {
+ port_->lock.AssertAcquired();
+ }
+
+ Port* get() const { return port_; }
+ Port* operator->() const { return port_; }
+
+ private:
+ Port* const port_;
+};
+
+Node::Node(const NodeName& name, NodeDelegate* delegate)
+ : name_(name),
+ delegate_(delegate) {
+}
+
+Node::~Node() {
+ if (!ports_.empty())
+ DLOG(WARNING) << "Unclean shutdown for node " << name_;
+}
+
+bool Node::CanShutdownCleanly(ShutdownPolicy policy) {
+ base::AutoLock ports_lock(ports_lock_);
+
+ if (policy == ShutdownPolicy::DONT_ALLOW_LOCAL_PORTS) {
+#if DCHECK_IS_ON()
+ for (auto entry : ports_) {
+ DVLOG(2) << "Port " << entry.first << " referencing node "
+ << entry.second->peer_node_name << " is blocking shutdown of "
+ << "node " << name_ << " (state=" << entry.second->state << ")";
+ }
+#endif
+ return ports_.empty();
+ }
+
+ DCHECK_EQ(policy, ShutdownPolicy::ALLOW_LOCAL_PORTS);
+
+ // NOTE: This is not efficient, though it probably doesn't need to be since
+ // relatively few ports should be open during shutdown and shutdown doesn't
+ // need to be blazingly fast.
+ bool can_shutdown = true;
+ for (auto entry : ports_) {
+ base::AutoLock lock(entry.second->lock);
+ if (entry.second->peer_node_name != name_ &&
+ entry.second->state != Port::kReceiving) {
+ can_shutdown = false;
+#if DCHECK_IS_ON()
+ DVLOG(2) << "Port " << entry.first << " referencing node "
+ << entry.second->peer_node_name << " is blocking shutdown of "
+ << "node " << name_ << " (state=" << entry.second->state << ")";
+#else
+ // Exit early when not debugging.
+ break;
+#endif
+ }
+ }
+
+ return can_shutdown;
+}
+
+int Node::GetPort(const PortName& port_name, PortRef* port_ref) {
+ scoped_refptr<Port> port = GetPort(port_name);
+ if (!port)
+ return ERROR_PORT_UNKNOWN;
+
+ *port_ref = PortRef(port_name, std::move(port));
+ return OK;
+}
+
+int Node::CreateUninitializedPort(PortRef* port_ref) {
+ PortName port_name;
+ delegate_->GenerateRandomPortName(&port_name);
+
+ scoped_refptr<Port> port(new Port(kInitialSequenceNum, kInitialSequenceNum));
+ int rv = AddPortWithName(port_name, port);
+ if (rv != OK)
+ return rv;
+
+ *port_ref = PortRef(port_name, std::move(port));
+ return OK;
+}
+
+int Node::InitializePort(const PortRef& port_ref,
+ const NodeName& peer_node_name,
+ const PortName& peer_port_name) {
+ Port* port = port_ref.port();
+
+ {
+ base::AutoLock lock(port->lock);
+ if (port->state != Port::kUninitialized)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ port->state = Port::kReceiving;
+ port->peer_node_name = peer_node_name;
+ port->peer_port_name = peer_port_name;
+ }
+
+ delegate_->PortStatusChanged(port_ref);
+
+ return OK;
+}
+
+int Node::CreatePortPair(PortRef* port0_ref, PortRef* port1_ref) {
+ int rv;
+
+ rv = CreateUninitializedPort(port0_ref);
+ if (rv != OK)
+ return rv;
+
+ rv = CreateUninitializedPort(port1_ref);
+ if (rv != OK)
+ return rv;
+
+ rv = InitializePort(*port0_ref, name_, port1_ref->name());
+ if (rv != OK)
+ return rv;
+
+ rv = InitializePort(*port1_ref, name_, port0_ref->name());
+ if (rv != OK)
+ return rv;
+
+ return OK;
+}
+
+int Node::SetUserData(const PortRef& port_ref,
+ scoped_refptr<UserData> user_data) {
+ Port* port = port_ref.port();
+
+ base::AutoLock lock(port->lock);
+ if (port->state == Port::kClosed)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ port->user_data = std::move(user_data);
+
+ return OK;
+}
+
+int Node::GetUserData(const PortRef& port_ref,
+ scoped_refptr<UserData>* user_data) {
+ Port* port = port_ref.port();
+
+ base::AutoLock lock(port->lock);
+ if (port->state == Port::kClosed)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ *user_data = port->user_data;
+
+ return OK;
+}
+
+int Node::ClosePort(const PortRef& port_ref) {
+ std::deque<PortName> referenced_port_names;
+
+ ObserveClosureEventData data;
+
+ NodeName peer_node_name;
+ PortName peer_port_name;
+ Port* port = port_ref.port();
+ {
+ // We may need to erase the port, which requires ports_lock_ to be held,
+ // but ports_lock_ must be acquired before any individual port locks.
+ base::AutoLock ports_lock(ports_lock_);
+
+ base::AutoLock lock(port->lock);
+ if (port->state == Port::kUninitialized) {
+ // If the port was not yet initialized, there's nothing interesting to do.
+ ErasePort_Locked(port_ref.name());
+ return OK;
+ }
+
+ if (port->state != Port::kReceiving)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ port->state = Port::kClosed;
+
+ // We pass along the sequence number of the last message sent from this
+ // port to allow the peer to have the opportunity to consume all inbound
+ // messages before notifying the embedder that this port is closed.
+ data.last_sequence_num = port->next_sequence_num_to_send - 1;
+
+ peer_node_name = port->peer_node_name;
+ peer_port_name = port->peer_port_name;
+
+ // If the port being closed still has unread messages, then we need to take
+ // care to close those ports so as to avoid leaking memory.
+ port->message_queue.GetReferencedPorts(&referenced_port_names);
+
+ ErasePort_Locked(port_ref.name());
+ }
+
+ DVLOG(2) << "Sending ObserveClosure from " << port_ref.name() << "@" << name_
+ << " to " << peer_port_name << "@" << peer_node_name;
+
+ delegate_->ForwardMessage(
+ peer_node_name,
+ NewInternalMessage(peer_port_name, EventType::kObserveClosure, data));
+
+ for (const auto& name : referenced_port_names) {
+ PortRef ref;
+ if (GetPort(name, &ref) == OK)
+ ClosePort(ref);
+ }
+ return OK;
+}
+
+int Node::GetStatus(const PortRef& port_ref, PortStatus* port_status) {
+ Port* port = port_ref.port();
+
+ base::AutoLock lock(port->lock);
+
+ if (port->state != Port::kReceiving)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ port_status->has_messages = port->message_queue.HasNextMessage();
+ port_status->receiving_messages = CanAcceptMoreMessages(port);
+ port_status->peer_closed = port->peer_closed;
+ return OK;
+}
+
+int Node::GetMessage(const PortRef& port_ref,
+ ScopedMessage* message,
+ MessageFilter* filter) {
+ *message = nullptr;
+
+ DVLOG(4) << "GetMessage for " << port_ref.name() << "@" << name_;
+
+ Port* port = port_ref.port();
+ {
+ base::AutoLock lock(port->lock);
+
+ // This could also be treated like the port being unknown since the
+ // embedder should no longer be referring to a port that has been sent.
+ if (port->state != Port::kReceiving)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ // Let the embedder get messages until there are no more before reporting
+ // that the peer closed its end.
+ if (!CanAcceptMoreMessages(port))
+ return ERROR_PORT_PEER_CLOSED;
+
+ port->message_queue.GetNextMessage(message, filter);
+ }
+
+ // Allow referenced ports to trigger PortStatusChanged calls.
+ if (*message) {
+ for (size_t i = 0; i < (*message)->num_ports(); ++i) {
+ const PortName& new_port_name = (*message)->ports()[i];
+ scoped_refptr<Port> new_port = GetPort(new_port_name);
+
+ DCHECK(new_port) << "Port " << new_port_name << "@" << name_
+ << " does not exist!";
+
+ base::AutoLock lock(new_port->lock);
+
+ DCHECK(new_port->state == Port::kReceiving);
+ new_port->message_queue.set_signalable(true);
+ }
+ }
+
+ return OK;
+}
+
+int Node::SendMessage(const PortRef& port_ref, ScopedMessage message) {
+ int rv = SendMessageInternal(port_ref, &message);
+ if (rv != OK) {
+ // If send failed, close all carried ports. Note that we're careful not to
+ // close the sending port itself if it happened to be one of the encoded
+ // ports (an invalid but possible condition.)
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ if (message->ports()[i] == port_ref.name())
+ continue;
+
+ PortRef port;
+ if (GetPort(message->ports()[i], &port) == OK)
+ ClosePort(port);
+ }
+ }
+ return rv;
+}
+
+int Node::AcceptMessage(ScopedMessage message) {
+ const EventHeader* header = GetEventHeader(*message);
+ switch (header->type) {
+ case EventType::kUser:
+ return OnUserMessage(std::move(message));
+ case EventType::kPortAccepted:
+ return OnPortAccepted(header->port_name);
+ case EventType::kObserveProxy:
+ return OnObserveProxy(
+ header->port_name,
+ *GetEventData<ObserveProxyEventData>(*message));
+ case EventType::kObserveProxyAck:
+ return OnObserveProxyAck(
+ header->port_name,
+ GetEventData<ObserveProxyAckEventData>(*message)->last_sequence_num);
+ case EventType::kObserveClosure:
+ return OnObserveClosure(
+ header->port_name,
+ GetEventData<ObserveClosureEventData>(*message)->last_sequence_num);
+ case EventType::kMergePort:
+ return OnMergePort(header->port_name,
+ *GetEventData<MergePortEventData>(*message));
+ }
+ return OOPS(ERROR_NOT_IMPLEMENTED);
+}
+
+int Node::MergePorts(const PortRef& port_ref,
+ const NodeName& destination_node_name,
+ const PortName& destination_port_name) {
+ Port* port = port_ref.port();
+ MergePortEventData data;
+ {
+ base::AutoLock lock(port->lock);
+
+ DVLOG(1) << "Sending MergePort from " << port_ref.name() << "@" << name_
+ << " to " << destination_port_name << "@" << destination_node_name;
+
+ // Send the port-to-merge over to the destination node so it can be merged
+ // into the port cycle atomically there.
+ data.new_port_name = port_ref.name();
+ WillSendPort(LockedPort(port), destination_node_name, &data.new_port_name,
+ &data.new_port_descriptor);
+ }
+ delegate_->ForwardMessage(
+ destination_node_name,
+ NewInternalMessage(destination_port_name,
+ EventType::kMergePort, data));
+ return OK;
+}
+
+int Node::MergeLocalPorts(const PortRef& port0_ref, const PortRef& port1_ref) {
+ Port* port0 = port0_ref.port();
+ Port* port1 = port1_ref.port();
+ int rv;
+ {
+ // |ports_lock_| must be held when acquiring overlapping port locks.
+ base::AutoLock ports_lock(ports_lock_);
+ base::AutoLock port0_lock(port0->lock);
+ base::AutoLock port1_lock(port1->lock);
+
+ DVLOG(1) << "Merging local ports " << port0_ref.name() << "@" << name_
+ << " and " << port1_ref.name() << "@" << name_;
+
+ if (port0->state != Port::kReceiving || port1->state != Port::kReceiving)
+ rv = ERROR_PORT_STATE_UNEXPECTED;
+ else
+ rv = MergePorts_Locked(port0_ref, port1_ref);
+ }
+
+ if (rv != OK) {
+ ClosePort(port0_ref);
+ ClosePort(port1_ref);
+ }
+
+ return rv;
+}
+
+int Node::LostConnectionToNode(const NodeName& node_name) {
+ // We can no longer send events to the given node. We also can't expect any
+ // PortAccepted events.
+
+ DVLOG(1) << "Observing lost connection from node " << name_
+ << " to node " << node_name;
+
+ DestroyAllPortsWithPeer(node_name, kInvalidPortName);
+ return OK;
+}
+
+int Node::OnUserMessage(ScopedMessage message) {
+ PortName port_name = GetEventHeader(*message)->port_name;
+ const auto* event = GetEventData<UserEventData>(*message);
+
+#if DCHECK_IS_ON()
+ std::ostringstream ports_buf;
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ if (i > 0)
+ ports_buf << ",";
+ ports_buf << message->ports()[i];
+ }
+
+ DVLOG(4) << "AcceptMessage " << event->sequence_num
+ << " [ports=" << ports_buf.str() << "] at "
+ << port_name << "@" << name_;
+#endif
+
+ scoped_refptr<Port> port = GetPort(port_name);
+
+  // Even if this port does not exist, cannot receive any more messages or is
+ // buffering or proxying messages, we still need these ports to be bound to
+ // this node. When the message is forwarded, these ports will get transferred
+ // following the usual method. If the message cannot be accepted, then the
+ // newly bound ports will simply be closed.
+
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ int rv = AcceptPort(message->ports()[i], GetPortDescriptors(event)[i]);
+ if (rv != OK)
+ return rv;
+ }
+
+ bool has_next_message = false;
+ bool message_accepted = false;
+
+ if (port) {
+ // We may want to forward messages once the port lock is held, so we must
+ // acquire |ports_lock_| first.
+ base::AutoLock ports_lock(ports_lock_);
+ base::AutoLock lock(port->lock);
+
+ // Reject spurious messages if we've already received the last expected
+ // message.
+ if (CanAcceptMoreMessages(port.get())) {
+ message_accepted = true;
+ port->message_queue.AcceptMessage(std::move(message), &has_next_message);
+
+ if (port->state == Port::kBuffering) {
+ has_next_message = false;
+ } else if (port->state == Port::kProxying) {
+ has_next_message = false;
+
+ // Forward messages. We forward messages in sequential order here so
+ // that we maintain the message queue's notion of next sequence number.
+ // That's useful for the proxy removal process as we can tell when this
+ // port has seen all of the messages it is expected to see.
+ int rv = ForwardMessages_Locked(LockedPort(port.get()), port_name);
+ if (rv != OK)
+ return rv;
+
+ MaybeRemoveProxy_Locked(LockedPort(port.get()), port_name);
+ }
+ }
+ }
+
+ if (!message_accepted) {
+ DVLOG(2) << "Message not accepted!\n";
+ // Close all newly accepted ports as they are effectively orphaned.
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ PortRef port_ref;
+ if (GetPort(message->ports()[i], &port_ref) == OK) {
+ ClosePort(port_ref);
+ } else {
+ DLOG(WARNING) << "Cannot close non-existent port!\n";
+ }
+ }
+ } else if (has_next_message) {
+ PortRef port_ref(port_name, port);
+ delegate_->PortStatusChanged(port_ref);
+ }
+
+ return OK;
+}
+
+int Node::OnPortAccepted(const PortName& port_name) {
+ scoped_refptr<Port> port = GetPort(port_name);
+ if (!port)
+ return ERROR_PORT_UNKNOWN;
+
+ DVLOG(2) << "PortAccepted at " << port_name << "@" << name_
+ << " pointing to "
+ << port->peer_port_name << "@" << port->peer_node_name;
+
+ return BeginProxying(PortRef(port_name, std::move(port)));
+}
+
+int Node::OnObserveProxy(const PortName& port_name,
+ const ObserveProxyEventData& event) {
+ if (port_name == kInvalidPortName) {
+ // An ObserveProxy with an invalid target port name is a broadcast used to
+ // inform ports when their peer (which was itself a proxy) has become
+ // defunct due to unexpected node disconnection.
+ //
+ // Receiving ports affected by this treat it as equivalent to peer closure.
+ // Proxies affected by this can be removed and will in turn broadcast their
+ // own death with a similar message.
+ CHECK_EQ(event.proxy_to_node_name, kInvalidNodeName);
+ CHECK_EQ(event.proxy_to_port_name, kInvalidPortName);
+ DestroyAllPortsWithPeer(event.proxy_node_name, event.proxy_port_name);
+ return OK;
+ }
+
+ // The port may have already been closed locally, in which case the
+ // ObserveClosure message will contain the last_sequence_num field.
+ // We can then silently ignore this message.
+ scoped_refptr<Port> port = GetPort(port_name);
+ if (!port) {
+ DVLOG(1) << "ObserveProxy: " << port_name << "@" << name_ << " not found";
+ return OK;
+ }
+
+ DVLOG(2) << "ObserveProxy at " << port_name << "@" << name_ << ", proxy at "
+ << event.proxy_port_name << "@"
+ << event.proxy_node_name << " pointing to "
+ << event.proxy_to_port_name << "@"
+ << event.proxy_to_node_name;
+
+ {
+ base::AutoLock lock(port->lock);
+
+ if (port->peer_node_name == event.proxy_node_name &&
+ port->peer_port_name == event.proxy_port_name) {
+ if (port->state == Port::kReceiving) {
+ port->peer_node_name = event.proxy_to_node_name;
+ port->peer_port_name = event.proxy_to_port_name;
+
+ ObserveProxyAckEventData ack;
+ ack.last_sequence_num = port->next_sequence_num_to_send - 1;
+
+ delegate_->ForwardMessage(
+ event.proxy_node_name,
+ NewInternalMessage(event.proxy_port_name,
+ EventType::kObserveProxyAck,
+ ack));
+ } else {
+ // As a proxy ourselves, we don't know how to honor the ObserveProxy
+ // event or to populate the last_sequence_num field of ObserveProxyAck.
+        // After all, another port could be sending messages to our peer now
+ // that we've sent out our own ObserveProxy event. Instead, we will
+ // send an ObserveProxyAck indicating that the ObserveProxy event
+ // should be re-sent (last_sequence_num set to kInvalidSequenceNum).
+ // However, this has to be done after we are removed as a proxy.
+ // Otherwise, we might just find ourselves back here again, which
+ // would be akin to a busy loop.
+
+ DVLOG(2) << "Delaying ObserveProxyAck to "
+ << event.proxy_port_name << "@" << event.proxy_node_name;
+
+ ObserveProxyAckEventData ack;
+ ack.last_sequence_num = kInvalidSequenceNum;
+
+ port->send_on_proxy_removal.reset(
+ new std::pair<NodeName, ScopedMessage>(
+ event.proxy_node_name,
+ NewInternalMessage(event.proxy_port_name,
+ EventType::kObserveProxyAck,
+ ack)));
+ }
+ } else {
+ // Forward this event along to our peer. Eventually, it should find the
+ // port referring to the proxy.
+ delegate_->ForwardMessage(
+ port->peer_node_name,
+ NewInternalMessage(port->peer_port_name,
+ EventType::kObserveProxy,
+ event));
+ }
+ }
+ return OK;
+}
+
+int Node::OnObserveProxyAck(const PortName& port_name,
+ uint64_t last_sequence_num) {
+ DVLOG(2) << "ObserveProxyAck at " << port_name << "@" << name_
+ << " (last_sequence_num=" << last_sequence_num << ")";
+
+ scoped_refptr<Port> port = GetPort(port_name);
+ if (!port)
+ return ERROR_PORT_UNKNOWN; // The port may have observed closure first, so
+ // this is not an "Oops".
+
+ {
+ base::AutoLock lock(port->lock);
+
+ if (port->state != Port::kProxying)
+ return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+
+ if (last_sequence_num == kInvalidSequenceNum) {
+ // Send again.
+ InitiateProxyRemoval(LockedPort(port.get()), port_name);
+ return OK;
+ }
+
+ // We can now remove this port once we have received and forwarded the last
+ // message addressed to this port.
+ port->remove_proxy_on_last_message = true;
+ port->last_sequence_num_to_receive = last_sequence_num;
+ }
+ TryRemoveProxy(PortRef(port_name, std::move(port)));
+ return OK;
+}
+
+int Node::OnObserveClosure(const PortName& port_name,
+ uint64_t last_sequence_num) {
+ // OK if the port doesn't exist, as it may have been closed already.
+ scoped_refptr<Port> port = GetPort(port_name);
+ if (!port)
+ return OK;
+
+ // This message tells the port that it should no longer expect more messages
+ // beyond last_sequence_num. This message is forwarded along until we reach
+ // the receiving end, and this message serves as an equivalent to
+ // ObserveProxyAck.
+
+ bool notify_delegate = false;
+ ObserveClosureEventData forwarded_data;
+ NodeName peer_node_name;
+ PortName peer_port_name;
+ bool try_remove_proxy = false;
+ {
+ base::AutoLock lock(port->lock);
+
+ port->peer_closed = true;
+ port->last_sequence_num_to_receive = last_sequence_num;
+
+ DVLOG(2) << "ObserveClosure at " << port_name << "@" << name_
+ << " (state=" << port->state << ") pointing to "
+ << port->peer_port_name << "@" << port->peer_node_name
+ << " (last_sequence_num=" << last_sequence_num << ")";
+
+ // We always forward ObserveClosure, even beyond the receiving port which
+ // cares about it. This ensures that any dead-end proxies beyond that port
+ // are notified to remove themselves.
+
+ if (port->state == Port::kReceiving) {
+ notify_delegate = true;
+
+ // When forwarding along the other half of the port cycle, this will only
+ // reach dead-end proxies. Tell them we've sent our last message so they
+ // can go away.
+ //
+ // TODO: Repurposing ObserveClosure for this has the desired result but
+ // may be semantically confusing since the forwarding port is not actually
+ // closed. Consider replacing this with a new event type.
+ forwarded_data.last_sequence_num = port->next_sequence_num_to_send - 1;
+ } else {
+ // We haven't yet reached the receiving peer of the closed port, so
+ // forward the message along as-is.
+ forwarded_data.last_sequence_num = last_sequence_num;
+
+ // See about removing the port if it is a proxy as our peer won't be able
+ // to participate in proxy removal.
+ port->remove_proxy_on_last_message = true;
+ if (port->state == Port::kProxying)
+ try_remove_proxy = true;
+ }
+
+ DVLOG(2) << "Forwarding ObserveClosure from "
+ << port_name << "@" << name_ << " to peer "
+ << port->peer_port_name << "@" << port->peer_node_name
+ << " (last_sequence_num=" << forwarded_data.last_sequence_num
+ << ")";
+
+ peer_node_name = port->peer_node_name;
+ peer_port_name = port->peer_port_name;
+ }
+ if (try_remove_proxy)
+ TryRemoveProxy(PortRef(port_name, port));
+
+ delegate_->ForwardMessage(
+ peer_node_name,
+ NewInternalMessage(peer_port_name, EventType::kObserveClosure,
+ forwarded_data));
+
+ if (notify_delegate) {
+ PortRef port_ref(port_name, std::move(port));
+ delegate_->PortStatusChanged(port_ref);
+ }
+ return OK;
+}
+
+int Node::OnMergePort(const PortName& port_name,
+ const MergePortEventData& event) {
+ scoped_refptr<Port> port = GetPort(port_name);
+
+ DVLOG(1) << "MergePort at " << port_name << "@" << name_ << " (state="
+ << (port ? port->state : -1) << ") merging with proxy "
+ << event.new_port_name
+ << "@" << name_ << " pointing to "
+ << event.new_port_descriptor.peer_port_name << "@"
+ << event.new_port_descriptor.peer_node_name << " referred by "
+ << event.new_port_descriptor.referring_port_name << "@"
+ << event.new_port_descriptor.referring_node_name;
+
+ bool close_target_port = false;
+ bool close_new_port = false;
+
+ // Accept the new port. This is now the receiving end of the other port cycle
+ // to be merged with ours.
+ int rv = AcceptPort(event.new_port_name, event.new_port_descriptor);
+ if (rv != OK) {
+ close_target_port = true;
+ } else if (port) {
+ // BeginProxying_Locked may call MaybeRemoveProxy_Locked, which in turn
+ // needs to hold |ports_lock_|. We also acquire multiple port locks within.
+ base::AutoLock ports_lock(ports_lock_);
+ base::AutoLock lock(port->lock);
+
+ if (port->state != Port::kReceiving) {
+ close_new_port = true;
+ } else {
+ scoped_refptr<Port> new_port = GetPort_Locked(event.new_port_name);
+ base::AutoLock new_port_lock(new_port->lock);
+ DCHECK(new_port->state == Port::kReceiving);
+
+ // Both ports are locked. Now all we have to do is swap their peer
+ // information and set them up as proxies.
+
+ PortRef port0_ref(port_name, port);
+ PortRef port1_ref(event.new_port_name, new_port);
+ int rv = MergePorts_Locked(port0_ref, port1_ref);
+ if (rv == OK)
+ return rv;
+
+ close_new_port = true;
+ close_target_port = true;
+ }
+ } else {
+ close_new_port = true;
+ }
+
+ if (close_target_port) {
+ PortRef target_port;
+ rv = GetPort(port_name, &target_port);
+ DCHECK(rv == OK);
+
+ ClosePort(target_port);
+ }
+
+ if (close_new_port) {
+ PortRef new_port;
+ rv = GetPort(event.new_port_name, &new_port);
+ DCHECK(rv == OK);
+
+ ClosePort(new_port);
+ }
+
+ return ERROR_PORT_STATE_UNEXPECTED;
+}
+
+int Node::AddPortWithName(const PortName& port_name, scoped_refptr<Port> port) {
+ base::AutoLock lock(ports_lock_);
+
+ if (!ports_.insert(std::make_pair(port_name, std::move(port))).second)
+ return OOPS(ERROR_PORT_EXISTS); // Suggests a bad UUID generator.
+
+ DVLOG(2) << "Created port " << port_name << "@" << name_;
+ return OK;
+}
+
+void Node::ErasePort(const PortName& port_name) {
+ base::AutoLock lock(ports_lock_);
+ ErasePort_Locked(port_name);
+}
+
+void Node::ErasePort_Locked(const PortName& port_name) {
+ ports_lock_.AssertAcquired();
+ ports_.erase(port_name);
+ DVLOG(2) << "Deleted port " << port_name << "@" << name_;
+}
+
+scoped_refptr<Port> Node::GetPort(const PortName& port_name) {
+ base::AutoLock lock(ports_lock_);
+ return GetPort_Locked(port_name);
+}
+
+scoped_refptr<Port> Node::GetPort_Locked(const PortName& port_name) {
+ ports_lock_.AssertAcquired();
+ auto iter = ports_.find(port_name);
+ if (iter == ports_.end())
+ return nullptr;
+
+#if (defined(OS_ANDROID) || defined(__ANDROID__)) && defined(ARCH_CPU_ARM64)
+ // Workaround for https://crbug.com/665869.
+ base::subtle::MemoryBarrier();
+#endif
+
+ return iter->second;
+}
+
+int Node::SendMessageInternal(const PortRef& port_ref, ScopedMessage* message) {
+ ScopedMessage& m = *message;
+ for (size_t i = 0; i < m->num_ports(); ++i) {
+ if (m->ports()[i] == port_ref.name())
+ return ERROR_PORT_CANNOT_SEND_SELF;
+ }
+
+ Port* port = port_ref.port();
+ NodeName peer_node_name;
+ {
+ // We must acquire |ports_lock_| before grabbing any port locks, because
+ // WillSendMessage_Locked may need to lock multiple ports out of order.
+ base::AutoLock ports_lock(ports_lock_);
+ base::AutoLock lock(port->lock);
+
+ if (port->state != Port::kReceiving)
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ if (port->peer_closed)
+ return ERROR_PORT_PEER_CLOSED;
+
+ int rv = WillSendMessage_Locked(LockedPort(port), port_ref.name(), m.get());
+ if (rv != OK)
+ return rv;
+
+ // Beyond this point there's no sense in returning anything but OK. Even if
+ // message forwarding or acceptance fails, there's nothing the embedder can
+ // do to recover. Assume that failure beyond this point must be treated as a
+ // transport failure.
+
+ peer_node_name = port->peer_node_name;
+ }
+
+ if (peer_node_name != name_) {
+ delegate_->ForwardMessage(peer_node_name, std::move(m));
+ return OK;
+ }
+
+ int rv = AcceptMessage(std::move(m));
+ if (rv != OK) {
+ // See comment above for why we don't return an error in this case.
+ DVLOG(2) << "AcceptMessage failed: " << rv;
+ }
+
+ return OK;
+}
+
+int Node::MergePorts_Locked(const PortRef& port0_ref,
+ const PortRef& port1_ref) {
+ Port* port0 = port0_ref.port();
+ Port* port1 = port1_ref.port();
+
+ ports_lock_.AssertAcquired();
+ port0->lock.AssertAcquired();
+ port1->lock.AssertAcquired();
+
+ CHECK(port0->state == Port::kReceiving);
+ CHECK(port1->state == Port::kReceiving);
+
+ // Ports cannot be merged with their own receiving peer!
+ if (port0->peer_node_name == name_ &&
+ port0->peer_port_name == port1_ref.name())
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ if (port1->peer_node_name == name_ &&
+ port1->peer_port_name == port0_ref.name())
+ return ERROR_PORT_STATE_UNEXPECTED;
+
+ // Only merge if both ports have never sent a message.
+ if (port0->next_sequence_num_to_send == kInitialSequenceNum &&
+ port1->next_sequence_num_to_send == kInitialSequenceNum) {
+ // Swap the ports' peer information and switch them both into buffering
+ // (eventually proxying) mode.
+
+ std::swap(port0->peer_node_name, port1->peer_node_name);
+ std::swap(port0->peer_port_name, port1->peer_port_name);
+
+ port0->state = Port::kBuffering;
+ if (port0->peer_closed)
+ port0->remove_proxy_on_last_message = true;
+
+ port1->state = Port::kBuffering;
+ if (port1->peer_closed)
+ port1->remove_proxy_on_last_message = true;
+
+ int rv1 = BeginProxying_Locked(LockedPort(port0), port0_ref.name());
+ int rv2 = BeginProxying_Locked(LockedPort(port1), port1_ref.name());
+
+ if (rv1 == OK && rv2 == OK) {
+ // If either merged port had a closed peer, its new peer needs to be
+ // informed of this.
+ if (port1->peer_closed) {
+ ObserveClosureEventData data;
+ data.last_sequence_num = port0->last_sequence_num_to_receive;
+ delegate_->ForwardMessage(
+ port0->peer_node_name,
+ NewInternalMessage(port0->peer_port_name,
+ EventType::kObserveClosure, data));
+ }
+
+ if (port0->peer_closed) {
+ ObserveClosureEventData data;
+ data.last_sequence_num = port1->last_sequence_num_to_receive;
+ delegate_->ForwardMessage(
+ port1->peer_node_name,
+ NewInternalMessage(port1->peer_port_name,
+ EventType::kObserveClosure, data));
+ }
+
+ return OK;
+ }
+
+ // If either proxy failed to initialize (e.g. had undeliverable messages
+ // or ended up in a bad state somehow), we keep the system in a consistent
+ // state by undoing the peer swap.
+ std::swap(port0->peer_node_name, port1->peer_node_name);
+ std::swap(port0->peer_port_name, port1->peer_port_name);
+ port0->remove_proxy_on_last_message = false;
+ port1->remove_proxy_on_last_message = false;
+ port0->state = Port::kReceiving;
+ port1->state = Port::kReceiving;
+ }
+
+ return ERROR_PORT_STATE_UNEXPECTED;
+}
+
+void Node::WillSendPort(const LockedPort& port,
+ const NodeName& to_node_name,
+ PortName* port_name,
+ PortDescriptor* port_descriptor) {
+ port->lock.AssertAcquired();
+
+ PortName local_port_name = *port_name;
+
+ PortName new_port_name;
+ delegate_->GenerateRandomPortName(&new_port_name);
+
+ // Make sure we don't send messages to the new peer until after we know it
+ // exists. In the meantime, just buffer messages locally.
+ DCHECK(port->state == Port::kReceiving);
+ port->state = Port::kBuffering;
+
+ // If we already know our peer is closed, we already know this proxy can
+ // be removed once it receives and forwards its last expected message.
+ if (port->peer_closed)
+ port->remove_proxy_on_last_message = true;
+
+ *port_name = new_port_name;
+
+ port_descriptor->peer_node_name = port->peer_node_name;
+ port_descriptor->peer_port_name = port->peer_port_name;
+ port_descriptor->referring_node_name = name_;
+ port_descriptor->referring_port_name = local_port_name;
+ port_descriptor->next_sequence_num_to_send = port->next_sequence_num_to_send;
+ port_descriptor->next_sequence_num_to_receive =
+ port->message_queue.next_sequence_num();
+ port_descriptor->last_sequence_num_to_receive =
+ port->last_sequence_num_to_receive;
+ port_descriptor->peer_closed = port->peer_closed;
+ memset(port_descriptor->padding, 0, sizeof(port_descriptor->padding));
+
+ // Configure the local port to point to the new port.
+ port->peer_node_name = to_node_name;
+ port->peer_port_name = new_port_name;
+}
+
+int Node::AcceptPort(const PortName& port_name,
+ const PortDescriptor& port_descriptor) {
+ scoped_refptr<Port> port = make_scoped_refptr(
+ new Port(port_descriptor.next_sequence_num_to_send,
+ port_descriptor.next_sequence_num_to_receive));
+ port->state = Port::kReceiving;
+ port->peer_node_name = port_descriptor.peer_node_name;
+ port->peer_port_name = port_descriptor.peer_port_name;
+ port->last_sequence_num_to_receive =
+ port_descriptor.last_sequence_num_to_receive;
+ port->peer_closed = port_descriptor.peer_closed;
+
+ DVLOG(2) << "Accepting port " << port_name << " [peer_closed="
+ << port->peer_closed << "; last_sequence_num_to_receive="
+ << port->last_sequence_num_to_receive << "]";
+
+ // A newly accepted port is not signalable until the message referencing the
+ // new port finds its way to the consumer (see GetMessage).
+ port->message_queue.set_signalable(false);
+
+ int rv = AddPortWithName(port_name, std::move(port));
+ if (rv != OK)
+ return rv;
+
+ // Allow referring port to forward messages.
+ delegate_->ForwardMessage(
+ port_descriptor.referring_node_name,
+ NewInternalMessage(port_descriptor.referring_port_name,
+ EventType::kPortAccepted));
+ return OK;
+}
+
+int Node::WillSendMessage_Locked(const LockedPort& port,
+ const PortName& port_name,
+ Message* message) {
+ ports_lock_.AssertAcquired();
+ port->lock.AssertAcquired();
+
+ DCHECK(message);
+
+ // Messages may already have a sequence number if they're being forwarded
+ // by a proxy. Otherwise, use the next outgoing sequence number.
+ uint64_t* sequence_num =
+ &GetMutableEventData<UserEventData>(message)->sequence_num;
+ if (*sequence_num == 0)
+ *sequence_num = port->next_sequence_num_to_send++;
+
+#if DCHECK_IS_ON()
+ std::ostringstream ports_buf;
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ if (i > 0)
+ ports_buf << ",";
+ ports_buf << message->ports()[i];
+ }
+#endif
+
+ if (message->num_ports() > 0) {
+ // Note: Another thread could be trying to send the same ports, so we need
+ // to ensure that they are ours to send before we mutate their state.
+
+ std::vector<scoped_refptr<Port>> ports;
+ ports.resize(message->num_ports());
+
+ {
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ ports[i] = GetPort_Locked(message->ports()[i]);
+ DCHECK(ports[i]);
+
+ ports[i]->lock.Acquire();
+ int error = OK;
+ if (ports[i]->state != Port::kReceiving)
+ error = ERROR_PORT_STATE_UNEXPECTED;
+ else if (message->ports()[i] == port->peer_port_name)
+ error = ERROR_PORT_CANNOT_SEND_PEER;
+
+ if (error != OK) {
+          // Oops, we cannot send this port; release the locks held so far.
+          for (size_t j = 0; j <= i; ++j)
+            ports[j]->lock.Release();
+ // Backpedal on the sequence number.
+ port->next_sequence_num_to_send--;
+ return error;
+ }
+ }
+ }
+
+ PortDescriptor* port_descriptors =
+ GetMutablePortDescriptors(GetMutableEventData<UserEventData>(message));
+
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ WillSendPort(LockedPort(ports[i].get()),
+ port->peer_node_name,
+ message->mutable_ports() + i,
+ port_descriptors + i);
+ }
+
+ for (size_t i = 0; i < message->num_ports(); ++i)
+ ports[i]->lock.Release();
+ }
+
+#if DCHECK_IS_ON()
+ DVLOG(4) << "Sending message "
+ << GetEventData<UserEventData>(*message)->sequence_num
+ << " [ports=" << ports_buf.str() << "]"
+ << " from " << port_name << "@" << name_
+ << " to " << port->peer_port_name << "@" << port->peer_node_name;
+#endif
+
+ GetMutableEventHeader(message)->port_name = port->peer_port_name;
+ return OK;
+}
+
+int Node::BeginProxying_Locked(const LockedPort& port,
+ const PortName& port_name) {
+ ports_lock_.AssertAcquired();
+ port->lock.AssertAcquired();
+
+ if (port->state != Port::kBuffering)
+ return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+
+ port->state = Port::kProxying;
+
+ int rv = ForwardMessages_Locked(LockedPort(port), port_name);
+ if (rv != OK)
+ return rv;
+
+ // We may have observed closure while buffering. In that case, we can advance
+ // to removing the proxy without sending out an ObserveProxy message. We
+ // already know the last expected message, etc.
+
+ if (port->remove_proxy_on_last_message) {
+ MaybeRemoveProxy_Locked(LockedPort(port), port_name);
+
+ // Make sure we propagate closure to our current peer.
+ ObserveClosureEventData data;
+ data.last_sequence_num = port->last_sequence_num_to_receive;
+ delegate_->ForwardMessage(
+ port->peer_node_name,
+ NewInternalMessage(port->peer_port_name,
+ EventType::kObserveClosure, data));
+ } else {
+ InitiateProxyRemoval(LockedPort(port), port_name);
+ }
+
+ return OK;
+}
+
+int Node::BeginProxying(PortRef port_ref) {
+ Port* port = port_ref.port();
+ {
+ base::AutoLock ports_lock(ports_lock_);
+ base::AutoLock lock(port->lock);
+
+ if (port->state != Port::kBuffering)
+ return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+
+ port->state = Port::kProxying;
+
+ int rv = ForwardMessages_Locked(LockedPort(port), port_ref.name());
+ if (rv != OK)
+ return rv;
+ }
+
+ bool should_remove;
+ NodeName peer_node_name;
+ ScopedMessage closure_message;
+ {
+ base::AutoLock lock(port->lock);
+ if (port->state != Port::kProxying)
+ return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+
+ should_remove = port->remove_proxy_on_last_message;
+ if (should_remove) {
+ // Make sure we propagate closure to our current peer.
+ ObserveClosureEventData data;
+ data.last_sequence_num = port->last_sequence_num_to_receive;
+ peer_node_name = port->peer_node_name;
+ closure_message = NewInternalMessage(port->peer_port_name,
+ EventType::kObserveClosure, data);
+ } else {
+ InitiateProxyRemoval(LockedPort(port), port_ref.name());
+ }
+ }
+
+ if (should_remove) {
+ TryRemoveProxy(port_ref);
+ delegate_->ForwardMessage(peer_node_name, std::move(closure_message));
+ }
+
+ return OK;
+}
+
+int Node::ForwardMessages_Locked(const LockedPort& port,
+ const PortName &port_name) {
+ ports_lock_.AssertAcquired();
+ port->lock.AssertAcquired();
+
+ for (;;) {
+ ScopedMessage message;
+ port->message_queue.GetNextMessage(&message, nullptr);
+ if (!message)
+ break;
+
+ int rv = WillSendMessage_Locked(LockedPort(port), port_name, message.get());
+ if (rv != OK)
+ return rv;
+
+ delegate_->ForwardMessage(port->peer_node_name, std::move(message));
+ }
+ return OK;
+}
+
+void Node::InitiateProxyRemoval(const LockedPort& port,
+ const PortName& port_name) {
+ port->lock.AssertAcquired();
+
+  // To remove this proxy port, we start by notifying the connected graph that
+  // we are a proxy. This allows whatever port is referencing this one to skip
+  // it. Eventually, this port will receive ObserveProxyAck (or ObserveClosure
+  // if the peer was closed in the meantime).
+
+ ObserveProxyEventData data;
+ data.proxy_node_name = name_;
+ data.proxy_port_name = port_name;
+ data.proxy_to_node_name = port->peer_node_name;
+ data.proxy_to_port_name = port->peer_port_name;
+
+ delegate_->ForwardMessage(
+ port->peer_node_name,
+ NewInternalMessage(port->peer_port_name, EventType::kObserveProxy, data));
+}
+
+void Node::MaybeRemoveProxy_Locked(const LockedPort& port,
+ const PortName& port_name) {
+  // |ports_lock_| must be held so we can potentially call ErasePort_Locked().
+ ports_lock_.AssertAcquired();
+ port->lock.AssertAcquired();
+
+ DCHECK(port->state == Port::kProxying);
+
+ // Make sure we have seen ObserveProxyAck before removing the port.
+ if (!port->remove_proxy_on_last_message)
+ return;
+
+ if (!CanAcceptMoreMessages(port.get())) {
+ // This proxy port is done. We can now remove it!
+ ErasePort_Locked(port_name);
+
+ if (port->send_on_proxy_removal) {
+ NodeName to_node = port->send_on_proxy_removal->first;
+ ScopedMessage& message = port->send_on_proxy_removal->second;
+
+ delegate_->ForwardMessage(to_node, std::move(message));
+ port->send_on_proxy_removal.reset();
+ }
+ } else {
+ DVLOG(2) << "Cannot remove port " << port_name << "@" << name_
+ << " now; waiting for more messages";
+ }
+}
+
+void Node::TryRemoveProxy(PortRef port_ref) {
+ Port* port = port_ref.port();
+ bool should_erase = false;
+ ScopedMessage msg;
+ NodeName to_node;
+ {
+ base::AutoLock lock(port->lock);
+
+ // Port already removed. Nothing to do.
+ if (port->state == Port::kClosed)
+ return;
+
+ DCHECK(port->state == Port::kProxying);
+
+ // Make sure we have seen ObserveProxyAck before removing the port.
+ if (!port->remove_proxy_on_last_message)
+ return;
+
+ if (!CanAcceptMoreMessages(port)) {
+ // This proxy port is done. We can now remove it!
+ should_erase = true;
+
+ if (port->send_on_proxy_removal) {
+ to_node = port->send_on_proxy_removal->first;
+ msg = std::move(port->send_on_proxy_removal->second);
+ port->send_on_proxy_removal.reset();
+ }
+ } else {
+ DVLOG(2) << "Cannot remove port " << port_ref.name() << "@" << name_
+ << " now; waiting for more messages";
+ }
+ }
+
+ if (should_erase)
+ ErasePort(port_ref.name());
+
+ if (msg)
+ delegate_->ForwardMessage(to_node, std::move(msg));
+}
+
+void Node::DestroyAllPortsWithPeer(const NodeName& node_name,
+ const PortName& port_name) {
+ // Wipes out all ports whose peer node matches |node_name| and whose peer port
+ // matches |port_name|. If |port_name| is |kInvalidPortName|, only the peer
+ // node is matched.
+
+ std::vector<PortRef> ports_to_notify;
+ std::vector<PortName> dead_proxies_to_broadcast;
+ std::deque<PortName> referenced_port_names;
+
+ {
+ base::AutoLock ports_lock(ports_lock_);
+
+ for (auto iter = ports_.begin(); iter != ports_.end(); ++iter) {
+ Port* port = iter->second.get();
+ {
+ base::AutoLock port_lock(port->lock);
+
+ if (port->peer_node_name == node_name &&
+ (port_name == kInvalidPortName ||
+ port->peer_port_name == port_name)) {
+ if (!port->peer_closed) {
+ // Treat this as immediate peer closure. It's an exceptional
+ // condition akin to a broken pipe, so we don't care about losing
+ // messages.
+
+ port->peer_closed = true;
+ port->last_sequence_num_to_receive =
+ port->message_queue.next_sequence_num() - 1;
+
+ if (port->state == Port::kReceiving)
+ ports_to_notify.push_back(PortRef(iter->first, port));
+ }
+
+ // We don't expect to forward any further messages, and we don't
+ // expect to receive a Port{Accepted,Rejected} event. Because we're
+ // a proxy with no active peer, we cannot use the normal proxy removal
+ // procedure of forward-propagating an ObserveProxy. Instead we
+ // broadcast our own death so it can be back-propagated. This is
+ // inefficient but rare.
+ if (port->state != Port::kReceiving) {
+ dead_proxies_to_broadcast.push_back(iter->first);
+ iter->second->message_queue.GetReferencedPorts(
+ &referenced_port_names);
+ }
+ }
+ }
+ }
+
+ for (const auto& proxy_name : dead_proxies_to_broadcast) {
+ ports_.erase(proxy_name);
+ DVLOG(2) << "Forcibly deleted port " << proxy_name << "@" << name_;
+ }
+ }
+
+  // Wake up any receiving ports that have just observed simulated peer closure.
+ for (const auto& port : ports_to_notify)
+ delegate_->PortStatusChanged(port);
+
+ for (const auto& proxy_name : dead_proxies_to_broadcast) {
+ // Broadcast an event signifying that this proxy is no longer functioning.
+ ObserveProxyEventData event;
+ event.proxy_node_name = name_;
+ event.proxy_port_name = proxy_name;
+ event.proxy_to_node_name = kInvalidNodeName;
+ event.proxy_to_port_name = kInvalidPortName;
+ delegate_->BroadcastMessage(NewInternalMessage(
+ kInvalidPortName, EventType::kObserveProxy, event));
+
+    // Also process the death locally, since the port that points to this
+    // closed one could be on the current node.
+ // Note: Although this is recursive, only a single port is involved which
+ // limits the expected branching to 1.
+ DestroyAllPortsWithPeer(name_, proxy_name);
+ }
+
+ // Close any ports referenced by the closed proxies.
+ for (const auto& name : referenced_port_names) {
+ PortRef ref;
+ if (GetPort(name, &ref) == OK)
+ ClosePort(ref);
+ }
+}
+
+ScopedMessage Node::NewInternalMessage_Helper(const PortName& port_name,
+ const EventType& type,
+ const void* data,
+ size_t num_data_bytes) {
+ ScopedMessage message;
+ delegate_->AllocMessage(sizeof(EventHeader) + num_data_bytes, &message);
+
+ EventHeader* header = GetMutableEventHeader(message.get());
+ header->port_name = port_name;
+ header->type = type;
+ header->padding = 0;
+
+ if (num_data_bytes)
+ memcpy(header + 1, data, num_data_bytes);
+
+ return message;
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/node.h b/mojo/edk/system/ports/node.h
new file mode 100644
index 0000000000..55b8d27547
--- /dev/null
+++ b/mojo/edk/system/ports/node.h
@@ -0,0 +1,228 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_NODE_H_
+#define MOJO_EDK_SYSTEM_PORTS_NODE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <queue>
+#include <unordered_map>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/system/ports/event.h"
+#include "mojo/edk/system/ports/message.h"
+#include "mojo/edk/system/ports/name.h"
+#include "mojo/edk/system/ports/port.h"
+#include "mojo/edk/system/ports/port_ref.h"
+#include "mojo/edk/system/ports/user_data.h"
+
+#undef SendMessage // Gah, windows
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+enum : int {
+ OK = 0,
+ ERROR_PORT_UNKNOWN = -10,
+ ERROR_PORT_EXISTS = -11,
+ ERROR_PORT_STATE_UNEXPECTED = -12,
+ ERROR_PORT_CANNOT_SEND_SELF = -13,
+ ERROR_PORT_PEER_CLOSED = -14,
+ ERROR_PORT_CANNOT_SEND_PEER = -15,
+ ERROR_NOT_IMPLEMENTED = -100,
+};
+
+struct PortStatus {
+ bool has_messages;
+ bool receiving_messages;
+ bool peer_closed;
+};
+
+class MessageFilter;
+class NodeDelegate;
+
+class Node {
+ public:
+ enum class ShutdownPolicy {
+ DONT_ALLOW_LOCAL_PORTS,
+ ALLOW_LOCAL_PORTS,
+ };
+
+ // Does not take ownership of the delegate.
+ Node(const NodeName& name, NodeDelegate* delegate);
+ ~Node();
+
+ // Returns true iff there are no open ports referring to another node or ports
+ // in the process of being transferred from this node to another. If this
+ // returns false, then to ensure clean shutdown, it is necessary to keep the
+ // node alive and continue routing messages to it via AcceptMessage. This
+ // method may be called again after AcceptMessage to check if the Node is now
+ // ready to be destroyed.
+ //
+ // If |policy| is set to |ShutdownPolicy::ALLOW_LOCAL_PORTS|, this will return
+ // |true| even if some ports remain alive, as long as none of them are proxies
+ // to another node.
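+  //
+  // A rough sketch of the shutdown sequence this implies (the message-pumping
+  // helper here is hypothetical and embedder-specific):
+  //
+  //   while (!node.CanShutdownCleanly())
+  //     PumpPendingMessagesInto(node);  // i.e. keep calling AcceptMessage().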
+ bool CanShutdownCleanly(
+ ShutdownPolicy policy = ShutdownPolicy::DONT_ALLOW_LOCAL_PORTS);
+
+  // Looks up the named port.
+ int GetPort(const PortName& port_name, PortRef* port_ref);
+
+ // Creates a port on this node. Before the port can be used, it must be
+ // initialized using InitializePort. This method is useful for bootstrapping
+ // a connection between two nodes. Generally, ports are created using
+ // CreatePortPair instead.
+ int CreateUninitializedPort(PortRef* port_ref);
+
+ // Initializes a newly created port.
+ int InitializePort(const PortRef& port_ref,
+ const NodeName& peer_node_name,
+ const PortName& peer_port_name);
+
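+  // Together, CreateUninitializedPort() and InitializePort() can bootstrap a
+  // pipe spanning two nodes, as the unit tests do (sketch; error handling
+  // omitted, and the node/name variables are illustrative):
+  //
+  //   PortRef p0, p1;
+  //   node0.CreateUninitializedPort(&p0);
+  //   node1.CreateUninitializedPort(&p1);
+  //   node0.InitializePort(p0, node1_name, p1.name());
+  //   node1.InitializePort(p1, node0_name, p0.name());
+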
+ // Generates a new connected pair of ports bound to this node. These ports
+ // are initialized and ready to go.
+ int CreatePortPair(PortRef* port0_ref, PortRef* port1_ref);
+
+ // User data associated with the port.
+ int SetUserData(const PortRef& port_ref, scoped_refptr<UserData> user_data);
+ int GetUserData(const PortRef& port_ref,
+ scoped_refptr<UserData>* user_data);
+
+ // Prevents further messages from being sent from this port or delivered to
+ // this port. The port is removed, and the port's peer is notified of the
+ // closure after it has consumed all pending messages.
+ int ClosePort(const PortRef& port_ref);
+
+ // Returns the current status of the port.
+ int GetStatus(const PortRef& port_ref, PortStatus* port_status);
+
+ // Returns the next available message on the specified port or returns a null
+ // message if there are none available. Returns ERROR_PORT_PEER_CLOSED to
+ // indicate that this port's peer has closed. In such cases GetMessage may
+ // be called until it yields a null message, indicating that no more messages
+ // may be read from the port.
+ //
+ // If |filter| is non-null, the next available message is returned only if it
+ // is matched by the filter. If the provided filter does not match the next
+ // available message, GetMessage() behaves as if there is no message
+ // available. Ownership of |filter| is not taken, and it must outlive the
+ // extent of this call.
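+  //
+  // A minimal drain loop looks something like this (sketch; the handler is
+  // hypothetical):
+  //
+  //   ScopedMessage message;
+  //   while (node.GetMessage(port, &message, nullptr) == OK && message)
+  //     HandleMessage(std::move(message));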
+ int GetMessage(const PortRef& port_ref,
+ ScopedMessage* message,
+ MessageFilter* filter);
+
+ // Sends a message from the specified port to its peer. Note that the message
+ // notification may arrive synchronously (via PortStatusChanged() on the
+ // delegate) if the peer is local to this Node.
+ int SendMessage(const PortRef& port_ref, ScopedMessage message);
+
+  // The counterpart to NodeDelegate::ForwardMessage: the embedder calls this
+  // to deliver a message that has been routed to this node.
+ int AcceptMessage(ScopedMessage message);
+
+ // Called to merge two ports with each other. If you have two independent
+ // port pairs A <=> B and C <=> D, the net result of merging B and C is a
+ // single connected port pair A <=> D.
+ //
+ // Note that the behavior of this operation is undefined if either port to be
+ // merged (B or C above) has ever been read from or written to directly, and
+ // this must ONLY be called on one side of the merge, though it doesn't matter
+ // which side.
+ //
+ // It is safe for the non-merged peers (A and D above) to be transferred,
+ // closed, and/or written to before, during, or after the merge.
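+  //
+  // For example (sketch mirroring the MergePorts unit test): with A-B created
+  // on |node0| and C-D created on |node1|, the following fuses the two pipes
+  // into a single pair A <=> D:
+  //
+  //   node0.MergePorts(B, node1_name, C.name());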
+ int MergePorts(const PortRef& port_ref,
+ const NodeName& destination_node_name,
+ const PortName& destination_port_name);
+
+ // Like above but merges two ports local to this node. Because both ports are
+ // local this can also verify that neither port has been written to before the
+ // merge. If this fails for any reason, both ports are closed. Otherwise OK
+ // is returned and the ports' receiving peers are connected to each other.
+ int MergeLocalPorts(const PortRef& port0_ref, const PortRef& port1_ref);
+
+  // Called to inform this node that communication with another node has been
+  // lost indefinitely. This triggers cleanup of ports on this node whose peers
+  // live on the lost node.
+ int LostConnectionToNode(const NodeName& node_name);
+
+ private:
+ class LockedPort;
+
+ // Note: Functions that end with _Locked require |ports_lock_| to be held
+ // before calling.
+ int OnUserMessage(ScopedMessage message);
+ int OnPortAccepted(const PortName& port_name);
+ int OnObserveProxy(const PortName& port_name,
+ const ObserveProxyEventData& event);
+ int OnObserveProxyAck(const PortName& port_name, uint64_t last_sequence_num);
+ int OnObserveClosure(const PortName& port_name, uint64_t last_sequence_num);
+ int OnMergePort(const PortName& port_name, const MergePortEventData& event);
+
+ int AddPortWithName(const PortName& port_name, scoped_refptr<Port> port);
+ void ErasePort(const PortName& port_name);
+ void ErasePort_Locked(const PortName& port_name);
+ scoped_refptr<Port> GetPort(const PortName& port_name);
+ scoped_refptr<Port> GetPort_Locked(const PortName& port_name);
+
+ int SendMessageInternal(const PortRef& port_ref, ScopedMessage* message);
+ int MergePorts_Locked(const PortRef& port0_ref, const PortRef& port1_ref);
+ void WillSendPort(const LockedPort& port,
+ const NodeName& to_node_name,
+ PortName* port_name,
+ PortDescriptor* port_descriptor);
+ int AcceptPort(const PortName& port_name,
+ const PortDescriptor& port_descriptor);
+
+ int WillSendMessage_Locked(const LockedPort& port,
+ const PortName& port_name,
+ Message* message);
+ int BeginProxying_Locked(const LockedPort& port, const PortName& port_name);
+ int BeginProxying(PortRef port_ref);
+ int ForwardMessages_Locked(const LockedPort& port, const PortName& port_name);
+ void InitiateProxyRemoval(const LockedPort& port, const PortName& port_name);
+ void MaybeRemoveProxy_Locked(const LockedPort& port,
+ const PortName& port_name);
+ void TryRemoveProxy(PortRef port_ref);
+ void DestroyAllPortsWithPeer(const NodeName& node_name,
+ const PortName& port_name);
+
+ ScopedMessage NewInternalMessage_Helper(const PortName& port_name,
+ const EventType& type,
+ const void* data,
+ size_t num_data_bytes);
+
+ ScopedMessage NewInternalMessage(const PortName& port_name,
+ const EventType& type) {
+ return NewInternalMessage_Helper(port_name, type, nullptr, 0);
+ }
+
+ template <typename EventData>
+ ScopedMessage NewInternalMessage(const PortName& port_name,
+ const EventType& type,
+ const EventData& data) {
+ return NewInternalMessage_Helper(port_name, type, &data, sizeof(data));
+ }
+
+ const NodeName name_;
+ NodeDelegate* const delegate_;
+
+ // Guards |ports_| as well as any operation which needs to hold multiple port
+ // locks simultaneously. Usage of this is subtle: it must NEVER be acquired
+ // after a Port lock is acquired, and it must ALWAYS be acquired before
+ // calling WillSendMessage_Locked or ForwardMessages_Locked.
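+  //
+  // The correct acquisition order when both locks are needed (as in
+  // BeginProxying()) is:
+  //
+  //   base::AutoLock ports_lock(ports_lock_);
+  //   base::AutoLock port_lock(port->lock);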
+ base::Lock ports_lock_;
+ std::unordered_map<PortName, scoped_refptr<Port>> ports_;
+
+ DISALLOW_COPY_AND_ASSIGN(Node);
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_NODE_H_
diff --git a/mojo/edk/system/ports/node_delegate.h b/mojo/edk/system/ports/node_delegate.h
new file mode 100644
index 0000000000..8547302a5e
--- /dev/null
+++ b/mojo/edk/system/ports/node_delegate.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_NODE_DELEGATE_H_
+#define MOJO_EDK_SYSTEM_PORTS_NODE_DELEGATE_H_
+
+#include <stddef.h>
+
+#include "mojo/edk/system/ports/message.h"
+#include "mojo/edk/system/ports/name.h"
+#include "mojo/edk/system/ports/port_ref.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+class NodeDelegate {
+ public:
+ virtual ~NodeDelegate() {}
+
+ // Port names should be difficult to guess.
+ virtual void GenerateRandomPortName(PortName* port_name) = 0;
+
+ // Allocate a message, including a header that can be used by the Node
+ // implementation. |num_header_bytes| will be aligned. The newly allocated
+ // memory need not be zero-filled.
+ virtual void AllocMessage(size_t num_header_bytes,
+ ScopedMessage* message) = 0;
+
+ // Forward a message asynchronously to the specified node. This method MUST
+ // NOT synchronously call any methods on Node.
+ virtual void ForwardMessage(const NodeName& node, ScopedMessage message) = 0;
+
+ // Broadcast a message to all nodes.
+ virtual void BroadcastMessage(ScopedMessage message) = 0;
+
+ // Indicates that the port's status has changed recently. Use Node::GetStatus
+ // to query the latest status of the port. Note, this event could be spurious
+ // if another thread is simultaneously modifying the status of the port.
+ virtual void PortStatusChanged(const PortRef& port_ref) = 0;
+};
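+
+// A minimal delegate skeleton (illustrative only; see TestNode in
+// ports_unittest.cc for a working implementation):
+//
+//   class MyDelegate : public NodeDelegate {
+//     void GenerateRandomPortName(PortName* port_name) override { ... }
+//     void AllocMessage(size_t num_header_bytes,
+//                       ScopedMessage* message) override { ... }
+//     void ForwardMessage(const NodeName& node,
+//                         ScopedMessage message) override { ... }
+//     void BroadcastMessage(ScopedMessage message) override { ... }
+//     void PortStatusChanged(const PortRef& port_ref) override { ... }
+//   };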
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_NODE_DELEGATE_H_
diff --git a/mojo/edk/system/ports/port.cc b/mojo/edk/system/ports/port.cc
new file mode 100644
index 0000000000..e4403aed78
--- /dev/null
+++ b/mojo/edk/system/ports/port.cc
@@ -0,0 +1,24 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/port.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+Port::Port(uint64_t next_sequence_num_to_send,
+ uint64_t next_sequence_num_to_receive)
+ : state(kUninitialized),
+ next_sequence_num_to_send(next_sequence_num_to_send),
+ last_sequence_num_to_receive(0),
+ message_queue(next_sequence_num_to_receive),
+ remove_proxy_on_last_message(false),
+ peer_closed(false) {}
+
+Port::~Port() {}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/port.h b/mojo/edk/system/ports/port.h
new file mode 100644
index 0000000000..ea53d43b5f
--- /dev/null
+++ b/mojo/edk/system/ports/port.h
@@ -0,0 +1,60 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_PORT_H_
+#define MOJO_EDK_SYSTEM_PORTS_PORT_H_
+
+#include <memory>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/system/ports/message_queue.h"
+#include "mojo/edk/system/ports/user_data.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+class Port : public base::RefCountedThreadSafe<Port> {
+ public:
+ enum State {
+ kUninitialized,
+ kReceiving,
+ kBuffering,
+ kProxying,
+ kClosed
+ };
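+
+  // Rough lifecycle as driven by Node (inferred from node.cc): a port is
+  // kUninitialized until InitializePort(), kReceiving while the local embedder
+  // can send and read messages on it, kBuffering while it is in transit to
+  // another node, kProxying once the transfer completes (until the proxy is
+  // removed), and kClosed after ClosePort().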
+
+ base::Lock lock;
+ State state;
+ NodeName peer_node_name;
+ PortName peer_port_name;
+ uint64_t next_sequence_num_to_send;
+ uint64_t last_sequence_num_to_receive;
+ MessageQueue message_queue;
+ std::unique_ptr<std::pair<NodeName, ScopedMessage>> send_on_proxy_removal;
+ scoped_refptr<UserData> user_data;
+ bool remove_proxy_on_last_message;
+ bool peer_closed;
+
+ Port(uint64_t next_sequence_num_to_send,
+ uint64_t next_sequence_num_to_receive);
+
+ private:
+ friend class base::RefCountedThreadSafe<Port>;
+
+ ~Port();
+
+ DISALLOW_COPY_AND_ASSIGN(Port);
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_PORT_H_
diff --git a/mojo/edk/system/ports/port_ref.cc b/mojo/edk/system/ports/port_ref.cc
new file mode 100644
index 0000000000..675754d488
--- /dev/null
+++ b/mojo/edk/system/ports/port_ref.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports/port_ref.h"
+
+#include "mojo/edk/system/ports/port.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+PortRef::~PortRef() {
+}
+
+PortRef::PortRef() {
+}
+
+PortRef::PortRef(const PortName& name, scoped_refptr<Port> port)
+ : name_(name), port_(std::move(port)) {}
+
+PortRef::PortRef(const PortRef& other)
+ : name_(other.name_), port_(other.port_) {
+}
+
+PortRef& PortRef::operator=(const PortRef& other) {
+ if (&other != this) {
+ name_ = other.name_;
+ port_ = other.port_;
+ }
+ return *this;
+}
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/port_ref.h b/mojo/edk/system/ports/port_ref.h
new file mode 100644
index 0000000000..59036c3869
--- /dev/null
+++ b/mojo/edk/system/ports/port_ref.h
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_PORT_REF_H_
+#define MOJO_EDK_SYSTEM_PORTS_PORT_REF_H_
+
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/system/ports/name.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
+class Port;
+class Node;
+
+class PortRef {
+ public:
+ ~PortRef();
+ PortRef();
+ PortRef(const PortName& name, scoped_refptr<Port> port);
+
+ PortRef(const PortRef& other);
+ PortRef& operator=(const PortRef& other);
+
+ const PortName& name() const { return name_; }
+
+ private:
+ friend class Node;
+ Port* port() const { return port_.get(); }
+
+ PortName name_;
+ scoped_refptr<Port> port_;
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_PORT_REF_H_
diff --git a/mojo/edk/system/ports/ports_unittest.cc b/mojo/edk/system/ports/ports_unittest.cc
new file mode 100644
index 0000000000..cb48b3eb2f
--- /dev/null
+++ b/mojo/edk/system/ports/ports_unittest.cc
@@ -0,0 +1,1478 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <map>
+#include <queue>
+#include <sstream>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/rand_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "mojo/edk/system/ports/event.h"
+#include "mojo/edk/system/ports/node.h"
+#include "mojo/edk/system/ports/node_delegate.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+namespace test {
+
+namespace {
+
+bool MessageEquals(const ScopedMessage& message, const base::StringPiece& s) {
+ return !strcmp(static_cast<const char*>(message->payload_bytes()), s.data());
+}
+
+class TestMessage : public Message {
+ public:
+ static ScopedMessage NewUserMessage(size_t num_payload_bytes,
+ size_t num_ports) {
+ return ScopedMessage(new TestMessage(num_payload_bytes, num_ports));
+ }
+
+ TestMessage(size_t num_payload_bytes, size_t num_ports)
+ : Message(num_payload_bytes, num_ports) {
+ start_ = new char[num_header_bytes_ + num_ports_bytes_ + num_payload_bytes];
+ InitializeUserMessageHeader(start_);
+ }
+
+ TestMessage(size_t num_header_bytes,
+ size_t num_payload_bytes,
+ size_t num_ports_bytes)
+ : Message(num_header_bytes,
+ num_payload_bytes,
+ num_ports_bytes) {
+ start_ = new char[num_header_bytes + num_payload_bytes + num_ports_bytes];
+ }
+
+ ~TestMessage() override {
+ delete[] start_;
+ }
+};
+
+class TestNode;
+
+class MessageRouter {
+ public:
+ virtual ~MessageRouter() {}
+
+ virtual void GeneratePortName(PortName* name) = 0;
+ virtual void ForwardMessage(TestNode* from_node,
+ const NodeName& node_name,
+ ScopedMessage message) = 0;
+ virtual void BroadcastMessage(TestNode* from_node, ScopedMessage message) = 0;
+};
+
+class TestNode : public NodeDelegate {
+ public:
+ explicit TestNode(uint64_t id)
+ : node_name_(id, 1),
+ node_(node_name_, this),
+ node_thread_(base::StringPrintf("Node %" PRIu64 " thread", id)),
+ messages_available_event_(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ idle_event_(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::SIGNALED) {
+ }
+
+ ~TestNode() override {
+ StopWhenIdle();
+ node_thread_.Stop();
+ }
+
+ const NodeName& name() const { return node_name_; }
+
+ // NOTE: Node is thread-safe.
+ Node& node() { return node_; }
+
+ base::WaitableEvent& idle_event() { return idle_event_; }
+
+ bool IsIdle() {
+ base::AutoLock lock(lock_);
+ return started_ && !dispatching_ &&
+ (incoming_messages_.empty() || (block_on_event_ && blocked_));
+ }
+
+ void BlockOnEvent(EventType type) {
+ base::AutoLock lock(lock_);
+ blocked_event_type_ = type;
+ block_on_event_ = true;
+ }
+
+ void Unblock() {
+ base::AutoLock lock(lock_);
+ block_on_event_ = false;
+ messages_available_event_.Signal();
+ }
+
+ void Start(MessageRouter* router) {
+ router_ = router;
+ node_thread_.Start();
+ node_thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&TestNode::ProcessMessages, base::Unretained(this)));
+ }
+
+ void StopWhenIdle() {
+ base::AutoLock lock(lock_);
+ should_quit_ = true;
+ messages_available_event_.Signal();
+ }
+
+ void WakeUp() { messages_available_event_.Signal(); }
+
+ int SendStringMessage(const PortRef& port, const std::string& s) {
+ size_t size = s.size() + 1;
+ ScopedMessage message = TestMessage::NewUserMessage(size, 0);
+ memcpy(message->mutable_payload_bytes(), s.data(), size);
+ return node_.SendMessage(port, std::move(message));
+ }
+
+ int SendStringMessageWithPort(const PortRef& port,
+ const std::string& s,
+ const PortName& sent_port_name) {
+ size_t size = s.size() + 1;
+ ScopedMessage message = TestMessage::NewUserMessage(size, 1);
+ memcpy(message->mutable_payload_bytes(), s.data(), size);
+ message->mutable_ports()[0] = sent_port_name;
+ return node_.SendMessage(port, std::move(message));
+ }
+
+ int SendStringMessageWithPort(const PortRef& port,
+ const std::string& s,
+ const PortRef& sent_port) {
+ return SendStringMessageWithPort(port, s, sent_port.name());
+ }
+
+ void set_drop_messages(bool value) {
+ base::AutoLock lock(lock_);
+ drop_messages_ = value;
+ }
+
+ void set_save_messages(bool value) {
+ base::AutoLock lock(lock_);
+ save_messages_ = value;
+ }
+
+ bool ReadMessage(const PortRef& port, ScopedMessage* message) {
+ return node_.GetMessage(port, message, nullptr) == OK && *message;
+ }
+
+ bool GetSavedMessage(ScopedMessage* message) {
+ base::AutoLock lock(lock_);
+ if (saved_messages_.empty()) {
+ message->reset();
+ return false;
+ }
+ std::swap(*message, saved_messages_.front());
+ saved_messages_.pop();
+ return true;
+ }
+
+ void EnqueueMessage(ScopedMessage message) {
+ idle_event_.Reset();
+
+ // NOTE: This may be called from ForwardMessage and thus must not reenter
+ // |node_|.
+ base::AutoLock lock(lock_);
+ incoming_messages_.emplace(std::move(message));
+ messages_available_event_.Signal();
+ }
+
+ void GenerateRandomPortName(PortName* port_name) override {
+ DCHECK(router_);
+ router_->GeneratePortName(port_name);
+ }
+
+ void AllocMessage(size_t num_header_bytes, ScopedMessage* message) override {
+ message->reset(new TestMessage(num_header_bytes, 0, 0));
+ }
+
+ void ForwardMessage(const NodeName& node_name,
+ ScopedMessage message) override {
+ {
+ base::AutoLock lock(lock_);
+ if (drop_messages_) {
+ DVLOG(1) << "Dropping ForwardMessage from node "
+ << node_name_ << " to " << node_name;
+
+ base::AutoUnlock unlock(lock_);
+ ClosePortsInMessage(message.get());
+ return;
+ }
+ }
+
+ DCHECK(router_);
+ DVLOG(1) << "ForwardMessage from node "
+ << node_name_ << " to " << node_name;
+ router_->ForwardMessage(this, node_name, std::move(message));
+ }
+
+ void BroadcastMessage(ScopedMessage message) override {
+ router_->BroadcastMessage(this, std::move(message));
+ }
+
+ void PortStatusChanged(const PortRef& port) override {
+ // The port may be closed, in which case we ignore the notification.
+ base::AutoLock lock(lock_);
+ if (!save_messages_)
+ return;
+
+ for (;;) {
+ ScopedMessage message;
+ {
+ base::AutoUnlock unlock(lock_);
+ if (!ReadMessage(port, &message))
+ break;
+ }
+
+ saved_messages_.emplace(std::move(message));
+ }
+ }
+
+ void ClosePortsInMessage(Message* message) {
+ for (size_t i = 0; i < message->num_ports(); ++i) {
+ PortRef port;
+ ASSERT_EQ(OK, node_.GetPort(message->ports()[i], &port));
+ EXPECT_EQ(OK, node_.ClosePort(port));
+ }
+ }
+
+ private:
+ void ProcessMessages() {
+ for (;;) {
+ messages_available_event_.Wait();
+
+ base::AutoLock lock(lock_);
+
+ if (should_quit_)
+ return;
+
+ dispatching_ = true;
+ while (!incoming_messages_.empty()) {
+ if (block_on_event_ &&
+ GetEventHeader(*incoming_messages_.front())->type ==
+ blocked_event_type_) {
+ blocked_ = true;
+ // Go idle if we hit a blocked event type.
+ break;
+ } else {
+ blocked_ = false;
+ }
+ ScopedMessage message = std::move(incoming_messages_.front());
+ incoming_messages_.pop();
+
+ // NOTE: AcceptMessage() can re-enter this object to call any of the
+ // NodeDelegate interface methods.
+ base::AutoUnlock unlock(lock_);
+ node_.AcceptMessage(std::move(message));
+ }
+
+ dispatching_ = false;
+ started_ = true;
+ idle_event_.Signal();
+    }
+ }
+
+ const NodeName node_name_;
+ Node node_;
+ MessageRouter* router_ = nullptr;
+
+ base::Thread node_thread_;
+ base::WaitableEvent messages_available_event_;
+ base::WaitableEvent idle_event_;
+
+ // Guards fields below.
+ base::Lock lock_;
+ bool started_ = false;
+ bool dispatching_ = false;
+ bool should_quit_ = false;
+ bool drop_messages_ = false;
+ bool save_messages_ = false;
+ bool blocked_ = false;
+ bool block_on_event_ = false;
+ EventType blocked_event_type_;
+ std::queue<ScopedMessage> incoming_messages_;
+ std::queue<ScopedMessage> saved_messages_;
+};
+
+class PortsTest : public testing::Test, public MessageRouter {
+ public:
+ void AddNode(TestNode* node) {
+ {
+ base::AutoLock lock(lock_);
+ nodes_[node->name()] = node;
+ }
+ node->Start(this);
+ }
+
+ void RemoveNode(TestNode* node) {
+ {
+ base::AutoLock lock(lock_);
+ nodes_.erase(node->name());
+ }
+
+ for (const auto& entry : nodes_)
+ entry.second->node().LostConnectionToNode(node->name());
+ }
+
+ // Waits until all known Nodes are idle. Message forwarding and processing
+ // is handled in such a way that idleness is a stable state: once all nodes in
+ // the system are idle, they will remain idle until the test explicitly
+ // initiates some further event (e.g. sending a message, closing a port, or
+ // removing a Node).
+ void WaitForIdle() {
+ for (;;) {
+ base::AutoLock global_lock(global_lock_);
+ bool all_nodes_idle = true;
+ for (const auto& entry : nodes_) {
+ if (!entry.second->IsIdle())
+ all_nodes_idle = false;
+ entry.second->WakeUp();
+ }
+ if (all_nodes_idle)
+ return;
+
+ // Wait for any Node to signal that it's idle.
+ base::AutoUnlock global_unlock(global_lock_);
+ std::vector<base::WaitableEvent*> events;
+ for (const auto& entry : nodes_)
+ events.push_back(&entry.second->idle_event());
+ base::WaitableEvent::WaitMany(events.data(), events.size());
+ }
+ }
+
+ void CreatePortPair(TestNode* node0,
+ PortRef* port0,
+ TestNode* node1,
+ PortRef* port1) {
+ if (node0 == node1) {
+ EXPECT_EQ(OK, node0->node().CreatePortPair(port0, port1));
+ } else {
+ EXPECT_EQ(OK, node0->node().CreateUninitializedPort(port0));
+ EXPECT_EQ(OK, node1->node().CreateUninitializedPort(port1));
+ EXPECT_EQ(OK, node0->node().InitializePort(*port0, node1->name(),
+ port1->name()));
+ EXPECT_EQ(OK, node1->node().InitializePort(*port1, node0->name(),
+ port0->name()));
+ }
+ }
+
+ private:
+ // MessageRouter:
+ void GeneratePortName(PortName* name) override {
+ base::AutoLock lock(lock_);
+ name->v1 = next_port_id_++;
+ name->v2 = 0;
+ }
+
+ void ForwardMessage(TestNode* from_node,
+ const NodeName& node_name,
+ ScopedMessage message) override {
+ base::AutoLock global_lock(global_lock_);
+ base::AutoLock lock(lock_);
+ // Drop messages from nodes that have been removed.
+ if (nodes_.find(from_node->name()) == nodes_.end()) {
+ from_node->ClosePortsInMessage(message.get());
+ return;
+ }
+
+ auto it = nodes_.find(node_name);
+ if (it == nodes_.end()) {
+ DVLOG(1) << "Node not found: " << node_name;
+ return;
+ }
+
+ it->second->EnqueueMessage(std::move(message));
+ }
+
+ void BroadcastMessage(TestNode* from_node, ScopedMessage message) override {
+ base::AutoLock global_lock(global_lock_);
+ base::AutoLock lock(lock_);
+
+ // Drop messages from nodes that have been removed.
+ if (nodes_.find(from_node->name()) == nodes_.end())
+ return;
+
+ for (const auto& entry : nodes_) {
+ TestNode* node = entry.second;
+ // Broadcast doesn't deliver to the local node.
+ if (node == from_node)
+ continue;
+
+ // NOTE: We only need to support broadcast of events. Events have no
+ // payload or ports bytes.
+ ScopedMessage new_message(
+ new TestMessage(message->num_header_bytes(), 0, 0));
+ memcpy(new_message->mutable_header_bytes(), message->header_bytes(),
+ message->num_header_bytes());
+ node->EnqueueMessage(std::move(new_message));
+ }
+ }
+
+ base::MessageLoop message_loop_;
+
+ // Acquired before any operation which makes a Node busy, and before testing
+ // if all nodes are idle.
+ base::Lock global_lock_;
+
+ base::Lock lock_;
+ uint64_t next_port_id_ = 1;
+ std::map<NodeName, TestNode*> nodes_;
+};
+
+} // namespace
+
+TEST_F(PortsTest, Basic1) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&a0, &a1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "hello", a1));
+ EXPECT_EQ(OK, node0.node().ClosePort(a0));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, Basic2) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ PortRef b0, b1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&b0, &b1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "hello", b1));
+ EXPECT_EQ(OK, node0.SendStringMessage(b0, "hello again"));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(b0));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, Basic3) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&a0, &a1));
+
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "hello", a1));
+ EXPECT_EQ(OK, node0.SendStringMessage(a0, "hello again"));
+
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "foo", a0));
+
+ PortRef b0, b1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&b0, &b1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "bar", b1));
+ EXPECT_EQ(OK, node0.SendStringMessage(b0, "baz"));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(b0));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, LostConnectionToNode1) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+ node1.set_drop_messages(true);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ // Transfer a port to node1 and simulate a lost connection to node1.
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&a0, &a1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "foo", a1));
+
+ WaitForIdle();
+
+ RemoveNode(&node1);
+
+ WaitForIdle();
+
+ EXPECT_EQ(OK, node0.node().ClosePort(a0));
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, LostConnectionToNode2) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&a0, &a1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "take a1", a1));
+
+ WaitForIdle();
+
+ node1.set_drop_messages(true);
+
+ RemoveNode(&node1);
+
+ WaitForIdle();
+
+ // a0 should have eventually detected peer closure after node loss.
+ ScopedMessage message;
+ EXPECT_EQ(ERROR_PORT_PEER_CLOSED,
+ node0.node().GetMessage(a0, &message, nullptr));
+ EXPECT_FALSE(message);
+
+ EXPECT_EQ(OK, node0.node().ClosePort(a0));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+
+ EXPECT_EQ(OK, node1.node().GetMessage(x1, &message, nullptr));
+ EXPECT_TRUE(message);
+ node1.ClosePortsInMessage(message.get());
+
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, LostConnectionToNodeWithSecondaryProxy) {
+ // Tests that a proxy gets cleaned up when its indirect peer lives on a lost
+ // node.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ TestNode node2(2);
+ AddNode(&node2);
+
+ // Create A-B spanning nodes 0 and 1 and C-D spanning 1 and 2.
+ PortRef A, B, C, D;
+ CreatePortPair(&node0, &A, &node1, &B);
+ CreatePortPair(&node1, &C, &node2, &D);
+
+ // Create E-F and send F over A to node 1.
+ PortRef E, F;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&E, &F));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(A, ".", F));
+
+ WaitForIdle();
+
+ ScopedMessage message;
+ ASSERT_TRUE(node1.ReadMessage(B, &message));
+ ASSERT_EQ(1u, message->num_ports());
+
+ EXPECT_EQ(OK, node1.node().GetPort(message->ports()[0], &F));
+
+ // Send F over C to node 2 and then simulate node 2 loss from node 1. Node 1
+  // will trivially become aware of the loss, and this test verifies that
+  // port E on node 0 will eventually also become aware of it.
+
+ // Make sure node2 stops processing events when it encounters an ObserveProxy.
+ node2.BlockOnEvent(EventType::kObserveProxy);
+
+ EXPECT_EQ(OK, node1.SendStringMessageWithPort(C, ".", F));
+ WaitForIdle();
+
+ // Simulate node 1 and 2 disconnecting.
+ EXPECT_EQ(OK, node1.node().LostConnectionToNode(node2.name()));
+
+ // Let node2 continue processing events and wait for everyone to go idle.
+ node2.Unblock();
+ WaitForIdle();
+
+ // Port F should be gone.
+ EXPECT_EQ(ERROR_PORT_UNKNOWN, node1.node().GetPort(F.name(), &F));
+
+ // Port E should have detected peer closure despite the fact that there is
+ // no longer a continuous route from F to E over which the event could travel.
+ PortStatus status;
+ EXPECT_EQ(OK, node0.node().GetStatus(E, &status));
+ EXPECT_TRUE(status.peer_closed);
+
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(B));
+ EXPECT_EQ(OK, node1.node().ClosePort(C));
+ EXPECT_EQ(OK, node0.node().ClosePort(E));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, LostConnectionToNodeWithLocalProxy) {
+ // Tests that a proxy gets cleaned up when its direct peer lives on a lost
+  // node and its predecessor lives on the same node.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef A, B;
+ CreatePortPair(&node0, &A, &node1, &B);
+
+ PortRef C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&C, &D));
+
+ // Send D but block node0 on an ObserveProxy event.
+ node0.BlockOnEvent(EventType::kObserveProxy);
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(A, ".", D));
+
+ // node0 won't collapse the proxy but node1 will receive the message before
+ // going idle.
+ WaitForIdle();
+
+ ScopedMessage message;
+ ASSERT_TRUE(node1.ReadMessage(B, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef E;
+ EXPECT_EQ(OK, node1.node().GetPort(message->ports()[0], &E));
+
+ RemoveNode(&node1);
+
+ node0.Unblock();
+ WaitForIdle();
+
+ // Port C should have detected peer closure.
+ PortStatus status;
+ EXPECT_EQ(OK, node0.node().GetStatus(C, &status));
+ EXPECT_TRUE(status.peer_closed);
+
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(B));
+ EXPECT_EQ(OK, node0.node().ClosePort(C));
+ EXPECT_EQ(OK, node1.node().ClosePort(E));
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, GetMessage1) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&a0, &a1));
+
+ ScopedMessage message;
+ EXPECT_EQ(OK, node.node().GetMessage(a0, &message, nullptr));
+ EXPECT_FALSE(message);
+
+ EXPECT_EQ(OK, node.node().ClosePort(a1));
+
+ WaitForIdle();
+
+ EXPECT_EQ(ERROR_PORT_PEER_CLOSED,
+ node.node().GetMessage(a0, &message, nullptr));
+ EXPECT_FALSE(message);
+
+ EXPECT_EQ(OK, node.node().ClosePort(a0));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, GetMessage2) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&a0, &a1));
+
+ EXPECT_EQ(OK, node.SendStringMessage(a1, "1"));
+
+ ScopedMessage message;
+ EXPECT_EQ(OK, node.node().GetMessage(a0, &message, nullptr));
+
+ ASSERT_TRUE(message);
+ EXPECT_TRUE(MessageEquals(message, "1"));
+
+ EXPECT_EQ(OK, node.node().ClosePort(a0));
+ EXPECT_EQ(OK, node.node().ClosePort(a1));
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, GetMessage3) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&a0, &a1));
+
+ const char* kStrings[] = {
+ "1",
+ "2",
+ "3"
+ };
+
+ for (size_t i = 0; i < sizeof(kStrings)/sizeof(kStrings[0]); ++i)
+ EXPECT_EQ(OK, node.SendStringMessage(a1, kStrings[i]));
+
+ ScopedMessage message;
+ for (size_t i = 0; i < sizeof(kStrings)/sizeof(kStrings[0]); ++i) {
+ EXPECT_EQ(OK, node.node().GetMessage(a0, &message, nullptr));
+ ASSERT_TRUE(message);
+ EXPECT_TRUE(MessageEquals(message, kStrings[i]));
+ }
+
+ EXPECT_EQ(OK, node.node().ClosePort(a0));
+ EXPECT_EQ(OK, node.node().ClosePort(a1));
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, Delegation1) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ PortRef x0, x1;
+ CreatePortPair(&node0, &x0, &node1, &x1);
+
+ // In this test, we send a message to a port that has been moved.
+
+ PortRef a0, a1;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&a0, &a1));
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(x0, "a1", a1));
+ WaitForIdle();
+
+ ScopedMessage message;
+ ASSERT_TRUE(node1.ReadMessage(x1, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ EXPECT_TRUE(MessageEquals(message, "a1"));
+
+ // This is "a1" from the point of view of node1.
+ PortName a2_name = message->ports()[0];
+ EXPECT_EQ(OK, node1.SendStringMessageWithPort(x1, "a2", a2_name));
+ EXPECT_EQ(OK, node0.SendStringMessage(a0, "hello"));
+
+ WaitForIdle();
+
+ ASSERT_TRUE(node0.ReadMessage(x0, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ EXPECT_TRUE(MessageEquals(message, "a2"));
+
+ // This is "a2" from the point of view of node1.
+ PortName a3_name = message->ports()[0];
+
+ PortRef a3;
+ EXPECT_EQ(OK, node0.node().GetPort(a3_name, &a3));
+
+ ASSERT_TRUE(node0.ReadMessage(a3, &message));
+ EXPECT_EQ(0u, message->num_ports());
+ EXPECT_TRUE(MessageEquals(message, "hello"));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(a0));
+ EXPECT_EQ(OK, node0.node().ClosePort(a3));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(x0));
+ EXPECT_EQ(OK, node1.node().ClosePort(x1));
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, Delegation2) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ for (int i = 0; i < 100; ++i) {
+ // Setup pipe a<->b between node0 and node1.
+ PortRef A, B;
+ CreatePortPair(&node0, &A, &node1, &B);
+
+ PortRef C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&C, &D));
+
+ PortRef E, F;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&E, &F));
+
+ node1.set_save_messages(true);
+
+ // Pass D over A to B.
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(A, "1", D));
+
+ // Pass F over C to D.
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(C, "1", F));
+
+ // This message should find its way to node1.
+ EXPECT_EQ(OK, node0.SendStringMessage(E, "hello"));
+
+ WaitForIdle();
+
+ EXPECT_EQ(OK, node0.node().ClosePort(C));
+ EXPECT_EQ(OK, node0.node().ClosePort(E));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(B));
+
+ bool got_hello = false;
+ ScopedMessage message;
+ while (node1.GetSavedMessage(&message)) {
+ node1.ClosePortsInMessage(message.get());
+ if (MessageEquals(message, "hello")) {
+ got_hello = true;
+ break;
+ }
+ }
+
+ EXPECT_TRUE(got_hello);
+
+ WaitForIdle(); // Because closing ports may have generated tasks.
+ }
+
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, SendUninitialized) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef x0;
+ EXPECT_EQ(OK, node.node().CreateUninitializedPort(&x0));
+ EXPECT_EQ(ERROR_PORT_STATE_UNEXPECTED, node.SendStringMessage(x0, "oops"));
+ EXPECT_EQ(OK, node.node().ClosePort(x0));
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, SendFailure) {
+ TestNode node(0);
+ AddNode(&node);
+
+ node.set_save_messages(true);
+
+ PortRef A, B;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+
+ // Try to send A over itself.
+
+ EXPECT_EQ(ERROR_PORT_CANNOT_SEND_SELF,
+ node.SendStringMessageWithPort(A, "oops", A));
+
+ // Try to send B over A.
+
+ EXPECT_EQ(ERROR_PORT_CANNOT_SEND_PEER,
+ node.SendStringMessageWithPort(A, "nope", B));
+
+ // B should be closed immediately.
+ EXPECT_EQ(ERROR_PORT_UNKNOWN, node.node().GetPort(B.name(), &B));
+
+ WaitForIdle();
+
+ // There should have been no messages accepted.
+ ScopedMessage message;
+ EXPECT_FALSE(node.GetSavedMessage(&message));
+
+ EXPECT_EQ(OK, node.node().ClosePort(A));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, DontLeakUnreceivedPorts) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node.node().CreatePortPair(&C, &D));
+
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(A, "foo", D));
+
+ EXPECT_EQ(OK, node.node().ClosePort(C));
+ EXPECT_EQ(OK, node.node().ClosePort(A));
+ EXPECT_EQ(OK, node.node().ClosePort(B));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, AllowShutdownWithLocalPortsOpen) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node.node().CreatePortPair(&C, &D));
+
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(A, "foo", D));
+
+ ScopedMessage message;
+ EXPECT_TRUE(node.ReadMessage(B, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ EXPECT_TRUE(MessageEquals(message, "foo"));
+ PortRef E;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &E));
+
+ EXPECT_TRUE(
+ node.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(
+ node.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+ EXPECT_FALSE(node.node().CanShutdownCleanly());
+
+ EXPECT_EQ(OK, node.node().ClosePort(A));
+ EXPECT_EQ(OK, node.node().ClosePort(B));
+ EXPECT_EQ(OK, node.node().ClosePort(C));
+ EXPECT_EQ(OK, node.node().ClosePort(E));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, ProxyCollapse1) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef A, B;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+
+ PortRef X, Y;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&X, &Y));
+
+ ScopedMessage message;
+
+ // Send B and receive it as C.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", B));
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef C;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &C));
+
+ // Send C and receive it as D.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", C));
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef D;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &D));
+
+ // Send D and receive it as E.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", D));
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef E;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &E));
+
+ EXPECT_EQ(OK, node.node().ClosePort(X));
+ EXPECT_EQ(OK, node.node().ClosePort(Y));
+
+ EXPECT_EQ(OK, node.node().ClosePort(A));
+ EXPECT_EQ(OK, node.node().ClosePort(E));
+
+ // The node should not idle until all proxies are collapsed.
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, ProxyCollapse2) {
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef A, B;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+
+ PortRef X, Y;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&X, &Y));
+
+ ScopedMessage message;
+
+ // Send B and A to create proxies in each direction.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", B));
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", A));
+
+ EXPECT_EQ(OK, node.node().ClosePort(X));
+ EXPECT_EQ(OK, node.node().ClosePort(Y));
+
+ // At this point we have a scenario with:
+ //
+ // D -> [B] -> C -> [A]
+ //
+ // Ensure that the proxies can collapse. The sent ports will be closed
+ // eventually as a result of Y's closure.
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, SendWithClosedPeer) {
+ // This tests that if a port is sent when its peer is already known to be
+ // closed, the newly created port will be aware of that peer closure, and the
+ // proxy will eventually collapse.
+
+ TestNode node(0);
+ AddNode(&node);
+
+ // Send a message from A to B, then close A.
+ PortRef A, B;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node.SendStringMessage(A, "hey"));
+ EXPECT_EQ(OK, node.node().ClosePort(A));
+
+ // Now send B over X-Y as new port C.
+ PortRef X, Y;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&X, &Y));
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", B));
+ ScopedMessage message;
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef C;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &C));
+
+ EXPECT_EQ(OK, node.node().ClosePort(X));
+ EXPECT_EQ(OK, node.node().ClosePort(Y));
+
+ WaitForIdle();
+
+ // C should have received the message originally sent to B, and it should also
+ // be aware of A's closure.
+
+ ASSERT_TRUE(node.ReadMessage(C, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ PortStatus status;
+ EXPECT_EQ(OK, node.node().GetStatus(C, &status));
+ EXPECT_FALSE(status.receiving_messages);
+ EXPECT_FALSE(status.has_messages);
+ EXPECT_TRUE(status.peer_closed);
+
+ node.node().ClosePort(C);
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, SendWithClosedPeerSent) {
+ // This tests that if a port is closed while some number of proxies are still
+  // routing messages (directly or indirectly) to it, the peer port is
+ // eventually notified of the closure, and the dead-end proxies will
+ // eventually be removed.
+
+ TestNode node(0);
+ AddNode(&node);
+
+ PortRef X, Y;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&X, &Y));
+
+ PortRef A, B;
+ EXPECT_EQ(OK, node.node().CreatePortPair(&A, &B));
+
+ ScopedMessage message;
+
+ // Send A as new port C.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", A));
+
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef C;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &C));
+
+ // Send C as new port D.
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", C));
+
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef D;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &D));
+
+ // Send a message to B through D, then close D.
+ EXPECT_EQ(OK, node.SendStringMessage(D, "hey"));
+ EXPECT_EQ(OK, node.node().ClosePort(D));
+
+ // Now send B as new port E.
+
+ EXPECT_EQ(OK, node.SendStringMessageWithPort(X, "foo", B));
+ EXPECT_EQ(OK, node.node().ClosePort(X));
+
+ ASSERT_TRUE(node.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef E;
+ ASSERT_EQ(OK, node.node().GetPort(message->ports()[0], &E));
+
+ EXPECT_EQ(OK, node.node().ClosePort(Y));
+
+ WaitForIdle();
+
+ // E should receive the message originally sent to B, and it should also be
+ // aware of D's closure.
+
+ ASSERT_TRUE(node.ReadMessage(E, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ PortStatus status;
+ EXPECT_EQ(OK, node.node().GetStatus(E, &status));
+ EXPECT_FALSE(status.receiving_messages);
+ EXPECT_FALSE(status.has_messages);
+ EXPECT_TRUE(status.peer_closed);
+
+ EXPECT_EQ(OK, node.node().ClosePort(E));
+
+ WaitForIdle();
+
+ EXPECT_TRUE(node.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePorts) {
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+ // Setup two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ // Write a message on A.
+ EXPECT_EQ(OK, node0.SendStringMessage(A, "hey"));
+
+ // Initiate a merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ WaitForIdle();
+
+ // Expect all proxies to be gone once idle.
+ EXPECT_TRUE(
+ node0.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+ EXPECT_TRUE(
+ node1.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+
+ // Expect D to have received the message sent on A.
+ ScopedMessage message;
+ ASSERT_TRUE(node1.ReadMessage(D, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+
+ // No more ports should be open.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePortWithClosedPeer1) {
+ // This tests that the right thing happens when initiating a merge on a port
+ // whose peer has already been closed.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+  // Set up two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ // Write a message on A.
+ EXPECT_EQ(OK, node0.SendStringMessage(A, "hey"));
+
+ // Close A.
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+
+ // Initiate a merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ WaitForIdle();
+
+ // Expect all proxies to be gone once idle. node0 should have no ports since
+ // A was explicitly closed.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(
+ node1.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+
+ // Expect D to have received the message sent on A.
+ ScopedMessage message;
+ ASSERT_TRUE(node1.ReadMessage(D, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+
+ // No more ports should be open.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePortWithClosedPeer2) {
+ // This tests that the right thing happens when merging into a port whose peer
+ // has already been closed.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+  // Set up two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ // Write a message on D and close it.
+  EXPECT_EQ(OK, node1.SendStringMessage(D, "hey"));
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+
+ // Initiate a merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ WaitForIdle();
+
+ // Expect all proxies to be gone once idle. node1 should have no ports since
+ // D was explicitly closed.
+ EXPECT_TRUE(
+ node0.node().CanShutdownCleanly(Node::ShutdownPolicy::ALLOW_LOCAL_PORTS));
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+
+ // Expect A to have received the message sent on D.
+ ScopedMessage message;
+ ASSERT_TRUE(node0.ReadMessage(A, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+
+ // No more ports should be open.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePortsWithClosedPeers) {
+ // This tests that no residual ports are left behind if two ports are merged
+ // when both of their peers have been closed.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+  // Set up two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ // Close A and D.
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+
+ WaitForIdle();
+
+ // Initiate a merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ WaitForIdle();
+
+ // Expect everything to have gone away.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePortsWithMovedPeers) {
+ // This tests that ports can be merged successfully even if their peers are
+ // moved around.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+  // Set up two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ // Set up another pair X-Y for moving ports on node0.
+ PortRef X, Y;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&X, &Y));
+
+ ScopedMessage message;
+
+ // Move A to new port E.
+ EXPECT_EQ(OK, node0.SendStringMessageWithPort(X, "foo", A));
+ ASSERT_TRUE(node0.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef E;
+ ASSERT_EQ(OK, node0.node().GetPort(message->ports()[0], &E));
+
+ EXPECT_EQ(OK, node0.node().ClosePort(X));
+ EXPECT_EQ(OK, node0.node().ClosePort(Y));
+
+ // Write messages on E and D.
+ EXPECT_EQ(OK, node0.SendStringMessage(E, "hey"));
+ EXPECT_EQ(OK, node1.SendStringMessage(D, "hi"));
+
+ // Initiate a merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ WaitForIdle();
+
+ // Expect to receive D's message on E and E's message on D.
+ ASSERT_TRUE(node0.ReadMessage(E, &message));
+ EXPECT_TRUE(MessageEquals(message, "hi"));
+ ASSERT_TRUE(node1.ReadMessage(D, &message));
+ EXPECT_TRUE(MessageEquals(message, "hey"));
+
+ // Close E and D.
+ EXPECT_EQ(OK, node0.node().ClosePort(E));
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+
+ WaitForIdle();
+
+ // Expect everything to have gone away.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+TEST_F(PortsTest, MergePortsFailsGracefully) {
+ // This tests that the system remains in a well-defined state if something
+ // goes wrong during port merge.
+
+ TestNode node0(0);
+ AddNode(&node0);
+
+ TestNode node1(1);
+ AddNode(&node1);
+
+  // Set up two independent port pairs, A-B on node0 and C-D on node1.
+ PortRef A, B, C, D;
+ EXPECT_EQ(OK, node0.node().CreatePortPair(&A, &B));
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&C, &D));
+
+ ScopedMessage message;
+ PortRef X, Y;
+ EXPECT_EQ(OK, node1.node().CreatePortPair(&X, &Y));
+
+ // Block the merge from proceeding until we can do something stupid with port
+ // C. This avoids the test logic racing with async merge logic.
+ node1.BlockOnEvent(EventType::kMergePort);
+
+ // Initiate the merge between B and C.
+ EXPECT_EQ(OK, node0.node().MergePorts(B, node1.name(), C.name()));
+
+ // Move C to a new port E. This is not a sane use of Node's public API but
+ // is still hypothetically possible. It allows us to force a merge failure
+  // because C will be in an invalid state by the time the merge is processed.
+ // As a result, B should be closed.
+ EXPECT_EQ(OK, node1.SendStringMessageWithPort(X, "foo", C));
+
+ node1.Unblock();
+
+ ASSERT_TRUE(node1.ReadMessage(Y, &message));
+ ASSERT_EQ(1u, message->num_ports());
+ PortRef E;
+ ASSERT_EQ(OK, node1.node().GetPort(message->ports()[0], &E));
+
+ EXPECT_EQ(OK, node1.node().ClosePort(X));
+ EXPECT_EQ(OK, node1.node().ClosePort(Y));
+
+ WaitForIdle();
+
+ // C goes away as a result of normal proxy removal. B should have been closed
+ // cleanly by the failed MergePorts.
+ EXPECT_EQ(ERROR_PORT_UNKNOWN, node1.node().GetPort(C.name(), &C));
+ EXPECT_EQ(ERROR_PORT_UNKNOWN, node0.node().GetPort(B.name(), &B));
+
+ // Close A, D, and E.
+ EXPECT_EQ(OK, node0.node().ClosePort(A));
+ EXPECT_EQ(OK, node1.node().ClosePort(D));
+ EXPECT_EQ(OK, node1.node().ClosePort(E));
+
+ WaitForIdle();
+
+ // Expect everything to have gone away.
+ EXPECT_TRUE(node0.node().CanShutdownCleanly());
+ EXPECT_TRUE(node1.node().CanShutdownCleanly());
+}
+
+} // namespace test
+} // namespace ports
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports/user_data.h b/mojo/edk/system/ports/user_data.h
new file mode 100644
index 0000000000..73e7d17b32
--- /dev/null
+++ b/mojo/edk/system/ports/user_data.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_USER_DATA_H_
+#define MOJO_EDK_SYSTEM_PORTS_USER_DATA_H_
+
+#include "base/memory/ref_counted.h"
+
+namespace mojo {
+namespace edk {
+namespace ports {
+
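+// Base class for arbitrary, thread-safe ref-counted data that users of the
+// ports library may associate with a port. The ports layer treats this data
+// as opaque; it only holds and releases references to it.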
+class UserData : public base::RefCountedThreadSafe<UserData> {
+ protected:
+ friend class base::RefCountedThreadSafe<UserData>;
+
+ virtual ~UserData() {}
+};
+
+} // namespace ports
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_USER_DATA_H_
diff --git a/mojo/edk/system/ports_message.cc b/mojo/edk/system/ports_message.cc
new file mode 100644
index 0000000000..5f3e8c0125
--- /dev/null
+++ b/mojo/edk/system/ports_message.cc
@@ -0,0 +1,62 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/ports_message.h"
+
+#include "base/memory/ptr_util.h"
+#include "mojo/edk/system/node_channel.h"
+
+namespace mojo {
+namespace edk {
+
+// static
+std::unique_ptr<PortsMessage> PortsMessage::NewUserMessage(
+ size_t num_payload_bytes,
+ size_t num_ports,
+ size_t num_handles) {
+ return base::WrapUnique(
+ new PortsMessage(num_payload_bytes, num_ports, num_handles));
+}
+
+PortsMessage::~PortsMessage() {}
+
+PortsMessage::PortsMessage(size_t num_payload_bytes,
+ size_t num_ports,
+ size_t num_handles)
+ : ports::Message(num_payload_bytes, num_ports) {
+ size_t size = num_header_bytes_ + num_ports_bytes_ + num_payload_bytes;
+ void* ptr;
+ channel_message_ = NodeChannel::CreatePortsMessage(size, &ptr, num_handles);
+ InitializeUserMessageHeader(ptr);
+}
+
+PortsMessage::PortsMessage(size_t num_header_bytes,
+ size_t num_payload_bytes,
+ size_t num_ports_bytes,
+ Channel::MessagePtr channel_message)
+ : ports::Message(num_header_bytes,
+ num_payload_bytes,
+ num_ports_bytes) {
+ if (channel_message) {
+ channel_message_ = std::move(channel_message);
+ void* data;
+ size_t num_data_bytes;
+ NodeChannel::GetPortsMessageData(channel_message_.get(), &data,
+ &num_data_bytes);
+ start_ = static_cast<char*>(data);
+ } else {
+ // TODO: Clean this up. In practice this branch of the constructor should
+ // only be reached from Node-internal calls to AllocMessage, which never
+ // carry ports or non-header bytes.
+ CHECK_EQ(num_payload_bytes, 0u);
+ CHECK_EQ(num_ports_bytes, 0u);
+ void* ptr;
+ channel_message_ =
+ NodeChannel::CreatePortsMessage(num_header_bytes, &ptr, 0);
+ start_ = static_cast<char*>(ptr);
+ }
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/ports_message.h b/mojo/edk/system/ports_message.h
new file mode 100644
index 0000000000..542b981700
--- /dev/null
+++ b/mojo/edk/system/ports_message.h
@@ -0,0 +1,69 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_PORTS_MESSAGE_H__
+#define MOJO_EDK_SYSTEM_PORTS_MESSAGE_H__
+
+#include <memory>
+#include <utility>
+
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/system/channel.h"
+#include "mojo/edk/system/ports/message.h"
+#include "mojo/edk/system/ports/name.h"
+
+namespace mojo {
+namespace edk {
+
+class NodeController;
+
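+// A ports::Message whose storage is backed by a Channel::Message. This lets a
+// ports-layer message, along with any attached platform handles, be handed
+// off directly to a NodeChannel for transmission. The message also records
+// the node it was received from, when that is known.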
+class PortsMessage : public ports::Message {
+ public:
+ static std::unique_ptr<PortsMessage> NewUserMessage(size_t num_payload_bytes,
+ size_t num_ports,
+ size_t num_handles);
+
+ ~PortsMessage() override;
+
+ size_t num_handles() const { return channel_message_->num_handles(); }
+ bool has_handles() const { return channel_message_->has_handles(); }
+
+ void SetHandles(ScopedPlatformHandleVectorPtr handles) {
+ channel_message_->SetHandles(std::move(handles));
+ }
+
+ ScopedPlatformHandleVectorPtr TakeHandles() {
+ return channel_message_->TakeHandles();
+ }
+
+ Channel::MessagePtr TakeChannelMessage() {
+ return std::move(channel_message_);
+ }
+
+ void set_source_node(const ports::NodeName& name) { source_node_ = name; }
+ const ports::NodeName& source_node() const { return source_node_; }
+
+ private:
+ friend class NodeController;
+
+ // Construct a new user PortsMessage backed by a new Channel::Message.
+ PortsMessage(size_t num_payload_bytes, size_t num_ports, size_t num_handles);
+
+ // Construct a new PortsMessage backed by a Channel::Message. If
+ // |channel_message| is null, a new one is allocated internally.
+ PortsMessage(size_t num_header_bytes,
+ size_t num_payload_bytes,
+ size_t num_ports_bytes,
+ Channel::MessagePtr channel_message);
+
+ Channel::MessagePtr channel_message_;
+
+ // The node name from which this message was received, if known.
+ ports::NodeName source_node_ = ports::kInvalidNodeName;
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_PORTS_MESSAGE_H__
diff --git a/mojo/edk/system/request_context.cc b/mojo/edk/system/request_context.cc
new file mode 100644
index 0000000000..5de65d7b64
--- /dev/null
+++ b/mojo/edk/system/request_context.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/request_context.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+base::LazyInstance<base::ThreadLocalPointer<RequestContext>>::Leaky
+ g_current_context = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+RequestContext::RequestContext() : RequestContext(Source::LOCAL_API_CALL) {}
+
+RequestContext::RequestContext(Source source)
+ : source_(source), tls_context_(g_current_context.Pointer()) {
+ // We allow nested RequestContexts to exist as long as they aren't actually
+ // used for anything.
+ if (!tls_context_->Get())
+ tls_context_->Set(this);
+}
+
+RequestContext::~RequestContext() {
+ if (IsCurrent()) {
+ // NOTE: Callbacks invoked by this destructor are allowed to initiate new
+ // EDK requests on this thread, so we need to reset the thread-local context
+ // pointer before calling them. We persist the original notification source
+ // since we're starting over at the bottom of the stack.
+ tls_context_->Set(nullptr);
+
+ MojoWatcherNotificationFlags flags = MOJO_WATCHER_NOTIFICATION_FLAG_NONE;
+ if (source_ == Source::SYSTEM)
+ flags |= MOJO_WATCHER_NOTIFICATION_FLAG_FROM_SYSTEM;
+
+ // We send all cancellation notifications first. This is necessary because
+ // it's possible that cancelled watches have other pending notifications
+ // attached to this RequestContext.
+ //
+ // From the application's perspective the watch is cancelled as soon as this
+ // notification is received, and dispatching the cancellation notification
+ // updates some internal Watch state to ensure no further notifications
+ // fire. Because notifications on a single Watch are mutually exclusive,
+ // this is sufficient to guarantee that MOJO_RESULT_CANCELLED is the last
+    // notification received, which is the guarantee the API makes.
+ for (const scoped_refptr<Watch>& watch :
+ watch_cancel_finalizers_.container()) {
+ static const HandleSignalsState closed_state = {0, 0};
+
+ // Establish a new RequestContext to capture and run any new notifications
+ // triggered by the callback invocation.
+ RequestContext inner_context(source_);
+ watch->InvokeCallback(MOJO_RESULT_CANCELLED, closed_state, flags);
+ }
+
+ for (const WatchNotifyFinalizer& watch :
+ watch_notify_finalizers_.container()) {
+ RequestContext inner_context(source_);
+ watch.watch->InvokeCallback(watch.result, watch.state, flags);
+ }
+ } else {
+ // It should be impossible for nested contexts to have finalizers.
+ DCHECK(watch_notify_finalizers_.container().empty());
+ DCHECK(watch_cancel_finalizers_.container().empty());
+ }
+}
+
+// static
+RequestContext* RequestContext::current() {
+ DCHECK(g_current_context.Pointer()->Get());
+ return g_current_context.Pointer()->Get();
+}
+
+void RequestContext::AddWatchNotifyFinalizer(scoped_refptr<Watch> watch,
+ MojoResult result,
+ const HandleSignalsState& state) {
+ DCHECK(IsCurrent());
+ watch_notify_finalizers_->push_back(
+ WatchNotifyFinalizer(std::move(watch), result, state));
+}
+
+void RequestContext::AddWatchCancelFinalizer(scoped_refptr<Watch> watch) {
+ DCHECK(IsCurrent());
+ watch_cancel_finalizers_->push_back(std::move(watch));
+}
+
+bool RequestContext::IsCurrent() const {
+ return tls_context_->Get() == this;
+}
+
+RequestContext::WatchNotifyFinalizer::WatchNotifyFinalizer(
+ scoped_refptr<Watch> watch,
+ MojoResult result,
+ const HandleSignalsState& state)
+ : watch(std::move(watch)), result(result), state(state) {}
+
+RequestContext::WatchNotifyFinalizer::WatchNotifyFinalizer(
+ const WatchNotifyFinalizer& other) = default;
+
+RequestContext::WatchNotifyFinalizer::~WatchNotifyFinalizer() {}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/request_context.h b/mojo/edk/system/request_context.h
new file mode 100644
index 0000000000..d1f43bdfbd
--- /dev/null
+++ b/mojo/edk/system/request_context.h
@@ -0,0 +1,107 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_REQUEST_CONTEXT_H_
+#define MOJO_EDK_SYSTEM_REQUEST_CONTEXT_H_
+
+#include "base/containers/stack_container.h"
+#include "base/macros.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/edk/system/watch.h"
+
+namespace base {
+template<typename T> class ThreadLocalPointer;
+}
+
+namespace mojo {
+namespace edk {
+
+// A RequestContext is a thread-local object which exists for the duration of
+// a single system API call. It is constructed immediately upon EDK entry and
+// destructed immediately before returning to the caller, after any internal
+// locks have been released.
+//
+// NOTE: It is legal to construct a RequestContext while another one already
+// exists on the current thread, but it is not safe to use the nested context
+// for any reason. Therefore it is important to always use
+// |RequestContext::current()| rather than referring to any local instance
+// directly.
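+//
+// Illustrative sketch only (|SomeEdkEntryPoint| is hypothetical, not an actual
+// EDK function): a typical entry point constructs a stack-scoped
+// RequestContext and queues finalizers through |current()|; queued callbacks
+// run when the outermost context is destroyed, after internal locks are
+// released.
+//
+//   MojoResult SomeEdkEntryPoint(scoped_refptr<Watch> watch,
+//                                const HandleSignalsState& state) {
+//     RequestContext request_context;  // Scoped to this API call.
+//     RequestContext::current()->AddWatchNotifyFinalizer(
+//         std::move(watch), MOJO_RESULT_OK, state);
+//     return MOJO_RESULT_OK;
+//   }  // |watch|'s callback fires here, outside any internal locks.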
+class MOJO_SYSTEM_IMPL_EXPORT RequestContext {
+ public:
+ // Identifies the source of the current stack frame's RequestContext.
+ enum class Source {
+ LOCAL_API_CALL,
+ SYSTEM,
+ };
+
+ // Constructs a RequestContext with a LOCAL_API_CALL Source.
+ RequestContext();
+
+ explicit RequestContext(Source source);
+ ~RequestContext();
+
+ // Returns the current thread-local RequestContext.
+ static RequestContext* current();
+
+ Source source() const { return source_; }
+
+ // Adds a finalizer to this RequestContext corresponding to a watch callback
+ // which should be triggered in response to some handle state change. If
+ // the WatcherDispatcher hasn't been closed by the time this RequestContext is
+ // destroyed, its WatchCallback will be invoked with |result| and |state|
+ // arguments.
+ void AddWatchNotifyFinalizer(scoped_refptr<Watch> watch,
+ MojoResult result,
+ const HandleSignalsState& state);
+
+ // Adds a finalizer to this RequestContext corresponding to a watch callback
+ // which should be triggered to notify of watch cancellation. This appends to
+ // a separate finalizer list from AddWatchNotifyFinalizer, as pending
+ // cancellations must always preempt other pending notifications.
+ void AddWatchCancelFinalizer(scoped_refptr<Watch> watch);
+
+ private:
+ // Is this request context the current one?
+ bool IsCurrent() const;
+
+ struct WatchNotifyFinalizer {
+ WatchNotifyFinalizer(scoped_refptr<Watch> watch,
+ MojoResult result,
+ const HandleSignalsState& state);
+ WatchNotifyFinalizer(const WatchNotifyFinalizer& other);
+ ~WatchNotifyFinalizer();
+
+ scoped_refptr<Watch> watch;
+ MojoResult result;
+ HandleSignalsState state;
+ };
+
+ // NOTE: This upper bound was chosen somewhat arbitrarily after observing some
+ // rare worst-case behavior in Chrome. A vast majority of RequestContexts only
+ // ever accumulate 0 or 1 finalizers.
+ static const size_t kStaticWatchFinalizersCapacity = 8;
+
+ using WatchNotifyFinalizerList =
+ base::StackVector<WatchNotifyFinalizer, kStaticWatchFinalizersCapacity>;
+ using WatchCancelFinalizerList =
+ base::StackVector<scoped_refptr<Watch>, kStaticWatchFinalizersCapacity>;
+
+ const Source source_;
+
+ WatchNotifyFinalizerList watch_notify_finalizers_;
+ WatchCancelFinalizerList watch_cancel_finalizers_;
+
+  // Pointer to the TLS context. Although this can easily be accessed via the
+  // global LazyInstance, accessing a LazyInstance is expensive relative to the
+  // rest of the work this class does, so the pointer is cached here.
+ base::ThreadLocalPointer<RequestContext>* tls_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(RequestContext);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_REQUEST_CONTEXT_H_
diff --git a/mojo/edk/system/shared_buffer_dispatcher.cc b/mojo/edk/system/shared_buffer_dispatcher.cc
new file mode 100644
index 0000000000..df391050a2
--- /dev/null
+++ b/mojo/edk/system/shared_buffer_dispatcher.cc
@@ -0,0 +1,339 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/shared_buffer_dispatcher.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "base/logging.h"
+#include "mojo/edk/embedder/embedder_internal.h"
+#include "mojo/edk/system/configuration.h"
+#include "mojo/edk/system/node_controller.h"
+#include "mojo/edk/system/options_validation.h"
+
+namespace mojo {
+namespace edk {
+
+namespace {
+
+#pragma pack(push, 1)
+
+struct SerializedState {
+ uint64_t num_bytes;
+ uint32_t flags;
+ uint32_t padding;
+};
+
+const uint32_t kSerializedStateFlagsReadOnly = 1 << 0;
+
+#pragma pack(pop)
+
+static_assert(sizeof(SerializedState) % 8 == 0,
+ "Invalid SerializedState size.");
+
+} // namespace
+
+// static
+const MojoCreateSharedBufferOptions
+ SharedBufferDispatcher::kDefaultCreateOptions = {
+ static_cast<uint32_t>(sizeof(MojoCreateSharedBufferOptions)),
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE};
+
+// static
+MojoResult SharedBufferDispatcher::ValidateCreateOptions(
+ const MojoCreateSharedBufferOptions* in_options,
+ MojoCreateSharedBufferOptions* out_options) {
+ const MojoCreateSharedBufferOptionsFlags kKnownFlags =
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE;
+
+ *out_options = kDefaultCreateOptions;
+ if (!in_options)
+ return MOJO_RESULT_OK;
+
+ UserOptionsReader<MojoCreateSharedBufferOptions> reader(in_options);
+ if (!reader.is_valid())
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (!OPTIONS_STRUCT_HAS_MEMBER(MojoCreateSharedBufferOptions, flags, reader))
+ return MOJO_RESULT_OK;
+ if ((reader.options().flags & ~kKnownFlags))
+ return MOJO_RESULT_UNIMPLEMENTED;
+ out_options->flags = reader.options().flags;
+
+ // Checks for fields beyond |flags|:
+
+ // (Nothing here yet.)
+
+ return MOJO_RESULT_OK;
+}
+
+// static
+MojoResult SharedBufferDispatcher::Create(
+ const MojoCreateSharedBufferOptions& /*validated_options*/,
+ NodeController* node_controller,
+ uint64_t num_bytes,
+ scoped_refptr<SharedBufferDispatcher>* result) {
+ if (!num_bytes)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ if (num_bytes > GetConfiguration().max_shared_memory_num_bytes)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ scoped_refptr<PlatformSharedBuffer> shared_buffer;
+ if (node_controller) {
+ shared_buffer =
+ node_controller->CreateSharedBuffer(static_cast<size_t>(num_bytes));
+ } else {
+ shared_buffer =
+ PlatformSharedBuffer::Create(static_cast<size_t>(num_bytes));
+ }
+ if (!shared_buffer)
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+
+ *result = CreateInternal(std::move(shared_buffer));
+ return MOJO_RESULT_OK;
+}
+
+// static
+MojoResult SharedBufferDispatcher::CreateFromPlatformSharedBuffer(
+ const scoped_refptr<PlatformSharedBuffer>& shared_buffer,
+ scoped_refptr<SharedBufferDispatcher>* result) {
+ if (!shared_buffer)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ *result = CreateInternal(shared_buffer);
+ return MOJO_RESULT_OK;
+}
+
+// static
+scoped_refptr<SharedBufferDispatcher> SharedBufferDispatcher::Deserialize(
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* platform_handles,
+ size_t num_platform_handles) {
+ if (num_bytes != sizeof(SerializedState)) {
+ LOG(ERROR) << "Invalid serialized shared buffer dispatcher (bad size)";
+ return nullptr;
+ }
+
+ const SerializedState* serialization =
+ static_cast<const SerializedState*>(bytes);
+ if (!serialization->num_bytes) {
+ LOG(ERROR)
+ << "Invalid serialized shared buffer dispatcher (invalid num_bytes)";
+ return nullptr;
+ }
+
+ if (!platform_handles || num_platform_handles != 1 || num_ports) {
+ LOG(ERROR)
+ << "Invalid serialized shared buffer dispatcher (missing handles)";
+ return nullptr;
+ }
+
+ // Starts off invalid, which is what we want.
+ PlatformHandle platform_handle;
+ // We take ownership of the handle, so we have to invalidate the one in
+ // |platform_handles|.
+ std::swap(platform_handle, *platform_handles);
+
+ // Wrapping |platform_handle| in a |ScopedPlatformHandle| means that it'll be
+ // closed even if creation fails.
+ bool read_only = (serialization->flags & kSerializedStateFlagsReadOnly);
+ scoped_refptr<PlatformSharedBuffer> shared_buffer(
+ PlatformSharedBuffer::CreateFromPlatformHandle(
+ static_cast<size_t>(serialization->num_bytes), read_only,
+ ScopedPlatformHandle(platform_handle)));
+ if (!shared_buffer) {
+ LOG(ERROR)
+ << "Invalid serialized shared buffer dispatcher (invalid num_bytes?)";
+ return nullptr;
+ }
+
+ return CreateInternal(std::move(shared_buffer));
+}
+
+scoped_refptr<PlatformSharedBuffer>
+SharedBufferDispatcher::PassPlatformSharedBuffer() {
+ base::AutoLock lock(lock_);
+ if (!shared_buffer_ || in_transit_)
+ return nullptr;
+
+ scoped_refptr<PlatformSharedBuffer> retval = shared_buffer_;
+ shared_buffer_ = nullptr;
+ return retval;
+}
+
+Dispatcher::Type SharedBufferDispatcher::GetType() const {
+ return Type::SHARED_BUFFER;
+}
+
+MojoResult SharedBufferDispatcher::Close() {
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ shared_buffer_ = nullptr;
+ return MOJO_RESULT_OK;
+}
+
+MojoResult SharedBufferDispatcher::DuplicateBufferHandle(
+ const MojoDuplicateBufferHandleOptions* options,
+ scoped_refptr<Dispatcher>* new_dispatcher) {
+ MojoDuplicateBufferHandleOptions validated_options;
+ MojoResult result = ValidateDuplicateOptions(options, &validated_options);
+ if (result != MOJO_RESULT_OK)
+ return result;
+
+ // Note: Since this is "duplicate", we keep our ref to |shared_buffer_|.
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if ((validated_options.flags &
+ MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_READ_ONLY) &&
+ (!shared_buffer_->IsReadOnly())) {
+ // If a read-only duplicate is requested and |shared_buffer_| is not
+ // read-only, make a read-only duplicate of |shared_buffer_|.
+ scoped_refptr<PlatformSharedBuffer> read_only_buffer =
+ shared_buffer_->CreateReadOnlyDuplicate();
+ if (!read_only_buffer)
+ return MOJO_RESULT_FAILED_PRECONDITION;
+ DCHECK(read_only_buffer->IsReadOnly());
+ *new_dispatcher = CreateInternal(std::move(read_only_buffer));
+ return MOJO_RESULT_OK;
+ }
+
+ *new_dispatcher = CreateInternal(shared_buffer_);
+ return MOJO_RESULT_OK;
+}
+
+MojoResult SharedBufferDispatcher::MapBuffer(
+ uint64_t offset,
+ uint64_t num_bytes,
+ MojoMapBufferFlags flags,
+ std::unique_ptr<PlatformSharedBufferMapping>* mapping) {
+ if (offset > static_cast<uint64_t>(std::numeric_limits<size_t>::max()))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ if (num_bytes > static_cast<uint64_t>(std::numeric_limits<size_t>::max()))
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ base::AutoLock lock(lock_);
+ DCHECK(shared_buffer_);
+ if (in_transit_ ||
+ !shared_buffer_->IsValidMap(static_cast<size_t>(offset),
+ static_cast<size_t>(num_bytes))) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ DCHECK(mapping);
+ *mapping = shared_buffer_->MapNoCheck(static_cast<size_t>(offset),
+ static_cast<size_t>(num_bytes));
+ if (!*mapping) {
+ LOG(ERROR) << "Unable to map: read_only" << shared_buffer_->IsReadOnly();
+ return MOJO_RESULT_RESOURCE_EXHAUSTED;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+void SharedBufferDispatcher::StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_platform_handles) {
+ *num_bytes = sizeof(SerializedState);
+ *num_ports = 0;
+ *num_platform_handles = 1;
+}
+
+bool SharedBufferDispatcher::EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) {
+ SerializedState* serialization =
+ static_cast<SerializedState*>(destination);
+ base::AutoLock lock(lock_);
+ serialization->num_bytes =
+ static_cast<uint64_t>(shared_buffer_->GetNumBytes());
+ serialization->flags =
+ (shared_buffer_->IsReadOnly() ? kSerializedStateFlagsReadOnly : 0);
+ serialization->padding = 0;
+
+ handle_for_transit_ = shared_buffer_->DuplicatePlatformHandle();
+ if (!handle_for_transit_.is_valid()) {
+ shared_buffer_ = nullptr;
+ return false;
+ }
+ handles[0] = handle_for_transit_.get();
+ return true;
+}
+
+bool SharedBufferDispatcher::BeginTransit() {
+ base::AutoLock lock(lock_);
+ if (in_transit_)
+ return false;
+ in_transit_ = static_cast<bool>(shared_buffer_);
+ return in_transit_;
+}
+
+void SharedBufferDispatcher::CompleteTransitAndClose() {
+ base::AutoLock lock(lock_);
+ in_transit_ = false;
+ shared_buffer_ = nullptr;
+ ignore_result(handle_for_transit_.release());
+}
+
+void SharedBufferDispatcher::CancelTransit() {
+ base::AutoLock lock(lock_);
+ in_transit_ = false;
+ handle_for_transit_.reset();
+}
+
+SharedBufferDispatcher::SharedBufferDispatcher(
+ scoped_refptr<PlatformSharedBuffer> shared_buffer)
+ : shared_buffer_(shared_buffer) {
+ DCHECK(shared_buffer_);
+}
+
+SharedBufferDispatcher::~SharedBufferDispatcher() {
+ DCHECK(!shared_buffer_ && !in_transit_);
+}
+
+// static
+MojoResult SharedBufferDispatcher::ValidateDuplicateOptions(
+ const MojoDuplicateBufferHandleOptions* in_options,
+ MojoDuplicateBufferHandleOptions* out_options) {
+ const MojoDuplicateBufferHandleOptionsFlags kKnownFlags =
+ MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_READ_ONLY;
+ static const MojoDuplicateBufferHandleOptions kDefaultOptions = {
+ static_cast<uint32_t>(sizeof(MojoDuplicateBufferHandleOptions)),
+ MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_NONE};
+
+ *out_options = kDefaultOptions;
+ if (!in_options)
+ return MOJO_RESULT_OK;
+
+ UserOptionsReader<MojoDuplicateBufferHandleOptions> reader(in_options);
+ if (!reader.is_valid())
+ return MOJO_RESULT_INVALID_ARGUMENT;
+
+ if (!OPTIONS_STRUCT_HAS_MEMBER(MojoDuplicateBufferHandleOptions, flags,
+ reader))
+ return MOJO_RESULT_OK;
+ if ((reader.options().flags & ~kKnownFlags))
+ return MOJO_RESULT_UNIMPLEMENTED;
+ out_options->flags = reader.options().flags;
+
+ // Checks for fields beyond |flags|:
+
+ // (Nothing here yet.)
+
+ return MOJO_RESULT_OK;
+}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/shared_buffer_dispatcher.h b/mojo/edk/system/shared_buffer_dispatcher.h
new file mode 100644
index 0000000000..6015595317
--- /dev/null
+++ b/mojo/edk/system/shared_buffer_dispatcher.h
@@ -0,0 +1,127 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_SHARED_BUFFER_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_SHARED_BUFFER_DISPATCHER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/macros.h"
+#include "mojo/edk/embedder/platform_handle_vector.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/embedder/scoped_platform_handle.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/system_impl_export.h"
+
+namespace mojo {
+
+namespace edk {
+class NodeController;
+
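+// Dispatcher implementation wrapping a reference to a PlatformSharedBuffer.
+// Supports duplication of the buffer (optionally read-only), mapping, and
+// serialization of the underlying buffer for transit to another node.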
+class MOJO_SYSTEM_IMPL_EXPORT SharedBufferDispatcher final : public Dispatcher {
+ public:
+ // The default options to use for |MojoCreateSharedBuffer()|. (Real uses
+ // should obtain this via |ValidateCreateOptions()| with a null |in_options|;
+ // this is exposed directly for testing convenience.)
+ static const MojoCreateSharedBufferOptions kDefaultCreateOptions;
+
+ // Validates and/or sets default options for |MojoCreateSharedBufferOptions|.
+ // If non-null, |in_options| must point to a struct of at least
+ // |in_options->struct_size| bytes. |out_options| must point to a (current)
+ // |MojoCreateSharedBufferOptions| and will be entirely overwritten on success
+ // (it may be partly overwritten on failure).
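+  //
+  // Typical call pattern (illustrative sketch only; |in_options| here stands
+  // for whatever the caller supplied and may be null):
+  //
+  //   MojoCreateSharedBufferOptions validated_options;
+  //   MojoResult result =
+  //       ValidateCreateOptions(in_options, &validated_options);
+  //   if (result != MOJO_RESULT_OK)
+  //     return result;
+  //   // |validated_options| may now be passed to Create().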
+ static MojoResult ValidateCreateOptions(
+ const MojoCreateSharedBufferOptions* in_options,
+ MojoCreateSharedBufferOptions* out_options);
+
+ // Static factory method: |validated_options| must be validated (obviously).
+ // On failure, |*result| will be left as-is.
+ // TODO(vtl): This should probably be made to return a scoped_refptr and have
+ // a MojoResult out parameter instead.
+ static MojoResult Create(
+ const MojoCreateSharedBufferOptions& validated_options,
+ NodeController* node_controller,
+ uint64_t num_bytes,
+ scoped_refptr<SharedBufferDispatcher>* result);
+
+ // Create a |SharedBufferDispatcher| from |shared_buffer|.
+ static MojoResult CreateFromPlatformSharedBuffer(
+ const scoped_refptr<PlatformSharedBuffer>& shared_buffer,
+ scoped_refptr<SharedBufferDispatcher>* result);
+
+ // The "opposite" of SerializeAndClose(). Called by Dispatcher::Deserialize().
+ static scoped_refptr<SharedBufferDispatcher> Deserialize(
+ const void* bytes,
+ size_t num_bytes,
+ const ports::PortName* ports,
+ size_t num_ports,
+ PlatformHandle* platform_handles,
+ size_t num_platform_handles);
+
+ // Passes the underlying platform shared buffer. This dispatcher must be
+ // closed after calling this function.
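+  //
+  // Expected call pattern (illustrative sketch, not taken from a real call
+  // site):
+  //
+  //   scoped_refptr<PlatformSharedBuffer> buffer =
+  //       dispatcher->PassPlatformSharedBuffer();
+  //   dispatcher->Close();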
+ scoped_refptr<PlatformSharedBuffer> PassPlatformSharedBuffer();
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ MojoResult DuplicateBufferHandle(
+ const MojoDuplicateBufferHandleOptions* options,
+ scoped_refptr<Dispatcher>* new_dispatcher) override;
+ MojoResult MapBuffer(
+ uint64_t offset,
+ uint64_t num_bytes,
+ MojoMapBufferFlags flags,
+ std::unique_ptr<PlatformSharedBufferMapping>* mapping) override;
+ void StartSerialize(uint32_t* num_bytes,
+ uint32_t* num_ports,
+ uint32_t* num_platform_handles) override;
+ bool EndSerialize(void* destination,
+ ports::PortName* ports,
+ PlatformHandle* handles) override;
+ bool BeginTransit() override;
+ void CompleteTransitAndClose() override;
+ void CancelTransit() override;
+
+ private:
+ static scoped_refptr<SharedBufferDispatcher> CreateInternal(
+ scoped_refptr<PlatformSharedBuffer> shared_buffer) {
+ return make_scoped_refptr(
+ new SharedBufferDispatcher(std::move(shared_buffer)));
+ }
+
+ explicit SharedBufferDispatcher(
+ scoped_refptr<PlatformSharedBuffer> shared_buffer);
+ ~SharedBufferDispatcher() override;
+
+ // Validates and/or sets default options for
+ // |MojoDuplicateBufferHandleOptions|. If non-null, |in_options| must point to
+ // a struct of at least |in_options->struct_size| bytes. |out_options| must
+ // point to a (current) |MojoDuplicateBufferHandleOptions| and will be
+ // entirely overwritten on success (it may be partly overwritten on failure).
+ static MojoResult ValidateDuplicateOptions(
+ const MojoDuplicateBufferHandleOptions* in_options,
+ MojoDuplicateBufferHandleOptions* out_options);
+
+  // Guards access to |shared_buffer_| and the other mutable state below.
+ base::Lock lock_;
+
+ bool in_transit_ = false;
+
+ // We keep a copy of the buffer's platform handle during transit so we can
+ // close it if something goes wrong.
+ ScopedPlatformHandle handle_for_transit_;
+
+ scoped_refptr<PlatformSharedBuffer> shared_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedBufferDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_SHARED_BUFFER_DISPATCHER_H_
diff --git a/mojo/edk/system/shared_buffer_dispatcher_unittest.cc b/mojo/edk/system/shared_buffer_dispatcher_unittest.cc
new file mode 100644
index 0000000000..c95bdc3b70
--- /dev/null
+++ b/mojo/edk/system/shared_buffer_dispatcher_unittest.cc
@@ -0,0 +1,312 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/shared_buffer_dispatcher.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "mojo/edk/embedder/platform_shared_buffer.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+// NOTE(vtl): There's currently not much to test for in
+// |SharedBufferDispatcher::ValidateCreateOptions()|, but the tests should be
+// expanded if/when options are added, so I've kept the general form of the
+// tests from data_pipe_unittest.cc.
+
+const uint32_t kSizeOfCreateOptions = sizeof(MojoCreateSharedBufferOptions);
+
+// Does a cursory sanity check of |validated_options|. Calls
+// |ValidateCreateOptions()| on already-validated options. The validated options
+// should be valid, and the revalidated copy should be the same.
+void RevalidateCreateOptions(
+ const MojoCreateSharedBufferOptions& validated_options) {
+ EXPECT_EQ(kSizeOfCreateOptions, validated_options.struct_size);
+ // Nothing to check for flags.
+
+ MojoCreateSharedBufferOptions revalidated_options = {};
+ EXPECT_EQ(MOJO_RESULT_OK,
+ SharedBufferDispatcher::ValidateCreateOptions(
+ &validated_options, &revalidated_options));
+ EXPECT_EQ(validated_options.struct_size, revalidated_options.struct_size);
+ EXPECT_EQ(validated_options.flags, revalidated_options.flags);
+}
+
+class SharedBufferDispatcherTest : public testing::Test {
+ public:
+ SharedBufferDispatcherTest() {}
+ ~SharedBufferDispatcherTest() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SharedBufferDispatcherTest);
+};
+
+// Tests valid inputs to |ValidateCreateOptions()|.
+TEST_F(SharedBufferDispatcherTest, ValidateCreateOptionsValid) {
+ // Default options.
+ {
+ MojoCreateSharedBufferOptions validated_options = {};
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::ValidateCreateOptions(
+ nullptr, &validated_options));
+ RevalidateCreateOptions(validated_options);
+ }
+
+ // Different flags.
+ MojoCreateSharedBufferOptionsFlags flags_values[] = {
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE};
+ for (size_t i = 0; i < arraysize(flags_values); i++) {
+ const MojoCreateSharedBufferOptionsFlags flags = flags_values[i];
+
+ // Different capacities (size 1).
+ for (uint32_t capacity = 1; capacity <= 100 * 1000 * 1000; capacity *= 10) {
+ MojoCreateSharedBufferOptions options = {
+ kSizeOfCreateOptions, // |struct_size|.
+ flags // |flags|.
+ };
+ MojoCreateSharedBufferOptions validated_options = {};
+ EXPECT_EQ(MOJO_RESULT_OK,
+ SharedBufferDispatcher::ValidateCreateOptions(
+ &options, &validated_options))
+ << capacity;
+ RevalidateCreateOptions(validated_options);
+ EXPECT_EQ(options.flags, validated_options.flags);
+ }
+ }
+}
+
+TEST_F(SharedBufferDispatcherTest, ValidateCreateOptionsInvalid) {
+ // Invalid |struct_size|.
+ {
+ MojoCreateSharedBufferOptions options = {
+ 1, // |struct_size|.
+ MOJO_CREATE_SHARED_BUFFER_OPTIONS_FLAG_NONE // |flags|.
+ };
+ MojoCreateSharedBufferOptions unused;
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ SharedBufferDispatcher::ValidateCreateOptions(
+ &options, &unused));
+ }
+
+ // Unknown |flags|.
+ {
+ MojoCreateSharedBufferOptions options = {
+ kSizeOfCreateOptions, // |struct_size|.
+ ~0u // |flags|.
+ };
+ MojoCreateSharedBufferOptions unused;
+ EXPECT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ SharedBufferDispatcher::ValidateCreateOptions(
+ &options, &unused));
+ }
+}
+
+TEST_F(SharedBufferDispatcherTest, CreateAndMapBuffer) {
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions,
+ nullptr, 100, &dispatcher));
+ ASSERT_TRUE(dispatcher);
+ EXPECT_EQ(Dispatcher::Type::SHARED_BUFFER, dispatcher->GetType());
+
+ // Make a couple of mappings.
+ std::unique_ptr<PlatformSharedBufferMapping> mapping1;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->MapBuffer(
+ 0, 100, MOJO_MAP_BUFFER_FLAG_NONE, &mapping1));
+ ASSERT_TRUE(mapping1);
+ ASSERT_TRUE(mapping1->GetBase());
+ EXPECT_EQ(100u, mapping1->GetLength());
+ // Write something.
+ static_cast<char*>(mapping1->GetBase())[50] = 'x';
+
+ std::unique_ptr<PlatformSharedBufferMapping> mapping2;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->MapBuffer(
+ 50, 50, MOJO_MAP_BUFFER_FLAG_NONE, &mapping2));
+ ASSERT_TRUE(mapping2);
+ ASSERT_TRUE(mapping2->GetBase());
+ EXPECT_EQ(50u, mapping2->GetLength());
+ EXPECT_EQ('x', static_cast<char*>(mapping2->GetBase())[0]);
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->Close());
+
+ // Check that we can still read/write to mappings after the dispatcher has
+ // gone away.
+ static_cast<char*>(mapping2->GetBase())[1] = 'y';
+ EXPECT_EQ('y', static_cast<char*>(mapping1->GetBase())[51]);
+}
+
+TEST_F(SharedBufferDispatcherTest, CreateAndMapBufferFromPlatformBuffer) {
+ scoped_refptr<PlatformSharedBuffer> platform_shared_buffer =
+ PlatformSharedBuffer::Create(100);
+ ASSERT_TRUE(platform_shared_buffer);
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ EXPECT_EQ(MOJO_RESULT_OK,
+ SharedBufferDispatcher::CreateFromPlatformSharedBuffer(
+ platform_shared_buffer, &dispatcher));
+ ASSERT_TRUE(dispatcher);
+ EXPECT_EQ(Dispatcher::Type::SHARED_BUFFER, dispatcher->GetType());
+
+ // Make a couple of mappings.
+ std::unique_ptr<PlatformSharedBufferMapping> mapping1;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->MapBuffer(
+ 0, 100, MOJO_MAP_BUFFER_FLAG_NONE, &mapping1));
+ ASSERT_TRUE(mapping1);
+ ASSERT_TRUE(mapping1->GetBase());
+ EXPECT_EQ(100u, mapping1->GetLength());
+ // Write something.
+ static_cast<char*>(mapping1->GetBase())[50] = 'x';
+
+ std::unique_ptr<PlatformSharedBufferMapping> mapping2;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->MapBuffer(
+ 50, 50, MOJO_MAP_BUFFER_FLAG_NONE, &mapping2));
+ ASSERT_TRUE(mapping2);
+ ASSERT_TRUE(mapping2->GetBase());
+ EXPECT_EQ(50u, mapping2->GetLength());
+ EXPECT_EQ('x', static_cast<char*>(mapping2->GetBase())[0]);
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->Close());
+
+ // Check that we can still read/write to mappings after the dispatcher has
+ // gone away.
+ static_cast<char*>(mapping2->GetBase())[1] = 'y';
+ EXPECT_EQ('y', static_cast<char*>(mapping1->GetBase())[51]);
+}
+
+TEST_F(SharedBufferDispatcherTest, DuplicateBufferHandle) {
+ scoped_refptr<SharedBufferDispatcher> dispatcher1;
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions,
+ nullptr, 100, &dispatcher1));
+
+ // Map and write something.
+ std::unique_ptr<PlatformSharedBufferMapping> mapping;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->MapBuffer(
+ 0, 100, MOJO_MAP_BUFFER_FLAG_NONE, &mapping));
+ static_cast<char*>(mapping->GetBase())[0] = 'x';
+ mapping.reset();
+
+ // Duplicate |dispatcher1| and then close it.
+ scoped_refptr<Dispatcher> dispatcher2;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->DuplicateBufferHandle(
+ nullptr, &dispatcher2));
+ ASSERT_TRUE(dispatcher2);
+ EXPECT_EQ(Dispatcher::Type::SHARED_BUFFER, dispatcher2->GetType());
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->Close());
+
+ // Map |dispatcher2| and read something.
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher2->MapBuffer(
+ 0, 100, MOJO_MAP_BUFFER_FLAG_NONE, &mapping));
+ EXPECT_EQ('x', static_cast<char*>(mapping->GetBase())[0]);
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher2->Close());
+}
+
+TEST_F(SharedBufferDispatcherTest, DuplicateBufferHandleOptionsValid) {
+ scoped_refptr<SharedBufferDispatcher> dispatcher1;
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions,
+ nullptr, 100, &dispatcher1));
+
+ MojoDuplicateBufferHandleOptions options[] = {
+ {sizeof(MojoDuplicateBufferHandleOptions),
+ MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_NONE},
+ {sizeof(MojoDuplicateBufferHandleOptions),
+ MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_READ_ONLY},
+ {sizeof(MojoDuplicateBufferHandleOptionsFlags), ~0u}};
+ for (size_t i = 0; i < arraysize(options); i++) {
+ scoped_refptr<Dispatcher> dispatcher2;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->DuplicateBufferHandle(
+ &options[i], &dispatcher2));
+ ASSERT_TRUE(dispatcher2);
+ EXPECT_EQ(Dispatcher::Type::SHARED_BUFFER, dispatcher2->GetType());
+ {
+ std::unique_ptr<PlatformSharedBufferMapping> mapping;
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher2->MapBuffer(0, 100, 0, &mapping));
+ }
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher2->Close());
+ }
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->Close());
+}
+
+TEST_F(SharedBufferDispatcherTest, DuplicateBufferHandleOptionsInvalid) {
+ scoped_refptr<SharedBufferDispatcher> dispatcher1;
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions,
+ nullptr, 100, &dispatcher1));
+
+ // Invalid |struct_size|.
+ {
+ MojoDuplicateBufferHandleOptions options = {
+ 1u, MOJO_DUPLICATE_BUFFER_HANDLE_OPTIONS_FLAG_NONE};
+ scoped_refptr<Dispatcher> dispatcher2;
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ dispatcher1->DuplicateBufferHandle(&options, &dispatcher2));
+ EXPECT_FALSE(dispatcher2);
+ }
+
+ // Unknown |flags|.
+ {
+ MojoDuplicateBufferHandleOptions options = {
+ sizeof(MojoDuplicateBufferHandleOptions), ~0u};
+ scoped_refptr<Dispatcher> dispatcher2;
+ EXPECT_EQ(MOJO_RESULT_UNIMPLEMENTED,
+ dispatcher1->DuplicateBufferHandle(&options, &dispatcher2));
+ EXPECT_FALSE(dispatcher2);
+ }
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher1->Close());
+}
+
+TEST_F(SharedBufferDispatcherTest, CreateInvalidNumBytes) {
+ // Size too big.
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ EXPECT_EQ(MOJO_RESULT_RESOURCE_EXHAUSTED,
+ SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions, nullptr,
+ std::numeric_limits<uint64_t>::max(), &dispatcher));
+ EXPECT_FALSE(dispatcher);
+
+ // Zero size.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions, nullptr, 0,
+ &dispatcher));
+ EXPECT_FALSE(dispatcher);
+}
+
+TEST_F(SharedBufferDispatcherTest, MapBufferInvalidArguments) {
+ scoped_refptr<SharedBufferDispatcher> dispatcher;
+ EXPECT_EQ(MOJO_RESULT_OK, SharedBufferDispatcher::Create(
+ SharedBufferDispatcher::kDefaultCreateOptions,
+ nullptr, 100, &dispatcher));
+
+ std::unique_ptr<PlatformSharedBufferMapping> mapping;
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ dispatcher->MapBuffer(0, 101, MOJO_MAP_BUFFER_FLAG_NONE, &mapping));
+ EXPECT_FALSE(mapping);
+
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ dispatcher->MapBuffer(1, 100, MOJO_MAP_BUFFER_FLAG_NONE, &mapping));
+ EXPECT_FALSE(mapping);
+
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ dispatcher->MapBuffer(0, 0, MOJO_MAP_BUFFER_FLAG_NONE, &mapping));
+ EXPECT_FALSE(mapping);
+
+ EXPECT_EQ(MOJO_RESULT_OK, dispatcher->Close());
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/shared_buffer_unittest.cc b/mojo/edk/system/shared_buffer_unittest.cc
new file mode 100644
index 0000000000..3a728728a5
--- /dev/null
+++ b/mojo/edk/system/shared_buffer_unittest.cc
@@ -0,0 +1,318 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include <string>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/strings/string_piece.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+using SharedBufferTest = test::MojoTestBase;
+
+TEST_F(SharedBufferTest, CreateSharedBuffer) {
+ const std::string message = "hello";
+ MojoHandle h = CreateBuffer(message.size());
+ WriteToBuffer(h, 0, message);
+ ExpectBufferContents(h, 0, message);
+}
+
+TEST_F(SharedBufferTest, DuplicateSharedBuffer) {
+ const std::string message = "hello";
+ MojoHandle h = CreateBuffer(message.size());
+ WriteToBuffer(h, 0, message);
+
+ MojoHandle dupe = DuplicateBuffer(h, false);
+ ExpectBufferContents(dupe, 0, message);
+}
+
+TEST_F(SharedBufferTest, PassSharedBufferLocal) {
+ const std::string message = "hello";
+ MojoHandle h = CreateBuffer(message.size());
+ WriteToBuffer(h, 0, message);
+
+ MojoHandle dupe = DuplicateBuffer(h, false);
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+
+ WriteMessageWithHandles(p0, "...", &dupe, 1);
+ EXPECT_EQ("...", ReadMessageWithHandles(p1, &dupe, 1));
+
+ ExpectBufferContents(dupe, 0, message);
+}
+
+#if !defined(OS_IOS)
+
+// Reads a single message with a shared buffer handle, maps the buffer, copies
+// the message contents into it, then exits.
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CopyToBufferClient, SharedBufferTest, h) {
+ MojoHandle b;
+ std::string message = ReadMessageWithHandles(h, &b, 1);
+ WriteToBuffer(b, 0, message);
+
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_F(SharedBufferTest, PassSharedBufferCrossProcess) {
+ const std::string message = "hello";
+ MojoHandle b = CreateBuffer(message.size());
+
+ RUN_CHILD_ON_PIPE(CopyToBufferClient, h)
+ MojoHandle dupe = DuplicateBuffer(b, false);
+ WriteMessageWithHandles(h, message, &dupe, 1);
+ WriteMessage(h, "quit");
+ END_CHILD()
+
+ ExpectBufferContents(b, 0, message);
+}
+
+// Creates a new buffer, maps it, writes the message contents to it, unmaps it,
+// and finally passes it back to the parent.
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CreateBufferClient, SharedBufferTest, h) {
+ std::string message = ReadMessage(h);
+ MojoHandle b = CreateBuffer(message.size());
+ WriteToBuffer(b, 0, message);
+ WriteMessageWithHandles(h, "have a buffer", &b, 1);
+
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_F(SharedBufferTest, PassSharedBufferFromChild) {
+ const std::string message = "hello";
+ MojoHandle b;
+ RUN_CHILD_ON_PIPE(CreateBufferClient, h)
+ WriteMessage(h, message);
+ ReadMessageWithHandles(h, &b, 1);
+ WriteMessage(h, "quit");
+ END_CHILD()
+
+ ExpectBufferContents(b, 0, message);
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CreateAndPassBuffer, SharedBufferTest, h) {
+ // Receive a pipe handle over the primordial pipe. This will be connected to
+ // another child process.
+ MojoHandle other_child;
+ std::string message = ReadMessageWithHandles(h, &other_child, 1);
+
+ // Create a new shared buffer.
+ MojoHandle b = CreateBuffer(message.size());
+
+ // Send a copy of the buffer to the parent and the other child.
+ MojoHandle dupe = DuplicateBuffer(b, false);
+ WriteMessageWithHandles(h, "", &b, 1);
+ WriteMessageWithHandles(other_child, "", &dupe, 1);
+
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReceiveAndEditBuffer, SharedBufferTest, h) {
+ // Receive a pipe handle over the primordial pipe. This will be connected to
+ // another child process (running CreateAndPassBuffer).
+ MojoHandle other_child;
+ std::string message = ReadMessageWithHandles(h, &other_child, 1);
+
+ // Receive a shared buffer from the other child.
+ MojoHandle b;
+ ReadMessageWithHandles(other_child, &b, 1);
+
+ // Write the message from the parent into the buffer and exit.
+ WriteToBuffer(b, 0, message);
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ("quit", ReadMessage(h));
+}
+
+TEST_F(SharedBufferTest, PassSharedBufferFromChildToChild) {
+ const std::string message = "hello";
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+
+ MojoHandle b;
+ RUN_CHILD_ON_PIPE(CreateAndPassBuffer, h0)
+ RUN_CHILD_ON_PIPE(ReceiveAndEditBuffer, h1)
+ // Send one end of the pipe to each child. The first child will create
+ // and pass a buffer to the second child and back to us. The second child
+ // will write our message into the buffer.
+ WriteMessageWithHandles(h0, message, &p0, 1);
+ WriteMessageWithHandles(h1, message, &p1, 1);
+
+ // Receive the buffer back from the first child.
+ ReadMessageWithHandles(h0, &b, 1);
+
+ WriteMessage(h1, "quit");
+ END_CHILD()
+ WriteMessage(h0, "quit");
+ END_CHILD()
+
+ // The second child should have written this message.
+ ExpectBufferContents(b, 0, message);
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CreateAndPassBufferParent, SharedBufferTest,
+ parent) {
+ RUN_CHILD_ON_PIPE(CreateAndPassBuffer, child)
+ // Read a pipe from the parent and forward it to our child.
+ MojoHandle pipe;
+ std::string message = ReadMessageWithHandles(parent, &pipe, 1);
+
+ WriteMessageWithHandles(child, message, &pipe, 1);
+
+ // Read a buffer handle from the child and pass it back to the parent.
+ MojoHandle buffer;
+ EXPECT_EQ("", ReadMessageWithHandles(child, &buffer, 1));
+ WriteMessageWithHandles(parent, "", &buffer, 1);
+
+ EXPECT_EQ("quit", ReadMessage(parent));
+ WriteMessage(child, "quit");
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReceiveAndEditBufferParent, SharedBufferTest,
+ parent) {
+ RUN_CHILD_ON_PIPE(ReceiveAndEditBuffer, child)
+ // Read a pipe from the parent and forward it to our child.
+ MojoHandle pipe;
+ std::string message = ReadMessageWithHandles(parent, &pipe, 1);
+ WriteMessageWithHandles(child, message, &pipe, 1);
+
+ EXPECT_EQ("quit", ReadMessage(parent));
+ WriteMessage(child, "quit");
+ END_CHILD()
+}
+
+#if defined(OS_ANDROID) || defined(OS_MACOSX)
+// Android multi-process tests do not execute the new process, so this test is
+// flaky there. Passing shared memory handles between cousins is not currently
+// supported on OSX.
+#define MAYBE_PassHandleBetweenCousins DISABLED_PassHandleBetweenCousins
+#else
+#define MAYBE_PassHandleBetweenCousins PassHandleBetweenCousins
+#endif
+TEST_F(SharedBufferTest, MAYBE_PassHandleBetweenCousins) {
+ const std::string message = "hello";
+ MojoHandle p0, p1;
+ CreateMessagePipe(&p0, &p1);
+
+ // Spawn two children who will each spawn their own child. Make sure the
+ // grandchildren (cousins to each other) can pass platform handles.
+ MojoHandle b;
+ RUN_CHILD_ON_PIPE(CreateAndPassBufferParent, child1)
+ RUN_CHILD_ON_PIPE(ReceiveAndEditBufferParent, child2)
+ MojoHandle pipe[2];
+ CreateMessagePipe(&pipe[0], &pipe[1]);
+
+ WriteMessageWithHandles(child1, message, &pipe[0], 1);
+ WriteMessageWithHandles(child2, message, &pipe[1], 1);
+
+ // Receive the buffer back from the first child.
+ ReadMessageWithHandles(child1, &b, 1);
+
+ WriteMessage(child2, "quit");
+ END_CHILD()
+ WriteMessage(child1, "quit");
+ END_CHILD()
+
+ // The second grandchild should have written this message.
+ ExpectBufferContents(b, 0, message);
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(ReadAndMapWriteSharedBuffer,
+ SharedBufferTest, h) {
+ // Receive the shared buffer.
+ MojoHandle b;
+ EXPECT_EQ("hello", ReadMessageWithHandles(h, &b, 1));
+
+  // Read from the buffer.
+ ExpectBufferContents(b, 0, "hello");
+
+ // Extract the shared memory handle and try to map it writable.
+ base::SharedMemoryHandle shm_handle;
+ bool read_only = false;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ PassSharedMemoryHandle(b, &shm_handle, nullptr, &read_only));
+ base::SharedMemory shared_memory(shm_handle, false);
+ EXPECT_TRUE(read_only);
+ EXPECT_FALSE(shared_memory.Map(1234));
+
+ EXPECT_EQ("quit", ReadMessage(h));
+ WriteMessage(h, "ok");
+}
+
+#if defined(OS_ANDROID)
+// Android multi-process tests do not execute the new process, so this test is
+// flaky there.
+#define MAYBE_CreateAndPassReadOnlyBuffer DISABLED_CreateAndPassReadOnlyBuffer
+#else
+#define MAYBE_CreateAndPassReadOnlyBuffer CreateAndPassReadOnlyBuffer
+#endif
+TEST_F(SharedBufferTest, MAYBE_CreateAndPassReadOnlyBuffer) {
+ RUN_CHILD_ON_PIPE(ReadAndMapWriteSharedBuffer, h)
+ // Create a new shared buffer.
+ MojoHandle b = CreateBuffer(1234);
+ WriteToBuffer(b, 0, "hello");
+
+ // Send a read-only copy of the buffer to the child.
+ MojoHandle dupe = DuplicateBuffer(b, true /* read_only */);
+ WriteMessageWithHandles(h, "hello", &dupe, 1);
+
+ WriteMessage(h, "quit");
+ EXPECT_EQ("ok", ReadMessage(h));
+ END_CHILD()
+}
+
+DEFINE_TEST_CLIENT_TEST_WITH_PIPE(CreateAndPassReadOnlyBuffer,
+ SharedBufferTest, h) {
+ // Create a new shared buffer.
+ MojoHandle b = CreateBuffer(1234);
+ WriteToBuffer(b, 0, "hello");
+
+ // Send a read-only copy of the buffer to the parent.
+ MojoHandle dupe = DuplicateBuffer(b, true /* read_only */);
+ WriteMessageWithHandles(h, "", &dupe, 1);
+
+ EXPECT_EQ("quit", ReadMessage(h));
+ WriteMessage(h, "ok");
+}
+
+#if defined(OS_ANDROID)
+// Android multi-process tests do not execute the new process, which makes
+// this test flaky.
+#define MAYBE_CreateAndPassFromChildReadOnlyBuffer \
+ DISABLED_CreateAndPassFromChildReadOnlyBuffer
+#else
+#define MAYBE_CreateAndPassFromChildReadOnlyBuffer \
+ CreateAndPassFromChildReadOnlyBuffer
+#endif
+TEST_F(SharedBufferTest, MAYBE_CreateAndPassFromChildReadOnlyBuffer) {
+ RUN_CHILD_ON_PIPE(CreateAndPassReadOnlyBuffer, h)
+ MojoHandle b;
+ EXPECT_EQ("", ReadMessageWithHandles(h, &b, 1));
+ ExpectBufferContents(b, 0, "hello");
+
+ // Extract the shared memory handle and try to map it writable.
+ base::SharedMemoryHandle shm_handle;
+ bool read_only = false;
+ ASSERT_EQ(MOJO_RESULT_OK,
+ PassSharedMemoryHandle(b, &shm_handle, nullptr, &read_only));
+ base::SharedMemory shared_memory(shm_handle, false);
+ EXPECT_TRUE(read_only);
+ EXPECT_FALSE(shared_memory.Map(1234));
+
+ WriteMessage(h, "quit");
+ EXPECT_EQ("ok", ReadMessage(h));
+ END_CHILD()
+}
+
+#endif // !defined(OS_IOS)
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/signals_unittest.cc b/mojo/edk/system/signals_unittest.cc
new file mode 100644
index 0000000000..e8b0cd1914
--- /dev/null
+++ b/mojo/edk/system/signals_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/buffer.h"
+#include "mojo/public/c/system/data_pipe.h"
+#include "mojo/public/c/system/functions.h"
+#include "mojo/public/c/system/message_pipe.h"
+#include "mojo/public/c/system/types.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+using SignalsTest = test::MojoTestBase;
+
+TEST_F(SignalsTest, QueryInvalidArguments) {
+ MojoHandleSignalsState state = {0, 0};
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoQueryHandleSignalsState(MOJO_HANDLE_INVALID, &state));
+
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoQueryHandleSignalsState(a, nullptr));
+}
+
+TEST_F(SignalsTest, QueryMessagePipeSignals) {
+ MojoHandleSignalsState state = {0, 0};
+
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoQueryHandleSignalsState(a, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ state.satisfiable_signals);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoQueryHandleSignalsState(b, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ state.satisfiable_signals);
+
+ WriteMessage(a, "ok");
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(b, MOJO_HANDLE_SIGNAL_READABLE));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoQueryHandleSignalsState(b, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE,
+ state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ state.satisfiable_signals);
+
+ EXPECT_EQ("ok", ReadMessage(b));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoQueryHandleSignalsState(b, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_WRITABLE, state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_READABLE | MOJO_HANDLE_SIGNAL_WRITABLE |
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED,
+ state.satisfiable_signals);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ EXPECT_EQ(MOJO_RESULT_OK, WaitForSignals(b, MOJO_HANDLE_SIGNAL_PEER_CLOSED));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoQueryHandleSignalsState(b, &state));
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, state.satisfied_signals);
+ EXPECT_EQ(MOJO_HANDLE_SIGNAL_PEER_CLOSED, state.satisfiable_signals);
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/system_impl_export.h b/mojo/edk/system/system_impl_export.h
new file mode 100644
index 0000000000..5bbf0057b0
--- /dev/null
+++ b/mojo/edk/system/system_impl_export.h
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_SYSTEM_IMPL_EXPORT_H_
+#define MOJO_EDK_SYSTEM_SYSTEM_IMPL_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(MOJO_SYSTEM_IMPL_IMPLEMENTATION)
+#define MOJO_SYSTEM_IMPL_EXPORT __declspec(dllexport)
+#else
+#define MOJO_SYSTEM_IMPL_EXPORT __declspec(dllimport)
+#endif // defined(MOJO_SYSTEM_IMPL_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(MOJO_SYSTEM_IMPL_IMPLEMENTATION)
+#define MOJO_SYSTEM_IMPL_EXPORT __attribute__((visibility("default")))
+#else
+#define MOJO_SYSTEM_IMPL_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define MOJO_SYSTEM_IMPL_EXPORT
+#endif
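+
+// Usage sketch (illustrative): annotate symbols that must be visible outside
+// the mojo_system_impl component, e.g.
+//
+//   class MOJO_SYSTEM_IMPL_EXPORT SomeExportedClass { ... };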
+
+#endif // MOJO_EDK_SYSTEM_SYSTEM_IMPL_EXPORT_H_
diff --git a/mojo/edk/system/test_utils.cc b/mojo/edk/system/test_utils.cc
new file mode 100644
index 0000000000..4a39cf73da
--- /dev/null
+++ b/mojo/edk/system/test_utils.cc
@@ -0,0 +1,76 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/test_utils.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h" // For |Sleep()|.
+#include "build/build_config.h"
+
+namespace mojo {
+namespace edk {
+namespace test {
+
+MojoDeadline DeadlineFromMilliseconds(unsigned milliseconds) {
+ return static_cast<MojoDeadline>(milliseconds) * 1000;
+}
+
+MojoDeadline EpsilonDeadline() {
+// Originally, our epsilon timeout was 10 ms, which was mostly fine but flaky on
+// some Windows bots. I don't recall ever seeing flakes on other bots. At 30 ms
+// tests seem reliable on Windows bots, but not at 25 ms. We'd like this timeout
+// to be as small as possible (see the description in the .h file).
+//
+// Currently, |tiny_timeout()| is usually 100 ms (possibly scaled under ASAN,
+// etc.). Based on this, the epsilon is usually 30 ms on Windows and Android,
+// and 20 ms elsewhere.
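+// For example, with |tiny_timeout()| at 100 ms, (TinyDeadline() * 3) / 10 is
+// 30 ms and (TinyDeadline() * 2) / 10 is 20 ms.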
+#if defined(OS_WIN) || defined(OS_ANDROID)
+ return (TinyDeadline() * 3) / 10;
+#else
+ return (TinyDeadline() * 2) / 10;
+#endif
+}
+
+MojoDeadline TinyDeadline() {
+ return static_cast<MojoDeadline>(
+ TestTimeouts::tiny_timeout().InMicroseconds());
+}
+
+MojoDeadline ActionDeadline() {
+ return static_cast<MojoDeadline>(
+ TestTimeouts::action_timeout().InMicroseconds());
+}
+
+void Sleep(MojoDeadline deadline) {
+ CHECK_LE(deadline,
+ static_cast<MojoDeadline>(std::numeric_limits<int64_t>::max()));
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMicroseconds(static_cast<int64_t>(deadline)));
+}
+
+Stopwatch::Stopwatch() {
+}
+
+Stopwatch::~Stopwatch() {
+}
+
+void Stopwatch::Start() {
+ start_time_ = base::TimeTicks::Now();
+}
+
+MojoDeadline Stopwatch::Elapsed() {
+ int64_t result = (base::TimeTicks::Now() - start_time_).InMicroseconds();
+ // |DCHECK_GE|, not |CHECK_GE|, since this may be performance-important.
+ DCHECK_GE(result, 0);
+ return static_cast<MojoDeadline>(result);
+}
+
+} // namespace test
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/test_utils.h b/mojo/edk/system/test_utils.h
new file mode 100644
index 0000000000..1c90dc1717
--- /dev/null
+++ b/mojo/edk/system/test_utils.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_TEST_UTILS_H_
+#define MOJO_EDK_SYSTEM_TEST_UTILS_H_
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "mojo/public/c/system/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace test {
+
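+// Converts |milliseconds| to a |MojoDeadline| (which is in microseconds).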
+MojoDeadline DeadlineFromMilliseconds(unsigned milliseconds);
+
+// A timeout smaller than |TestTimeouts::tiny_timeout()|, as a |MojoDeadline|.
+// Warning: This may lead to flakiness, but this is unavoidable if, e.g., you're
+// trying to ensure that functions with timeouts are reasonably accurate. We
+// want this to be as small as possible without causing too much flakiness.
+MojoDeadline EpsilonDeadline();
+
+// |TestTimeouts::tiny_timeout()|, as a |MojoDeadline|. (Expect this to be on
+// the order of 100 ms.)
+MojoDeadline TinyDeadline();
+
+// |TestTimeouts::action_timeout()|, as a |MojoDeadline|. (Expect this to be on
+// the order of 10 s.)
+MojoDeadline ActionDeadline();
+
+// Sleeps for at least the specified duration.
+void Sleep(MojoDeadline deadline);
+
+// Stopwatch -------------------------------------------------------------------
+
+// A simple "stopwatch" for measuring time elapsed from a given starting point.
+class Stopwatch {
+ public:
+ Stopwatch();
+ ~Stopwatch();
+
+ void Start();
+ // Returns the amount of time elapsed since the last call to |Start()| (in
+ // microseconds).
+ MojoDeadline Elapsed();
+
+ private:
+ base::TimeTicks start_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(Stopwatch);
+};
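+
+// A minimal usage sketch (illustrative only):
+//
+//   Stopwatch stopwatch;
+//   stopwatch.Start();
+//   Sleep(DeadlineFromMilliseconds(10));
+//   MojoDeadline elapsed_us = stopwatch.Elapsed();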
+
+} // namespace test
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_TEST_UTILS_H_
diff --git a/mojo/edk/system/watch.cc b/mojo/edk/system/watch.cc
new file mode 100644
index 0000000000..cf08ac37ee
--- /dev/null
+++ b/mojo/edk/system/watch.cc
@@ -0,0 +1,83 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/watch.h"
+
+#include "mojo/edk/system/request_context.h"
+#include "mojo/edk/system/watcher_dispatcher.h"
+
+namespace mojo {
+namespace edk {
+
+Watch::Watch(const scoped_refptr<WatcherDispatcher>& watcher,
+ const scoped_refptr<Dispatcher>& dispatcher,
+ uintptr_t context,
+ MojoHandleSignals signals)
+ : watcher_(watcher),
+ dispatcher_(dispatcher),
+ context_(context),
+ signals_(signals) {}
+
+bool Watch::NotifyState(const HandleSignalsState& state,
+ bool allowed_to_call_callback) {
+ AssertWatcherLockAcquired();
+
+ // NOTE: This method must NEVER call into |dispatcher_| directly, because it
+ // may be called while |dispatcher_| holds a lock.
+
+ MojoResult rv = MOJO_RESULT_SHOULD_WAIT;
+ RequestContext* const request_context = RequestContext::current();
+ if (state.satisfies(signals_)) {
+ rv = MOJO_RESULT_OK;
+ if (allowed_to_call_callback && rv != last_known_result_) {
+ request_context->AddWatchNotifyFinalizer(this, MOJO_RESULT_OK, state);
+ }
+ } else if (!state.can_satisfy(signals_)) {
+ rv = MOJO_RESULT_FAILED_PRECONDITION;
+ if (allowed_to_call_callback && rv != last_known_result_) {
+ request_context->AddWatchNotifyFinalizer(
+ this, MOJO_RESULT_FAILED_PRECONDITION, state);
+ }
+ }
+
+ last_known_signals_state_ =
+ *static_cast<const MojoHandleSignalsState*>(&state);
+ last_known_result_ = rv;
+ return ready();
+}
+
+void Watch::Cancel() {
+ RequestContext::current()->AddWatchCancelFinalizer(this);
+}
+
+void Watch::InvokeCallback(MojoResult result,
+ const HandleSignalsState& state,
+ MojoWatcherNotificationFlags flags) {
+ // We hold the lock through invocation to ensure that only one notification
+ // callback runs for this context at any given time.
+ base::AutoLock lock(notification_lock_);
+ if (result == MOJO_RESULT_CANCELLED) {
+ // Make sure cancellation is the last notification we dispatch.
+ DCHECK(!is_cancelled_);
+ is_cancelled_ = true;
+ } else if (is_cancelled_) {
+ return;
+ }
+
+ // NOTE: This will acquire |watcher_|'s internal lock. It's safe because a
+ // thread can only enter InvokeCallback() from within a RequestContext
+ // destructor where no dispatcher locks are held.
+ watcher_->InvokeWatchCallback(context_, result, state, flags);
+}
+
+Watch::~Watch() {}
+
+#if DCHECK_IS_ON()
+void Watch::AssertWatcherLockAcquired() const {
+ watcher_->lock_.AssertAcquired();
+}
+#endif
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/watch.h b/mojo/edk/system/watch.h
new file mode 100644
index 0000000000..f277de9917
--- /dev/null
+++ b/mojo/edk/system/watch.h
@@ -0,0 +1,124 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_WATCH_H_
+#define MOJO_EDK_SYSTEM_WATCH_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/system/atomic_flag.h"
+#include "mojo/edk/system/handle_signals_state.h"
+
+namespace mojo {
+namespace edk {
+
+class Dispatcher;
+class WatcherDispatcher;
+
+// Encapsulates the state associated with a single watch context within a
+// watcher.
+//
+// Every Watch has its own cancellation state, and is captured by RequestContext
+// notification finalizers to avoid redundant context resolution during
+// finalizer execution.
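+//
+// A rough lifecycle sketch as driven by WatcherDispatcher (illustrative only;
+// see watcher_dispatcher.cc for the authoritative usage):
+//
+//   scoped_refptr<Watch> watch =
+//       new Watch(watcher, dispatcher, context, signals);
+//   watch->NotifyState(state, true /* allowed_to_call_callback */);
+//   ...
+//   watch->Cancel();  // Always the last notification for this watch.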
+class Watch : public base::RefCountedThreadSafe<Watch> {
+ public:
+ // Constructs a Watch which represents a watch within |watcher| associated
+ // with |context|, watching |dispatcher| for |signals|.
+ Watch(const scoped_refptr<WatcherDispatcher>& watcher,
+ const scoped_refptr<Dispatcher>& dispatcher,
+ uintptr_t context,
+ MojoHandleSignals signals);
+
+ // Notifies the Watch of a potential state change.
+ //
+ // If |allowed_to_call_callback| is true, this may add a notification
+ // finalizer to the current RequestContext to invoke the watcher's callback
+ // with this watch's context. See return values below.
+ //
+ // This is called directly by WatcherDispatcher whenever the Watch's observed
+ // dispatcher notifies the WatcherDispatcher of a state change.
+ //
+ // Returns |true| if the Watch entered or remains in a ready state as a result
+ // of the state change. If |allowed_to_call_callback| was true in this case,
+ // the Watch will have also attached a notification finalizer to the current
+ // RequestContext.
+ //
+  // Returns |false| if the Watch is not in a ready state as a result of the
+  // state change.
+ bool NotifyState(const HandleSignalsState& state,
+ bool allowed_to_call_callback);
+
+ // Notifies the watch of cancellation ASAP. This will always be the last
+ // notification sent for the watch.
+ void Cancel();
+
+ // Finalizer method for RequestContexts. This method is invoked once for every
+ // notification finalizer added to a RequestContext by this object. This calls
+ // down into the WatcherDispatcher to do the actual notification call.
+ void InvokeCallback(MojoResult result,
+ const HandleSignalsState& state,
+ MojoWatcherNotificationFlags flags);
+
+ const scoped_refptr<Dispatcher>& dispatcher() const { return dispatcher_; }
+ uintptr_t context() const { return context_; }
+
+ MojoResult last_known_result() const {
+ AssertWatcherLockAcquired();
+ return last_known_result_;
+ }
+
+ MojoHandleSignalsState last_known_signals_state() const {
+ AssertWatcherLockAcquired();
+ return last_known_signals_state_;
+ }
+
+ bool ready() const {
+ AssertWatcherLockAcquired();
+ return last_known_result_ == MOJO_RESULT_OK ||
+ last_known_result_ == MOJO_RESULT_FAILED_PRECONDITION;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<Watch>;
+
+ ~Watch();
+
+#if DCHECK_IS_ON()
+ void AssertWatcherLockAcquired() const;
+#else
+ void AssertWatcherLockAcquired() const {}
+#endif
+
+ const scoped_refptr<WatcherDispatcher> watcher_;
+ const scoped_refptr<Dispatcher> dispatcher_;
+ const uintptr_t context_;
+ const MojoHandleSignals signals_;
+
+ // The result code with which this Watch would notify if currently armed,
+ // based on the last known signaling state of |dispatcher_|. Guarded by the
+ // owning WatcherDispatcher's lock.
+ MojoResult last_known_result_ = MOJO_RESULT_UNKNOWN;
+
+ // The last known signaling state of |dispatcher_|. Guarded by the owning
+ // WatcherDispatcher's lock.
+ MojoHandleSignalsState last_known_signals_state_ = {0, 0};
+
+ // Guards |is_cancelled_| below and mutually excludes individual watch
+ // notification executions for this same watch context.
+ //
+ // Note that this should only be acquired from a RequestContext finalizer to
+ // ensure that no other internal locks are already held.
+ base::Lock notification_lock_;
+
+ // Guarded by |notification_lock_|.
+ bool is_cancelled_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(Watch);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_WATCH_H_
diff --git a/mojo/edk/system/watcher_dispatcher.cc b/mojo/edk/system/watcher_dispatcher.cc
new file mode 100644
index 0000000000..409dd2a922
--- /dev/null
+++ b/mojo/edk/system/watcher_dispatcher.cc
@@ -0,0 +1,232 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/watcher_dispatcher.h"
+
+#include <algorithm>
+#include <limits>
+#include <map>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "mojo/edk/system/watch.h"
+
+namespace mojo {
+namespace edk {
+
+WatcherDispatcher::WatcherDispatcher(MojoWatcherCallback callback)
+ : callback_(callback) {}
+
+void WatcherDispatcher::NotifyHandleState(Dispatcher* dispatcher,
+ const HandleSignalsState& state) {
+ base::AutoLock lock(lock_);
+ auto it = watched_handles_.find(dispatcher);
+ if (it == watched_handles_.end())
+ return;
+
+  // Maybe fire a notification to the watch associated with this dispatcher,
+  // provided we're armed and it cares about the new state.
+ if (it->second->NotifyState(state, armed_)) {
+ ready_watches_.insert(it->second.get());
+
+ // If we were armed and got here, we notified the watch. Disarm.
+ armed_ = false;
+ } else {
+ ready_watches_.erase(it->second.get());
+ }
+}
+
+void WatcherDispatcher::NotifyHandleClosed(Dispatcher* dispatcher) {
+ scoped_refptr<Watch> watch;
+ {
+ base::AutoLock lock(lock_);
+ auto it = watched_handles_.find(dispatcher);
+ if (it == watched_handles_.end())
+ return;
+
+ watch = std::move(it->second);
+
+ // Wipe out all state associated with the closed dispatcher.
+ watches_.erase(watch->context());
+ ready_watches_.erase(watch.get());
+ watched_handles_.erase(it);
+ }
+
+ // NOTE: It's important that this is called outside of |lock_| since it
+ // acquires internal Watch locks.
+ watch->Cancel();
+}
+
+void WatcherDispatcher::InvokeWatchCallback(
+ uintptr_t context,
+ MojoResult result,
+ const HandleSignalsState& state,
+ MojoWatcherNotificationFlags flags) {
+ {
+ // We avoid holding the lock during dispatch. It's OK for notification
+ // callbacks to close this watcher, and it's OK for notifications to race
+ // with closure, if for example the watcher is closed from another thread
+ // between this test and the invocation of |callback_| below.
+ //
+ // Because cancellation synchronously blocks all future notifications, and
+ // because notifications themselves are mutually exclusive for any given
+ // context, we still guarantee that a single MOJO_RESULT_CANCELLED result
+ // is the last notification received for any given context.
+ //
+ // This guarantee is sufficient to make safe, synchronized, per-context
+ // state management possible in user code.
+ base::AutoLock lock(lock_);
+ if (closed_ && result != MOJO_RESULT_CANCELLED)
+ return;
+ }
+
+ callback_(context, result, static_cast<MojoHandleSignalsState>(state), flags);
+}
+
+Dispatcher::Type WatcherDispatcher::GetType() const {
+ return Type::WATCHER;
+}
+
+MojoResult WatcherDispatcher::Close() {
+ // We swap out all the watched handle information onto the stack so we can
+ // call into their dispatchers without our own lock held.
+ std::map<uintptr_t, scoped_refptr<Watch>> watches;
+ {
+ base::AutoLock lock(lock_);
+ DCHECK(!closed_);
+ closed_ = true;
+ std::swap(watches, watches_);
+ watched_handles_.clear();
+ }
+
+ // Remove all refs from our watched dispatchers and fire cancellations.
+ for (auto& entry : watches) {
+ entry.second->dispatcher()->RemoveWatcherRef(this, entry.first);
+ entry.second->Cancel();
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult WatcherDispatcher::WatchDispatcher(
+ scoped_refptr<Dispatcher> dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context) {
+ // NOTE: Because it's critical to avoid acquiring any other dispatcher locks
+  // while |lock_| is held, we defer adding ourselves to the dispatcher until
+ // after we've updated all our own relevant state and released |lock_|.
+ {
+ base::AutoLock lock(lock_);
+ if (watches_.count(context) || watched_handles_.count(dispatcher.get()))
+ return MOJO_RESULT_ALREADY_EXISTS;
+
+ scoped_refptr<Watch> watch = new Watch(this, dispatcher, context, signals);
+ watches_.insert({context, watch});
+ auto result =
+ watched_handles_.insert(std::make_pair(dispatcher.get(), watch));
+ DCHECK(result.second);
+ }
+
+ MojoResult rv = dispatcher->AddWatcherRef(this, context);
+ if (rv != MOJO_RESULT_OK) {
+ // Oops. This was not a valid handle to watch. Undo the above work and
+ // fail gracefully.
+ base::AutoLock lock(lock_);
+ watches_.erase(context);
+ watched_handles_.erase(dispatcher.get());
+ return rv;
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult WatcherDispatcher::CancelWatch(uintptr_t context) {
+ // We may remove the last stored ref to the Watch below, so we retain
+ // a reference on the stack.
+ scoped_refptr<Watch> watch;
+ {
+ base::AutoLock lock(lock_);
+ auto it = watches_.find(context);
+ if (it == watches_.end())
+ return MOJO_RESULT_NOT_FOUND;
+ watch = it->second;
+ watches_.erase(it);
+ }
+
+ // Mark the watch as cancelled so no further notifications get through.
+ watch->Cancel();
+
+ // We remove the watcher ref for this context before updating any more
+  // internal watcher state, ensuring that we don't receive further
+ // notifications for this context.
+ watch->dispatcher()->RemoveWatcherRef(this, context);
+
+ {
+ base::AutoLock lock(lock_);
+ auto handle_it = watched_handles_.find(watch->dispatcher().get());
+ DCHECK(handle_it != watched_handles_.end());
+ ready_watches_.erase(handle_it->second.get());
+ watched_handles_.erase(handle_it);
+ }
+
+ return MOJO_RESULT_OK;
+}
+
+MojoResult WatcherDispatcher::Arm(
+ uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states) {
+ base::AutoLock lock(lock_);
+ if (num_ready_contexts &&
+ (!ready_contexts || !ready_results || !ready_signals_states)) {
+ return MOJO_RESULT_INVALID_ARGUMENT;
+ }
+
+ if (watched_handles_.empty())
+ return MOJO_RESULT_NOT_FOUND;
+
+ if (ready_watches_.empty()) {
+ // Fast path: No watches are ready to notify, so we're done.
+ armed_ = true;
+ return MOJO_RESULT_OK;
+ }
+
+ if (num_ready_contexts) {
+ DCHECK_LE(ready_watches_.size(), std::numeric_limits<uint32_t>::max());
+ *num_ready_contexts = std::min(
+ *num_ready_contexts, static_cast<uint32_t>(ready_watches_.size()));
+
+ WatchSet::const_iterator next_ready_iter = ready_watches_.begin();
+ if (last_watch_to_block_arming_) {
+ // Find the next watch to notify in simple round-robin order on the
+      // |ready_watches_| set, wrapping around to the beginning if necessary.
+ next_ready_iter = ready_watches_.find(last_watch_to_block_arming_);
+ if (next_ready_iter != ready_watches_.end())
+ ++next_ready_iter;
+ if (next_ready_iter == ready_watches_.end())
+ next_ready_iter = ready_watches_.begin();
+ }
+
+ for (size_t i = 0; i < *num_ready_contexts; ++i) {
+ const Watch* const watch = *next_ready_iter;
+ ready_contexts[i] = watch->context();
+ ready_results[i] = watch->last_known_result();
+ ready_signals_states[i] = watch->last_known_signals_state();
+
+ // Iterate and wrap around.
+ last_watch_to_block_arming_ = watch;
+ ++next_ready_iter;
+ if (next_ready_iter == ready_watches_.end())
+ next_ready_iter = ready_watches_.begin();
+ }
+ }
+
+ return MOJO_RESULT_FAILED_PRECONDITION;
+}
+
+WatcherDispatcher::~WatcherDispatcher() {}
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/watcher_dispatcher.h b/mojo/edk/system/watcher_dispatcher.h
new file mode 100644
index 0000000000..605a3150cc
--- /dev/null
+++ b/mojo/edk/system/watcher_dispatcher.h
@@ -0,0 +1,101 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_WATCHER_DISPATCHER_H_
+#define MOJO_EDK_SYSTEM_WATCHER_DISPATCHER_H_
+
+#include <map>
+#include <set>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "mojo/edk/system/dispatcher.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/system_impl_export.h"
+#include "mojo/public/c/system/watcher.h"
+
+namespace mojo {
+namespace edk {
+
+class Watch;
+
+// The dispatcher type which backs watcher handles.
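+//
+// A rough sketch of the corresponding public C API flow, where
+// |OnNotification| stands in for some MojoWatcherCallback (see
+// watcher_unittest.cc for authoritative usage):
+//
+//   MojoHandle w;
+//   MojoCreateWatcher(&OnNotification, &w);
+//   MojoWatch(w, handle, MOJO_HANDLE_SIGNAL_READABLE, context);
+//   MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr);
+//   ...
+//   MojoCancelWatch(w, context);
+//   MojoClose(w);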
+class WatcherDispatcher : public Dispatcher {
+ public:
+ // Constructs a new WatcherDispatcher which invokes |callback| when a
+ // registered watch observes some relevant state change.
+ explicit WatcherDispatcher(MojoWatcherCallback callback);
+
+ // Methods used by watched dispatchers to notify watchers of events.
+ void NotifyHandleState(Dispatcher* dispatcher,
+ const HandleSignalsState& state);
+ void NotifyHandleClosed(Dispatcher* dispatcher);
+
+ // Method used by RequestContext (indirectly, via Watch) to complete
+ // notification operations from a safe stack frame to avoid reentrancy.
+ void InvokeWatchCallback(uintptr_t context,
+ MojoResult result,
+ const HandleSignalsState& state,
+ MojoWatcherNotificationFlags flags);
+
+ // Dispatcher:
+ Type GetType() const override;
+ MojoResult Close() override;
+ MojoResult WatchDispatcher(scoped_refptr<Dispatcher> dispatcher,
+ MojoHandleSignals signals,
+ uintptr_t context) override;
+ MojoResult CancelWatch(uintptr_t context) override;
+ MojoResult Arm(uint32_t* num_ready_contexts,
+ uintptr_t* ready_contexts,
+ MojoResult* ready_results,
+ MojoHandleSignalsState* ready_signals_states) override;
+
+ private:
+ friend class Watch;
+
+ using WatchSet = std::set<const Watch*>;
+
+ ~WatcherDispatcher() override;
+
+ const MojoWatcherCallback callback_;
+
+ // Guards access to the fields below.
+ //
+ // NOTE: This may be acquired while holding another dispatcher's lock, as
+ // watched dispatchers call into WatcherDispatcher methods which lock this
+ // when issuing state change notifications. WatcherDispatcher must therefore
+ // take caution to NEVER acquire other dispatcher locks while this is held.
+ base::Lock lock_;
+
+ bool armed_ = false;
+ bool closed_ = false;
+
+ // A mapping from context to Watch.
+ std::map<uintptr_t, scoped_refptr<Watch>> watches_;
+
+ // A mapping from watched dispatcher to Watch.
+ std::map<Dispatcher*, scoped_refptr<Watch>> watched_handles_;
+
+ // The set of all Watch instances which are currently ready to signal. This is
+ // used for efficient arming behavior, as it allows for O(1) discovery of
+ // whether or not arming can succeed and quick determination of who's
+ // responsible if it can't.
+ WatchSet ready_watches_;
+
+ // Tracks the last Watch whose state was returned by Arm(). This is used to
+ // ensure consistent round-robin behavior in the event that multiple Watches
+ // remain ready over the span of several Arm() attempts.
+ //
+ // NOTE: This pointer is only used to index |ready_watches_| and may point to
+ // an invalid object. It must therefore never be dereferenced.
+ const Watch* last_watch_to_block_arming_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(WatcherDispatcher);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_WATCHER_DISPATCHER_H_
diff --git a/mojo/edk/system/watcher_set.cc b/mojo/edk/system/watcher_set.cc
new file mode 100644
index 0000000000..0355b58795
--- /dev/null
+++ b/mojo/edk/system/watcher_set.cc
@@ -0,0 +1,82 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mojo/edk/system/watcher_set.h"
+
+#include <utility>
+
+namespace mojo {
+namespace edk {
+
+WatcherSet::WatcherSet(Dispatcher* owner) : owner_(owner) {}
+
+WatcherSet::~WatcherSet() = default;
+
+void WatcherSet::NotifyState(const HandleSignalsState& state) {
+ // Avoid notifying watchers if they have already seen this state.
+ if (last_known_state_.has_value() && state.equals(last_known_state_.value()))
+ return;
+ last_known_state_ = state;
+ for (const auto& entry : watchers_)
+ entry.first->NotifyHandleState(owner_, state);
+}
+
+void WatcherSet::NotifyClosed() {
+ for (const auto& entry : watchers_)
+ entry.first->NotifyHandleClosed(owner_);
+}
+
+MojoResult WatcherSet::Add(const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context,
+ const HandleSignalsState& current_state) {
+ auto it = watchers_.find(watcher.get());
+ if (it == watchers_.end()) {
+ auto result =
+ watchers_.insert(std::make_pair(watcher.get(), Entry{watcher}));
+ it = result.first;
+ }
+
+ if (!it->second.contexts.insert(context).second)
+ return MOJO_RESULT_ALREADY_EXISTS;
+
+ if (last_known_state_.has_value() &&
+ !current_state.equals(last_known_state_.value())) {
+ // This new state may be relevant to everyone, in which case we just
+ // notify everyone.
+ NotifyState(current_state);
+ } else {
+ // Otherwise only notify the newly added Watcher.
+ watcher->NotifyHandleState(owner_, current_state);
+ }
+ return MOJO_RESULT_OK;
+}
+
+MojoResult WatcherSet::Remove(WatcherDispatcher* watcher, uintptr_t context) {
+ auto it = watchers_.find(watcher);
+ if (it == watchers_.end())
+ return MOJO_RESULT_NOT_FOUND;
+
+ ContextSet& contexts = it->second.contexts;
+ auto context_it = contexts.find(context);
+ if (context_it == contexts.end())
+ return MOJO_RESULT_NOT_FOUND;
+
+ contexts.erase(context_it);
+ if (contexts.empty())
+ watchers_.erase(it);
+
+ return MOJO_RESULT_OK;
+}
+
+WatcherSet::Entry::Entry(const scoped_refptr<WatcherDispatcher>& dispatcher)
+ : dispatcher(dispatcher) {}
+
+WatcherSet::Entry::Entry(Entry&& other) = default;
+
+WatcherSet::Entry::~Entry() = default;
+
+WatcherSet::Entry& WatcherSet::Entry::operator=(Entry&& other) = default;
+
+} // namespace edk
+} // namespace mojo
diff --git a/mojo/edk/system/watcher_set.h b/mojo/edk/system/watcher_set.h
new file mode 100644
index 0000000000..2b7ef2c5ac
--- /dev/null
+++ b/mojo/edk/system/watcher_set.h
@@ -0,0 +1,71 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_EDK_SYSTEM_WATCHER_SET_H_
+#define MOJO_EDK_SYSTEM_WATCHER_SET_H_
+
+#include <map>
+#include <set>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/optional.h"
+#include "mojo/edk/system/handle_signals_state.h"
+#include "mojo/edk/system/watcher_dispatcher.h"
+
+namespace mojo {
+namespace edk {
+
+// A WatcherSet maintains a set of references to WatcherDispatchers to be
+// notified when a handle changes state.
+//
+// Dispatchers which may be watched by a watcher should own a WatcherSet and
+// notify it of all relevant state changes.
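+//
+// A minimal calling sketch from an owning dispatcher (illustrative only;
+// |current_state| stands for the dispatcher's current HandleSignalsState):
+//
+//   WatcherSet watchers(this);
+//   watchers.Add(watcher, context, current_state);
+//   watchers.NotifyState(current_state);  // On each relevant state change.
+//   watchers.NotifyClosed();              // When the owning handle is closed.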
+class WatcherSet {
+ public:
+ // |owner| is the Dispatcher who owns this WatcherSet.
+ explicit WatcherSet(Dispatcher* owner);
+ ~WatcherSet();
+
+ // Notifies all watchers of the handle's current signals state.
+ void NotifyState(const HandleSignalsState& state);
+
+ // Notifies all watchers that this handle has been closed.
+ void NotifyClosed();
+
+ // Adds a new watcher+context.
+ MojoResult Add(const scoped_refptr<WatcherDispatcher>& watcher,
+ uintptr_t context,
+ const HandleSignalsState& current_state);
+
+ // Removes a watcher+context.
+ MojoResult Remove(WatcherDispatcher* watcher, uintptr_t context);
+
+ private:
+ using ContextSet = std::set<uintptr_t>;
+
+ struct Entry {
+ Entry(const scoped_refptr<WatcherDispatcher>& dispatcher);
+ Entry(Entry&& other);
+ ~Entry();
+
+ Entry& operator=(Entry&& other);
+
+ scoped_refptr<WatcherDispatcher> dispatcher;
+ ContextSet contexts;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Entry);
+ };
+
+ Dispatcher* const owner_;
+ std::map<WatcherDispatcher*, Entry> watchers_;
+ base::Optional<HandleSignalsState> last_known_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(WatcherSet);
+};
+
+} // namespace edk
+} // namespace mojo
+
+#endif // MOJO_EDK_SYSTEM_WATCHER_SET_H_
diff --git a/mojo/edk/system/watcher_unittest.cc b/mojo/edk/system/watcher_unittest.cc
new file mode 100644
index 0000000000..dd396cd905
--- /dev/null
+++ b/mojo/edk/system/watcher_unittest.cc
@@ -0,0 +1,1637 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <set>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "mojo/edk/test/mojo_test_base.h"
+#include "mojo/public/c/system/data_pipe.h"
+#include "mojo/public/c/system/types.h"
+#include "mojo/public/c/system/watcher.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace mojo {
+namespace edk {
+namespace {
+
+using WatcherTest = test::MojoTestBase;
+
+class WatchHelper {
+ public:
+ using ContextCallback =
+ base::Callback<void(MojoResult, MojoHandleSignalsState)>;
+
+ WatchHelper() {}
+ ~WatchHelper() {}
+
+ MojoResult CreateWatcher(MojoHandle* handle) {
+ return MojoCreateWatcher(&Notify, handle);
+ }
+
+ uintptr_t CreateContext(const ContextCallback& callback) {
+ return CreateContextWithCancel(callback, base::Closure());
+ }
+
+ uintptr_t CreateContextWithCancel(const ContextCallback& callback,
+ const base::Closure& cancel_callback) {
+ auto context = base::MakeUnique<NotificationContext>(callback);
+ NotificationContext* raw_context = context.get();
+ raw_context->SetCancelCallback(base::Bind(
+ [](std::unique_ptr<NotificationContext> context,
+ const base::Closure& cancel_callback) {
+ if (cancel_callback)
+ cancel_callback.Run();
+ },
+ base::Passed(&context), cancel_callback));
+ return reinterpret_cast<uintptr_t>(raw_context);
+ }
+
+ private:
+ class NotificationContext {
+ public:
+ explicit NotificationContext(const ContextCallback& callback)
+ : callback_(callback) {}
+
+ ~NotificationContext() {}
+
+ void SetCancelCallback(const base::Closure& cancel_callback) {
+ cancel_callback_ = cancel_callback;
+ }
+
+ void Notify(MojoResult result, MojoHandleSignalsState state) {
+ if (result == MOJO_RESULT_CANCELLED)
+ cancel_callback_.Run();
+ else
+ callback_.Run(result, state);
+ }
+
+ private:
+ const ContextCallback callback_;
+ base::Closure cancel_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(NotificationContext);
+ };
+
+ static void Notify(uintptr_t context,
+ MojoResult result,
+ MojoHandleSignalsState state,
+ MojoWatcherNotificationFlags flags) {
+ reinterpret_cast<NotificationContext*>(context)->Notify(result, state);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(WatchHelper);
+};
+
+class ThreadedRunner : public base::SimpleThread {
+ public:
+ explicit ThreadedRunner(const base::Closure& callback)
+ : SimpleThread("ThreadedRunner"), callback_(callback) {}
+ ~ThreadedRunner() override {}
+
+ void Run() override { callback_.Run(); }
+
+ private:
+ const base::Closure callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadedRunner);
+};
+
+void ExpectNoNotification(uintptr_t context,
+ MojoResult result,
+ MojoHandleSignalsState state,
+ MojoWatcherNotificationFlags flags) {
+ NOTREACHED();
+}
+
+void ExpectOnlyCancel(uintptr_t context,
+ MojoResult result,
+ MojoHandleSignalsState state,
+ MojoWatcherNotificationFlags flags) {
+ EXPECT_EQ(result, MOJO_RESULT_CANCELLED);
+}
+
+TEST_F(WatcherTest, InvalidArguments) {
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoCreateWatcher(&ExpectNoNotification, nullptr));
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectNoNotification, &w));
+
+ // Try to watch unwatchable handles.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoWatch(w, w, MOJO_HANDLE_SIGNAL_READABLE, 0));
+ MojoHandle buffer_handle = CreateBuffer(42);
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoWatch(w, buffer_handle, MOJO_HANDLE_SIGNAL_READABLE, 0));
+
+ // Try to cancel a watch on an invalid watcher handle.
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT, MojoCancelWatch(buffer_handle, 0));
+
+ // Try to arm an invalid handle.
+ EXPECT_EQ(
+ MOJO_RESULT_INVALID_ARGUMENT,
+ MojoArmWatcher(MOJO_HANDLE_INVALID, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoArmWatcher(buffer_handle, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(buffer_handle));
+
+ // Try to arm with a non-null count but at least one null output buffer.
+ uint32_t num_ready_contexts = 1;
+ uintptr_t ready_context;
+ MojoResult ready_result;
+ MojoHandleSignalsState ready_state;
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoArmWatcher(w, &num_ready_contexts, nullptr, &ready_result,
+ &ready_state));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoArmWatcher(w, &num_ready_contexts, &ready_context, nullptr,
+ &ready_state));
+ EXPECT_EQ(MOJO_RESULT_INVALID_ARGUMENT,
+ MojoArmWatcher(w, &num_ready_contexts, &ready_context,
+ &ready_result, nullptr));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, WatchMessagePipeReadable) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ int num_expected_notifications = 1;
+ const uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, int* expected_count, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_GT(*expected_count, 0);
+ *expected_count -= 1;
+
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ event->Signal();
+ },
+ &event, &num_expected_notifications));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ const char kMessage1[] = "hey hey hey hey";
+ const char kMessage2[] = "i said hey";
+ const char kMessage3[] = "what's goin' on?";
+
+ // Writing to |b| multiple times should notify exactly once.
+ WriteMessage(b, kMessage1);
+ WriteMessage(b, kMessage2);
+ event.Wait();
+
+ // This also shouldn't fire a notification; the watcher is still disarmed.
+ WriteMessage(b, kMessage3);
+
+ // Arming should fail with relevant information.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(readable_a_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+
+ // Flush the three messages from above.
+ EXPECT_EQ(kMessage1, ReadMessage(a));
+ EXPECT_EQ(kMessage2, ReadMessage(a));
+ EXPECT_EQ(kMessage3, ReadMessage(a));
+
+ // Now we can rearm the watcher.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+}
+
+TEST_F(WatcherTest, CloseWatchedMessagePipeHandle) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t readable_a_context = helper.CreateContextWithCancel(
+ WatchHelper::ContextCallback(),
+ base::Bind([](base::WaitableEvent* event) { event->Signal(); }, &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+
+ // Test that closing a watched handle fires an appropriate notification, even
+ // when the watcher is unarmed.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, CloseWatchedMessagePipeHandlePeer) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+ event->Signal();
+ },
+ &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+
+ // Test that closing a watched handle's peer with an armed watcher fires an
+ // appropriate notification.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ event.Wait();
+
+ // And now arming should fail with correct information about |a|'s state.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(readable_a_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals &
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+ EXPECT_FALSE(ready_states[0].satisfiable_signals &
+ MOJO_HANDLE_SIGNAL_READABLE);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+}
+
+TEST_F(WatcherTest, WatchDataPipeConsumerReadable) {
+ constexpr size_t kTestPipeCapacity = 64;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ int num_expected_notifications = 1;
+ const uintptr_t readable_consumer_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, int* expected_count, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_GT(*expected_count, 0);
+ *expected_count -= 1;
+
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ event->Signal();
+ },
+ &event, &num_expected_notifications));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, consumer, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_consumer_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ const char kMessage1[] = "hey hey hey hey";
+ const char kMessage2[] = "i said hey";
+ const char kMessage3[] = "what's goin' on?";
+
+ // Writing to |producer| multiple times should notify exactly once.
+ WriteData(producer, kMessage1);
+ WriteData(producer, kMessage2);
+ event.Wait();
+
+ // This also shouldn't fire a notification; the watcher is still disarmed.
+ WriteData(producer, kMessage3);
+
+ // Arming should fail with relevant information.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(readable_consumer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+
+ // Flush the three messages from above.
+ EXPECT_EQ(kMessage1, ReadData(consumer, sizeof(kMessage1) - 1));
+ EXPECT_EQ(kMessage2, ReadData(consumer, sizeof(kMessage2) - 1));
+ EXPECT_EQ(kMessage3, ReadData(consumer, sizeof(kMessage3) - 1));
+
+ // Now we can rearm the watcher.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+}
+
+TEST_F(WatcherTest, WatchDataPipeConsumerNewDataReadable) {
+ constexpr size_t kTestPipeCapacity = 64;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ int num_new_data_notifications = 0;
+ const uintptr_t new_data_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, int* notification_count, MojoResult result,
+ MojoHandleSignalsState state) {
+ *notification_count += 1;
+
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ event->Signal();
+ },
+ &event, &num_new_data_notifications));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, consumer, MOJO_HANDLE_SIGNAL_NEW_DATA_READABLE,
+ new_data_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ const char kMessage1[] = "hey hey hey hey";
+ const char kMessage2[] = "i said hey";
+ const char kMessage3[] = "what's goin' on?";
+
+ // Writing to |producer| multiple times should notify exactly once.
+ WriteData(producer, kMessage1);
+ WriteData(producer, kMessage2);
+ event.Wait();
+
+ // This also shouldn't fire a notification; the watcher is still disarmed.
+ WriteData(producer, kMessage3);
+
+ // Arming should fail with relevant information.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(new_data_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+
+ // Attempt to read more data than is available. Should fail but clear the
+ // NEW_DATA_READABLE signal.
+ char large_buffer[512];
+ uint32_t large_read_size = 512;
+ EXPECT_EQ(MOJO_RESULT_OUT_OF_RANGE,
+ MojoReadData(consumer, large_buffer, &large_read_size,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE));
+
+ // Attempt to arm again. Should succeed.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Write more data. Should notify.
+ event.Reset();
+ WriteData(producer, kMessage1);
+ event.Wait();
+
+ // Reading some data should clear NEW_DATA_READABLE again so we can rearm.
+ EXPECT_EQ(kMessage1, ReadData(consumer, sizeof(kMessage1) - 1));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ EXPECT_EQ(2, num_new_data_notifications);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+}
+
+TEST_F(WatcherTest, WatchDataPipeProducerWritable) {
+ constexpr size_t kTestPipeCapacity = 8;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ // Half the capacity of the data pipe.
+ const char kTestData[] = "aaaa";
+ static_assert((sizeof(kTestData) - 1) * 2 == kTestPipeCapacity,
+ "Invalid test data for this test.");
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ int num_expected_notifications = 1;
+ const uintptr_t writable_producer_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, int* expected_count, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_GT(*expected_count, 0);
+ *expected_count -= 1;
+
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ event->Signal();
+ },
+ &event, &num_expected_notifications));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, producer, MOJO_HANDLE_SIGNAL_WRITABLE,
+ writable_producer_context));
+
+ // The producer is already writable, so arming should fail with relevant
+ // information.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(writable_producer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ // Write some data, but don't fill the pipe yet. Arming should fail again.
+ WriteData(producer, kTestData);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(writable_producer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ // Write more data, filling the pipe to capacity. Arming should succeed now.
+ WriteData(producer, kTestData);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Now read from the pipe, making the producer writable again. Should notify.
+ EXPECT_EQ(kTestData, ReadData(consumer, sizeof(kTestData) - 1));
+ event.Wait();
+
+ // Arming should fail again.
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(writable_producer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ // Fill the pipe once more and arm the watcher. Should succeed.
+ WriteData(producer, kTestData);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+}
+
+TEST_F(WatcherTest, CloseWatchedDataPipeConsumerHandle) {
+ constexpr size_t kTestPipeCapacity = 8;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t readable_consumer_context = helper.CreateContextWithCancel(
+ WatchHelper::ContextCallback(),
+ base::Bind([](base::WaitableEvent* event) { event->Signal(); }, &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, consumer, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_consumer_context));
+
+ // Closing the consumer should fire a cancellation notification.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, CloseWatchedDataPipeConsumerHandlePeer) {
+ constexpr size_t kTestPipeCapacity = 8;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t readable_consumer_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+ event->Signal();
+ },
+ &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, consumer, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_consumer_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Closing the producer should fire a notification for an unsatisfiable watch.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ event.Wait();
+
+ // Now attempt to rearm and expect appropriate error feedback.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(readable_consumer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, ready_results[0]);
+ EXPECT_FALSE(ready_states[0].satisfiable_signals &
+ MOJO_HANDLE_SIGNAL_READABLE);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+}
+
+TEST_F(WatcherTest, CloseWatchedDataPipeProducerHandle) {
+ constexpr size_t kTestPipeCapacity = 8;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t writable_producer_context = helper.CreateContextWithCancel(
+ WatchHelper::ContextCallback(),
+ base::Bind([](base::WaitableEvent* event) { event->Signal(); }, &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, producer, MOJO_HANDLE_SIGNAL_WRITABLE,
+ writable_producer_context));
+
+  // Closing the producer should fire a cancellation notification.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, CloseWatchedDataPipeProducerHandlePeer) {
+ constexpr size_t kTestPipeCapacity = 8;
+ MojoHandle producer, consumer;
+ CreateDataPipe(&producer, &consumer, kTestPipeCapacity);
+
+ const char kTestMessageFullCapacity[] = "xxxxxxxx";
+ static_assert(sizeof(kTestMessageFullCapacity) - 1 == kTestPipeCapacity,
+ "Invalid test message size for this test.");
+
+ // Make the pipe unwritable initially.
+ WriteData(producer, kTestMessageFullCapacity);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ const uintptr_t writable_producer_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, result);
+ event->Signal();
+ },
+ &event));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, producer, MOJO_HANDLE_SIGNAL_WRITABLE,
+ writable_producer_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Closing the consumer should fire a notification for an unsatisfiable watch,
+ // as the full data pipe can never be read from again and is therefore
+ // permanently full and unwritable.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(consumer));
+ event.Wait();
+
+ // Now attempt to rearm and expect appropriate error feedback.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(writable_producer_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, ready_results[0]);
+ EXPECT_FALSE(ready_states[0].satisfiable_signals &
+ MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(producer));
+}
+
+TEST_F(WatcherTest, ArmWithNoWatches) {
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectNoNotification, &w));
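+ // Arming a watcher that has no registered watches reports MOJO_RESULT_NOT_FOUND.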
+ EXPECT_EQ(MOJO_RESULT_NOT_FOUND,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, WatchDuplicateContext) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectOnlyCancel, &w));
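+ // A context value may be used at most once per watcher, even across different
+ // watched handles.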
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, 0));
+ EXPECT_EQ(MOJO_RESULT_ALREADY_EXISTS,
+ MojoWatch(w, b, MOJO_HANDLE_SIGNAL_READABLE, 0));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+TEST_F(WatcherTest, CancelUnknownWatch) {
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectNoNotification, &w));
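+ // Cancelling a context that was never registered on this watcher reports
+ // MOJO_RESULT_NOT_FOUND.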
+ EXPECT_EQ(MOJO_RESULT_NOT_FOUND, MojoCancelWatch(w, 1234));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, ArmWithWatchAlreadySatisfied) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectOnlyCancel, &w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, a, MOJO_HANDLE_SIGNAL_WRITABLE, 0));
+
+ // |a| is always writable, so we can never arm this watcher.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(0u, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+TEST_F(WatcherTest, ArmWithWatchAlreadyUnsatisfiable) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectOnlyCancel, &w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, 0));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+
+ // |b| is closed and never wrote any messages, so |a| won't be readable again.
+ // MojoArmWatcher() should fail, indicating as much.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = kMaxReadyContexts;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(0u, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals &
+ MOJO_HANDLE_SIGNAL_PEER_CLOSED);
+ EXPECT_FALSE(ready_states[0].satisfiable_signals &
+ MOJO_HANDLE_SIGNAL_READABLE);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+}
+
+TEST_F(WatcherTest, MultipleWatches) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ base::WaitableEvent a_event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ base::WaitableEvent b_event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ WatchHelper helper;
+ int num_a_notifications = 0;
+ int num_b_notifications = 0;
+ auto notify_callback =
+ base::Bind([](base::WaitableEvent* event, int* notification_count,
+ MojoResult result, MojoHandleSignalsState state) {
+ *notification_count += 1;
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ event->Signal();
+ });
+ uintptr_t readable_a_context = helper.CreateContext(
+ base::Bind(notify_callback, &a_event, &num_a_notifications));
+ uintptr_t readable_b_context = helper.CreateContext(
+ base::Bind(notify_callback, &b_event, &num_b_notifications));
+
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ // Add two independent watch contexts to watch for |a| or |b| readability.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, b, MOJO_HANDLE_SIGNAL_READABLE, readable_b_context));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ const char kMessage1[] = "things are happening";
+ const char kMessage2[] = "ok. ok. ok. ok.";
+ const char kMessage3[] = "plz wake up";
+
+ // Writing to |b| should signal |a|'s watch.
+ WriteMessage(b, kMessage1);
+ a_event.Wait();
+ a_event.Reset();
+
+ // Subsequent messages on |b| should not trigger another notification.
+ WriteMessage(b, kMessage2);
+ WriteMessage(b, kMessage3);
+
+ // Messages on |a| also shouldn't trigger |b|'s notification, since the
+ // watcher should be disarmed by now.
+ WriteMessage(a, kMessage1);
+ WriteMessage(a, kMessage2);
+ WriteMessage(a, kMessage3);
+
+ // Arming should fail. Since we only ask for at most one context's
+ // information, that's all we should get back. Which one we get is unspecified.
+ constexpr size_t kMaxReadyContexts = 10;
+ uint32_t num_ready_contexts = 1;
+ uintptr_t ready_contexts[kMaxReadyContexts];
+ MojoResult ready_results[kMaxReadyContexts];
+ MojoHandleSignalsState ready_states[kMaxReadyContexts];
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_TRUE(ready_contexts[0] == readable_a_context ||
+ ready_contexts[0] == readable_b_context);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ // Now try arming again, verifying that both contexts are returned.
+ num_ready_contexts = kMaxReadyContexts;
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(2u, num_ready_contexts);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[1]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+ EXPECT_TRUE(ready_states[1].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+ EXPECT_TRUE((ready_contexts[0] == readable_a_context &&
+ ready_contexts[1] == readable_b_context) ||
+ (ready_contexts[0] == readable_b_context &&
+ ready_contexts[1] == readable_a_context));
+
+ // Flush out the test messages so that we can successfully rearm.
+ EXPECT_EQ(kMessage1, ReadMessage(a));
+ EXPECT_EQ(kMessage2, ReadMessage(a));
+ EXPECT_EQ(kMessage3, ReadMessage(a));
+ EXPECT_EQ(kMessage1, ReadMessage(b));
+ EXPECT_EQ(kMessage2, ReadMessage(b));
+ EXPECT_EQ(kMessage3, ReadMessage(b));
+
+ // Add a watch which is always satisfied, so the watcher can't be armed. The
+ // failed arm attempt should report only this new watch's information.
+ uintptr_t writable_c_context = helper.CreateContext(base::Bind(
+ [](MojoResult result, MojoHandleSignalsState state) { NOTREACHED(); }));
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, c, MOJO_HANDLE_SIGNAL_WRITABLE, writable_c_context));
+ num_ready_contexts = kMaxReadyContexts;
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, ready_contexts,
+ ready_results, ready_states));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(writable_c_context, ready_contexts[0]);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_results[0]);
+ EXPECT_TRUE(ready_states[0].satisfied_signals & MOJO_HANDLE_SIGNAL_WRITABLE);
+
+ // Cancel the new watch and arming should succeed once again.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCancelWatch(w, writable_c_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(WatcherTest, NotifyOtherFromNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hello a";
+ static const char kTestMessageToB[] = "hello b";
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WatchHelper helper;
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](MojoHandle w, MojoHandle a, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ("hello a", ReadMessage(a));
+
+ // Re-arm the watcher and signal |b|.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+ WriteMessage(a, kTestMessageToB);
+ },
+ w, a));
+
+ uintptr_t readable_b_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoHandle w, MojoHandle b,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToB, ReadMessage(b));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+ event->Signal();
+ },
+ &event, w, b));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, b, MOJO_HANDLE_SIGNAL_READABLE, readable_b_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Send a message to |a|. The relevant watch context should be notified, and
+ // should in turn send a message to |b|, waking up the other context. The
+ // second context signals |event|.
+ WriteMessage(b, kTestMessageToA);
+ event.Wait();
+}
+
+TEST_F(WatcherTest, NotifySelfFromNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hello a";
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WatchHelper helper;
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ int expected_notifications = 10;
+ uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](int* expected_count, MojoHandle w, MojoHandle a, MojoHandle b,
+ base::WaitableEvent* event, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ("hello a", ReadMessage(a));
+
+ EXPECT_GT(*expected_count, 0);
+ *expected_count -= 1;
+ if (*expected_count == 0) {
+ event->Signal();
+ return;
+ } else {
+ // Re-arm the watcher and signal |a| again.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+ WriteMessage(b, kTestMessageToA);
+ }
+ },
+ &expected_notifications, w, a, b, &event));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Send a message to |a|. When the watch above is notified, it will rearm and
+ // send another message to |a|. This will happen until
+ // |expected_notifications| reaches 0.
+ WriteMessage(b, kTestMessageToA);
+ event.Wait();
+}
+
+TEST_F(WatcherTest, ImplicitCancelOtherFromNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ static const char kTestMessageToA[] = "hi a";
+ static const char kTestMessageToC[] = "hi c";
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WatchHelper helper;
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ uintptr_t readable_a_context = helper.CreateContextWithCancel(
+ base::Bind([](MojoResult result, MojoHandleSignalsState state) {
+ NOTREACHED();
+ }),
+ base::Bind([](base::WaitableEvent* event) { event->Signal(); }, &event));
+
+ uintptr_t readable_c_context = helper.CreateContext(base::Bind(
+ [](MojoHandle w, MojoHandle a, MojoHandle b, MojoHandle c,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToC, ReadMessage(c));
+
+ // Now rearm the watcher.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Must result in exactly ONE notification on the above context, for
+ // CANCELLED only. Because we cannot dispatch notifications until the
+ // stack unwinds, and because we must never dispatch non-cancellation
+ // notifications for a handle once it's been closed, we must be certain
+ // that cancellation due to closure preemptively invalidates any
+ // pending non-cancellation notifications queued on the current
+ // RequestContext, such as the one resulting from the WriteMessage here.
+ WriteMessage(b, kTestMessageToA);
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ // Rearming should be fine since |a|'s watch should already be
+ // implicitly cancelled (even though the notification will not have
+ // been invoked yet).
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Nothing interesting should happen as a result of this.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ },
+ w, a, b, c));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, c, MOJO_HANDLE_SIGNAL_READABLE, readable_c_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(d, kTestMessageToC);
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(WatcherTest, ExplicitCancelOtherFromNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ static const char kTestMessageToA[] = "hi a";
+ static const char kTestMessageToC[] = "hi c";
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WatchHelper helper;
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](MojoResult result, MojoHandleSignalsState state) { NOTREACHED(); }));
+
+ uintptr_t readable_c_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, uintptr_t readable_a_context, MojoHandle w,
+ MojoHandle a, MojoHandle b, MojoHandle c, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToC, ReadMessage(c));
+
+ // Now rearm the watcher.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Should result in no notifications on the above context, because the
+ // watch will have been cancelled by the time the notification callback
+ // can execute.
+ WriteMessage(b, kTestMessageToA);
+ WriteMessage(b, kTestMessageToA);
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCancelWatch(w, readable_a_context));
+
+ // Rearming should be fine now.
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // Nothing interesting should happen as a result of these.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+
+ event->Signal();
+ },
+ &event, readable_a_context, w, a, b, c));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, c, MOJO_HANDLE_SIGNAL_READABLE, readable_c_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(d, kTestMessageToC);
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(WatcherTest, NestedCancellation) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle c, d;
+ CreateMessagePipe(&c, &d);
+
+ static const char kTestMessageToA[] = "hey a";
+ static const char kTestMessageToC[] = "hey c";
+ static const char kTestMessageToD[] = "hey d";
+
+ // This is a tricky test. It establishes a watch on |b| using one watcher and
+ // watches on |c| and |d| using another watcher.
+ //
+ // A message is written to |d| to wake up |c|'s watch, and the notification
+ // handler for that event does the following:
+ // 1. Writes to |a| to eventually wake up |b|'s watcher.
+ // 2. Rearms |c|'s watcher.
+ // 3. Writes to |d| to eventually wake up |c|'s watcher again.
+ //
+ // Meanwhile, |b|'s watch notification handler cancels |c|'s watch altogether
+ // before writing to |c| to wake up |d|.
+ //
+ // The net result should be that |c|'s context only gets notified once (from
+ // the first write to |d| above) and everyone else gets notified as expected.
+
+ MojoHandle b_watcher;
+ MojoHandle cd_watcher;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&b_watcher));
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&cd_watcher));
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ uintptr_t readable_d_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoHandle d, MojoResult result,
+ MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToD, ReadMessage(d));
+ event->Signal();
+ },
+ &event, d));
+
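+ // Static so the captureless lambda bound below can reference and decrement it.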
+ static int num_expected_c_notifications = 1;
+ uintptr_t readable_c_context = helper.CreateContext(base::Bind(
+ [](MojoHandle cd_watcher, MojoHandle a, MojoHandle c, MojoHandle d,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_GT(num_expected_c_notifications--, 0);
+
+ // Trigger an eventual |readable_b_context| notification.
+ WriteMessage(a, kTestMessageToA);
+
+ EXPECT_EQ(kTestMessageToC, ReadMessage(c));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoArmWatcher(cd_watcher, nullptr, nullptr,
+ nullptr, nullptr));
+
+ // Trigger another eventual |readable_c_context| notification.
+ WriteMessage(d, kTestMessageToC);
+ },
+ cd_watcher, a, c, d));
+
+ uintptr_t readable_b_context = helper.CreateContext(base::Bind(
+ [](MojoHandle cd_watcher, uintptr_t readable_c_context, MojoHandle c,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoCancelWatch(cd_watcher, readable_c_context));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoArmWatcher(cd_watcher, nullptr, nullptr,
+ nullptr, nullptr));
+
+ WriteMessage(c, kTestMessageToD);
+ },
+ cd_watcher, readable_c_context, c));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(b_watcher, b, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_b_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(cd_watcher, c, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_c_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(cd_watcher, d, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_d_context));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(b_watcher, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(cd_watcher, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(d, kTestMessageToC);
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(cd_watcher));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b_watcher));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(c));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(d));
+}
+
+TEST_F(WatcherTest, CancelSelfInNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hey a";
+
+ MojoHandle w;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
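+ // Static so the notification callback below can cancel its own watch by
+ // referencing this context value.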
+ static uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoHandle w, MojoHandle a,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+
+ // There should be no problem cancelling this watch from its own
+ // notification invocation.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCancelWatch(w, readable_a_context));
+ EXPECT_EQ(kTestMessageToA, ReadMessage(a));
+
+ // Arming should fail because there are no longer any registered
+ // watches on the watcher.
+ EXPECT_EQ(MOJO_RESULT_NOT_FOUND,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // And closing |a| should be fine (and should not invoke this
+ // notification with MOJO_RESULT_CANCELLED) for the same reason.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ event->Signal();
+ },
+ &event, w, a));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(b, kTestMessageToA);
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, CloseWatcherInNotificationCallback) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA1[] = "hey a";
+ static const char kTestMessageToA2[] = "hey a again";
+
+ MojoHandle w;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoHandle w, MojoHandle a, MojoHandle b,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToA1, ReadMessage(a));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // There should be no problem closing this watcher from its own
+ // notification callback.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+
+ // And these should not trigger more notifications, because |w| has been
+ // closed already.
+ WriteMessage(b, kTestMessageToA2);
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ event->Signal();
+ },
+ &event, w, a, b));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(b, kTestMessageToA1);
+ event.Wait();
+}
+
+TEST_F(WatcherTest, CloseWatcherAfterImplicitCancel) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hey a";
+
+ MojoHandle w;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ uintptr_t readable_a_context = helper.CreateContext(base::Bind(
+ [](base::WaitableEvent* event, MojoHandle w, MojoHandle a,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToA, ReadMessage(a));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ // This will queue up a notification for |MOJO_RESULT_CANCELLED|...
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+
+ // ...but it should never fire because we close the watcher here.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+
+ event->Signal();
+ },
+ &event, w, a));
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(b, kTestMessageToA);
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+TEST_F(WatcherTest, OtherThreadCancelDuringNotification) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hey a";
+
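+ // The watch on |a| is cancelled from a second thread while its notification
+ // callback is still running; the cancellation callback must not run until the
+ // notification callback has finished.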
+ MojoHandle w;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ base::WaitableEvent wait_for_notification(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ base::WaitableEvent wait_for_cancellation(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
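+ // Set by the notification callback and read by both the cancellation callback
+ // and the helper thread; static so the captureless lambdas can access it.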
+ static bool callback_done = false;
+ uintptr_t readable_a_context = helper.CreateContextWithCancel(
+ base::Bind(
+ [](base::WaitableEvent* wait_for_notification, MojoHandle w,
+ MojoHandle a, MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToA, ReadMessage(a));
+
+ wait_for_notification->Signal();
+
+ // Give the other thread sufficient time to race with the completion
+ // of this callback. There should be no race, since the cancellation
+ // notification must be mutually exclusive with this notification.
+ base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
+
+ callback_done = true;
+ },
+ &wait_for_notification, w, a),
+ base::Bind(
+ [](base::WaitableEvent* wait_for_cancellation) {
+ EXPECT_TRUE(callback_done);
+ wait_for_cancellation->Signal();
+ },
+ &wait_for_cancellation));
+
+ ThreadedRunner runner(base::Bind(
+ [](base::WaitableEvent* wait_for_notification,
+ base::WaitableEvent* wait_for_cancellation, MojoHandle w,
+ uintptr_t readable_a_context) {
+ wait_for_notification->Wait();
+
+ // Cancel the watch while the notification is still running.
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCancelWatch(w, readable_a_context));
+
+ wait_for_cancellation->Wait();
+
+ EXPECT_TRUE(callback_done);
+ },
+ &wait_for_notification, &wait_for_cancellation, w, readable_a_context));
+ runner.Start();
+
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(w, nullptr, nullptr, nullptr, nullptr));
+
+ WriteMessage(b, kTestMessageToA);
+ runner.Join();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+TEST_F(WatcherTest, WatchesCancelEachOtherFromNotifications) {
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ static const char kTestMessageToA[] = "hey a";
+ static const char kTestMessageToB[] = "hey b";
+
+ base::WaitableEvent wait_for_a_to_notify(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ base::WaitableEvent wait_for_b_to_notify(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ base::WaitableEvent wait_for_a_to_cancel(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ base::WaitableEvent wait_for_b_to_cancel(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ MojoHandle a_watcher;
+ MojoHandle b_watcher;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&a_watcher));
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&b_watcher));
+
+ // We set up two watchers, one on |a| and one on |b|. They cancel each other
+ // from within their respective watch notifications. This should be safe,
+ // i.e., it should not deadlock, in spite of the fact that we also guarantee
+ // mutually exclusive notification execution (including cancellations) on any
+ // given watch.
+ bool a_cancelled = false;
+ bool b_cancelled = false;
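+ // Static so |a|'s notification callback below can reference it when cancelling
+ // |b|'s watch.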
+ static uintptr_t readable_b_context;
+ uintptr_t readable_a_context = helper.CreateContextWithCancel(
+ base::Bind(
+ [](base::WaitableEvent* wait_for_a_to_notify,
+ base::WaitableEvent* wait_for_b_to_notify, MojoHandle b_watcher,
+ MojoHandle a, MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToA, ReadMessage(a));
+ wait_for_a_to_notify->Signal();
+ wait_for_b_to_notify->Wait();
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoCancelWatch(b_watcher, readable_b_context));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b_watcher));
+ },
+ &wait_for_a_to_notify, &wait_for_b_to_notify, b_watcher, a),
+ base::Bind(
+ [](base::WaitableEvent* wait_for_a_to_cancel,
+ base::WaitableEvent* wait_for_b_to_cancel, bool* a_cancelled) {
+ *a_cancelled = true;
+ wait_for_a_to_cancel->Signal();
+ wait_for_b_to_cancel->Wait();
+ },
+ &wait_for_a_to_cancel, &wait_for_b_to_cancel, &a_cancelled));
+
+ readable_b_context = helper.CreateContextWithCancel(
+ base::Bind(
+ [](base::WaitableEvent* wait_for_a_to_notify,
+ base::WaitableEvent* wait_for_b_to_notify,
+ uintptr_t readable_a_context, MojoHandle a_watcher, MojoHandle b,
+ MojoResult result, MojoHandleSignalsState state) {
+ EXPECT_EQ(MOJO_RESULT_OK, result);
+ EXPECT_EQ(kTestMessageToB, ReadMessage(b));
+ wait_for_b_to_notify->Signal();
+ wait_for_a_to_notify->Wait();
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoCancelWatch(a_watcher, readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a_watcher));
+ },
+ &wait_for_a_to_notify, &wait_for_b_to_notify, readable_a_context,
+ a_watcher, b),
+ base::Bind(
+ [](base::WaitableEvent* wait_for_a_to_cancel,
+ base::WaitableEvent* wait_for_b_to_cancel, bool* b_cancelled) {
+ *b_cancelled = true;
+ wait_for_b_to_cancel->Signal();
+ wait_for_a_to_cancel->Wait();
+ },
+ &wait_for_a_to_cancel, &wait_for_b_to_cancel, &b_cancelled));
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(a_watcher, a, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_a_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(a_watcher, nullptr, nullptr, nullptr, nullptr));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoWatch(b_watcher, b, MOJO_HANDLE_SIGNAL_READABLE,
+ readable_b_context));
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoArmWatcher(b_watcher, nullptr, nullptr, nullptr, nullptr));
+
+ ThreadedRunner runner(
+ base::Bind([](MojoHandle b) { WriteMessage(b, kTestMessageToA); }, b));
+ runner.Start();
+
+ WriteMessage(a, kTestMessageToB);
+
+ wait_for_a_to_cancel.Wait();
+ wait_for_b_to_cancel.Wait();
+ runner.Join();
+
+ EXPECT_TRUE(a_cancelled);
+ EXPECT_TRUE(b_cancelled);
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+TEST_F(WatcherTest, AlwaysCancel) {
+ // Basic sanity check to ensure that all possible ways to cancel a watch
+ // result in a final MOJO_RESULT_CANCELLED notification.
+
+ MojoHandle a, b;
+ CreateMessagePipe(&a, &b);
+
+ MojoHandle w;
+ WatchHelper helper;
+ EXPECT_EQ(MOJO_RESULT_OK, helper.CreateWatcher(&w));
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ const base::Closure signal_event =
+ base::Bind(&base::WaitableEvent::Signal, base::Unretained(&event));
+
+ // Cancel via |MojoCancelWatch()|.
+ uintptr_t context = helper.CreateContextWithCancel(
+ WatchHelper::ContextCallback(), signal_event);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, context));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCancelWatch(w, context));
+ event.Wait();
+ event.Reset();
+
+ // Cancel by closing the watched handle.
+ context = helper.CreateContextWithCancel(WatchHelper::ContextCallback(),
+ signal_event);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, a, MOJO_HANDLE_SIGNAL_READABLE, context));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(a));
+ event.Wait();
+ event.Reset();
+
+ // Cancel by closing the watcher handle.
+ context = helper.CreateContextWithCancel(WatchHelper::ContextCallback(),
+ signal_event);
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, b, MOJO_HANDLE_SIGNAL_READABLE, context));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+ event.Wait();
+
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(b));
+}
+
+TEST_F(WatcherTest, ArmFailureCirculation) {
+ // Sanity check to ensure that all ready handles will eventually be returned
+ // over a finite number of calls to MojoArmWatcher().
+
+ constexpr size_t kNumTestPipes = 100;
+ constexpr size_t kNumTestHandles = kNumTestPipes * 2;
+ MojoHandle handles[kNumTestHandles];
+
+ // Create a bunch of pipes and make sure they're all readable.
+ for (size_t i = 0; i < kNumTestPipes; ++i) {
+ CreateMessagePipe(&handles[i], &handles[i + kNumTestPipes]);
+ WriteMessage(handles[i], "hey");
+ WriteMessage(handles[i + kNumTestPipes], "hay");
+ WaitForSignals(handles[i], MOJO_HANDLE_SIGNAL_READABLE);
+ WaitForSignals(handles[i + kNumTestPipes], MOJO_HANDLE_SIGNAL_READABLE);
+ }
+
+ // Create a watcher and watch all of them.
+ MojoHandle w;
+ EXPECT_EQ(MOJO_RESULT_OK, MojoCreateWatcher(&ExpectOnlyCancel, &w));
+ for (size_t i = 0; i < kNumTestHandles; ++i) {
+ EXPECT_EQ(MOJO_RESULT_OK,
+ MojoWatch(w, handles[i], MOJO_HANDLE_SIGNAL_READABLE, i));
+ }
+
+ // Keep trying to arm |w| until every watch gets an entry in |ready_contexts|.
+ // If MojoArmWatcher() is well-behaved, this should terminate eventually.
+ std::set<uintptr_t> ready_contexts;
+ while (ready_contexts.size() < kNumTestHandles) {
+ uint32_t num_ready_contexts = 1;
+ uintptr_t ready_context;
+ MojoResult ready_result;
+ MojoHandleSignalsState ready_state;
+ EXPECT_EQ(MOJO_RESULT_FAILED_PRECONDITION,
+ MojoArmWatcher(w, &num_ready_contexts, &ready_context,
+ &ready_result, &ready_state));
+ EXPECT_EQ(1u, num_ready_contexts);
+ EXPECT_EQ(MOJO_RESULT_OK, ready_result);
+ ready_contexts.insert(ready_context);
+ }
+
+ for (size_t i = 0; i < kNumTestHandles; ++i)
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(handles[i]));
+ EXPECT_EQ(MOJO_RESULT_OK, MojoClose(w));
+}
+
+} // namespace
+} // namespace edk
+} // namespace mojo