author     Qijiang Fan <fqj@google.com>  2020-02-17 13:18:55 +0900
committer  Qijiang Fan <fqj@google.com>  2020-02-17 13:22:07 +0900
commit     64062ae5f9882e20a61911965d8aa21296eae1a8
tree       d2d447dae547637f86ad5c82a559c34a112302c6
parent     7f5561196db242c11ffdca483b1a39fd7f1765dd
download   libchrome-64062ae5f9882e20a61911965d8aa21296eae1a8.tar.gz
sync non-crypto with Chromium OS

In preparation for upstream swap.

Bug: chromium:915258
Test: Presubmit
Change-Id: Ib1ef7ed6975def5309c79c61a3012bc28a1a168c
-rw-r--r--  BUILD.gn | 758
-rw-r--r--  base/metrics/field_trial_params.cc | 149
-rw-r--r--  base/metrics/field_trial_params.h | 258
-rw-r--r--  base/task_scheduler/delayed_task_manager.cc | 95
-rw-r--r--  base/task_scheduler/delayed_task_manager.h | 80
-rw-r--r--  base/task_scheduler/priority_queue.cc | 110
-rw-r--r--  base/task_scheduler/priority_queue.h | 104
-rw-r--r--  base/task_scheduler/scheduler_single_thread_task_runner_manager.cc | 656
-rw-r--r--  base/task_scheduler/scheduler_single_thread_task_runner_manager.h | 155
-rw-r--r--  base/task_scheduler/scheduler_worker.cc | 362
-rw-r--r--  base/task_scheduler/scheduler_worker.h | 262
-rw-r--r--  base/task_scheduler/scheduler_worker_params.h | 24
-rw-r--r--  base/task_scheduler/scheduler_worker_pool.h | 79
-rw-r--r--  base/task_scheduler/scheduler_worker_pool_impl.cc | 1028
-rw-r--r--  base/task_scheduler/scheduler_worker_pool_impl.h | 357
-rw-r--r--  base/task_scheduler/scheduler_worker_pool_params.cc | 23
-rw-r--r--  base/task_scheduler/scheduler_worker_pool_params.h | 44
-rw-r--r--  base/task_scheduler/scheduler_worker_stack.cc | 57
-rw-r--r--  base/task_scheduler/scheduler_worker_stack.h | 67
-rw-r--r--  base/task_scheduler/task_scheduler.cc | 86
-rw-r--r--  base/task_scheduler/task_scheduler_impl.cc | 281
-rw-r--r--  base/task_scheduler/task_scheduler_impl.h | 136
-rw-r--r--  base/task_scheduler/task_tracker.cc | 837
-rw-r--r--  base/task_scheduler/task_tracker.h | 365
-rw-r--r--  base/task_scheduler/task_tracker_posix.cc | 33
-rw-r--r--  base/task_scheduler/task_tracker_posix.h | 74
-rw-r--r--  base/test/fuzzed_data_provider.cc | 98
-rw-r--r--  base/test/fuzzed_data_provider.h | 80
-rw-r--r--  components/json_schema/json_schema_constants.cc | 39
-rw-r--r--  components/json_schema/json_schema_constants.h | 43
-rw-r--r--  components/json_schema/json_schema_validator.cc | 862
-rw-r--r--  components/json_schema/json_schema_validator.h | 251
-rw-r--r--  components/policy/core/common/policy_load_status.cc | 43
-rw-r--r--  components/policy/core/common/policy_load_status.h | 76
-rw-r--r--  components/policy/core/common/policy_types.h | 58
-rw-r--r--  components/policy/core/common/registry_dict.cc | 356
-rw-r--r--  components/policy/core/common/registry_dict.h | 103
-rw-r--r--  components/policy/core/common/registry_dict_unittest.cc | 304
-rw-r--r--  components/policy/core/common/schema.cc | 1206
-rw-r--r--  components/policy/core/common/schema.h | 211
-rw-r--r--  components/policy/core/common/schema_internal.h | 143
-rw-r--r--  components/policy/policy_export.h | 34
-rw-r--r--  dbus/mock_object_manager.cc | 17
-rw-r--r--  dbus/mock_object_manager.h | 41
-rw-r--r--  third_party/re2/src/re2/re2.h | 6
45 files changed, 10273 insertions, 178 deletions
diff --git a/BUILD.gn b/BUILD.gn
new file mode 100644
index 0000000000..b956970b24
--- /dev/null
+++ b/BUILD.gn
@@ -0,0 +1,758 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//common-mk/mojom_bindings_generator.gni")
+import("//common-mk/pkg_config.gni")
+
+group("all") {
+ deps = [
+ ":libchrome",
+ ":libchrome-test",
+ ]
+ if (use.mojo) {
+ deps += [ ":libmojo" ]
+ }
+}
+
+config("libchrome_config") {
+ # TODO(hidehiko): Consolidate with build_config.h.
+ defines = [
+ "OS_CHROMEOS",
+ "USE_NSS_CERTS",
+ "USE_SYSTEM_LIBEVENT",
+ "NO_TCMALLOC",
+ "MOJO_CORE_LEGACY_PROTOCOL",
+ ]
+ if (use.asan) {
+ defines += [ "LEAK_SANITIZER" ]
+ }
+
+ include_dirs = [ "." ]
+ cflags = [
+ "-Wno-deprecated-register",
+ "-Wno-narrowing",
+ "-Wno-unused-local-typedefs",
+ "-Xclang-only=-Wno-char-subscripts",
+ ]
+ # Address sanitizer + coverage builds do not support -z,defs.
+ if (!(use.asan || use.coverage)) {
+ ldflags = [ "-Wl,-z,defs" ]
+ }
+}
+
+config("base_core_config") {
+ cflags = [
+ # Suppressing warning in base/strings/stringprintf.cc.
+ "-Wno-format-nonliteral",
+ # This is for _exit(1) in base/debug/debugger_posix.cc.
+ "-Wno-unreachable-code",
+ ]
+}
+
+libbase_sublibs = [
+ {
+ name = "base-core"
+ output_name = name + "-${libbase_ver}"
+ libs = [ "pthread", "rt", "modp_b64" ]
+ pkg_deps = [ "glib-2.0", "libevent" ]
+ configs = [ ":base_core_config" ]
+ sources = [
+ "base/allocator/allocator_extension.cc",
+ "base/allocator/allocator_shim.cc",
+ "base/allocator/allocator_shim_default_dispatch_to_glibc.cc",
+ "base/at_exit.cc",
+ "base/barrier_closure.cc",
+ "base/base64.cc",
+ "base/base64url.cc",
+ "base/base_paths.cc",
+ "base/base_paths_posix.cc",
+ "base/base_switches.cc",
+ "base/big_endian.cc",
+ "base/build_time.cc",
+ "base/callback_helpers.cc",
+ "base/callback_internal.cc",
+ "base/command_line.cc",
+ "base/cpu.cc",
+ "base/debug/activity_tracker.cc",
+ "base/debug/alias.cc",
+ "base/debug/crash_logging.cc",
+ "base/debug/debugger.cc",
+ "base/debug/debugger_posix.cc",
+ "base/debug/dump_without_crashing.cc",
+ "base/debug/stack_trace.cc",
+ "base/debug/stack_trace_posix.cc",
+ "base/debug/task_annotator.cc",
+ "base/environment.cc",
+ "base/feature_list.cc",
+ "base/files/file.cc",
+ "base/files/file_descriptor_watcher_posix.cc",
+ "base/files/file_enumerator.cc",
+ "base/files/file_enumerator_posix.cc",
+ "base/files/file_path.cc",
+ "base/files/file_path_constants.cc",
+ "base/files/file_path_watcher.cc",
+ "base/files/file_path_watcher_linux.cc",
+ "base/files/file_posix.cc",
+ "base/files/file_tracing.cc",
+ "base/files/file_util.cc",
+ "base/files/file_util_linux.cc",
+ "base/files/file_util_posix.cc",
+ "base/files/important_file_writer.cc",
+ "base/files/memory_mapped_file.cc",
+ "base/files/memory_mapped_file_posix.cc",
+ "base/files/scoped_file.cc",
+ "base/files/scoped_temp_dir.cc",
+ "base/guid.cc",
+ "base/hash.cc",
+ "base/json/json_file_value_serializer.cc",
+ "base/json/json_parser.cc",
+ "base/json/json_reader.cc",
+ "base/json/json_string_value_serializer.cc",
+ "base/json/json_value_converter.cc",
+ "base/json/json_writer.cc",
+ "base/json/string_escape.cc",
+ "base/lazy_instance_helpers.cc",
+ "base/location.cc",
+ "base/logging.cc",
+ "base/md5.cc",
+ "base/memory/aligned_memory.cc",
+ "base/memory/platform_shared_memory_region.cc",
+ "base/memory/platform_shared_memory_region_posix.cc",
+ "base/memory/read_only_shared_memory_region.cc",
+ "base/memory/ref_counted.cc",
+ "base/memory/ref_counted_memory.cc",
+ "base/memory/shared_memory_handle.cc",
+ "base/memory/shared_memory_handle_posix.cc",
+ "base/memory/shared_memory_helper.cc",
+ "base/memory/shared_memory_posix.cc",
+ "base/memory/shared_memory_mapping.cc",
+ "base/memory/unsafe_shared_memory_region.cc",
+ "base/memory/weak_ptr.cc",
+ "base/memory/writable_shared_memory_region.cc",
+ "base/message_loop/incoming_task_queue.cc",
+ "base/message_loop/message_loop.cc",
+ "base/message_loop/message_loop_current.cc",
+ "base/message_loop/message_loop_task_runner.cc",
+ "base/message_loop/message_pump.cc",
+ "base/message_loop/message_pump_default.cc",
+ "base/message_loop/message_pump_glib.cc",
+ "base/message_loop/message_pump_libevent.cc",
+ "base/message_loop/watchable_io_message_pump_posix.cc",
+ "base/metrics/bucket_ranges.cc",
+ "base/metrics/dummy_histogram.cc",
+ "base/metrics/field_trial.cc",
+ "base/metrics/field_trial_param_associator.cc",
+ "base/metrics/field_trial_params.cc",
+ "base/metrics/histogram.cc",
+ "base/metrics/histogram_base.cc",
+ "base/metrics/histogram_functions.cc",
+ "base/metrics/histogram_samples.cc",
+ "base/metrics/histogram_snapshot_manager.cc",
+ "base/metrics/metrics_hashes.cc",
+ "base/metrics/persistent_histogram_allocator.cc",
+ "base/metrics/persistent_memory_allocator.cc",
+ "base/metrics/persistent_sample_map.cc",
+ "base/metrics/sample_map.cc",
+ "base/metrics/sample_vector.cc",
+ "base/metrics/sparse_histogram.cc",
+ "base/metrics/statistics_recorder.cc",
+ "base/path_service.cc",
+ "base/observer_list_threadsafe.cc",
+ "base/pending_task.cc",
+ "base/pickle.cc",
+ "base/posix/file_descriptor_shuffle.cc",
+ "base/posix/global_descriptors.cc",
+ "base/posix/safe_strerror.cc",
+ "base/posix/unix_domain_socket.cc",
+ "base/process/internal_linux.cc",
+ "base/process/kill.cc",
+ "base/process/kill_posix.cc",
+ "base/process/launch.cc",
+ "base/process/launch_posix.cc",
+ "base/process/memory.cc",
+ "base/process/memory_linux.cc",
+ "base/process/process_handle.cc",
+ "base/process/process_handle_linux.cc",
+ "base/process/process_handle_posix.cc",
+ "base/process/process_info_linux.cc",
+ "base/process/process_iterator.cc",
+ "base/process/process_iterator_linux.cc",
+ "base/process/process_metrics.cc",
+ "base/process/process_metrics_linux.cc",
+ "base/process/process_metrics_posix.cc",
+ "base/process/process_posix.cc",
+ "base/rand_util.cc",
+ "base/rand_util_posix.cc",
+ "base/run_loop.cc",
+ "base/sequence_checker_impl.cc",
+ "base/sequenced_task_runner.cc",
+ "base/sequence_token.cc",
+ "base/sha1.cc",
+ "base/strings/nullable_string16.cc",
+ "base/strings/pattern.cc",
+ "base/strings/safe_sprintf.cc",
+ "base/strings/strcat.cc",
+ "base/strings/string16.cc",
+ "base/strings/string_number_conversions.cc",
+ "base/strings/string_piece.cc",
+ "base/strings/string_split.cc",
+ "base/strings/string_util.cc",
+ "base/strings/string_util_constants.cc",
+ "base/strings/stringprintf.cc",
+ "base/strings/sys_string_conversions_posix.cc",
+ "base/strings/utf_string_conversion_utils.cc",
+ "base/strings/utf_string_conversions.cc",
+ "base/sync_socket_posix.cc",
+ "base/synchronization/atomic_flag.cc",
+ "base/synchronization/condition_variable_posix.cc",
+ "base/synchronization/lock.cc",
+ "base/synchronization/lock_impl_posix.cc",
+ "base/synchronization/waitable_event_posix.cc",
+ "base/synchronization/waitable_event_watcher_posix.cc",
+ "base/sys_info.cc",
+ "base/sys_info_chromeos.cc",
+ "base/sys_info_linux.cc",
+ "base/sys_info_posix.cc",
+ "base/task_runner.cc",
+ "base/task/cancelable_task_tracker.cc",
+ "base/task_scheduler/delayed_task_manager.cc",
+ "base/task_scheduler/environment_config.cc",
+ "base/task_scheduler/post_task.cc",
+ "base/task_scheduler/priority_queue.cc",
+ "base/task_scheduler/scheduler_lock_impl.cc",
+ "base/task_scheduler/scheduler_single_thread_task_runner_manager.cc",
+ "base/task_scheduler/scheduler_worker.cc",
+ "base/task_scheduler/scheduler_worker_pool.cc",
+ "base/task_scheduler/scheduler_worker_pool_impl.cc",
+ "base/task_scheduler/scheduler_worker_pool_params.cc",
+ "base/task_scheduler/scheduler_worker_stack.cc",
+ "base/task_scheduler/scoped_set_task_priority_for_current_thread.cc",
+ "base/task_scheduler/sequence.cc",
+ "base/task_scheduler/sequence_sort_key.cc",
+ "base/task_scheduler/service_thread.cc",
+ "base/task_scheduler/task.cc",
+ "base/task_scheduler/task_scheduler.cc",
+ "base/task_scheduler/task_scheduler_impl.cc",
+ "base/task_scheduler/task_traits.cc",
+ "base/task_scheduler/task_tracker.cc",
+ "base/task_scheduler/task_tracker_posix.cc",
+ "base/third_party/dynamic_annotations/dynamic_annotations.c",
+ "base/third_party/icu/icu_utf.cc",
+ "base/third_party/nspr/prtime.cc",
+ "base/threading/platform_thread_internal_posix.cc",
+ "base/threading/platform_thread_linux.cc",
+ "base/threading/platform_thread_posix.cc",
+ "base/threading/post_task_and_reply_impl.cc",
+ "base/threading/scoped_blocking_call.cc",
+ "base/threading/sequence_local_storage_map.cc",
+ "base/threading/sequence_local_storage_slot.cc",
+ "base/threading/sequenced_task_runner_handle.cc",
+ "base/threading/simple_thread.cc",
+ "base/threading/thread.cc",
+ "base/threading/thread_checker_impl.cc",
+ "base/threading/thread_collision_warner.cc",
+ "base/threading/thread_id_name_manager.cc",
+ "base/threading/thread_local_storage.cc",
+ "base/threading/thread_local_storage_posix.cc",
+ "base/threading/thread_restrictions.cc",
+ "base/threading/thread_task_runner_handle.cc",
+ "base/timer/elapsed_timer.cc",
+ "base/timer/timer.cc",
+ "base/time/clock.cc",
+ "base/time/default_clock.cc",
+ "base/time/default_tick_clock.cc",
+ "base/time/tick_clock.cc",
+ "base/time/time.cc",
+ "base/time/time_conversion_posix.cc",
+ "base/time/time_exploded_posix.cc",
+ "base/time/time_now_posix.cc",
+ "base/time/time_override.cc",
+ "base/unguessable_token.cc",
+ "base/value_iterators.cc",
+ "base/values.cc",
+ "base/version.cc",
+ "base/vlog.cc",
+ ]
+ },
+
+ {
+ name = "base-dl"
+ output_name = name + "-${libbase_ver}"
+ deps = [ ":base-core" ]
+ libs = [ "dl" ]
+ sources = [
+ "base/native_library_posix.cc",
+ "base/native_library.cc",
+ "base/scoped_native_library.cc",
+ ]
+ },
+
+ {
+ name = "base-policy"
+ output_name = name + "-${libbase_ver}"
+ deps = [ ":base-core" ]
+ libs = [ "re2" ]
+ sources = [
+ "components/json_schema/json_schema_constants.cc",
+ "components/json_schema/json_schema_validator.cc",
+ "components/policy/core/common/policy_load_status.cc",
+ "components/policy/core/common/registry_dict.cc",
+ "components/policy/core/common/schema.cc",
+ ]
+ },
+
+ {
+ name = "base-base_test_support"
+ output_name = name + "-${libbase_ver}"
+ testonly = true
+ sources = [
+ "base/test/simple_test_clock.cc",
+ "base/test/simple_test_tick_clock.cc",
+ "base/test/test_file_util.cc",
+ "base/test/test_file_util_linux.cc",
+ "base/test/test_mock_time_task_runner.cc",
+ "base/test/test_pending_task.cc",
+ "base/test/test_simple_task_runner.cc",
+ "base/test/test_switches.cc",
+ "base/test/test_timeouts.cc",
+ ]
+ }
+]
+
+if (use.crypto) {
+ libbase_sublibs += [
+ {
+ name = "base-crypto"
+ output_name = name + "-${libbase_ver}"
+ deps = [ ":base-core", ":base-dl" ]
+ pkg_deps = [ "nss", "openssl" ]
+ sources = [
+ "crypto/hmac.cc",
+ "crypto/hmac_nss.cc",
+ "crypto/nss_key_util.cc",
+ "crypto/nss_util.cc",
+ "crypto/openssl_util.cc",
+ "crypto/p224.cc",
+ "crypto/p224_spake.cc",
+ "crypto/random.cc",
+ "crypto/rsa_private_key.cc",
+ "crypto/rsa_private_key_nss.cc",
+ "crypto/scoped_test_nss_db.cc",
+ "crypto/secure_hash.cc",
+ "crypto/secure_util.cc",
+ "crypto/sha2.cc",
+ "crypto/signature_creator_nss.cc",
+ "crypto/signature_verifier_nss.cc",
+ "crypto/symmetric_key_nss.cc",
+ "crypto/third_party/nss/rsawrapr.c",
+ "crypto/third_party/nss/sha512.cc",
+ # Added to libchrome only (not upstream) to support OpenSSL 1.1 API
+ "crypto/libcrypto-compat.c",
+ ]
+ }
+ ]
+}
+
+if (use.dbus) {
+ libbase_sublibs += [
+ {
+ name = "base-dbus"
+ output_name = name + "-${libbase_ver}"
+ deps = [ ":base-core" ]
+ pkg_deps = [ "dbus-1" ]
+ if (use.fuzzer) {
+ pkg_deps += [ "protobuf" ]
+ } else {
+ pkg_deps += [ "protobuf-lite" ]
+ }
+ sources = [
+ "dbus/bus.cc",
+ "dbus/dbus_statistics.cc",
+ "dbus/exported_object.cc",
+ "dbus/message.cc",
+ "dbus/object_manager.cc",
+ "dbus/object_path.cc",
+ "dbus/object_proxy.cc",
+ "dbus/property.cc",
+ "dbus/scoped_dbus_error.cc",
+ "dbus/string_util.cc",
+ "dbus/util.cc",
+ "dbus/values_util.cc",
+ ]
+ },
+
+ {
+ name = "base-dbus_test_support"
+ output_name = name + "-${libbase_ver}"
+ testonly = true
+ pkg_deps = [ "dbus-1" ]
+ if (use.fuzzer) {
+ pkg_deps += [ "protobuf" ]
+ } else {
+ pkg_deps += [ "protobuf-lite" ]
+ }
+ sources = [
+ "dbus/mock_bus.cc",
+ "dbus/mock_exported_object.cc",
+ "dbus/mock_object_manager.cc",
+ "dbus/mock_object_proxy.cc",
+ ]
+ },
+ ]
+}
+
+if (use.timers) {
+ libbase_sublibs += [
+ {
+ name = "base-timers"
+ output_name = name + "-${libbase_ver}"
+ deps = [ ":base-core" ]
+ sources = [ "components/timers/alarm_timer_chromeos.cc" ]
+ },
+
+ {
+ name = "base-timer_test_support"
+ output_name = name + "-${libbase_ver}"
+ testonly = true
+ sources = [ "base/timer/mock_timer.cc" ]
+ },
+ ]
+}
+
+# Generate static/shared libraries.
+foreach(attr, libbase_sublibs) {
+ if (defined(attr.pkg_deps)) {
+ # If the sublib depends on external packages, introduce a "-pkg-config" config.
+ pkg_config(attr.name + "-pkg-config") {
+ pkg_deps = attr.pkg_deps
+ }
+ }
+
+ if (defined(attr.testonly) && attr.testonly) {
+ buildtype = "static_library"
+ } else {
+ buildtype = "shared_library"
+ }
+ target(buildtype, attr.name) {
+ output_name = attr.output_name
+ sources = attr.sources
+ if (defined(attr.deps)) {
+ deps = attr.deps
+ }
+
+ if (defined(attr.libs)) {
+ libs = attr.libs
+ }
+
+ if (defined(attr.pkg_deps)) {
+ configs += [ ":" + attr.name + "-pkg-config" ]
+ }
+ configs += [
+ ":libchrome_config",
+ "//common-mk:visibility_default"
+ ]
+ if (buildtype == "static_library") {
+ configs -= [ "//common-mk:use_thin_archive" ]
+ configs += [ "//common-mk:nouse_thin_archive" ]
+ }
+ if (defined(attr.configs)) {
+ configs += attr.configs
+ }
+ }
+}
+
+action("base") {
+ deps = []
+ foreach(attr, libbase_sublibs) {
+ if (!defined(attr.testonly) || !attr.testonly) {
+ deps += [ ":" + attr.name ]
+ }
+ }
+
+ script = "//common-mk/write_args.py"
+ outputs = [ "${root_out_dir}/lib/lib${target_name}-${libbase_ver}.so" ]
+ args = [ "--output" ] + outputs + [ "--" ] + [
+ "GROUP", "(", "AS_NEEDED", "(",
+ ]
+ foreach(attr, libbase_sublibs) {
+ if (!defined(attr.testonly) || !attr.testonly) {
+ args += [ "-l" + attr.output_name ]
+ }
+ }
+ args += [ ")", ")" ]
+}
+
+libchrome_exported_cflags = [
+ "-I/usr/include/base-${libbase_ver}",
+ "-Wno-unused-local-typedefs",
+ "-DBASE_VER=${libbase_ver}",
+]
+
+if (use.asan) {
+ libchrome_exported_cflags += [ "-DLEAK_SANITIZER" ]
+}
+
+generate_pkg_config("libchrome") {
+ deps = [ ":base" ]
+ output_name = "libchrome-${libbase_ver}"
+ description = "chrome base library"
+ version = "${libbase_ver}"
+ requires_private = []
+ foreach(attr, libbase_sublibs) {
+ if ((!defined(attr.testonly) || !attr.testonly)
+ && defined(attr.pkg_deps)) {
+ requires_private += attr.pkg_deps
+ }
+ }
+ libs = [ "-lbase-${libbase_ver}" ]
+ libs_private = []
+ foreach(attr, libbase_sublibs) {
+ if (!defined(attr.testonly) || !attr.testonly) {
+ libs_private += [ "-l" + attr.output_name ]
+ if (defined(attr.libs)) {
+ foreach(lib, attr.libs) {
+ libs_private += [ "-l" + lib ]
+ }
+ }
+ }
+ }
+ cflags = libchrome_exported_cflags
+}
+
+action("base-test") {
+ deps = []
+ foreach(attr, libbase_sublibs) {
+ if (defined(attr.testonly) && attr.testonly) {
+ deps += [ ":" + attr.name ]
+ }
+ }
+
+ script = "//common-mk/write_args.py"
+ outputs = [ "${root_out_dir}/lib${target_name}-${libbase_ver}.a" ]
+ args = [ "--output" ] + outputs + [ "--" ] + [
+ "GROUP", "(", "AS_NEEDED", "(",
+ ]
+ foreach(attr, libbase_sublibs) {
+ if (defined(attr.testonly) && attr.testonly) {
+ args += [ "-l" + attr.output_name ]
+ }
+ }
+ args += [ ")", ")" ]
+}
+
+generate_pkg_config("libchrome-test") {
+ deps = [ ":base-test" ]
+ output_name = "libchrome-test-${libbase_ver}"
+ description = "chrome base test library"
+ version = "${libbase_ver}"
+ requires_private = []
+ foreach(attr, libbase_sublibs) {
+ if (defined(attr.testonly) && attr.testonly && defined(attr.pkg_deps)) {
+ requires_private += attr.pkg_deps
+ }
+ }
+ libs = [ "-lbase-test-${libbase_ver}" ]
+ libs_private = []
+ foreach(attr, libbase_sublibs) {
+ if (defined(attr.testonly) && attr.testonly) {
+ libs_private += [ "-l" + attr.output_name ]
+ if (defined(attr.libs)) {
+ foreach(lib, attr.libs) { libs_private += [ "-l" + lib ] }
+ }
+ }
+ }
+ cflags = libchrome_exported_cflags
+}
+
+if (use.mojo) {
+ generate_mojom_bindings_gen("mojom_bindings_gen") {
+ mojom_bindings_generator = "mojo/public/tools/bindings/mojom_bindings_generator.py"
+ sources = [
+ "ipc/ipc.mojom",
+ "mojo/public/mojom/base/big_buffer.mojom",
+ "mojo/public/mojom/base/big_string.mojom",
+ "mojo/public/mojom/base/file.mojom",
+ "mojo/public/mojom/base/file_error.mojom",
+ "mojo/public/mojom/base/file_info.mojom",
+ "mojo/public/mojom/base/file_path.mojom",
+ "mojo/public/mojom/base/process_id.mojom",
+ "mojo/public/mojom/base/read_only_buffer.mojom",
+ "mojo/public/mojom/base/ref_counted_memory.mojom",
+ "mojo/public/mojom/base/shared_memory.mojom",
+ "mojo/public/mojom/base/string16.mojom",
+ "mojo/public/mojom/base/text_direction.mojom",
+ "mojo/public/mojom/base/thread_priority.mojom",
+ "mojo/public/mojom/base/time.mojom",
+ "mojo/public/mojom/base/unguessable_token.mojom",
+ "mojo/public/mojom/base/values.mojom",
+ "ui/gfx/geometry/mojo/geometry.mojom",
+ "ui/gfx/range/mojo/range.mojom",
+ ]
+ }
+
+ generate_mojom_bindings_gen("mojom_bindings_native_gen") {
+ mojom_bindings_generator = "mojo/public/tools/bindings/mojom_bindings_generator.py"
+ sources = [
+ "mojo/public/interfaces/bindings/interface_control_messages.mojom",
+ "mojo/public/interfaces/bindings/native_struct.mojom",
+ "mojo/public/interfaces/bindings/pipe_control_messages.mojom",
+ ]
+ disallow_native_types = true
+ disallow_interfaces = true
+ }
+
+ # We should probably consider building libmojo as part of libchrome.
+ # crbug.com/924035.
+ static_library("mojo") {
+ output_name = "mojo-${libbase_ver}"
+ deps = [ ":base-core", ":base-crypto", ":mojom_bindings_gen", ":mojom_bindings_native_gen" ]
+ # TODO(hidehiko): Consolidate with build_config.h.
+ configs -= [
+ "//common-mk:use_thin_archive",
+ "//common-mk:pie",
+ ]
+ configs += [
+ ":libchrome_config",
+ "//common-mk:visibility_default",
+ "//common-mk:nouse_thin_archive",
+ "//common-mk:pic",
+ ]
+ sources = [
+ "ipc/ipc_message.cc",
+ "ipc/ipc_message_attachment.cc",
+ "ipc/ipc_message_attachment_set.cc",
+ "ipc/ipc_message_utils.cc",
+ "ipc/ipc_mojo_handle_attachment.cc",
+ "ipc/ipc_mojo_message_helper.cc",
+ "ipc/ipc_mojo_param_traits.cc",
+ "ipc/ipc_platform_file_attachment_posix.cc",
+ "mojo/core/invitation_dispatcher.cc",
+ "mojo/core/connection_params.cc",
+ "mojo/core/channel_posix.cc",
+ "mojo/core/platform_handle_dispatcher.cc",
+ "mojo/core/mojo_core.cc",
+ "mojo/core/channel.cc",
+ "mojo/core/entrypoints.cc",
+ "mojo/core/broker_posix.cc",
+ "mojo/core/data_pipe_producer_dispatcher.cc",
+ "mojo/core/broker_host.cc",
+ "mojo/core/watcher_dispatcher.cc",
+ "mojo/core/request_context.cc",
+ "mojo/core/configuration.cc",
+ "mojo/core/node_channel.cc",
+ "mojo/core/shared_buffer_dispatcher.cc",
+ "mojo/core/watch.cc",
+ "mojo/core/embedder/scoped_ipc_support.cc",
+ "mojo/core/embedder/embedder.cc",
+ "mojo/core/message_pipe_dispatcher.cc",
+ "mojo/core/handle_table.cc",
+ "mojo/core/core.cc",
+ "mojo/core/ports/event.cc",
+ "mojo/core/ports/name.cc",
+ "mojo/core/ports/port.cc",
+ "mojo/core/ports/message_queue.cc",
+ "mojo/core/ports/port_locker.cc",
+ "mojo/core/ports/node.cc",
+ "mojo/core/ports/user_message.cc",
+ "mojo/core/ports/port_ref.cc",
+ "mojo/core/data_pipe_consumer_dispatcher.cc",
+ "mojo/core/scoped_process_handle.cc",
+ "mojo/core/node_controller.cc",
+ "mojo/core/watcher_set.cc",
+ "mojo/core/dispatcher.cc",
+ "mojo/core/platform_handle_in_transit.cc",
+ "mojo/core/platform_handle_utils.cc",
+ "mojo/core/platform_shared_memory_mapping.cc",
+ "mojo/core/user_message_impl.cc",
+ "mojo/core/data_pipe_control_message.cc",
+ "mojo/public/c/system/thunks.cc",
+ "mojo/public/cpp/base/big_string_mojom_traits.cc",
+ "mojo/public/cpp/base/shared_memory_mojom_traits.cc",
+ "mojo/public/cpp/base/unguessable_token_mojom_traits.cc",
+ "mojo/public/cpp/base/ref_counted_memory_mojom_traits.cc",
+ "mojo/public/cpp/base/big_buffer.cc",
+ "mojo/public/cpp/base/read_only_buffer_mojom_traits.cc",
+ "mojo/public/cpp/base/string16_mojom_traits.cc",
+ "mojo/public/cpp/base/big_buffer_mojom_traits.cc",
+ "mojo/public/cpp/base/file_info_mojom_traits.cc",
+ "mojo/public/cpp/base/time_mojom_traits.cc",
+ "mojo/public/cpp/base/thread_priority_mojom_traits.cc",
+ "mojo/public/cpp/base/text_direction_mojom_traits.cc",
+ "mojo/public/cpp/base/values_mojom_traits.cc",
+ "mojo/public/cpp/base/file_path_mojom_traits.cc",
+ "mojo/public/cpp/base/process_id_mojom_traits.cc",
+ "mojo/public/cpp/base/file_mojom_traits.cc",
+ "mojo/public/cpp/bindings/lib/serialization_context.cc",
+ "mojo/public/cpp/bindings/lib/associated_interface_ptr_state.cc",
+ "mojo/public/cpp/bindings/lib/array_internal.cc",
+ "mojo/public/cpp/bindings/lib/interface_ptr_state.cc",
+ "mojo/public/cpp/bindings/lib/buffer.cc",
+ "mojo/public/cpp/bindings/lib/sync_call_restrictions.cc",
+ "mojo/public/cpp/bindings/lib/multiplex_router.cc",
+ "mojo/public/cpp/bindings/lib/sync_handle_watcher.cc",
+ "mojo/public/cpp/bindings/lib/validation_errors.cc",
+ "mojo/public/cpp/bindings/lib/scoped_interface_endpoint_handle.cc",
+ "mojo/public/cpp/bindings/lib/message_dumper.cc",
+ "mojo/public/cpp/bindings/lib/sync_event_watcher.cc",
+ "mojo/public/cpp/bindings/lib/task_runner_helper.cc",
+ "mojo/public/cpp/bindings/lib/sequence_local_sync_event_watcher.cc",
+ "mojo/public/cpp/bindings/lib/validation_context.cc",
+ "mojo/public/cpp/bindings/lib/associated_group.cc",
+ "mojo/public/cpp/bindings/lib/native_struct_serialization.cc",
+ "mojo/public/cpp/bindings/lib/validation_util.cc",
+ "mojo/public/cpp/bindings/lib/pipe_control_message_handler.cc",
+ "mojo/public/cpp/bindings/lib/filter_chain.cc",
+ "mojo/public/cpp/bindings/lib/message.cc",
+ "mojo/public/cpp/bindings/lib/unserialized_message_context.cc",
+ "mojo/public/cpp/bindings/lib/pipe_control_message_proxy.cc",
+ "mojo/public/cpp/bindings/lib/control_message_proxy.cc",
+ "mojo/public/cpp/bindings/lib/control_message_handler.cc",
+ "mojo/public/cpp/bindings/lib/connector.cc",
+ "mojo/public/cpp/bindings/lib/interface_endpoint_client.cc",
+ "mojo/public/cpp/bindings/lib/sync_handle_registry.cc",
+ "mojo/public/cpp/bindings/lib/associated_binding.cc",
+ "mojo/public/cpp/bindings/lib/message_header_validator.cc",
+ "mojo/public/cpp/bindings/lib/associated_group_controller.cc",
+ "mojo/public/cpp/bindings/lib/associated_interface_ptr.cc",
+ "mojo/public/cpp/bindings/lib/fixed_buffer.cc",
+ "mojo/public/cpp/bindings/lib/message_internal.cc",
+ "mojo/public/cpp/bindings/lib/binding_state.cc",
+ "mojo/public/cpp/platform/platform_channel_endpoint.cc",
+ "mojo/public/cpp/platform/platform_handle.cc",
+ "mojo/public/cpp/platform/named_platform_channel.cc",
+ "mojo/public/cpp/platform/platform_channel.cc",
+ "mojo/public/cpp/platform/platform_channel_server_endpoint.cc",
+ "mojo/public/cpp/platform/socket_utils_posix.cc",
+ "mojo/public/cpp/platform/named_platform_channel_posix.cc",
+ "mojo/public/cpp/system/buffer.cc",
+ "mojo/public/cpp/system/platform_handle.cc",
+ "mojo/public/cpp/system/wait.cc",
+ "mojo/public/cpp/system/wait_set.cc",
+ "mojo/public/cpp/system/data_pipe_utils.cc",
+ "mojo/public/cpp/system/scope_to_message_pipe.cc",
+ "mojo/public/cpp/system/handle_signal_tracker.cc",
+ "mojo/public/cpp/system/trap.cc",
+ "mojo/public/cpp/system/isolated_connection.cc",
+ "mojo/public/cpp/system/string_data_pipe_producer.cc",
+ "mojo/public/cpp/system/data_pipe_drainer.cc",
+ "mojo/public/cpp/system/invitation.cc",
+ "mojo/public/cpp/system/simple_watcher.cc",
+ "mojo/public/cpp/system/file_data_pipe_producer.cc",
+ "mojo/public/cpp/system/message_pipe.cc",
+ ] + get_target_outputs(":mojom_bindings_gen") + get_target_outputs(":mojom_bindings_native_gen")
+ }
+
+ generate_pkg_config("libmojo") {
+ deps = [ ":mojo" ]
+ output_name = "libmojo-${libbase_ver}"
+ description = "Chrome Mojo IPC library"
+ version = "${libbase_ver}"
+ libs = [ "-lmojo-${libbase_ver}" ]
+ cflags = [
+ "-I/usr/lib/base-${libbase_ver}",
+ "-Wno-cast-qual",
+ "-Wno-cast-align",
+ ]
+ }
+}
diff --git a/base/metrics/field_trial_params.cc b/base/metrics/field_trial_params.cc
new file mode 100644
index 0000000000..7195f4a813
--- /dev/null
+++ b/base/metrics/field_trial_params.cc
@@ -0,0 +1,149 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_params.h"
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace base {
+
+bool AssociateFieldTrialParams(
+ const std::string& trial_name,
+ const std::string& group_name,
+ const std::map<std::string, std::string>& params) {
+ return base::FieldTrialParamAssociator::GetInstance()
+ ->AssociateFieldTrialParams(trial_name, group_name, params);
+}
+
+bool GetFieldTrialParams(const std::string& trial_name,
+ std::map<std::string, std::string>* params) {
+ return base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
+ trial_name, params);
+}
+
+bool GetFieldTrialParamsByFeature(const base::Feature& feature,
+ std::map<std::string, std::string>* params) {
+ if (!base::FeatureList::IsEnabled(feature))
+ return false;
+
+ base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+ if (!trial)
+ return false;
+
+ return GetFieldTrialParams(trial->trial_name(), params);
+}
+
+std::string GetFieldTrialParamValue(const std::string& trial_name,
+ const std::string& param_name) {
+ std::map<std::string, std::string> params;
+ if (GetFieldTrialParams(trial_name, &params)) {
+ std::map<std::string, std::string>::iterator it = params.find(param_name);
+ if (it != params.end())
+ return it->second;
+ }
+ return std::string();
+}
+
+std::string GetFieldTrialParamValueByFeature(const base::Feature& feature,
+ const std::string& param_name) {
+ if (!base::FeatureList::IsEnabled(feature))
+ return std::string();
+
+ base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+ if (!trial)
+ return std::string();
+
+ return GetFieldTrialParamValue(trial->trial_name(), param_name);
+}
+
+int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+ const std::string& param_name,
+ int default_value) {
+ std::string value_as_string =
+ GetFieldTrialParamValueByFeature(feature, param_name);
+ int value_as_int = 0;
+ if (!base::StringToInt(value_as_string, &value_as_int)) {
+ if (!value_as_string.empty()) {
+ DLOG(WARNING) << "Failed to parse field trial param " << param_name
+ << " with string value " << value_as_string
+ << " under feature " << feature.name
+ << " into an int. Falling back to default value of "
+ << default_value;
+ }
+ value_as_int = default_value;
+ }
+ return value_as_int;
+}
+
+double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
+ const std::string& param_name,
+ double default_value) {
+ std::string value_as_string =
+ GetFieldTrialParamValueByFeature(feature, param_name);
+ double value_as_double = 0;
+ if (!base::StringToDouble(value_as_string, &value_as_double)) {
+ if (!value_as_string.empty()) {
+ DLOG(WARNING) << "Failed to parse field trial param " << param_name
+ << " with string value " << value_as_string
+ << " under feature " << feature.name
+ << " into a double. Falling back to default value of "
+ << default_value;
+ }
+ value_as_double = default_value;
+ }
+ return value_as_double;
+}
+
+bool GetFieldTrialParamByFeatureAsBool(const base::Feature& feature,
+ const std::string& param_name,
+ bool default_value) {
+ std::string value_as_string =
+ GetFieldTrialParamValueByFeature(feature, param_name);
+ if (value_as_string == "true")
+ return true;
+ if (value_as_string == "false")
+ return false;
+
+ if (!value_as_string.empty()) {
+ DLOG(WARNING) << "Failed to parse field trial param " << param_name
+ << " with string value " << value_as_string
+ << " under feature " << feature.name
+ << " into a bool. Falling back to default value of "
+ << default_value;
+ }
+ return default_value;
+}
+
+std::string FeatureParam<std::string>::Get() const {
+ const std::string value = GetFieldTrialParamValueByFeature(*feature, name);
+ return value.empty() ? default_value : value;
+}
+
+double FeatureParam<double>::Get() const {
+ return GetFieldTrialParamByFeatureAsDouble(*feature, name, default_value);
+}
+
+int FeatureParam<int>::Get() const {
+ return GetFieldTrialParamByFeatureAsInt(*feature, name, default_value);
+}
+
+bool FeatureParam<bool>::Get() const {
+ return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
+}
+
+void LogInvalidEnumValue(const base::Feature& feature,
+ const std::string& param_name,
+ const std::string& value_as_string,
+ int default_value_as_int) {
+ DLOG(WARNING) << "Failed to parse field trial param " << param_name
+ << " with string value " << value_as_string << " under feature "
+ << feature.name
+ << " into an enum. Falling back to default value of "
+ << default_value_as_int;
+}
+
+} // namespace base
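
The typed getters above all follow the same pattern: fetch the raw string param, attempt a conversion, and fall back to the caller's default, logging a warning only when a non-empty value fails to parse. A minimal usage sketch against this API (the feature and parameter names are hypothetical):

    // Sketch only: kMyFeature and the parameter names are made up.
    const base::Feature kMyFeature{"MyFeature",
                                   base::FEATURE_DISABLED_BY_DEFAULT};

    // Returns 16 when the param is unset or unparsable.
    int batch_size =
        base::GetFieldTrialParamByFeatureAsInt(kMyFeature, "batch_size", 16);
    // Only the exact strings "true" and "false" are accepted.
    bool verbose =
        base::GetFieldTrialParamByFeatureAsBool(kMyFeature, "verbose", false);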
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
new file mode 100644
index 0000000000..8682226a20
--- /dev/null
+++ b/base/metrics/field_trial_params.h
@@ -0,0 +1,258 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_PARAMS_H_
+#define BASE_METRICS_FIELD_TRIAL_PARAMS_H_
+
+#include <map>
+#include <string>
+#include <type_traits>
+
+#include "base/base_export.h"
+
+namespace base {
+
+struct Feature;
+
+// Associates the specified set of key-value |params| with the field trial
+// specified by |trial_name| and |group_name|. Fails and returns false if the
+// specified field trial already has params associated with it or the trial
+// is already active (group() has been called on it). Thread safe.
+BASE_EXPORT bool AssociateFieldTrialParams(
+ const std::string& trial_name,
+ const std::string& group_name,
+ const std::map<std::string, std::string>& params);
+
+// Retrieves the set of key-value |params| for the specified field trial, based
+// on its selected group. If the field trial does not exist or its selected
+// group does not have any parameters associated with it, returns false and
+// does not modify |params|. Calling this function will result in the field
+// trial being marked as active if found (i.e. group() will be called on it),
+// if it wasn't already. Thread safe.
+BASE_EXPORT bool GetFieldTrialParams(
+ const std::string& trial_name,
+ std::map<std::string, std::string>* params);
+
+// Retrieves the set of key-value |params| for the field trial associated with
+// the specified |feature|. A feature is associated with at most one field
+// trial and selected group. See base/feature_list.h for more information on
+// features. If the feature is not enabled, or if there's no associated params,
+// returns false and does not modify |params|. Calling this function will
+// result in the associated field trial being marked as active if found (i.e.
+// group() will be called on it), if it wasn't already. Thread safe.
+BASE_EXPORT bool GetFieldTrialParamsByFeature(
+ const base::Feature& feature,
+ std::map<std::string, std::string>* params);
+
+// Retrieves a specific parameter value corresponding to |param_name| for the
+// specified field trial, based on its selected group. If the field trial does
+// not exist or the specified parameter does not exist, returns an empty
+// string. Calling this function will result in the field trial being marked as
+// active if found (i.e. group() will be called on it), if it wasn't already.
+// Thread safe.
+BASE_EXPORT std::string GetFieldTrialParamValue(const std::string& trial_name,
+ const std::string& param_name);
+
+// Retrieves a specific parameter value corresponding to |param_name| for the
+// field trial associated with the specified |feature|. A feature is associated
+// with at most one field trial and selected group. See base/feature_list.h for
+// more information on features. If the feature is not enabled, or the
+// specified parameter does not exist, returns an empty string. Calling this
+// function will result in the associated field trial being marked as active if
+// found (i.e. group() will be called on it), if it wasn't already. Thread safe.
+BASE_EXPORT std::string GetFieldTrialParamValueByFeature(
+ const base::Feature& feature,
+ const std::string& param_name);
+
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into an int using base::StringToInt() and returns it, if
+// successful. Otherwise, it returns |default_value|. If the string value is not
+// empty and the conversion does not succeed, it produces a warning to LOG.
+BASE_EXPORT int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+ const std::string& param_name,
+ int default_value);
+
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into a double using base::StringToDouble() and returns it, if
+// successful. Otherwise, it returns |default_value|. If the string value is not
+// empty and the conversion does not succeed, it produces a warning to LOG.
+BASE_EXPORT double GetFieldTrialParamByFeatureAsDouble(
+ const base::Feature& feature,
+ const std::string& param_name,
+ double default_value);
+
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into a boolean and returns it, if successful. Otherwise, it
+// returns |default_value|. The only string representations accepted here are
+// "true" and "false". If the string value is not empty and the conversion does
+// not succeed, it produces a warning to LOG.
+BASE_EXPORT bool GetFieldTrialParamByFeatureAsBool(
+ const base::Feature& feature,
+ const std::string& param_name,
+ bool default_value);
+
+// Shared declaration for various FeatureParam<T> types.
+//
+// This template is defined for the following types T:
+// bool
+// int
+// double
+// std::string
+// enum types
+//
+// See the individual definitions below for the appropriate interfaces.
+// Attempting to use it with any other type is a compile error.
+template <typename T, bool IsEnum = std::is_enum<T>::value>
+struct FeatureParam {
+ // Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
+ // in its definition so that evaluation is deferred until the template is
+ // instantiated.
+ static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
+};
+
+// Declares a string-valued parameter. Example:
+//
+// constexpr FeatureParam<string> kAssistantName{
+// &kAssistantFeature, "assistant_name", "HAL"};
+//
+// If the feature is not set, or set to the empty string, then Get() will return
+// the default value.
+template <>
+struct FeatureParam<std::string> {
+ constexpr FeatureParam(const Feature* feature,
+ const char* name,
+ const char* default_value)
+ : feature(feature), name(name), default_value(default_value) {}
+
+ BASE_EXPORT std::string Get() const;
+
+ const Feature* const feature;
+ const char* const name;
+ const char* const default_value;
+};
+
+// Declares a double-valued parameter. Example:
+//
+// constexpr FeatureParam<double> kAssistantTriggerThreshold{
+// &kAssistantFeature, "trigger_threshold", 0.10};
+//
+// If the feature is not set, or set to an invalid double value, then Get() will
+// return the default value.
+template <>
+struct FeatureParam<double> {
+ constexpr FeatureParam(const Feature* feature,
+ const char* name,
+ double default_value)
+ : feature(feature), name(name), default_value(default_value) {}
+
+ BASE_EXPORT double Get() const;
+
+ const Feature* const feature;
+ const char* const name;
+ const double default_value;
+};
+
+// Declares an int-valued parameter. Example:
+//
+// constexpr FeatureParam<int> kAssistantParallelism{
+// &kAssistantFeature, "parallelism", 4};
+//
+// If the feature is not set, or set to an invalid int value, then Get() will
+// return the default value.
+template <>
+struct FeatureParam<int> {
+ constexpr FeatureParam(const Feature* feature,
+ const char* name,
+ int default_value)
+ : feature(feature), name(name), default_value(default_value) {}
+
+ BASE_EXPORT int Get() const;
+
+ const Feature* const feature;
+ const char* const name;
+ const int default_value;
+};
+
+// Declares a bool-valued parameter. Example:
+//
+// constexpr FeatureParam<bool> kAssistantIsHelpful{
+// &kAssistantFeature, "is_helpful", true};
+//
+// If the feature is not set, or set to value other than "true" or "false", then
+// Get() will return the default value.
+template <>
+struct FeatureParam<bool> {
+ constexpr FeatureParam(const Feature* feature,
+ const char* name,
+ bool default_value)
+ : feature(feature), name(name), default_value(default_value) {}
+
+ BASE_EXPORT bool Get() const;
+
+ const Feature* const feature;
+ const char* const name;
+ const bool default_value;
+};
+
+BASE_EXPORT void LogInvalidEnumValue(const Feature& feature,
+ const std::string& param_name,
+ const std::string& value_as_string,
+ int default_value_as_int);
+
+// Feature param declaration for an enum, with associated options. Example:
+//
+// constexpr FeatureParam<ShapeEnum>::Option kShapeParamOptions[] = {
+// {SHAPE_CIRCLE, "circle"},
+// {SHAPE_CYLINDER, "cylinder"},
+// {SHAPE_PAPERCLIP, "paperclip"}};
+// constexpr FeatureParam<ShapeEnum> kAssistantShapeParam{
+// &kAssistantFeature, "shape", SHAPE_CIRCLE, &kShapeParamOptions};
+//
+// With this declaration, the parameter may be set to "circle", "cylinder", or
+// "paperclip", and that will be translated to one of the three enum values. By
+// default, or if the param is set to an unknown value, the parameter will be
+// assumed to be SHAPE_CIRCLE.
+template <typename Enum>
+struct FeatureParam<Enum, true> {
+ struct Option {
+ constexpr Option(Enum value, const char* name) : value(value), name(name) {}
+
+ const Enum value;
+ const char* const name;
+ };
+
+ template <size_t option_count>
+ constexpr FeatureParam(const Feature* feature,
+ const char* name,
+ const Enum default_value,
+ const Option (*options)[option_count])
+ : feature(feature),
+ name(name),
+ default_value(default_value),
+ options(*options),
+ option_count(option_count) {
+ static_assert(option_count >= 1, "FeatureParam<enum> has no options");
+ }
+
+ Enum Get() const {
+ std::string value = GetFieldTrialParamValueByFeature(*feature, name);
+ if (value.empty())
+ return default_value;
+ for (size_t i = 0; i < option_count; ++i) {
+ if (value == options[i].name)
+ return options[i].value;
+ }
+ LogInvalidEnumValue(*feature, name, value, static_cast<int>(default_value));
+ return default_value;
+ }
+
+ const base::Feature* const feature;
+ const char* const name;
+ const Enum default_value;
+ const Option* const options;
+ const size_t option_count;
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_FIELD_TRIAL_PARAMS_H_
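
The FeatureParam<> specializations wrap the getters from field_trial_params.cc behind typed, constexpr-declarable handles. A sketch pulling together the declaration styles from the header comments, reusing the hypothetical kMyFeature from the earlier sketch (the enum and names are likewise made up):

    enum ShapeEnum { SHAPE_CIRCLE, SHAPE_CYLINDER };
    constexpr base::FeatureParam<ShapeEnum>::Option kShapeOptions[] = {
        {SHAPE_CIRCLE, "circle"}, {SHAPE_CYLINDER, "cylinder"}};
    constexpr base::FeatureParam<ShapeEnum> kShapeParam{
        &kMyFeature, "shape", SHAPE_CIRCLE, &kShapeOptions};
    constexpr base::FeatureParam<int> kParallelism{
        &kMyFeature, "parallelism", 4};

    // "circle" -> SHAPE_CIRCLE; unknown values log and return the default.
    ShapeEnum shape = kShapeParam.Get();
    int workers = kParallelism.Get();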
diff --git a/base/task_scheduler/delayed_task_manager.cc b/base/task_scheduler/delayed_task_manager.cc
new file mode 100644
index 0000000000..86a67219e4
--- /dev/null
+++ b/base/task_scheduler/delayed_task_manager.cc
@@ -0,0 +1,95 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/delayed_task_manager.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/task.h"
+
+namespace base {
+namespace internal {
+
+DelayedTaskManager::DelayedTaskManager(
+ std::unique_ptr<const TickClock> tick_clock)
+ : tick_clock_(std::move(tick_clock)) {
+ DCHECK(tick_clock_);
+}
+
+DelayedTaskManager::~DelayedTaskManager() = default;
+
+void DelayedTaskManager::Start(
+ scoped_refptr<TaskRunner> service_thread_task_runner) {
+ DCHECK(service_thread_task_runner);
+
+ decltype(tasks_added_before_start_) tasks_added_before_start;
+
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!service_thread_task_runner_);
+ DCHECK(!started_.IsSet());
+ service_thread_task_runner_ = std::move(service_thread_task_runner);
+ tasks_added_before_start = std::move(tasks_added_before_start_);
+ // |service_thread_task_runner_| must not change after |started_| is set
+ // (cf. comment above |lock_| in header file).
+ started_.Set();
+ }
+
+ const TimeTicks now = tick_clock_->NowTicks();
+ for (auto& task_and_callback : tasks_added_before_start) {
+ const TimeDelta delay =
+ std::max(TimeDelta(), task_and_callback.first.delayed_run_time - now);
+ AddDelayedTaskNow(std::move(task_and_callback.first), delay,
+ std::move(task_and_callback.second));
+ }
+}
+
+void DelayedTaskManager::AddDelayedTask(
+ Task task,
+ PostTaskNowCallback post_task_now_callback) {
+ DCHECK(task.task);
+
+ const TimeDelta delay = task.delay;
+ DCHECK(!delay.is_zero());
+
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.task);
+
+ // If |started_| is set, the DelayedTaskManager is in a stable state and
+ // AddDelayedTaskNow() can be called without synchronization. Otherwise, it is
+ // necessary to acquire |lock_| and recheck.
+ if (started_.IsSet()) {
+ AddDelayedTaskNow(std::move(task), delay,
+ std::move(post_task_now_callback));
+ } else {
+ AutoSchedulerLock auto_lock(lock_);
+ if (started_.IsSet()) {
+ AddDelayedTaskNow(std::move(task), delay,
+ std::move(post_task_now_callback));
+ } else {
+ tasks_added_before_start_.push_back(
+ {std::move(task), std::move(post_task_now_callback)});
+ }
+ }
+}
+
+void DelayedTaskManager::AddDelayedTaskNow(
+ Task task,
+ TimeDelta delay,
+ PostTaskNowCallback post_task_now_callback) {
+ DCHECK(task.task);
+ DCHECK(started_.IsSet());
+ // TODO(fdoray): Use |task->delayed_run_time| on the service thread
+ // MessageLoop rather than recomputing it from |delay|.
+ service_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, BindOnce(std::move(post_task_now_callback), std::move(task)),
+ delay);
+}
+
+} // namespace internal
+} // namespace base
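
AddDelayedTask() above is a double-checked fast path around a one-way AtomicFlag: once |started_| is set, the state it guards never changes again, so reading the flag alone suffices; before that, the lock must be taken and the flag rechecked in case Start() ran in between. A distilled sketch of the idiom (names hypothetical, using a plain base::AutoLock rather than the SchedulerLock used here):

    if (started_.IsSet()) {
      UseImmutableState();  // Fast path: no lock once the flag is set.
    } else {
      base::AutoLock auto_lock(lock_);
      if (started_.IsSet())
        UseImmutableState();  // Start() won the race; state is now immutable.
      else
        queued_work_.push_back(std::move(work));  // Start() drains this later.
    }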
diff --git a/base/task_scheduler/delayed_task_manager.h b/base/task_scheduler/delayed_task_manager.h
new file mode 100644
index 0000000000..c48aeb1e6b
--- /dev/null
+++ b/base/task_scheduler/delayed_task_manager.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
+#define BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+class TaskRunner;
+
+namespace internal {
+
+struct Task;
+
+// The DelayedTaskManager forwards tasks to post task callbacks when they become
+// ripe for execution. Tasks are not forwarded before Start() is called. This
+// class is thread-safe.
+class BASE_EXPORT DelayedTaskManager {
+ public:
+ // Posts |task| for execution immediately.
+ using PostTaskNowCallback = OnceCallback<void(Task task)>;
+
+ // |tick_clock| can be specified for testing.
+ DelayedTaskManager(std::unique_ptr<const TickClock> tick_clock =
+ std::make_unique<DefaultTickClock>());
+ ~DelayedTaskManager();
+
+ // Starts the delayed task manager, allowing past and future tasks to be
+ // forwarded to their callbacks as they become ripe for execution.
+ // |service_thread_task_runner| posts tasks to the TaskScheduler service
+ // thread.
+ void Start(scoped_refptr<TaskRunner> service_thread_task_runner);
+
+ // Schedules a call to |post_task_now_callback| with |task| as argument when
+ // |task| is ripe for execution and Start() has been called.
+ void AddDelayedTask(Task task, PostTaskNowCallback post_task_now_callback);
+
+ private:
+ // Schedules a call to |post_task_now_callback| with |task| as argument when
+ // |delay| expires. Start() must have been called before this.
+ void AddDelayedTaskNow(Task task,
+ TimeDelta delay,
+ PostTaskNowCallback post_task_now_callback);
+
+ const std::unique_ptr<const TickClock> tick_clock_;
+
+ AtomicFlag started_;
+
+ // Synchronizes access to all members below before |started_| is set. Once
+ // |started_| is set:
+ // - |service_thread_task_runner_| does not change, so it can be read without
+ // holding the lock.
+ // - |tasks_added_before_start_| isn't accessed anymore.
+ SchedulerLock lock_;
+
+ scoped_refptr<TaskRunner> service_thread_task_runner_;
+ std::vector<std::pair<Task, PostTaskNowCallback>> tasks_added_before_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
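
Because the tick clock is constructor-injected (defaulting to DefaultTickClock), tests can substitute a controllable clock; base/test/simple_test_tick_clock.cc ships in this same sync. A test-only sketch, assuming a service-thread task runner already exists:

    auto clock = std::make_unique<base::SimpleTestTickClock>();
    base::SimpleTestTickClock* clock_ptr = clock.get();  // Kept for Advance().
    base::internal::DelayedTaskManager manager(std::move(clock));
    manager.Start(service_thread_task_runner);
    clock_ptr->Advance(base::TimeDelta::FromSeconds(1));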
diff --git a/base/task_scheduler/priority_queue.cc b/base/task_scheduler/priority_queue.cc
new file mode 100644
index 0000000000..fa11b10b97
--- /dev/null
+++ b/base/task_scheduler/priority_queue.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/priority_queue.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+namespace internal {
+
+// A class combining a Sequence and the SequenceSortKey that determines its
+// position in a PriorityQueue. Instances are only mutable via take_sequence()
+// which can only be called once and renders its instance invalid after the
+// call.
+class PriorityQueue::SequenceAndSortKey {
+ public:
+ SequenceAndSortKey(scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sort_key)
+ : sequence_(std::move(sequence)), sort_key_(sort_key) {
+ DCHECK(sequence_);
+ }
+
+ // Note: while |sequence_| should always be non-null post-move (i.e. we
+ // shouldn't be moving an invalid SequenceAndSortKey around), there can't be a
+ // DCHECK(sequence_) on moves as the Windows STL moves elements on pop instead
+ // of overwriting them: resulting in the move of a SequenceAndSortKey with a
+ // null |sequence_| in Transaction::Pop()'s implementation.
+ SequenceAndSortKey(SequenceAndSortKey&& other) = default;
+ SequenceAndSortKey& operator=(SequenceAndSortKey&& other) = default;
+
+ // Extracts |sequence_| from this object. This object is invalid after this
+ // call.
+ scoped_refptr<Sequence> take_sequence() {
+ DCHECK(sequence_);
+ return std::move(sequence_);
+ }
+
+ // Compares this SequenceAndSortKey to |other| based on their respective
+ // |sort_key_|.
+ bool operator<(const SequenceAndSortKey& other) const {
+ return sort_key_ < other.sort_key_;
+ }
+ // The style guide dictates defining operator> when defining operator<, but it's
+ // unused in this case and this isn't a public API. Explicitly delete it so
+ // any errors point here if that ever changes.
+ bool operator>(const SequenceAndSortKey& other) const = delete;
+
+ const SequenceSortKey& sort_key() const { return sort_key_; }
+
+ private:
+ scoped_refptr<Sequence> sequence_;
+ SequenceSortKey sort_key_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceAndSortKey);
+};
+
+PriorityQueue::Transaction::Transaction(PriorityQueue* outer_queue)
+ : auto_lock_(outer_queue->container_lock_), outer_queue_(outer_queue) {
+}
+
+PriorityQueue::Transaction::~Transaction() = default;
+
+void PriorityQueue::Transaction::Push(
+ scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key) {
+ outer_queue_->container_.emplace(std::move(sequence), sequence_sort_key);
+}
+
+const SequenceSortKey& PriorityQueue::Transaction::PeekSortKey() const {
+ DCHECK(!IsEmpty());
+ return outer_queue_->container_.top().sort_key();
+}
+
+scoped_refptr<Sequence> PriorityQueue::Transaction::PopSequence() {
+ DCHECK(!IsEmpty());
+
+ // The const_cast on top() is okay since the SequenceAndSortKey is
+ // transactionally being popped from |container_| right after and taking its
+ // Sequence does not alter its sort order (a requirement for the Windows STL's
+ // consistency debug-checks for std::priority_queue::top()).
+ scoped_refptr<Sequence> sequence =
+ const_cast<PriorityQueue::SequenceAndSortKey&>(
+ outer_queue_->container_.top())
+ .take_sequence();
+ outer_queue_->container_.pop();
+ return sequence;
+}
+
+bool PriorityQueue::Transaction::IsEmpty() const {
+ return outer_queue_->container_.empty();
+}
+
+size_t PriorityQueue::Transaction::Size() const {
+ return outer_queue_->container_.size();
+}
+
+PriorityQueue::PriorityQueue() = default;
+
+PriorityQueue::~PriorityQueue() = default;
+
+std::unique_ptr<PriorityQueue::Transaction> PriorityQueue::BeginTransaction() {
+ return WrapUnique(new Transaction(this));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/priority_queue.h b/base/task_scheduler/priority_queue.h
new file mode 100644
index 0000000000..d882364b28
--- /dev/null
+++ b/base/task_scheduler/priority_queue.h
@@ -0,0 +1,104 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
+#define BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+// A PriorityQueue holds Sequences of Tasks. This class is thread-safe.
+class BASE_EXPORT PriorityQueue {
+ public:
+ // A Transaction can perform multiple operations atomically on a
+ // PriorityQueue. While a Transaction is alive, it is guaranteed that nothing
+ // else will access the PriorityQueue.
+ //
+ // A Worker needs to be able to Peek sequences from both its PriorityQueues
+ // (single-threaded and shared) and then Pop the sequence with the highest
+ // priority. If the Peek and the Pop are done through the same Transaction, it
+ // is guaranteed that the PriorityQueue hasn't changed between the 2
+ // operations.
+ class BASE_EXPORT Transaction {
+ public:
+ ~Transaction();
+
+ // Inserts |sequence| in the PriorityQueue with |sequence_sort_key|.
+ // Note: |sequence_sort_key| is required as a parameter instead of being
+ // extracted from |sequence| in Push() to avoid this Transaction having a
+ // lock interdependency with |sequence|.
+ void Push(scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key);
+
+ // Returns a reference to the SequenceSortKey representing the priority of
+ // the highest pending task in this PriorityQueue. The reference becomes
+ // invalid the next time that this PriorityQueue is modified.
+ // Cannot be called on an empty PriorityQueue.
+ const SequenceSortKey& PeekSortKey() const;
+
+ // Removes and returns the highest priority Sequence in this PriorityQueue.
+ // Cannot be called on an empty PriorityQueue.
+ scoped_refptr<Sequence> PopSequence();
+
+ // Returns true if the PriorityQueue is empty.
+ bool IsEmpty() const;
+
+ // Returns the number of Sequences in the PriorityQueue.
+ size_t Size() const;
+
+ private:
+ friend class PriorityQueue;
+
+ explicit Transaction(PriorityQueue* outer_queue);
+
+ // Holds the lock of |outer_queue_| for the lifetime of this Transaction.
+ AutoSchedulerLock auto_lock_;
+
+ PriorityQueue* const outer_queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(Transaction);
+ };
+
+ PriorityQueue();
+
+ ~PriorityQueue();
+
+ // Begins a Transaction. This method cannot be called on a thread which has an
+ // active Transaction unless the last Transaction created on the thread was
+ // for the allowed predecessor specified in the constructor of this
+ // PriorityQueue.
+ std::unique_ptr<Transaction> BeginTransaction();
+
+ const SchedulerLock* container_lock() const { return &container_lock_; }
+
+ private:
+ // A class combining a Sequence and the SequenceSortKey that determines its
+ // position in a PriorityQueue.
+ class SequenceAndSortKey;
+
+ using ContainerType = std::priority_queue<SequenceAndSortKey>;
+
+ // Synchronizes access to |container_|.
+ SchedulerLock container_lock_;
+
+ ContainerType container_;
+
+ DISALLOW_COPY_AND_ASSIGN(PriorityQueue);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
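
A Transaction pins |container_lock_| for its whole lifetime, which is what lets a worker compare and pop without the queue changing underneath it. A sketch of the peek-then-pop pattern the class comment describes (|shared_queue| and |candidate_sort_key|, a key taken from some other queue, are hypothetical):

    auto transaction = shared_queue.BeginTransaction();
    if (!transaction->IsEmpty() &&
        candidate_sort_key < transaction->PeekSortKey()) {
      // Same Transaction, so nothing was pushed or popped between the
      // Peek and this Pop.
      scoped_refptr<base::internal::Sequence> sequence =
          transaction->PopSequence();
    }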
diff --git a/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
new file mode 100644
index 0000000000..737df42b1c
--- /dev/null
+++ b/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -0,0 +1,656 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+
+#include "base/win/scoped_com_initializer.h"
+#endif // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Boolean indicating whether there's a SchedulerSingleThreadTaskRunnerManager
+// instance alive in this process. This variable should only be set when the
+// SchedulerSingleThreadTaskRunnerManager instance is brought up (on the main
+// thread; before any tasks are posted) and cleared when the instance is
+// brought down (i.e., only when unit tests tear down the task environment and
+// never in production). This makes the variable const while worker threads are
+// up and as such it doesn't need to be atomic. It is used to tell when a task
+// is posted from the main thread after the task environment was brought down in
+// unit tests so that SchedulerSingleThreadTaskRunnerManager-bound TaskRunners
+// can return false on PostTask, letting such callers know they should complete
+// necessary work synchronously. Note: |!g_manager_is_alive| is generally
+// equivalent to |!TaskScheduler::GetInstance()| but has the advantage of being
+// valid in task_scheduler unit tests that don't instantiate a full
+// TaskScheduler.
+bool g_manager_is_alive = false;
+
+// Allows for checking the PlatformThread::CurrentRef() against a set
+// PlatformThreadRef atomically without using locks.
+class AtomicThreadRefChecker {
+ public:
+ AtomicThreadRefChecker() = default;
+ ~AtomicThreadRefChecker() = default;
+
+ void Set() {
+ thread_ref_ = PlatformThread::CurrentRef();
+ is_set_.Set();
+ }
+
+ bool IsCurrentThreadSameAsSetThread() {
+ return is_set_.IsSet() && thread_ref_ == PlatformThread::CurrentRef();
+ }
+
+ private:
+ AtomicFlag is_set_;
+ PlatformThreadRef thread_ref_;
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicThreadRefChecker);
+};
+
+class SchedulerWorkerDelegate : public SchedulerWorker::Delegate {
+ public:
+ SchedulerWorkerDelegate(const std::string& thread_name,
+ SchedulerWorker::ThreadLabel thread_label)
+ : thread_name_(thread_name), thread_label_(thread_label) {}
+
+ void set_worker(SchedulerWorker* worker) {
+ DCHECK(!worker_);
+ worker_ = worker;
+ }
+
+ // SchedulerWorker::Delegate:
+ void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
+ DCHECK(worker_);
+ ReEnqueueSequence(std::move(sequence));
+ worker_->WakeUp();
+ }
+
+ SchedulerWorker::ThreadLabel GetThreadLabel() const final {
+ return thread_label_;
+ }
+
+ void OnMainEntry(const SchedulerWorker* /* worker */) override {
+ thread_ref_checker_.Set();
+ PlatformThread::SetName(thread_name_);
+ }
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ bool has_work = has_work_;
+ has_work_ = false;
+ return has_work ? sequence_ : nullptr;
+ }
+
+ void DidRunTask() override {}
+
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ // We've shut down, so no-op this work request. Any sequence cleanup will
+ // occur in the caller's context.
+ if (!sequence_)
+ return;
+
+ DCHECK_EQ(sequence, sequence_);
+ DCHECK(!has_work_);
+ has_work_ = true;
+ }
+
+ TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
+
+ bool RunsTasksInCurrentSequence() {
+ // We check the thread ref instead of the sequence for the benefit of COM
+ // callbacks which may execute without a sequence context.
+ return thread_ref_checker_.IsCurrentThreadSameAsSetThread();
+ }
+
+ void OnMainExit(SchedulerWorker* /* worker */) override {
+ // Move |sequence_| to |local_sequence| so that if we have the last
+ // reference to the sequence we don't destroy it (and its tasks) within
+ // |sequence_lock_|.
+ scoped_refptr<Sequence> local_sequence;
+ {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ // To reclaim skipped tasks on shutdown, we null out the sequence to allow
+ // the tasks to destroy themselves.
+ local_sequence = std::move(sequence_);
+ }
+ }
+
+ // SchedulerWorkerDelegate:
+
+ // Consumers should release their sequence reference as soon as possible to
+ // ensure timely cleanup for general shutdown.
+ scoped_refptr<Sequence> sequence() {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ return sequence_;
+ }
+
+ private:
+ const std::string thread_name_;
+ const SchedulerWorker::ThreadLabel thread_label_;
+
+ // The SchedulerWorker that has |this| as a delegate. Must be set before
+ // starting or posting a task to the SchedulerWorker, because it's used in
+ // OnMainEntry() and OnCanScheduleSequence() (called when a sequence held up
+ // by WillScheduleSequence() in PostTaskNow() can be scheduled).
+ SchedulerWorker* worker_ = nullptr;
+
+ // Synchronizes access to |sequence_| and |has_work_|.
+ SchedulerLock sequence_lock_;
+ scoped_refptr<Sequence> sequence_ = new Sequence;
+ bool has_work_ = false;
+
+ AtomicThreadRefChecker thread_ref_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegate);
+};
+
+#if defined(OS_WIN)
+
+class SchedulerWorkerCOMDelegate : public SchedulerWorkerDelegate {
+ public:
+ SchedulerWorkerCOMDelegate(const std::string& thread_name,
+ SchedulerWorker::ThreadLabel thread_label,
+ TrackedRef<TaskTracker> task_tracker)
+ : SchedulerWorkerDelegate(thread_name, thread_label),
+ task_tracker_(std::move(task_tracker)) {}
+
+ ~SchedulerWorkerCOMDelegate() override { DCHECK(!scoped_com_initializer_); }
+
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(const SchedulerWorker* worker) override {
+ SchedulerWorkerDelegate::OnMainEntry(worker);
+
+ scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
+ }
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ // This scheme below allows us to cover the following scenarios:
+ // * Only SchedulerWorkerDelegate::GetWork() has work:
+ // Always return the sequence from GetWork().
+ // * Only the Windows Message Queue has work:
+ // Always return the sequence from GetWorkFromWindowsMessageQueue();
+ // * Both SchedulerWorkerDelegate::GetWork() and the Windows Message Queue
+ // have work:
+ // Process sequences from each source round-robin style.
+ scoped_refptr<Sequence> sequence;
+ if (get_work_first_) {
+ sequence = SchedulerWorkerDelegate::GetWork(worker);
+ if (sequence)
+ get_work_first_ = false;
+ }
+
+ if (!sequence) {
+ sequence = GetWorkFromWindowsMessageQueue();
+ if (sequence)
+ get_work_first_ = true;
+ }
+
+ if (!sequence && !get_work_first_) {
+ // This case is important if we checked the Windows Message Queue first
+ // and found there was no work. We don't want to return null immediately
+ // as that could cause the thread to go to sleep while work is waiting via
+ // SchedulerWorkerDelegate::GetWork().
+ sequence = SchedulerWorkerDelegate::GetWork(worker);
+ }
+ return sequence;
+ }
+
+ void OnMainExit(SchedulerWorker* /* worker */) override {
+ scoped_com_initializer_.reset();
+ }
+
+ void WaitForWork(WaitableEvent* wake_up_event) override {
+ DCHECK(wake_up_event);
+ const TimeDelta sleep_time = GetSleepTimeout();
+ const DWORD milliseconds_wait =
+ sleep_time.is_max() ? INFINITE : sleep_time.InMilliseconds();
+ const HANDLE wake_up_event_handle = wake_up_event->handle();
+ MsgWaitForMultipleObjectsEx(1, &wake_up_event_handle, milliseconds_wait,
+ QS_ALLINPUT, 0);
+ }
+
+ private:
+ scoped_refptr<Sequence> GetWorkFromWindowsMessageQueue() {
+ MSG msg;
+ if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
+ Task pump_message_task(FROM_HERE,
+ Bind(
+ [](MSG msg) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ },
+ std::move(msg)),
+ TaskTraits(MayBlock()), TimeDelta());
+ if (task_tracker_->WillPostTask(&pump_message_task)) {
+ bool was_empty =
+ message_pump_sequence_->PushTask(std::move(pump_message_task));
+ DCHECK(was_empty) << "GetWorkFromWindowsMessageQueue() does not expect "
+ "queueing of pump tasks.";
+ return message_pump_sequence_;
+ }
+ }
+ return nullptr;
+ }
+
+ bool get_work_first_ = true;
+ const scoped_refptr<Sequence> message_pump_sequence_ = new Sequence;
+ const TrackedRef<TaskTracker> task_tracker_;
+ std::unique_ptr<win::ScopedCOMInitializer> scoped_com_initializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerCOMDelegate);
+};
+
+#endif // defined(OS_WIN)
+
+} // namespace
+
+class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
+ : public SingleThreadTaskRunner {
+ public:
+ // Constructs a SchedulerSingleThreadTaskRunner that indirectly controls the
+ // lifetime of a dedicated |worker| for |traits|.
+ SchedulerSingleThreadTaskRunner(
+ SchedulerSingleThreadTaskRunnerManager* const outer,
+ const TaskTraits& traits,
+ SchedulerWorker* worker,
+ SingleThreadTaskRunnerThreadMode thread_mode)
+ : outer_(outer),
+ traits_(traits),
+ worker_(worker),
+ thread_mode_(thread_mode) {
+ DCHECK(outer_);
+ DCHECK(worker_);
+ }
+
+ // SingleThreadTaskRunner:
+ bool PostDelayedTask(const Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) override {
+ if (!g_manager_is_alive)
+ return false;
+
+ Task task(from_here, std::move(closure), traits_, delay);
+ task.single_thread_task_runner_ref = this;
+
+ if (!outer_->task_tracker_->WillPostTask(&task))
+ return false;
+
+ if (task.delayed_run_time.is_null()) {
+ PostTaskNow(std::move(task));
+ } else {
+ outer_->delayed_task_manager_->AddDelayedTask(
+ std::move(task),
+ BindOnce(&SchedulerSingleThreadTaskRunner::PostTaskNow,
+ Unretained(this)));
+ }
+ return true;
+ }
+
+ bool PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) override {
+ // Tasks are never nested within the task scheduler.
+ return PostDelayedTask(from_here, std::move(closure), delay);
+ }
+
+ bool RunsTasksInCurrentSequence() const override {
+ if (!g_manager_is_alive)
+ return false;
+ return GetDelegate()->RunsTasksInCurrentSequence();
+ }
+
+ private:
+ ~SchedulerSingleThreadTaskRunner() override {
+ // Only unregister if this is a DEDICATED SingleThreadTaskRunner. SHARED
+ // task runner SchedulerWorkers are managed separately as they are reused.
+ // |g_manager_is_alive| avoids a use-after-free should this
+ // SchedulerSingleThreadTaskRunner outlive its manager. It is safe to access
+ // |g_manager_is_alive| without synchronization primitives as it is const
+ // for the lifetime of the manager and ~SchedulerSingleThreadTaskRunner()
+ // either happens prior to the end of JoinForTesting() (which happens-before
+    // manager's destruction) or on the main thread after the task environment's
+ // entire destruction (which happens-after the manager's destruction). Yes,
+ // there's a theoretical use case where the last ref to this
+ // SchedulerSingleThreadTaskRunner is handed to a thread not controlled by
+ // task_scheduler and that this ends up causing
+ // ~SchedulerSingleThreadTaskRunner() to race with
+ // ~SchedulerSingleThreadTaskRunnerManager() but this is intentionally not
+ // supported (and it doesn't matter in production where we leak the task
+ // environment for such reasons). TSan should catch this weird paradigm
+ // should anyone elect to use it in a unit test and the error would point
+ // here.
+ if (g_manager_is_alive &&
+ thread_mode_ == SingleThreadTaskRunnerThreadMode::DEDICATED) {
+ outer_->UnregisterSchedulerWorker(worker_);
+ }
+ }
+
+ void PostTaskNow(Task task) {
+ scoped_refptr<Sequence> sequence = GetDelegate()->sequence();
+ // If |sequence| is null, then the thread is effectively gone (either
+ // shutdown or joined).
+ if (!sequence)
+ return;
+
+ const bool sequence_was_empty = sequence->PushTask(std::move(task));
+ if (sequence_was_empty) {
+ sequence = outer_->task_tracker_->WillScheduleSequence(
+ std::move(sequence), GetDelegate());
+ if (sequence) {
+ GetDelegate()->ReEnqueueSequence(std::move(sequence));
+ worker_->WakeUp();
+ }
+ }
+ }
+
+ SchedulerWorkerDelegate* GetDelegate() const {
+ return static_cast<SchedulerWorkerDelegate*>(worker_->delegate());
+ }
+
+ SchedulerSingleThreadTaskRunnerManager* const outer_;
+ const TaskTraits traits_;
+ SchedulerWorker* const worker_;
+ const SingleThreadTaskRunnerThreadMode thread_mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunner);
+};
+
+SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunnerManager(
+ TrackedRef<TaskTracker> task_tracker,
+ DelayedTaskManager* delayed_task_manager)
+ : task_tracker_(std::move(task_tracker)),
+ delayed_task_manager_(delayed_task_manager) {
+ DCHECK(task_tracker_);
+ DCHECK(delayed_task_manager_);
+#if defined(OS_WIN)
+ static_assert(arraysize(shared_com_scheduler_workers_) ==
+ arraysize(shared_scheduler_workers_),
+ "The size of |shared_com_scheduler_workers_| must match "
+ "|shared_scheduler_workers_|");
+ static_assert(arraysize(shared_com_scheduler_workers_[0]) ==
+ arraysize(shared_scheduler_workers_[0]),
+ "The size of |shared_com_scheduler_workers_| must match "
+ "|shared_scheduler_workers_|");
+#endif // defined(OS_WIN)
+ DCHECK(!g_manager_is_alive);
+ g_manager_is_alive = true;
+}
+
+SchedulerSingleThreadTaskRunnerManager::
+ ~SchedulerSingleThreadTaskRunnerManager() {
+ DCHECK(g_manager_is_alive);
+ g_manager_is_alive = false;
+}
+
+void SchedulerSingleThreadTaskRunnerManager::Start(
+ SchedulerWorkerObserver* scheduler_worker_observer) {
+ DCHECK(!scheduler_worker_observer_);
+ scheduler_worker_observer_ = scheduler_worker_observer;
+
+ decltype(workers_) workers_to_start;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ started_ = true;
+ workers_to_start = workers_;
+ }
+
+ // Start workers that were created before this method was called.
+ // Workers that already need to wake up are already signaled as part of
+ // SchedulerSingleThreadTaskRunner::PostTaskNow(). As a result, it's
+ // unnecessary to call WakeUp() for each worker (in fact, an extraneous
+ // WakeUp() would be racy and wrong - see https://crbug.com/862582).
+ for (scoped_refptr<SchedulerWorker> worker : workers_to_start)
+ worker->Start(scheduler_worker_observer_);
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerDelegate>(traits,
+ thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerCOMDelegate>(
+ traits, thread_mode);
+}
+#endif // defined(OS_WIN)
+
+// static
+SchedulerSingleThreadTaskRunnerManager::ContinueOnShutdown
+SchedulerSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
+ const TaskTraits& traits) {
+ if (traits.shutdown_behavior() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
+ return IS_CONTINUE_ON_SHUTDOWN;
+ return IS_NOT_CONTINUE_ON_SHUTDOWN;
+}
+
+template <typename DelegateType>
+scoped_refptr<
+ SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
+ !traits.with_base_sync_primitives())
+ << "Using WithBaseSyncPrimitives() on a shared SingleThreadTaskRunner "
+ "may cause deadlocks. Either reevaluate your usage (e.g. use "
+ "SequencedTaskRunner) or use "
+ "SingleThreadTaskRunnerThreadMode::DEDICATED.";
+  // To simplify the code, |dedicated_worker| is a local-only variable that
+  // allows the code to treat the DEDICATED and SHARED cases of
+  // SingleThreadTaskRunnerThreadMode similarly. In DEDICATED, the worker
+  // pointer is backed by a local variable; in SHARED, it is backed by a
+  // member variable.
+ SchedulerWorker* dedicated_worker = nullptr;
+ SchedulerWorker*& worker =
+ thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+ ? dedicated_worker
+ : GetSharedSchedulerWorkerForTraits<DelegateType>(traits);
+ bool new_worker = false;
+ bool started;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ if (!worker) {
+ const auto& environment_params =
+ kEnvironmentParams[GetEnvironmentIndexForTraits(traits)];
+ std::string worker_name;
+ if (thread_mode == SingleThreadTaskRunnerThreadMode::SHARED)
+ worker_name += "Shared";
+ worker_name += environment_params.name_suffix;
+ worker = CreateAndRegisterSchedulerWorker<DelegateType>(
+ worker_name, thread_mode,
+ CanUseBackgroundPriorityForSchedulerWorker()
+ ? environment_params.priority_hint
+ : ThreadPriority::NORMAL);
+ new_worker = true;
+ }
+ started = started_;
+ }
+
+ if (new_worker && started)
+ worker->Start(scheduler_worker_observer_);
+
+ return MakeRefCounted<SchedulerSingleThreadTaskRunner>(this, traits, worker,
+ thread_mode);
+}
+
+void SchedulerSingleThreadTaskRunnerManager::JoinForTesting() {
+ decltype(workers_) local_workers;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ local_workers = std::move(workers_);
+ }
+
+ for (const auto& worker : local_workers)
+ worker->JoinForTesting();
+
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(workers_.empty())
+ << "New worker(s) unexpectedly registered during join.";
+ workers_ = std::move(local_workers);
+ }
+
+ // Release shared SchedulerWorkers at the end so they get joined above. If
+ // this call happens before the joins, the SchedulerWorkers are effectively
+ // detached and may outlive the SchedulerSingleThreadTaskRunnerManager.
+ ReleaseSharedSchedulerWorkers();
+}
+
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+ SchedulerWorkerDelegate>(const std::string& name,
+ int id,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return std::make_unique<SchedulerWorkerDelegate>(
+ StringPrintf("TaskSchedulerSingleThread%s%d", name.c_str(), id),
+ thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+ ? SchedulerWorker::ThreadLabel::DEDICATED
+ : SchedulerWorker::ThreadLabel::SHARED);
+}
+
+#if defined(OS_WIN)
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+ SchedulerWorkerCOMDelegate>(const std::string& name,
+ int id,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return std::make_unique<SchedulerWorkerCOMDelegate>(
+ StringPrintf("TaskSchedulerSingleThreadCOMSTA%s%d", name.c_str(), id),
+ thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+ ? SchedulerWorker::ThreadLabel::DEDICATED_COM
+ : SchedulerWorker::ThreadLabel::SHARED_COM,
+ task_tracker_);
+}
+#endif // defined(OS_WIN)
+
+template <typename DelegateType>
+SchedulerWorker*
+SchedulerSingleThreadTaskRunnerManager::CreateAndRegisterSchedulerWorker(
+ const std::string& name,
+ SingleThreadTaskRunnerThreadMode thread_mode,
+ ThreadPriority priority_hint) {
+ lock_.AssertAcquired();
+ int id = next_worker_id_++;
+ std::unique_ptr<SchedulerWorkerDelegate> delegate =
+ CreateSchedulerWorkerDelegate<DelegateType>(name, id, thread_mode);
+ SchedulerWorkerDelegate* delegate_raw = delegate.get();
+ scoped_refptr<SchedulerWorker> worker = MakeRefCounted<SchedulerWorker>(
+ priority_hint, std::move(delegate), task_tracker_);
+ delegate_raw->set_worker(worker.get());
+ workers_.emplace_back(std::move(worker));
+ return workers_.back().get();
+}
+
+template <>
+SchedulerWorker*&
+SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
+ SchedulerWorkerDelegate>(const TaskTraits& traits) {
+ return shared_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+ [TraitsToContinueOnShutdown(traits)];
+}
+
+#if defined(OS_WIN)
+template <>
+SchedulerWorker*&
+SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
+ SchedulerWorkerCOMDelegate>(const TaskTraits& traits) {
+ return shared_com_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+ [TraitsToContinueOnShutdown(traits)];
+}
+#endif // defined(OS_WIN)
+
+void SchedulerSingleThreadTaskRunnerManager::UnregisterSchedulerWorker(
+ SchedulerWorker* worker) {
+ // Cleanup uses a SchedulerLock, so call Cleanup() after releasing
+ // |lock_|.
+ scoped_refptr<SchedulerWorker> worker_to_destroy;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+
+ // Skip when joining (the join logic takes care of the rest).
+ if (workers_.empty())
+ return;
+
+ auto worker_iter =
+ std::find_if(workers_.begin(), workers_.end(),
+ [worker](const scoped_refptr<SchedulerWorker>& candidate) {
+ return candidate.get() == worker;
+ });
+ DCHECK(worker_iter != workers_.end());
+ worker_to_destroy = std::move(*worker_iter);
+ workers_.erase(worker_iter);
+ }
+ worker_to_destroy->Cleanup();
+}
+
+void SchedulerSingleThreadTaskRunnerManager::ReleaseSharedSchedulerWorkers() {
+ decltype(shared_scheduler_workers_) local_shared_scheduler_workers;
+#if defined(OS_WIN)
+ decltype(shared_com_scheduler_workers_) local_shared_com_scheduler_workers;
+#endif
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ for (size_t i = 0; i < arraysize(shared_scheduler_workers_); ++i) {
+ for (size_t j = 0; j < arraysize(shared_scheduler_workers_[i]); ++j) {
+ local_shared_scheduler_workers[i][j] = shared_scheduler_workers_[i][j];
+ shared_scheduler_workers_[i][j] = nullptr;
+#if defined(OS_WIN)
+ local_shared_com_scheduler_workers[i][j] =
+ shared_com_scheduler_workers_[i][j];
+ shared_com_scheduler_workers_[i][j] = nullptr;
+#endif
+ }
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(local_shared_scheduler_workers); ++i) {
+ for (size_t j = 0; j < arraysize(local_shared_scheduler_workers[i]); ++j) {
+ if (local_shared_scheduler_workers[i][j])
+ UnregisterSchedulerWorker(local_shared_scheduler_workers[i][j]);
+#if defined(OS_WIN)
+ if (local_shared_com_scheduler_workers[i][j])
+ UnregisterSchedulerWorker(local_shared_com_scheduler_workers[i][j]);
+#endif
+ }
+ }
+}
+
+} // namespace internal
+} // namespace base
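Stripped of the COM specifics, the two-source round-robin in SchedulerWorkerCOMDelegate::GetWork() above reduces to the following sketch. GetSequenceWork() and GetMessageQueueWork() are hypothetical stand-ins for SchedulerWorkerDelegate::GetWork() and GetWorkFromWindowsMessageQueue():

    // Whichever source produced work last time is checked second on the
    // next call; a final re-check of the first source avoids sleeping
    // while it still has pending work that was skipped this round.
    scoped_refptr<Sequence> sequence;
    if (sequence_work_first) {
      sequence = GetSequenceWork();
      if (sequence)
        sequence_work_first = false;
    }
    if (!sequence) {
      sequence = GetMessageQueueWork();
      if (sequence)
        sequence_work_first = true;
    }
    if (!sequence && !sequence_work_first) {
      // The message queue was checked first and was empty; re-check the
      // sequence source so the worker doesn't sleep on available work.
      sequence = GetSequenceWork();
    }
    return sequence;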
diff --git a/base/task_scheduler/scheduler_single_thread_task_runner_manager.h b/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
new file mode 100644
index 0000000000..b25230d7c3
--- /dev/null
+++ b/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class TaskTraits;
+class SchedulerWorkerObserver;
+class SingleThreadTaskRunner;
+
+namespace internal {
+
+class DelayedTaskManager;
+class SchedulerWorker;
+class TaskTracker;
+
+namespace {
+
+class SchedulerWorkerDelegate;
+
+} // namespace
+
+// Manages a pool of threads which are each associated with one or more
+// SingleThreadTaskRunners.
+//
+// SingleThreadTaskRunners using SingleThreadTaskRunnerThreadMode::SHARED are
+// backed by shared SchedulerWorkers for each COM+task environment combination.
+// These workers are lazily instantiated and then only reclaimed during
+// JoinForTesting().
+//
+// No threads are created (and hence no tasks can run) before Start() is called.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
+ public:
+ SchedulerSingleThreadTaskRunnerManager(
+ TrackedRef<TaskTracker> task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+ ~SchedulerSingleThreadTaskRunnerManager();
+
+ // Starts threads for existing SingleThreadTaskRunners and allows threads to
+ // be started when SingleThreadTaskRunners are created in the future. If
+ // specified, |scheduler_worker_observer| will be notified when a worker
+ // enters and exits its main function. It must not be destroyed before
+ // JoinForTesting() has returned (must never be destroyed in production).
+ void Start(SchedulerWorkerObserver* scheduler_worker_observer = nullptr);
+
+ // Creates a SingleThreadTaskRunner which runs tasks with |traits| on a thread
+ // named "TaskSchedulerSingleThread[Shared]" +
+ // kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
+ // index.
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode);
+
+#if defined(OS_WIN)
+ // Creates a SingleThreadTaskRunner which runs tasks with |traits| on a COM
+ // STA thread named "TaskSchedulerSingleThreadCOMSTA[Shared]" +
+ // kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
+ // index.
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode);
+#endif // defined(OS_WIN)
+
+ void JoinForTesting();
+
+ private:
+ class SchedulerSingleThreadTaskRunner;
+
+ enum ContinueOnShutdown {
+ IS_CONTINUE_ON_SHUTDOWN,
+ IS_NOT_CONTINUE_ON_SHUTDOWN,
+ CONTINUE_ON_SHUTDOWN_COUNT,
+ };
+
+ static ContinueOnShutdown TraitsToContinueOnShutdown(
+ const TaskTraits& traits);
+
+ template <typename DelegateType>
+ scoped_refptr<SchedulerSingleThreadTaskRunner> CreateTaskRunnerWithTraitsImpl(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode);
+
+ template <typename DelegateType>
+ std::unique_ptr<SchedulerWorkerDelegate> CreateSchedulerWorkerDelegate(
+ const std::string& name,
+ int id,
+ SingleThreadTaskRunnerThreadMode thread_mode);
+
+ template <typename DelegateType>
+ SchedulerWorker* CreateAndRegisterSchedulerWorker(
+ const std::string& name,
+ SingleThreadTaskRunnerThreadMode thread_mode,
+ ThreadPriority priority_hint);
+
+ template <typename DelegateType>
+ SchedulerWorker*& GetSharedSchedulerWorkerForTraits(const TaskTraits& traits);
+
+ void UnregisterSchedulerWorker(SchedulerWorker* worker);
+
+ void ReleaseSharedSchedulerWorkers();
+
+ const TrackedRef<TaskTracker> task_tracker_;
+ DelayedTaskManager* const delayed_task_manager_;
+
+ // Optional observer notified when a worker enters and exits its main
+ // function. Set in Start() and never modified afterwards.
+ SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+ // Synchronizes access to all members below.
+ SchedulerLock lock_;
+ std::vector<scoped_refptr<SchedulerWorker>> workers_;
+ int next_worker_id_ = 0;
+
+ // Workers for SingleThreadTaskRunnerThreadMode::SHARED tasks. It is
+ // important to have separate threads for CONTINUE_ON_SHUTDOWN and non-
+ // CONTINUE_ON_SHUTDOWN to avoid being in a situation where a
+  // CONTINUE_ON_SHUTDOWN task effectively blocks shutdown by preventing a
+  // BLOCK_SHUTDOWN task from being scheduled. https://crbug.com/829786
+ SchedulerWorker* shared_scheduler_workers_[ENVIRONMENT_COUNT]
+ [CONTINUE_ON_SHUTDOWN_COUNT] = {};
+#if defined(OS_WIN)
+ SchedulerWorker* shared_com_scheduler_workers_[ENVIRONMENT_COUNT]
+ [CONTINUE_ON_SHUTDOWN_COUNT] =
+ {};
+#endif // defined(OS_WIN)
+
+ // Set to true when Start() is called.
+ bool started_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunnerManager);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
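As a usage sketch of the interface above (assuming a live TaskTracker that exposes GetTrackedRef(), as the TrackedRef plumbing suggests, a live |delayed_task_manager|, and a hypothetical DoWork() function):

    SchedulerSingleThreadTaskRunnerManager manager(
        task_tracker.GetTrackedRef(), &delayed_task_manager);
    manager.Start();

    // SHARED mode may reuse one worker per environment; DEDICATED would
    // create a worker owned by the returned runner.
    scoped_refptr<SingleThreadTaskRunner> runner =
        manager.CreateSingleThreadTaskRunnerWithTraits(
            {TaskPriority::USER_VISIBLE},
            SingleThreadTaskRunnerThreadMode::SHARED);
    runner->PostTask(FROM_HERE, BindOnce(&DoWork));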
diff --git a/base/task_scheduler/scheduler_worker.cc b/base/task_scheduler/scheduler_worker.cc
new file mode 100644
index 0000000000..152b534cbb
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker.cc
@@ -0,0 +1,362 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_worker_observer.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/trace_event/trace_event.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#elif defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+namespace base {
+namespace internal {
+
+void SchedulerWorker::Delegate::WaitForWork(WaitableEvent* wake_up_event) {
+ DCHECK(wake_up_event);
+ const TimeDelta sleep_time = GetSleepTimeout();
+ if (sleep_time.is_max()) {
+ // Calling TimedWait with TimeDelta::Max is not recommended per
+ // http://crbug.com/465948.
+ wake_up_event->Wait();
+ } else {
+ wake_up_event->TimedWait(sleep_time);
+ }
+}
+
+SchedulerWorker::SchedulerWorker(
+ ThreadPriority priority_hint,
+ std::unique_ptr<Delegate> delegate,
+ TrackedRef<TaskTracker> task_tracker,
+ const SchedulerLock* predecessor_lock,
+ SchedulerBackwardCompatibility backward_compatibility)
+ : thread_lock_(predecessor_lock),
+ delegate_(std::move(delegate)),
+ task_tracker_(std::move(task_tracker)),
+ priority_hint_(priority_hint),
+ current_thread_priority_(GetDesiredThreadPriority())
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+ ,
+ backward_compatibility_(backward_compatibility)
+#endif
+{
+ DCHECK(delegate_);
+ DCHECK(task_tracker_);
+ DCHECK(CanUseBackgroundPriorityForSchedulerWorker() ||
+ priority_hint_ != ThreadPriority::BACKGROUND);
+}
+
+bool SchedulerWorker::Start(
+ SchedulerWorkerObserver* scheduler_worker_observer) {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ DCHECK(thread_handle_.is_null());
+
+ if (should_exit_.IsSet())
+ return true;
+
+ DCHECK(!scheduler_worker_observer_);
+ scheduler_worker_observer_ = scheduler_worker_observer;
+
+ self_ = this;
+
+ constexpr size_t kDefaultStackSize = 0;
+ PlatformThread::CreateWithPriority(kDefaultStackSize, this, &thread_handle_,
+ current_thread_priority_);
+
+ if (thread_handle_.is_null()) {
+ self_ = nullptr;
+ return false;
+ }
+
+ return true;
+}
+
+void SchedulerWorker::WakeUp() {
+ // Calling WakeUp() after Cleanup() or Join() is wrong because the
+ // SchedulerWorker cannot run more tasks.
+ DCHECK(!join_called_for_testing_.IsSet());
+ DCHECK(!should_exit_.IsSet());
+ wake_up_event_.Signal();
+}
+
+void SchedulerWorker::JoinForTesting() {
+ DCHECK(!join_called_for_testing_.IsSet());
+ join_called_for_testing_.Set();
+ wake_up_event_.Signal();
+
+ PlatformThreadHandle thread_handle;
+
+ {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ DCHECK(!thread_handle_.is_null());
+ thread_handle = thread_handle_;
+ // Reset |thread_handle_| so it isn't joined by the destructor.
+ thread_handle_ = PlatformThreadHandle();
+ }
+
+ PlatformThread::Join(thread_handle);
+}
+
+bool SchedulerWorker::ThreadAliveForTesting() const {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ return !thread_handle_.is_null();
+}
+
+SchedulerWorker::~SchedulerWorker() {
+ AutoSchedulerLock auto_lock(thread_lock_);
+
+ // If |thread_handle_| wasn't joined, detach it.
+ if (!thread_handle_.is_null()) {
+ DCHECK(!join_called_for_testing_.IsSet());
+ PlatformThread::Detach(thread_handle_);
+ }
+}
+
+void SchedulerWorker::Cleanup() {
+ DCHECK(!should_exit_.IsSet());
+ should_exit_.Set();
+ wake_up_event_.Signal();
+}
+
+void SchedulerWorker::BeginUnusedPeriod() {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ DCHECK(last_used_time_.is_null());
+ last_used_time_ = TimeTicks::Now();
+}
+
+void SchedulerWorker::EndUnusedPeriod() {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ DCHECK(!last_used_time_.is_null());
+ last_used_time_ = TimeTicks();
+}
+
+TimeTicks SchedulerWorker::GetLastUsedTime() const {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ return last_used_time_;
+}
+
+bool SchedulerWorker::ShouldExit() const {
+ // The ordering of the checks is important below. This SchedulerWorker may be
+ // released and outlive |task_tracker_| in unit tests. However, when the
+ // SchedulerWorker is released, |should_exit_| will be set, so check that
+ // first.
+ return should_exit_.IsSet() || join_called_for_testing_.IsSet() ||
+ task_tracker_->IsShutdownComplete();
+}
+
+ThreadPriority SchedulerWorker::GetDesiredThreadPriority() const {
+  // To avoid shutdown hangs, disallow a priority below NORMAL during shutdown.
+ if (task_tracker_->HasShutdownStarted())
+ return ThreadPriority::NORMAL;
+
+ return priority_hint_;
+}
+
+void SchedulerWorker::UpdateThreadPriority(
+ ThreadPriority desired_thread_priority) {
+ if (desired_thread_priority == current_thread_priority_)
+ return;
+
+ PlatformThread::SetCurrentThreadPriority(desired_thread_priority);
+ current_thread_priority_ = desired_thread_priority;
+}
+
+void SchedulerWorker::ThreadMain() {
+ if (priority_hint_ == ThreadPriority::BACKGROUND) {
+ switch (delegate_->GetThreadLabel()) {
+ case ThreadLabel::POOLED:
+ RunBackgroundPooledWorker();
+ return;
+ case ThreadLabel::SHARED:
+ RunBackgroundSharedWorker();
+ return;
+ case ThreadLabel::DEDICATED:
+ RunBackgroundDedicatedWorker();
+ return;
+#if defined(OS_WIN)
+ case ThreadLabel::SHARED_COM:
+ RunBackgroundSharedCOMWorker();
+ return;
+ case ThreadLabel::DEDICATED_COM:
+ RunBackgroundDedicatedCOMWorker();
+ return;
+#endif // defined(OS_WIN)
+ }
+ }
+
+ switch (delegate_->GetThreadLabel()) {
+ case ThreadLabel::POOLED:
+ RunPooledWorker();
+ return;
+ case ThreadLabel::SHARED:
+ RunSharedWorker();
+ return;
+ case ThreadLabel::DEDICATED:
+ RunDedicatedWorker();
+ return;
+#if defined(OS_WIN)
+ case ThreadLabel::SHARED_COM:
+ RunSharedCOMWorker();
+ return;
+ case ThreadLabel::DEDICATED_COM:
+ RunDedicatedCOMWorker();
+ return;
+#endif // defined(OS_WIN)
+ }
+}
+
+NOINLINE void SchedulerWorker::RunPooledWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundPooledWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunSharedWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundSharedWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunDedicatedWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundDedicatedWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+#if defined(OS_WIN)
+NOINLINE void SchedulerWorker::RunSharedCOMWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundSharedCOMWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunDedicatedCOMWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundDedicatedCOMWorker() {
+ const int line_number = __LINE__;
+ RunWorker();
+ base::debug::Alias(&line_number);
+}
+#endif // defined(OS_WIN)
+
+void SchedulerWorker::RunWorker() {
+ DCHECK_EQ(self_, this);
+ TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+
+ if (scheduler_worker_observer_)
+ scheduler_worker_observer_->OnSchedulerWorkerMainEntry();
+
+ delegate_->OnMainEntry(this);
+
+ // A SchedulerWorker starts out waiting for work.
+ {
+ TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+ delegate_->WaitForWork(&wake_up_event_);
+ TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+ }
+
+// When defined(COM_INIT_CHECK_HOOK_ENABLED), ignore
+// SchedulerBackwardCompatibility::INIT_COM_STA to find incorrect uses of
+// COM that should be running in a COM STA Task Runner.
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+ std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
+ if (backward_compatibility_ == SchedulerBackwardCompatibility::INIT_COM_STA)
+ com_initializer = std::make_unique<win::ScopedCOMInitializer>();
+#endif
+
+ while (!ShouldExit()) {
+#if defined(OS_MACOSX)
+ mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+ UpdateThreadPriority(GetDesiredThreadPriority());
+
+ // Get the sequence containing the next task to execute.
+ scoped_refptr<Sequence> sequence = delegate_->GetWork(this);
+ if (!sequence) {
+ // Exit immediately if GetWork() resulted in detaching this worker.
+ if (ShouldExit())
+ break;
+
+ TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+ delegate_->WaitForWork(&wake_up_event_);
+ TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+ continue;
+ }
+
+ sequence =
+ task_tracker_->RunAndPopNextTask(std::move(sequence), delegate_.get());
+
+ delegate_->DidRunTask();
+
+    // Re-enqueue |sequence| if allowed by RunAndPopNextTask().
+ if (sequence)
+ delegate_->ReEnqueueSequence(std::move(sequence));
+
+ // Calling WakeUp() guarantees that this SchedulerWorker will run Tasks from
+ // Sequences returned by the GetWork() method of |delegate_| until it
+ // returns nullptr. Resetting |wake_up_event_| here doesn't break this
+ // invariant and avoids a useless loop iteration before going to sleep if
+ // WakeUp() is called while this SchedulerWorker is awake.
+ wake_up_event_.Reset();
+ }
+
+ // Important: It is unsafe to access unowned state (e.g. |task_tracker_|)
+ // after invoking OnMainExit().
+
+ delegate_->OnMainExit(this);
+
+ if (scheduler_worker_observer_)
+ scheduler_worker_observer_->OnSchedulerWorkerMainExit();
+
+ // Release the self-reference to |this|. This can result in deleting |this|
+ // and as such no more member accesses should be made after this point.
+ self_ = nullptr;
+
+ TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/scheduler_worker.h b/base/task_scheduler/scheduler_worker.h
new file mode 100644
index 0000000000..61f049bfc7
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker.h
@@ -0,0 +1,262 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker_params.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#endif
+
+namespace base {
+
+class SchedulerWorkerObserver;
+
+namespace internal {
+
+class TaskTracker;
+
+// A worker that manages a single thread to run Tasks from Sequences returned
+// by a delegate.
+//
+// A SchedulerWorker starts out sleeping. It is woken up by a call to WakeUp().
+// After a wake-up, a SchedulerWorker runs Tasks from Sequences returned by the
+// GetWork() method of its delegate as long as it doesn't return nullptr. It
+// also periodically checks with its TaskTracker whether shutdown has completed
+// and exits when it has.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerWorker
+ : public RefCountedThreadSafe<SchedulerWorker>,
+ public PlatformThread::Delegate {
+ public:
+ // Labels this SchedulerWorker's association. This doesn't affect any logic
+ // but will add a stack frame labeling this thread for ease of stack trace
+ // identification.
+ enum class ThreadLabel {
+ POOLED,
+ SHARED,
+ DEDICATED,
+#if defined(OS_WIN)
+ SHARED_COM,
+ DEDICATED_COM,
+#endif // defined(OS_WIN)
+ };
+
+ // Delegate interface for SchedulerWorker. All methods except
+ // OnCanScheduleSequence() (inherited from CanScheduleSequenceObserver) are
+ // called from the thread managed by the SchedulerWorker instance.
+ class BASE_EXPORT Delegate : public CanScheduleSequenceObserver {
+ public:
+ ~Delegate() override = default;
+
+ // Returns the ThreadLabel the Delegate wants its SchedulerWorkers' stacks
+ // to be labeled with.
+ virtual ThreadLabel GetThreadLabel() const = 0;
+
+ // Called by |worker|'s thread when it enters its main function.
+ virtual void OnMainEntry(const SchedulerWorker* worker) = 0;
+
+ // Called by |worker|'s thread to get a Sequence from which to run a Task.
+ virtual scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) = 0;
+
+ // Called by the SchedulerWorker after it ran a task.
+ virtual void DidRunTask() = 0;
+
+ // Called when |sequence| isn't empty after the SchedulerWorker pops a Task
+ // from it. |sequence| is the last Sequence returned by GetWork().
+ //
+ // TODO(fdoray): Rename to RescheduleSequence() to match TaskTracker
+ // terminology.
+ virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence) = 0;
+
+ // Called to determine how long to sleep before the next call to GetWork().
+ // GetWork() may be called before this timeout expires if the worker's
+ // WakeUp() method is called.
+ virtual TimeDelta GetSleepTimeout() = 0;
+
+ // Called by the SchedulerWorker's thread to wait for work. Override this
+ // method if the thread in question needs special handling to go to sleep.
+ // |wake_up_event| is a manually resettable event and is signaled on
+    // SchedulerWorker::WakeUp().
+ virtual void WaitForWork(WaitableEvent* wake_up_event);
+
+ // Called by |worker|'s thread right before the main function exits. The
+ // Delegate is free to release any associated resources in this call. It is
+ // guaranteed that SchedulerWorker won't access the Delegate or the
+ // TaskTracker after calling OnMainExit() on the Delegate.
+ virtual void OnMainExit(SchedulerWorker* worker) {}
+ };
+
+ // Creates a SchedulerWorker that runs Tasks from Sequences returned by
+ // |delegate|. No actual thread will be created for this SchedulerWorker
+ // before Start() is called. |priority_hint| is the preferred thread priority;
+ // the actual thread priority depends on shutdown state and platform
+ // capabilities. |task_tracker| is used to handle shutdown behavior of Tasks.
+ // |predecessor_lock| is a lock that is allowed to be held when calling
+ // methods on this SchedulerWorker. |backward_compatibility| indicates
+ // whether backward compatibility is enabled. Either JoinForTesting() or
+ // Cleanup() must be called before releasing the last external reference.
+ SchedulerWorker(ThreadPriority priority_hint,
+ std::unique_ptr<Delegate> delegate,
+ TrackedRef<TaskTracker> task_tracker,
+ const SchedulerLock* predecessor_lock = nullptr,
+ SchedulerBackwardCompatibility backward_compatibility =
+ SchedulerBackwardCompatibility::DISABLED);
+
+ // Creates a thread to back the SchedulerWorker. The thread will be in a wait
+ // state pending a WakeUp() call. No thread will be created if Cleanup() was
+ // called. If specified, |scheduler_worker_observer| will be notified when the
+ // worker enters and exits its main function. It must not be destroyed before
+ // JoinForTesting() has returned (must never be destroyed in production).
+ // Returns true on success.
+ bool Start(SchedulerWorkerObserver* scheduler_worker_observer = nullptr);
+
+ // Wakes up this SchedulerWorker if it wasn't already awake. After this is
+ // called, this SchedulerWorker will run Tasks from Sequences returned by the
+ // GetWork() method of its delegate until it returns nullptr. No-op if Start()
+ // wasn't called. DCHECKs if called after Start() has failed or after
+ // Cleanup() has been called.
+ void WakeUp();
+
+ SchedulerWorker::Delegate* delegate() { return delegate_.get(); }
+
+ // Joins this SchedulerWorker. If a Task is already running, it will be
+ // allowed to complete its execution. This can only be called once.
+ //
+ // Note: A thread that detaches before JoinForTesting() is called may still be
+ // running after JoinForTesting() returns. However, it can't run tasks after
+ // JoinForTesting() returns.
+ void JoinForTesting();
+
+ // Returns true if the worker is alive.
+ bool ThreadAliveForTesting() const;
+
+ // Makes a request to cleanup the worker. This may be called from any thread.
+ // The caller is expected to release its reference to this object after
+ // calling Cleanup(). Further method calls after Cleanup() returns are
+ // undefined.
+ //
+ // Expected Usage:
+ // scoped_refptr<SchedulerWorker> worker_ = /* Existing Worker */
+ // worker_->Cleanup();
+ // worker_ = nullptr;
+ void Cleanup();
+
+ // Informs this SchedulerWorker about periods during which it is not being
+ // used. Thread-safe.
+ void BeginUnusedPeriod();
+ void EndUnusedPeriod();
+ // Returns the last time this SchedulerWorker was used. Returns a null time if
+ // this SchedulerWorker is currently in-use. Thread-safe.
+ TimeTicks GetLastUsedTime() const;
+
+ private:
+ friend class RefCountedThreadSafe<SchedulerWorker>;
+ class Thread;
+
+ ~SchedulerWorker() override;
+
+ bool ShouldExit() const;
+
+ // Returns the thread priority to use based on the priority hint, current
+ // shutdown state, and platform capabilities.
+ ThreadPriority GetDesiredThreadPriority() const;
+
+ // Changes the thread priority to |desired_thread_priority|. Must be called on
+ // the thread managed by |this|.
+ void UpdateThreadPriority(ThreadPriority desired_thread_priority);
+
+ // PlatformThread::Delegate:
+ void ThreadMain() override;
+
+ // Dummy frames to act as "RunLabeledWorker()" (see RunMain() below). Their
+ // impl is aliased to prevent compiler/linker from optimizing them out.
+ void RunPooledWorker();
+ void RunBackgroundPooledWorker();
+ void RunSharedWorker();
+ void RunBackgroundSharedWorker();
+ void RunDedicatedWorker();
+ void RunBackgroundDedicatedWorker();
+#if defined(OS_WIN)
+ void RunSharedCOMWorker();
+ void RunBackgroundSharedCOMWorker();
+ void RunDedicatedCOMWorker();
+ void RunBackgroundDedicatedCOMWorker();
+#endif // defined(OS_WIN)
+
+  // The real main, invoked through:
+ // ThreadMain() -> RunLabeledWorker() -> RunWorker().
+ // "RunLabeledWorker()" is a dummy frame based on ThreadLabel+ThreadPriority
+ // and used to easily identify threads in stack traces.
+ void RunWorker();
+
+ // Self-reference to prevent destruction of |this| while the thread is alive.
+ // Set in Start() before creating the thread. Reset in ThreadMain() before the
+ // thread exits. No lock required because the first access occurs before the
+ // thread is created and the second access occurs on the thread.
+ scoped_refptr<SchedulerWorker> self_;
+
+ // Synchronizes access to |thread_handle_| and |last_used_time_|.
+ mutable SchedulerLock thread_lock_;
+
+ // Handle for the thread managed by |this|.
+ PlatformThreadHandle thread_handle_;
+
+ // The last time this worker was used by its owner (e.g. to process work or
+ // stand as a required idle thread).
+ TimeTicks last_used_time_;
+
+ // Event to wake up the thread managed by |this|.
+ WaitableEvent wake_up_event_{WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED};
+
+ // Whether the thread should exit. Set by Cleanup().
+ AtomicFlag should_exit_;
+
+ const std::unique_ptr<Delegate> delegate_;
+ const TrackedRef<TaskTracker> task_tracker_;
+
+ // Optional observer notified when a worker enters and exits its main
+ // function. Set in Start() and never modified afterwards.
+ SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+ // Desired thread priority.
+ const ThreadPriority priority_hint_;
+
+ // Actual thread priority. Can be different than |priority_hint_| depending on
+ // system capabilities and shutdown state. No lock required because all post-
+ // construction accesses occur on the thread.
+ ThreadPriority current_thread_priority_;
+
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+ const SchedulerBackwardCompatibility backward_compatibility_;
+#endif
+
+ // Set once JoinForTesting() has been called.
+ AtomicFlag join_called_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorker);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
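To make the Delegate contract above concrete, here is a minimal, hypothetical delegate sketched against this header. It serves a single Sequence and otherwise lets the worker sleep; cross-thread synchronization is omitted for brevity (compare SchedulerWorkerDelegate in scheduler_single_thread_task_runner_manager.cc, which guards its sequence with a SchedulerLock):

    class OneSequenceDelegate : public SchedulerWorker::Delegate {
     public:
      explicit OneSequenceDelegate(scoped_refptr<Sequence> sequence)
          : sequence_(std::move(sequence)) {}

      // SchedulerWorker::Delegate:
      SchedulerWorker::ThreadLabel GetThreadLabel() const override {
        return SchedulerWorker::ThreadLabel::DEDICATED;
      }
      void OnMainEntry(const SchedulerWorker* worker) override {}
      scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
        // Hand the sequence out once; until ReEnqueueSequence() returns
        // it, further GetWork() calls yield nullptr and the worker sleeps.
        return std::move(sequence_);
      }
      void DidRunTask() override {}
      void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
        sequence_ = std::move(sequence);
      }
      TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }

      // CanScheduleSequenceObserver:
      void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
        sequence_ = std::move(sequence);
      }

     private:
      scoped_refptr<Sequence> sequence_;
    };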
diff --git a/base/task_scheduler/scheduler_worker_params.h b/base/task_scheduler/scheduler_worker_params.h
new file mode 100644
index 0000000000..ea753fff59
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_params.h
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+
+namespace base {
+
+enum class SchedulerBackwardCompatibility {
+ // No backward compatibility.
+ DISABLED,
+
+ // On Windows, initialize COM STA to mimic SequencedWorkerPool and
+ // BrowserThreadImpl. Behaves like DISABLED on other platforms.
+ // TODO(fdoray): Get rid of this and force tasks that care about a
+ // CoInitialized environment to request one explicitly (via an upcoming
+ // execution mode).
+ INIT_COM_STA,
+};
+
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
diff --git a/base/task_scheduler/scheduler_worker_pool.h b/base/task_scheduler/scheduler_worker_pool.h
new file mode 100644
index 0000000000..de5329e4f2
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/tracked_ref.h"
+
+namespace base {
+namespace internal {
+
+class DelayedTaskManager;
+class TaskTracker;
+
+// Interface for a worker pool.
+class BASE_EXPORT SchedulerWorkerPool : public CanScheduleSequenceObserver {
+ public:
+ ~SchedulerWorkerPool() override;
+
+ // Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+ // in this SchedulerWorkerPool using |traits|. Tasks may run in any order and
+ // in parallel.
+ scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits);
+
+ // Returns a SequencedTaskRunner whose PostTask invocations result in
+ // scheduling tasks in this SchedulerWorkerPool using |traits|. Tasks run one
+ // at a time in posting order.
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits);
+
+ // Posts |task| to be executed by this SchedulerWorkerPool as part of
+ // |sequence|. |task| won't be executed before its delayed run time, if any.
+ // Returns true if |task| is posted.
+ bool PostTaskWithSequence(Task task, scoped_refptr<Sequence> sequence);
+
+ // Registers the worker pool in TLS.
+ void BindToCurrentThread();
+
+ // Resets the worker pool in TLS.
+ void UnbindFromCurrentThread();
+
+ // Prevents new tasks from starting to run and waits for currently running
+ // tasks to complete their execution. It is guaranteed that no thread will do
+ // work on behalf of this SchedulerWorkerPool after this returns. It is
+ // invalid to post a task once this is called. TaskTracker::Flush() can be
+ // called before this to complete existing tasks, which might otherwise post a
+ // task during JoinForTesting(). This can only be called once.
+ virtual void JoinForTesting() = 0;
+
+ protected:
+ SchedulerWorkerPool(TrackedRef<TaskTracker> task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+
+ // Posts |task| to be executed by this SchedulerWorkerPool as part of
+ // |sequence|. This must only be called after |task| has gone through
+ // PostTaskWithSequence() and after |task|'s delayed run time.
+ void PostTaskWithSequenceNow(Task task, scoped_refptr<Sequence> sequence);
+
+ const TrackedRef<TaskTracker> task_tracker_;
+ DelayedTaskManager* const delayed_task_manager_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPool);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
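A usage sketch of the two task runner flavors above, assuming |pool| points to a live SchedulerWorkerPool implementation and DoWork() is a hypothetical function taking an int:

    // Parallel: tasks may run in any order, possibly concurrently.
    scoped_refptr<TaskRunner> parallel =
        pool->CreateTaskRunnerWithTraits({TaskPriority::BACKGROUND});
    parallel->PostTask(FROM_HERE, BindOnce(&DoWork, 1));
    parallel->PostTask(FROM_HERE, BindOnce(&DoWork, 2));

    // Sequenced: tasks run one at a time, in posting order.
    scoped_refptr<SequencedTaskRunner> sequenced =
        pool->CreateSequencedTaskRunnerWithTraits({TaskPriority::BACKGROUND});
    sequenced->PostTask(FROM_HERE, BindOnce(&DoWork, 3));
    sequenced->PostTask(FROM_HERE, BindOnce(&DoWork, 4));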
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc
new file mode 100644
index 0000000000..81dc5df4e0
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -0,0 +1,1028 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/sequence_token.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_restrictions.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_windows_thread_environment.h"
+#include "base/win/scoped_winrt_initializer.h"
+#include "base/win/windows_version.h"
+#endif // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+constexpr TimeDelta SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod;
+
+namespace {
+
+constexpr char kPoolNameSuffix[] = "Pool";
+constexpr char kDetachDurationHistogramPrefix[] =
+ "TaskScheduler.DetachDuration.";
+constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
+ "TaskScheduler.NumTasksBeforeDetach.";
+constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
+ "TaskScheduler.NumTasksBetweenWaits.";
+constexpr size_t kMaxNumberOfWorkers = 256;
+
+// Only used in DCHECKs.
+bool ContainsWorker(const std::vector<scoped_refptr<SchedulerWorker>>& workers,
+ const SchedulerWorker* worker) {
+ auto it = std::find_if(workers.begin(), workers.end(),
+ [worker](const scoped_refptr<SchedulerWorker>& i) {
+ return i.get() == worker;
+ });
+ return it != workers.end();
+}
+
+} // namespace
+
+class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
+ : public SchedulerWorker::Delegate,
+ public BlockingObserver {
+ public:
+ // |outer| owns the worker for which this delegate is constructed.
+ SchedulerWorkerDelegateImpl(TrackedRef<SchedulerWorkerPoolImpl> outer);
+ ~SchedulerWorkerDelegateImpl() override;
+
+ // SchedulerWorker::Delegate:
+ void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override;
+ SchedulerWorker::ThreadLabel GetThreadLabel() const override;
+ void OnMainEntry(const SchedulerWorker* worker) override;
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
+ void DidRunTask() override;
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override;
+ TimeDelta GetSleepTimeout() override;
+ void OnMainExit(SchedulerWorker* worker) override;
+
+ // BlockingObserver:
+ void BlockingStarted(BlockingType blocking_type) override;
+ void BlockingTypeUpgraded() override;
+ void BlockingEnded() override;
+
+ void MayBlockEntered();
+ void WillBlockEntered();
+
+ // Returns true iff this worker has been within a MAY_BLOCK ScopedBlockingCall
+ // for more than |outer_->MayBlockThreshold()|. The max tasks must be
+ // incremented if this returns true.
+ bool MustIncrementMaxTasksLockRequired();
+
+ bool is_running_background_task_lock_required() const {
+ outer_->lock_.AssertAcquired();
+ return is_running_background_task_;
+ }
+
+ private:
+ // Returns true if |worker| is allowed to cleanup and remove itself from the
+ // pool. Called from GetWork() when no work is available.
+ bool CanCleanupLockRequired(const SchedulerWorker* worker) const;
+
+ // Calls cleanup on |worker| and removes it from the pool. Called from
+ // GetWork() when no work is available and CanCleanupLockRequired() returns
+ // true.
+ void CleanupLockRequired(SchedulerWorker* worker);
+
+ // Called in GetWork() when a worker becomes idle.
+ void OnWorkerBecomesIdleLockRequired(SchedulerWorker* worker);
+
+ const TrackedRef<SchedulerWorkerPoolImpl> outer_;
+
+ // Time of the last detach.
+ TimeTicks last_detach_time_;
+
+ // Number of tasks executed since the last time the
+ // TaskScheduler.NumTasksBetweenWaits histogram was recorded.
+ size_t num_tasks_since_last_wait_ = 0;
+
+ // Number of tasks executed since the last time the
+ // TaskScheduler.NumTasksBeforeDetach histogram was recorded.
+ size_t num_tasks_since_last_detach_ = 0;
+
+ // Whether |outer_->max_tasks_| was incremented due to a ScopedBlockingCall on
+ // the thread. Access synchronized by |outer_->lock_|.
+ bool incremented_max_tasks_since_blocked_ = false;
+
+  // Time when MayBlockEntered() was last called. Reset when
+  // BlockingEnded() is called. Access synchronized by |outer_->lock_|.
+ TimeTicks may_block_start_time_;
+
+ // Whether this worker is currently running a task (i.e. GetWork() has
+ // returned a non-empty sequence and DidRunTask() hasn't been called yet).
+ bool is_running_task_ = false;
+
+ // Whether this worker is currently running a TaskPriority::BACKGROUND task.
+ // Writes are made from the worker thread and are protected by
+ // |outer_->lock_|. Reads are made from any thread, they are protected by
+ // |outer_->lock_| when made outside of the worker thread.
+ bool is_running_background_task_ = false;
+
+#if defined(OS_WIN)
+ std::unique_ptr<win::ScopedWindowsThreadEnvironment> win_thread_environment_;
+#endif // defined(OS_WIN)
+
+ // Verifies that specific calls are always made from the worker thread.
+ THREAD_CHECKER(worker_thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
+};
+
+SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
+ StringPiece histogram_label,
+ StringPiece pool_label,
+ ThreadPriority priority_hint,
+ TrackedRef<TaskTracker> task_tracker,
+ DelayedTaskManager* delayed_task_manager)
+ : SchedulerWorkerPool(std::move(task_tracker), delayed_task_manager),
+ pool_label_(pool_label.as_string()),
+ priority_hint_(priority_hint),
+ lock_(shared_priority_queue_.container_lock()),
+ idle_workers_stack_cv_for_testing_(lock_.CreateConditionVariable()),
+ // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
+ detach_duration_histogram_(Histogram::FactoryTimeGet(
+ JoinString({kDetachDurationHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
+ TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromHours(1),
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
+ // than 1000 tasks before detaching, there is no need to know the exact
+ // number of tasks that ran.
+ num_tasks_before_detach_histogram_(Histogram::FactoryGet(
+ JoinString({kNumTasksBeforeDetachHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
+ 1,
+ 1000,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
+ // expected to run between zero and a few tens of tasks between waits.
+ // When it runs more than 100 tasks, there is no need to know the exact
+ // number of tasks that ran.
+ num_tasks_between_waits_histogram_(Histogram::FactoryGet(
+ JoinString({kNumTasksBetweenWaitsHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
+ 1,
+ 100,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ tracked_ref_factory_(this) {
+ DCHECK(!histogram_label.empty());
+ DCHECK(!pool_label_.empty());
+}
+
+void SchedulerWorkerPoolImpl::Start(
+ const SchedulerWorkerPoolParams& params,
+ int max_background_tasks,
+ scoped_refptr<TaskRunner> service_thread_task_runner,
+ SchedulerWorkerObserver* scheduler_worker_observer,
+ WorkerEnvironment worker_environment) {
+ AutoSchedulerLock auto_lock(lock_);
+
+ DCHECK(workers_.empty());
+
+ max_tasks_ = params.max_tasks();
+ DCHECK_GE(max_tasks_, 1U);
+ initial_max_tasks_ = max_tasks_;
+ DCHECK_LE(initial_max_tasks_, kMaxNumberOfWorkers);
+ max_background_tasks_ = max_background_tasks;
+ suggested_reclaim_time_ = params.suggested_reclaim_time();
+ backward_compatibility_ = params.backward_compatibility();
+ worker_environment_ = worker_environment;
+
+ service_thread_task_runner_ = std::move(service_thread_task_runner);
+
+ DCHECK(!scheduler_worker_observer_);
+ scheduler_worker_observer_ = scheduler_worker_observer;
+
+  // The initial number of workers is |num_wake_ups_before_start_| + 1 to try
+  // to keep at least one standby thread at all times (capacity permitting).
+ const int num_initial_workers =
+ std::min(num_wake_ups_before_start_ + 1, static_cast<int>(max_tasks_));
+ workers_.reserve(num_initial_workers);
+
+ for (int index = 0; index < num_initial_workers; ++index) {
+ SchedulerWorker* worker =
+ CreateRegisterAndStartSchedulerWorkerLockRequired();
+
+ // CHECK that the first worker can be started (assume that failure means
+ // that threads can't be created on this machine).
+ CHECK(worker || index > 0);
+
+ if (worker) {
+ if (index < num_wake_ups_before_start_) {
+ worker->WakeUp();
+ } else {
+ idle_workers_stack_.Push(worker);
+ }
+ }
+ }
+}
+
+SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
+ // SchedulerWorkerPool should only ever be deleted:
+ // 1) In tests, after JoinForTesting().
+ // 2) In production, iff initialization failed.
+ // In both cases |workers_| should be empty.
+ DCHECK(workers_.empty());
+}
+
+void SchedulerWorkerPoolImpl::OnCanScheduleSequence(
+ scoped_refptr<Sequence> sequence) {
+ const auto sequence_sort_key = sequence->GetSortKey();
+ shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
+ sequence_sort_key);
+
+ WakeUpOneWorker();
+}
+
+void SchedulerWorkerPoolImpl::GetHistograms(
+ std::vector<const HistogramBase*>* histograms) const {
+ histograms->push_back(detach_duration_histogram_);
+ histograms->push_back(num_tasks_between_waits_histogram_);
+}
+
+int SchedulerWorkerPoolImpl::GetMaxConcurrentNonBlockedTasksDeprecated() const {
+#if DCHECK_IS_ON()
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK_NE(initial_max_tasks_, 0U)
+ << "GetMaxConcurrentTasksDeprecated() should only be called after the "
+ << "worker pool has started.";
+#endif
+ return initial_max_tasks_;
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersIdleForTesting(size_t n) {
+ AutoSchedulerLock auto_lock(lock_);
+
+#if DCHECK_IS_ON()
+ DCHECK(!some_workers_cleaned_up_for_testing_)
+ << "Workers detached prior to waiting for a specific number of idle "
+ "workers. Doing the wait under such conditions is flaky. Consider "
+ "using |suggested_reclaim_time_ = TimeDelta::Max()| for this test.";
+#endif
+
+ WaitForWorkersIdleLockRequiredForTesting(n);
+}
+
+void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
+ AutoSchedulerLock auto_lock(lock_);
+ WaitForWorkersIdleLockRequiredForTesting(workers_.size());
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersCleanedUpForTesting(size_t n) {
+ AutoSchedulerLock auto_lock(lock_);
+
+ if (!num_workers_cleaned_up_for_testing_cv_)
+ num_workers_cleaned_up_for_testing_cv_ = lock_.CreateConditionVariable();
+
+ while (num_workers_cleaned_up_for_testing_ < n)
+ num_workers_cleaned_up_for_testing_cv_->Wait();
+
+ num_workers_cleaned_up_for_testing_ = 0;
+}
+
+void SchedulerWorkerPoolImpl::JoinForTesting() {
+#if DCHECK_IS_ON()
+ join_for_testing_started_.Set();
+#endif
+
+ decltype(workers_) workers_copy;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+
+ DCHECK_GT(workers_.size(), size_t(0)) << "Joined an unstarted worker pool.";
+
+ // Ensure SchedulerWorkers in |workers_| do not attempt to cleanup while
+ // being joined.
+ worker_cleanup_disallowed_for_testing_ = true;
+
+ // Make a copy of the SchedulerWorkers so that we can call
+ // SchedulerWorker::JoinForTesting() without holding |lock_| since
+ // SchedulerWorkers may need to access |workers_|.
+ workers_copy = workers_;
+ }
+ for (const auto& worker : workers_copy)
+ worker->JoinForTesting();
+
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(workers_ == workers_copy);
+ // Release |workers_| to clear their TrackedRef against |this|.
+ workers_.clear();
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfWorkersForTesting() const {
+ AutoSchedulerLock auto_lock(lock_);
+ return workers_.size();
+}
+
+size_t SchedulerWorkerPoolImpl::GetMaxTasksForTesting() const {
+ AutoSchedulerLock auto_lock(lock_);
+ return max_tasks_;
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfIdleWorkersForTesting() const {
+ AutoSchedulerLock auto_lock(lock_);
+ return idle_workers_stack_.Size();
+}
+
+void SchedulerWorkerPoolImpl::MaximizeMayBlockThresholdForTesting() {
+ maximum_blocked_threshold_for_testing_.Set();
+}
+
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ SchedulerWorkerDelegateImpl(TrackedRef<SchedulerWorkerPoolImpl> outer)
+ : outer_(std::move(outer)) {
+ // Bound in OnMainEntry().
+ DETACH_FROM_THREAD(worker_thread_checker_);
+}
+
+// OnMainExit() handles the thread-affine cleanup; SchedulerWorkerDelegateImpl
+// can thereafter safely be deleted from any thread.
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ ~SchedulerWorkerDelegateImpl() = default;
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ OnCanScheduleSequence(scoped_refptr<Sequence> sequence) {
+ outer_->OnCanScheduleSequence(std::move(sequence));
+}
+
+SchedulerWorker::ThreadLabel
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetThreadLabel() const {
+ return SchedulerWorker::ThreadLabel::POOLED;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
+ const SchedulerWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ {
+#if DCHECK_IS_ON()
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ DCHECK(ContainsWorker(outer_->workers_, worker));
+#endif
+ }
+
+#if defined(OS_WIN)
+ if (outer_->worker_environment_ == WorkerEnvironment::COM_MTA) {
+ if (win::GetVersion() >= win::VERSION_WIN8) {
+ win_thread_environment_ = std::make_unique<win::ScopedWinrtInitializer>();
+ } else {
+ win_thread_environment_ = std::make_unique<win::ScopedCOMInitializer>(
+ win::ScopedCOMInitializer::kMTA);
+ }
+ DCHECK(win_thread_environment_->Succeeded());
+ }
+#endif // defined(OS_WIN)
+
+ DCHECK_EQ(num_tasks_since_last_wait_, 0U);
+
+ PlatformThread::SetName(
+ StringPrintf("TaskScheduler%sWorker", outer_->pool_label_.c_str()));
+
+ outer_->BindToCurrentThread();
+ SetBlockingObserverForCurrentThread(this);
+}
+
+scoped_refptr<Sequence>
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
+ SchedulerWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+ DCHECK(!is_running_task_);
+ DCHECK(!is_running_background_task_);
+
+ {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+
+ DCHECK(ContainsWorker(outer_->workers_, worker));
+
+    // Calling GetWork() while on the idle worker stack indicates that we
+    // reached GetWork() because the WaitableEvent timed out, in which case we
+    // return no work and possibly clean up the worker. To avoid searching
+    // through the idle stack, a non-null GetLastUsedTime() (or being directly
+    // on top of the idle stack) is used as a proxy for being on the idle
+    // stack.
+ const bool is_on_idle_workers_stack =
+ outer_->idle_workers_stack_.Peek() == worker ||
+ !worker->GetLastUsedTime().is_null();
+ DCHECK_EQ(is_on_idle_workers_stack,
+ outer_->idle_workers_stack_.Contains(worker));
+ if (is_on_idle_workers_stack) {
+ if (CanCleanupLockRequired(worker))
+ CleanupLockRequired(worker);
+ return nullptr;
+ }
+
+    // Excess workers should not get work until they are no longer excess
+    // (i.e. max tasks increases or another worker cleans up). This gives
+    // excess workers in the pool a chance to no longer be excess before being
+    // cleaned up.
+ if (outer_->NumberOfExcessWorkersLockRequired() >
+ outer_->idle_workers_stack_.Size()) {
+ OnWorkerBecomesIdleLockRequired(worker);
+ return nullptr;
+ }
+ }
+ scoped_refptr<Sequence> sequence;
+ {
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
+ outer_->shared_priority_queue_.BeginTransaction());
+
+ if (transaction->IsEmpty()) {
+ // |transaction| is kept alive while |worker| is added to
+ // |idle_workers_stack_| to avoid this race:
+ // 1. This thread creates a Transaction, finds |shared_priority_queue_|
+ // empty and ends the Transaction.
+ // 2. Other thread creates a Transaction, inserts a Sequence into
+ // |shared_priority_queue_| and ends the Transaction. This can't happen
+      // if the Transaction of step 1 is still active because there can
+      // only be one active Transaction per PriorityQueue at a time.
+ // 3. Other thread calls WakeUpOneWorker(). No thread is woken up because
+ // |idle_workers_stack_| is empty.
+ // 4. This thread adds itself to |idle_workers_stack_| and goes to sleep.
+ // No thread runs the Sequence inserted in step 2.
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ OnWorkerBecomesIdleLockRequired(worker);
+ return nullptr;
+ }
+
+    // Enforce that no more than |max_background_tasks_| background tasks run
+    // concurrently.
+ const TaskPriority priority = transaction->PeekSortKey().priority();
+ if (priority == TaskPriority::BACKGROUND) {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ if (outer_->num_running_background_tasks_ <
+ outer_->max_background_tasks_) {
+ ++outer_->num_running_background_tasks_;
+ is_running_background_task_ = true;
+ } else {
+ OnWorkerBecomesIdleLockRequired(worker);
+ return nullptr;
+ }
+ }
+
+ sequence = transaction->PopSequence();
+ }
+ DCHECK(sequence);
+#if DCHECK_IS_ON()
+ {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ DCHECK(!outer_->idle_workers_stack_.Contains(worker));
+ }
+#endif
+
+ is_running_task_ = true;
+ return sequence;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+ DCHECK(may_block_start_time_.is_null());
+ DCHECK(!incremented_max_tasks_since_blocked_);
+ DCHECK(is_running_task_);
+
+ is_running_task_ = false;
+
+ if (is_running_background_task_) {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ --outer_->num_running_background_tasks_;
+ is_running_background_task_ = false;
+ }
+
+ ++num_tasks_since_last_wait_;
+ ++num_tasks_since_last_detach_;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ ReEnqueueSequence(scoped_refptr<Sequence> sequence) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ const SequenceSortKey sequence_sort_key = sequence->GetSortKey();
+ outer_->shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
+ sequence_sort_key);
+ // This worker will soon call GetWork(). Therefore, there is no need to wake
+ // up a worker to run the sequence that was just inserted into
+ // |outer_->shared_priority_queue_|.
+}
+
+TimeDelta SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ GetSleepTimeout() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+ // Sleep for an extra 10% to avoid the following pathological case:
+ // 0) A task is running on a timer which matches |suggested_reclaim_time_|.
+ // 1) The timer fires and this worker is created by
+ // MaintainAtLeastOneIdleWorkerLockRequired() because the last idle
+ // worker was assigned the task.
+ // 2) This worker begins sleeping |suggested_reclaim_time_| (on top of the
+ // idle stack).
+ // 3) The task assigned to the other worker completes and the worker goes
+ // back on the idle stack (this worker is now second on the idle stack;
+ // its GetLastUsedTime() is set to Now()).
+ // 4) The sleep in (2) expires. Since (3) was fast this worker is likely to
+ // have been second on the idle stack long enough for
+  //      CanCleanupLockRequired() to be satisfied, in which case this worker
+  //      is cleaned up.
+  //   5) The timer fires at roughly the same time and, if (4) resulted in a
+  //      cleanup, we're back to (1); this causes thread churn.
+ //
+ // Sleeping 10% longer in (2) makes it much less likely that (4) occurs
+ // before (5). In that case (5) will cause (3) and refresh this worker's
+ // GetLastUsedTime(), making CanCleanupLockRequired() return false in (4)
+ // and avoiding churn.
+ //
+ // Of course the same problem arises if in (0) the timer matches
+ // |suggested_reclaim_time_ * 1.1| but it's expected that any timer slower
+ // than |suggested_reclaim_time_| will cause such churn during long idle
+ // periods. If this is a problem in practice, the standby thread
+ // configuration and algorithm should be revisited.
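+  //
+  // As a concrete (illustrative) instance: with the 30-second
+  // |kSuggestedReclaimTime| used by TaskScheduler::StartWithDefaultParams(),
+  // workers sleep roughly 33 seconds between reclaim checks.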
+ return outer_->suggested_reclaim_time_ * 1.1;
+}
+
+bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ CanCleanupLockRequired(const SchedulerWorker* worker) const {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ const TimeTicks last_used_time = worker->GetLastUsedTime();
+ return !last_used_time.is_null() &&
+ TimeTicks::Now() - last_used_time >= outer_->suggested_reclaim_time_ &&
+ LIKELY(!outer_->worker_cleanup_disallowed_for_testing_);
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::CleanupLockRequired(
+ SchedulerWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ outer_->lock_.AssertAcquired();
+ outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_);
+ outer_->cleanup_timestamps_.push(TimeTicks::Now());
+ worker->Cleanup();
+ outer_->RemoveFromIdleWorkersStackLockRequired(worker);
+
+ // Remove the worker from |workers_|.
+ auto worker_iter =
+ std::find(outer_->workers_.begin(), outer_->workers_.end(), worker);
+ DCHECK(worker_iter != outer_->workers_.end());
+ outer_->workers_.erase(worker_iter);
+
+ ++outer_->num_workers_cleaned_up_for_testing_;
+#if DCHECK_IS_ON()
+ outer_->some_workers_cleaned_up_for_testing_ = true;
+#endif
+ if (outer_->num_workers_cleaned_up_for_testing_cv_)
+ outer_->num_workers_cleaned_up_for_testing_cv_->Signal();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ OnWorkerBecomesIdleLockRequired(SchedulerWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ outer_->lock_.AssertAcquired();
+ // Record the TaskScheduler.NumTasksBetweenWaits histogram. After GetWork()
+ // returns nullptr, the SchedulerWorker will perform a wait on its
+  // WaitableEvent, so we record here how many tasks ran since the last wait.
+ outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_);
+ num_tasks_since_last_wait_ = 0;
+ outer_->AddToIdleWorkersStackLockRequired(worker);
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainExit(
+ SchedulerWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+#if DCHECK_IS_ON()
+ {
+ bool shutdown_complete = outer_->task_tracker_->IsShutdownComplete();
+ AutoSchedulerLock auto_lock(outer_->lock_);
+
+    // |worker| should already have been removed from the idle workers stack
+    // and |workers_| by the time the thread is about to exit (except when the
+    // pool is no longer going to be used, in which case it's fine for there to
+    // be invalid workers in the pool).
+ if (!shutdown_complete && !outer_->join_for_testing_started_.IsSet()) {
+ DCHECK(!outer_->idle_workers_stack_.Contains(worker));
+ DCHECK(!ContainsWorker(outer_->workers_, worker));
+ }
+ }
+#endif
+
+#if defined(OS_WIN)
+ win_thread_environment_.reset();
+#endif // defined(OS_WIN)
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingStarted(
+ BlockingType blocking_type) {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ // Blocking calls made outside of tasks should not influence the max tasks.
+ if (!is_running_task_)
+ return;
+
+ switch (blocking_type) {
+ case BlockingType::MAY_BLOCK:
+ MayBlockEntered();
+ break;
+ case BlockingType::WILL_BLOCK:
+ WillBlockEntered();
+ break;
+ }
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ BlockingTypeUpgraded() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+
+ // Don't do anything if a MAY_BLOCK ScopedBlockingCall instantiated in the
+ // same scope already caused the max tasks to be incremented.
+ if (incremented_max_tasks_since_blocked_)
+ return;
+
+ // Cancel the effect of a MAY_BLOCK ScopedBlockingCall instantiated in the
+ // same scope.
+ if (!may_block_start_time_.is_null()) {
+ may_block_start_time_ = TimeTicks();
+ --outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
+ }
+ }
+
+ WillBlockEntered();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingEnded() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ // Ignore blocking calls made outside of tasks.
+ if (!is_running_task_)
+ return;
+
+ AutoSchedulerLock auto_lock(outer_->lock_);
+ if (incremented_max_tasks_since_blocked_) {
+ outer_->DecrementMaxTasksLockRequired(is_running_background_task_);
+ } else {
+ DCHECK(!may_block_start_time_.is_null());
+ --outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
+ }
+
+ incremented_max_tasks_since_blocked_ = false;
+ may_block_start_time_ = TimeTicks();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::MayBlockEntered() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ {
+ AutoSchedulerLock auto_lock(outer_->lock_);
+
+ DCHECK(!incremented_max_tasks_since_blocked_);
+ DCHECK(may_block_start_time_.is_null());
+ may_block_start_time_ = TimeTicks::Now();
+ ++outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ ++outer_->num_pending_background_may_block_workers_;
+ }
+ outer_->ScheduleAdjustMaxTasksIfNeeded();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::WillBlockEntered() {
+ DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+ bool wake_up_allowed = false;
+ {
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
+ outer_->shared_priority_queue_.BeginTransaction());
+ AutoSchedulerLock auto_lock(outer_->lock_);
+
+ DCHECK(!incremented_max_tasks_since_blocked_);
+ DCHECK(may_block_start_time_.is_null());
+ incremented_max_tasks_since_blocked_ = true;
+ outer_->IncrementMaxTasksLockRequired(is_running_background_task_);
+
+ // If the number of workers was less than the old max tasks, PostTask
+ // would've handled creating extra workers during WakeUpOneWorker.
+ // Therefore, we don't need to do anything here.
+ if (outer_->workers_.size() < outer_->max_tasks_ - 1)
+ return;
+
+ if (transaction->IsEmpty()) {
+ outer_->MaintainAtLeastOneIdleWorkerLockRequired();
+ } else {
+ // TODO(crbug.com/757897): We may create extra workers in this case:
+ // |workers.size()| was equal to the old |max_tasks_|, we had multiple
+ // ScopedBlockingCalls in parallel and we had work on the PQ.
+ wake_up_allowed = outer_->WakeUpOneWorkerLockRequired();
+ // |wake_up_allowed| is true when the pool is started, and a WILL_BLOCK
+ // scope cannot be entered before the pool starts.
+ DCHECK(wake_up_allowed);
+ }
+ }
+ // TODO(crbug.com/813857): This can be better handled in the PostTask()
+ // codepath. We really only should do this if there are tasks pending.
+ if (wake_up_allowed)
+ outer_->ScheduleAdjustMaxTasksIfNeeded();
+}
+
+bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ MustIncrementMaxTasksLockRequired() {
+ outer_->lock_.AssertAcquired();
+
+ if (!incremented_max_tasks_since_blocked_ &&
+ !may_block_start_time_.is_null() &&
+ TimeTicks::Now() - may_block_start_time_ >= outer_->MayBlockThreshold()) {
+ incremented_max_tasks_since_blocked_ = true;
+
+    // Reset |may_block_start_time_| so that BlockingEnded() knows that it
+ // doesn't have to decrement the number of pending MAY_BLOCK workers.
+ may_block_start_time_ = TimeTicks();
+ --outer_->num_pending_may_block_workers_;
+ if (is_running_background_task_)
+ --outer_->num_pending_background_may_block_workers_;
+
+ return true;
+ }
+
+ return false;
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersIdleLockRequiredForTesting(
+ size_t n) {
+ lock_.AssertAcquired();
+
+ // Make sure workers do not cleanup while watching the idle count.
+ AutoReset<bool> ban_cleanups(&worker_cleanup_disallowed_for_testing_, true);
+
+ while (idle_workers_stack_.Size() < n)
+ idle_workers_stack_cv_for_testing_->Wait();
+}
+
+bool SchedulerWorkerPoolImpl::WakeUpOneWorkerLockRequired() {
+ lock_.AssertAcquired();
+
+ if (workers_.empty()) {
+ ++num_wake_ups_before_start_;
+ return false;
+ }
+
+ // Ensure that there is one worker that can run tasks on top of the idle
+ // stack, capacity permitting.
+ MaintainAtLeastOneIdleWorkerLockRequired();
+
+ // If the worker on top of the idle stack can run tasks, wake it up.
+ if (NumberOfExcessWorkersLockRequired() < idle_workers_stack_.Size()) {
+ SchedulerWorker* worker = idle_workers_stack_.Pop();
+ if (worker) {
+ worker->WakeUp();
+ }
+ }
+
+  // Re-ensure that there is one worker that can run tasks on top of the idle
+  // stack, capacity permitting, now that a worker may have been popped.
+ MaintainAtLeastOneIdleWorkerLockRequired();
+
+ return true;
+}
+
+void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
+ bool wake_up_allowed;
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ wake_up_allowed = WakeUpOneWorkerLockRequired();
+ }
+ if (wake_up_allowed)
+ ScheduleAdjustMaxTasksIfNeeded();
+}
+
+void SchedulerWorkerPoolImpl::MaintainAtLeastOneIdleWorkerLockRequired() {
+ lock_.AssertAcquired();
+
+ if (workers_.size() == kMaxNumberOfWorkers)
+ return;
+ DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
+
+ if (idle_workers_stack_.IsEmpty() && workers_.size() < max_tasks_) {
+ SchedulerWorker* new_worker =
+ CreateRegisterAndStartSchedulerWorkerLockRequired();
+ if (new_worker)
+ idle_workers_stack_.Push(new_worker);
+ }
+}
+
+void SchedulerWorkerPoolImpl::AddToIdleWorkersStackLockRequired(
+ SchedulerWorker* worker) {
+ lock_.AssertAcquired();
+
+ DCHECK(!idle_workers_stack_.Contains(worker));
+ idle_workers_stack_.Push(worker);
+
+ DCHECK_LE(idle_workers_stack_.Size(), workers_.size());
+
+ idle_workers_stack_cv_for_testing_->Broadcast();
+}
+
+void SchedulerWorkerPoolImpl::RemoveFromIdleWorkersStackLockRequired(
+ SchedulerWorker* worker) {
+ lock_.AssertAcquired();
+ idle_workers_stack_.Remove(worker);
+}
+
+SchedulerWorker*
+SchedulerWorkerPoolImpl::CreateRegisterAndStartSchedulerWorkerLockRequired() {
+ lock_.AssertAcquired();
+
+ DCHECK_LT(workers_.size(), max_tasks_);
+ DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
+ // SchedulerWorker needs |lock_| as a predecessor for its thread lock
+ // because in WakeUpOneWorker, |lock_| is first acquired and then
+ // the thread lock is acquired when WakeUp is called on the worker.
+ scoped_refptr<SchedulerWorker> worker = MakeRefCounted<SchedulerWorker>(
+ priority_hint_,
+ std::make_unique<SchedulerWorkerDelegateImpl>(
+ tracked_ref_factory_.GetTrackedRef()),
+ task_tracker_, &lock_, backward_compatibility_);
+
+ if (!worker->Start(scheduler_worker_observer_))
+ return nullptr;
+
+ workers_.push_back(worker);
+ DCHECK_LE(workers_.size(), max_tasks_);
+
+ if (!cleanup_timestamps_.empty()) {
+ detach_duration_histogram_->AddTime(TimeTicks::Now() -
+ cleanup_timestamps_.top());
+ cleanup_timestamps_.pop();
+ }
+ return worker.get();
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfExcessWorkersLockRequired() const {
+ lock_.AssertAcquired();
+ return std::max<int>(0, workers_.size() - max_tasks_);
+}
+
+void SchedulerWorkerPoolImpl::AdjustMaxTasks() {
+ DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
+
+ std::unique_ptr<PriorityQueue::Transaction> transaction(
+ shared_priority_queue_.BeginTransaction());
+ AutoSchedulerLock auto_lock(lock_);
+
+ const size_t previous_max_tasks = max_tasks_;
+
+ // Increment max tasks for each worker that has been within a MAY_BLOCK
+ // ScopedBlockingCall for more than MayBlockThreshold().
+ for (scoped_refptr<SchedulerWorker> worker : workers_) {
+ // The delegates of workers inside a SchedulerWorkerPoolImpl should be
+ // SchedulerWorkerDelegateImpls.
+ SchedulerWorkerDelegateImpl* delegate =
+ static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate());
+ if (delegate->MustIncrementMaxTasksLockRequired()) {
+ IncrementMaxTasksLockRequired(
+ delegate->is_running_background_task_lock_required());
+ }
+ }
+
+ // Wake up a worker per pending sequence, capacity permitting.
+ const size_t num_pending_sequences = transaction->Size();
+ const size_t num_wake_ups_needed =
+ std::min(max_tasks_ - previous_max_tasks, num_pending_sequences);
+
+ for (size_t i = 0; i < num_wake_ups_needed; ++i) {
+ // No need to call ScheduleAdjustMaxTasksIfNeeded() as the caller will
+ // take care of that for us.
+ WakeUpOneWorkerLockRequired();
+ }
+
+ MaintainAtLeastOneIdleWorkerLockRequired();
+}
+
+TimeDelta SchedulerWorkerPoolImpl::MayBlockThreshold() const {
+ if (maximum_blocked_threshold_for_testing_.IsSet())
+ return TimeDelta::Max();
+ // This value was set unscientifically based on intuition and may be adjusted
+ // in the future. This value is smaller than |kBlockedWorkersPollPeriod|
+  // because we hope that when multiple workers block around the same time, a
+ // single AdjustMaxTasks() call will perform all the necessary max tasks
+ // adjustments.
+ return TimeDelta::FromMilliseconds(10);
+}
+
+void SchedulerWorkerPoolImpl::ScheduleAdjustMaxTasksIfNeeded() {
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ if (polling_max_tasks_ || !ShouldPeriodicallyAdjustMaxTasksLockRequired()) {
+ return;
+ }
+ polling_max_tasks_ = true;
+ }
+ service_thread_task_runner_->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&SchedulerWorkerPoolImpl::AdjustMaxTasksFunction,
+ Unretained(this)),
+ kBlockedWorkersPollPeriod);
+}
+
+void SchedulerWorkerPoolImpl::AdjustMaxTasksFunction() {
+ DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
+
+ AdjustMaxTasks();
+ {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(polling_max_tasks_);
+
+ if (!ShouldPeriodicallyAdjustMaxTasksLockRequired()) {
+ polling_max_tasks_ = false;
+ return;
+ }
+ }
+ service_thread_task_runner_->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&SchedulerWorkerPoolImpl::AdjustMaxTasksFunction,
+ Unretained(this)),
+ kBlockedWorkersPollPeriod);
+}
+
+bool SchedulerWorkerPoolImpl::ShouldPeriodicallyAdjustMaxTasksLockRequired() {
+ lock_.AssertAcquired();
+
+ // The maximum number of background tasks that can run concurrently must be
+ // adjusted periodically when (1) the number of background tasks that are
+ // currently running is equal to it and (2) there are workers running
+  // background tasks within the scope of a MAY_BLOCK ScopedBlockingCall that
+  // haven't caused a max background tasks increment yet.
+ // - When (1) is false: A newly posted background task will be allowed to run
+ // normally. There is no hurry to increase max background tasks.
+ // - When (2) is false: AdjustMaxTasks() wouldn't affect
+ // |max_background_tasks_|.
+ if (num_running_background_tasks_ >= max_background_tasks_ &&
+ num_pending_background_may_block_workers_ > 0) {
+ return true;
+ }
+
+ // The maximum number of tasks that can run concurrently must be adjusted
+  // periodically when (1) there are no idle workers that can do work and
+  // (2) there are workers within the scope of a MAY_BLOCK ScopedBlockingCall
+  // that haven't caused a max tasks increment yet.
+ // - When (1) is false: A newly posted task will run on one of the idle
+ // workers that are allowed to do work. There is no hurry to increase max
+ // tasks.
+ // - When (2) is false: AdjustMaxTasks() wouldn't affect |max_tasks_|.
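+  // For example (illustrative): with |max_tasks_| = 4, 5 workers in total and
+  // 2 of them idle, there is 1 excess worker, so 2 - 1 = 1 idle worker can
+  // still do work and no periodic adjustment is needed (provided the
+  // background-task condition above doesn't hold either).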
+ const int idle_workers_that_can_do_work =
+ idle_workers_stack_.Size() - NumberOfExcessWorkersLockRequired();
+ return idle_workers_that_can_do_work <= 0 &&
+ num_pending_may_block_workers_ > 0;
+}
+
+void SchedulerWorkerPoolImpl::DecrementMaxTasksLockRequired(
+ bool is_running_background_task) {
+ lock_.AssertAcquired();
+ --max_tasks_;
+ if (is_running_background_task)
+ --max_background_tasks_;
+}
+
+void SchedulerWorkerPoolImpl::IncrementMaxTasksLockRequired(
+ bool is_running_background_task) {
+ lock_.AssertAcquired();
+ ++max_tasks_;
+ if (is_running_background_task)
+ ++max_background_tasks_;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.h b/base/task_scheduler/scheduler_worker_pool_impl.h
new file mode 100644
index 0000000000..a641cb39a5
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -0,0 +1,357 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/stack.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/priority_queue.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
+#include "base/task_scheduler/scheduler_worker_stack.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class HistogramBase;
+class SchedulerWorkerObserver;
+class SchedulerWorkerPoolParams;
+
+namespace internal {
+
+class DelayedTaskManager;
+class TaskTracker;
+
+// A pool of workers that run Tasks.
+//
+// The pool doesn't create threads until Start() is called. Tasks can be posted
+// at any time but will not run until after Start() is called.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
+ public:
+ enum class WorkerEnvironment {
+ // No special worker environment required.
+ NONE,
+#if defined(OS_WIN)
+ // Initialize a COM MTA on the worker.
+ COM_MTA,
+#endif // defined(OS_WIN)
+ };
+
+ // Constructs a pool without workers.
+ //
+ // |histogram_label| is used to label the pool's histograms ("TaskScheduler."
+  // + histogram_name + "." + |histogram_label| + extra suffixes); it must not
+  // be empty. |pool_label| is used to label the pool's threads; it must not be
+ // empty. |priority_hint| is the preferred thread priority; the actual thread
+ // priority depends on shutdown state and platform capabilities.
+ // |task_tracker| keeps track of tasks. |delayed_task_manager| handles tasks
+ // posted with a delay.
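+  // For example (illustrative), |histogram_label| = "Foreground" yields a
+  // detach duration histogram named
+  // "TaskScheduler.DetachDuration.ForegroundPool" (see
+  // kDetachDurationHistogramPrefix and kPoolNameSuffix in the .cc file).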
+ SchedulerWorkerPoolImpl(StringPiece histogram_label,
+ StringPiece pool_label,
+ ThreadPriority priority_hint,
+ TrackedRef<TaskTracker> task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+
+ // Creates workers following the |params| specification, allowing existing and
+ // future tasks to run. The pool will run at most |max_background_tasks|
+ // unblocked TaskPriority::BACKGROUND tasks concurrently. Uses
+ // |service_thread_task_runner| to monitor for blocked threads in the pool. If
+ // specified, |scheduler_worker_observer| will be notified when a worker
+ // enters and exits its main function. It must not be destroyed before
+ // JoinForTesting() has returned (must never be destroyed in production).
+ // |worker_environment| specifies any requested environment to execute the
+ // tasks. Can only be called once. CHECKs on failure.
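+  //
+  // A minimal sketch of a call site (names here are illustrative assumptions,
+  // not part of this API):
+  //   pool->Start(SchedulerWorkerPoolParams(4, TimeDelta::FromSeconds(30)),
+  //               1 /* max_background_tasks */, service_thread_task_runner,
+  //               nullptr /* scheduler_worker_observer */,
+  //               SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);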
+ void Start(const SchedulerWorkerPoolParams& params,
+ int max_background_tasks,
+ scoped_refptr<TaskRunner> service_thread_task_runner,
+ SchedulerWorkerObserver* scheduler_worker_observer,
+ WorkerEnvironment worker_environment);
+
+  // Destroying a SchedulerWorkerPoolImpl is not allowed in production; it is
+  // always leaked. In tests, it can only be destroyed after JoinForTesting()
+  // has returned.
+ ~SchedulerWorkerPoolImpl() override;
+
+ // SchedulerWorkerPool:
+ void JoinForTesting() override;
+
+ const HistogramBase* num_tasks_before_detach_histogram() const {
+ return num_tasks_before_detach_histogram_;
+ }
+
+ const HistogramBase* num_tasks_between_waits_histogram() const {
+ return num_tasks_between_waits_histogram_;
+ }
+
+ void GetHistograms(std::vector<const HistogramBase*>* histograms) const;
+
+ // Returns the maximum number of non-blocked tasks that can run concurrently
+ // in this pool.
+ //
+ // TODO(fdoray): Remove this method. https://crbug.com/687264
+ int GetMaxConcurrentNonBlockedTasksDeprecated() const;
+
+  // Waits until at least |n| workers are idle. Note that although workers are
+  // disallowed from cleaning up during this call, tests using a custom
+  // |suggested_reclaim_time_| need to be careful to invoke this swiftly after
+  // unblocking the waited-upon workers: if a worker has already detached by
+  // the time this is invoked, it will never make it onto the idle stack and
+  // this call will hang.
+ void WaitForWorkersIdleForTesting(size_t n);
+
+ // Waits until all workers are idle.
+ void WaitForAllWorkersIdleForTesting();
+
+ // Waits until |n| workers have cleaned up (since the last call to
+ // WaitForWorkersCleanedUpForTesting() or Start() if it wasn't called yet).
+ void WaitForWorkersCleanedUpForTesting(size_t n);
+
+ // Returns the number of workers in this worker pool.
+ size_t NumberOfWorkersForTesting() const;
+
+ // Returns |max_tasks_|.
+ size_t GetMaxTasksForTesting() const;
+
+ // Returns the number of workers that are idle (i.e. not running tasks).
+ size_t NumberOfIdleWorkersForTesting() const;
+
+ // Sets the MayBlock waiting threshold to TimeDelta::Max().
+ void MaximizeMayBlockThresholdForTesting();
+
+ private:
+ class SchedulerWorkerDelegateImpl;
+
+ // Friend tests so that they can access |kBlockedWorkersPollPeriod| and
+  // MayBlockThreshold().
+ friend class TaskSchedulerWorkerPoolBlockingTest;
+ friend class TaskSchedulerWorkerPoolMayBlockTest;
+
+ // The period between calls to AdjustMaxTasks() when the pool is at capacity.
+ // This value was set unscientifically based on intuition and may be adjusted
+ // in the future.
+ static constexpr TimeDelta kBlockedWorkersPollPeriod =
+ TimeDelta::FromMilliseconds(50);
+
+ // SchedulerWorkerPool:
+ void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override;
+
+ // Waits until at least |n| workers are idle. |lock_| must be held to call
+ // this function.
+ void WaitForWorkersIdleLockRequiredForTesting(size_t n);
+
+  // Wakes up the worker that most recently went idle in this pool, if any.
+ void WakeUpOneWorker();
+
+ // Performs the same action as WakeUpOneWorker() except asserts |lock_| is
+ // acquired rather than acquires it and returns true if worker wakeups are
+ // permitted.
+ bool WakeUpOneWorkerLockRequired();
+
+ // Adds a worker, if needed, to maintain one idle worker, |max_tasks_|
+ // permitting.
+ void MaintainAtLeastOneIdleWorkerLockRequired();
+
+ // Adds |worker| to |idle_workers_stack_|.
+ void AddToIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+ // Removes |worker| from |idle_workers_stack_|.
+ void RemoveFromIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+ // Returns true if worker cleanup is permitted.
+ bool CanWorkerCleanupForTestingLockRequired();
+
+ // Tries to add a new SchedulerWorker to the pool. Returns the new
+ // SchedulerWorker on success, nullptr otherwise. Cannot be called before
+ // Start(). Must be called under the protection of |lock_|.
+ SchedulerWorker* CreateRegisterAndStartSchedulerWorkerLockRequired();
+
+ // Returns the number of workers in the pool that should not run tasks due to
+ // the pool being over capacity.
+ size_t NumberOfExcessWorkersLockRequired() const;
+
+ // Examines the list of SchedulerWorkers and increments |max_tasks_| for each
+ // worker that has been within the scope of a MAY_BLOCK ScopedBlockingCall for
+  // more than MayBlockThreshold().
+ void AdjustMaxTasks();
+
+ // Returns the threshold after which the max tasks is increased to compensate
+ // for a worker that is within a MAY_BLOCK ScopedBlockingCall.
+ TimeDelta MayBlockThreshold() const;
+
+ // Starts calling AdjustMaxTasks() periodically on
+ // |service_thread_task_runner_| if not already requested.
+ void ScheduleAdjustMaxTasksIfNeeded();
+
+ // Calls AdjustMaxTasks() and schedules it again as necessary. May only be
+ // called from the service thread.
+ void AdjustMaxTasksFunction();
+
+ // Returns true if AdjustMaxTasks() should periodically be called on
+ // |service_thread_task_runner_|.
+ bool ShouldPeriodicallyAdjustMaxTasksLockRequired();
+
+ // Increments/decrements the number of tasks that can run in this pool.
+ // |is_running_background_task| indicates whether the worker causing the
+ // change is currently running a TaskPriority::BACKGROUND task.
+ void DecrementMaxTasksLockRequired(bool is_running_background_task);
+ void IncrementMaxTasksLockRequired(bool is_running_background_task);
+
+ const std::string pool_label_;
+ const ThreadPriority priority_hint_;
+
+ // PriorityQueue from which all threads of this worker pool get work.
+ PriorityQueue shared_priority_queue_;
+
+ // Suggested reclaim time for workers. Initialized by Start(). Never modified
+ // afterwards (i.e. can be read without synchronization after Start()).
+ TimeDelta suggested_reclaim_time_;
+
+ SchedulerBackwardCompatibility backward_compatibility_;
+
+ // Synchronizes accesses to |workers_|, |max_tasks_|, |max_background_tasks_|,
+ // |num_running_background_tasks_|, |num_pending_may_block_workers_|,
+ // |idle_workers_stack_|, |idle_workers_stack_cv_for_testing_|,
+ // |num_wake_ups_before_start_|, |cleanup_timestamps_|, |polling_max_tasks_|,
+ // |worker_cleanup_disallowed_for_testing_|,
+ // |num_workers_cleaned_up_for_testing_|,
+ // |SchedulerWorkerDelegateImpl::is_on_idle_workers_stack_|,
+ // |SchedulerWorkerDelegateImpl::incremented_max_tasks_since_blocked_| and
+ // |SchedulerWorkerDelegateImpl::may_block_start_time_|. Has
+ // |shared_priority_queue_|'s lock as its predecessor so that a worker can be
+ // pushed to |idle_workers_stack_| within the scope of a Transaction (more
+ // details in GetWork()).
+ mutable SchedulerLock lock_;
+
+ // All workers owned by this worker pool.
+ std::vector<scoped_refptr<SchedulerWorker>> workers_;
+
+ // The maximum number of tasks that can run concurrently in this pool. Workers
+ // can be added as needed up until there are |max_tasks_| workers.
+ size_t max_tasks_ = 0;
+
+ // Initial value of |max_tasks_| as set in Start().
+ size_t initial_max_tasks_ = 0;
+
+ // The maximum number of background tasks that can run concurrently in this
+ // pool.
+ int max_background_tasks_ = 0;
+
+ // The number of background tasks that are currently running in this pool.
+ int num_running_background_tasks_ = 0;
+
+ // Number of workers that are within the scope of a MAY_BLOCK
+ // ScopedBlockingCall but haven't caused a max task increase yet.
+ int num_pending_may_block_workers_ = 0;
+
+ // Number of workers that are running a TaskPriority::BACKGROUND task and are
+ // within the scope of a MAY_BLOCK ScopedBlockingCall but haven't caused a max
+ // task increase yet.
+ int num_pending_background_may_block_workers_ = 0;
+
+ // Environment to be initialized per worker.
+ WorkerEnvironment worker_environment_ = WorkerEnvironment::NONE;
+
+ // Stack of idle workers. Initially, all workers are on this stack. A worker
+ // is removed from the stack before its WakeUp() function is called and when
+ // it receives work from GetWork() (a worker calls GetWork() when its sleep
+ // timeout expires, even if its WakeUp() method hasn't been called). A worker
+ // is pushed on this stack when it receives nullptr from GetWork().
+ SchedulerWorkerStack idle_workers_stack_;
+
+ // Signaled when a worker is added to the idle workers stack.
+ std::unique_ptr<ConditionVariable> idle_workers_stack_cv_for_testing_;
+
+ // Number of wake ups that occurred before Start(). Never modified after
+ // Start() (i.e. can be read without synchronization after Start()).
+ int num_wake_ups_before_start_ = 0;
+
+ // Stack that contains the timestamps of when workers get cleaned up.
+ // Timestamps get popped off the stack as new workers are added.
+ base::stack<TimeTicks, std::vector<TimeTicks>> cleanup_timestamps_;
+
+ // Whether we are currently polling for necessary adjustments to |max_tasks_|.
+ bool polling_max_tasks_ = false;
+
+ // Indicates to the delegates that workers are not permitted to cleanup.
+ bool worker_cleanup_disallowed_for_testing_ = false;
+
+ // Counts the number of workers cleaned up since the last call to
+ // WaitForWorkersCleanedUpForTesting() (or Start() if it wasn't called yet).
+ // |some_workers_cleaned_up_for_testing_| is true if this was ever
+ // incremented. Tests with a custom |suggested_reclaim_time_| can wait on a
+ // specific number of workers being cleaned up via
+ // WaitForWorkersCleanedUpForTesting().
+ size_t num_workers_cleaned_up_for_testing_ = 0;
+#if DCHECK_IS_ON()
+ bool some_workers_cleaned_up_for_testing_ = false;
+#endif
+
+ // Signaled, if non-null, when |num_workers_cleaned_up_for_testing_| is
+ // incremented.
+ std::unique_ptr<ConditionVariable> num_workers_cleaned_up_for_testing_cv_;
+
+ // Used for testing and makes MayBlockThreshold() return the maximum
+ // TimeDelta.
+ AtomicFlag maximum_blocked_threshold_for_testing_;
+
+#if DCHECK_IS_ON()
+ // Set at the start of JoinForTesting().
+ AtomicFlag join_for_testing_started_;
+#endif
+
+ // TaskScheduler.DetachDuration.[worker pool name] histogram. Intentionally
+ // leaked.
+ HistogramBase* const detach_duration_histogram_;
+
+ // TaskScheduler.NumTasksBeforeDetach.[worker pool name] histogram.
+ // Intentionally leaked.
+ HistogramBase* const num_tasks_before_detach_histogram_;
+
+ // TaskScheduler.NumTasksBetweenWaits.[worker pool name] histogram.
+ // Intentionally leaked.
+ HistogramBase* const num_tasks_between_waits_histogram_;
+
+ scoped_refptr<TaskRunner> service_thread_task_runner_;
+
+ // Optional observer notified when a worker enters and exits its main
+ // function. Set in Start() and never modified afterwards.
+ SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+  // Ensures recently cleaned up workers (ref.
+  // SchedulerWorkerDelegateImpl::CleanupLockRequired()) have had time to exit,
+  // as they hold a raw reference to |this| (and to TaskTracker) which could
+  // otherwise result in racy use-after-frees: once cleaned up, they are no
+  // longer part of |workers_| and hence are not explicitly joined in
+  // JoinForTesting(). See https://crbug.com/810464.
+ TrackedRefFactory<SchedulerWorkerPoolImpl> tracked_ref_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPoolImpl);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
diff --git a/base/task_scheduler/scheduler_worker_pool_params.cc b/base/task_scheduler/scheduler_worker_pool_params.cc
new file mode 100644
index 0000000000..08b4f7b0d9
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+
+namespace base {
+
+SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
+ int max_tasks,
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility)
+ : max_tasks_(max_tasks),
+ suggested_reclaim_time_(suggested_reclaim_time),
+ backward_compatibility_(backward_compatibility) {}
+
+SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
+ const SchedulerWorkerPoolParams& other) = default;
+
+SchedulerWorkerPoolParams& SchedulerWorkerPoolParams::operator=(
+ const SchedulerWorkerPoolParams& other) = default;
+
+} // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool_params.h b/base/task_scheduler/scheduler_worker_pool_params.h
new file mode 100644
index 0000000000..9ee9472c34
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_params.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
+
+#include "base/task_scheduler/scheduler_worker_params.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BASE_EXPORT SchedulerWorkerPoolParams final {
+ public:
+  // Constructs a set of params used to initialize a pool. The pool will
+  // concurrently run at most |max_tasks| tasks that aren't blocked (i.e. not
+  // within the scope of a ScopedBlockingCall). |suggested_reclaim_time| sets a
+  // suggestion on when to reclaim idle threads. The pool is free to ignore
+  // this value for performance or correctness reasons.
+  // |backward_compatibility| indicates whether backward compatibility is
+  // enabled.
+ SchedulerWorkerPoolParams(
+ int max_tasks,
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility =
+ SchedulerBackwardCompatibility::DISABLED);
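+
+  // Example (illustrative): a pool allowing at most 4 concurrent unblocked
+  // tasks, with workers reclaimed after ~30 seconds of idleness:
+  //   SchedulerWorkerPoolParams params(4, TimeDelta::FromSeconds(30));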
+
+ SchedulerWorkerPoolParams(const SchedulerWorkerPoolParams& other);
+ SchedulerWorkerPoolParams& operator=(const SchedulerWorkerPoolParams& other);
+
+ int max_tasks() const { return max_tasks_; }
+ TimeDelta suggested_reclaim_time() const { return suggested_reclaim_time_; }
+ SchedulerBackwardCompatibility backward_compatibility() const {
+ return backward_compatibility_;
+ }
+
+ private:
+ int max_tasks_;
+ TimeDelta suggested_reclaim_time_;
+ SchedulerBackwardCompatibility backward_compatibility_;
+};
+
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
diff --git a/base/task_scheduler/scheduler_worker_stack.cc b/base/task_scheduler/scheduler_worker_stack.cc
new file mode 100644
index 0000000000..613fe09a0c
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_stack.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_stack.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "base/task_scheduler/scheduler_worker.h"
+
+namespace base {
+namespace internal {
+
+SchedulerWorkerStack::SchedulerWorkerStack() = default;
+
+SchedulerWorkerStack::~SchedulerWorkerStack() = default;
+
+void SchedulerWorkerStack::Push(SchedulerWorker* worker) {
+ DCHECK(!Contains(worker)) << "SchedulerWorker already on stack";
+ if (!IsEmpty())
+ stack_.back()->BeginUnusedPeriod();
+ stack_.push_back(worker);
+}
+
+SchedulerWorker* SchedulerWorkerStack::Pop() {
+ if (IsEmpty())
+ return nullptr;
+ SchedulerWorker* const worker = stack_.back();
+ stack_.pop_back();
+ if (!IsEmpty())
+ stack_.back()->EndUnusedPeriod();
+ return worker;
+}
+
+SchedulerWorker* SchedulerWorkerStack::Peek() const {
+ if (IsEmpty())
+ return nullptr;
+ return stack_.back();
+}
+
+bool SchedulerWorkerStack::Contains(const SchedulerWorker* worker) const {
+ return ContainsValue(stack_, worker);
+}
+
+void SchedulerWorkerStack::Remove(const SchedulerWorker* worker) {
+ DCHECK(!IsEmpty());
+ DCHECK_NE(worker, stack_.back());
+ auto it = std::find(stack_.begin(), stack_.end(), worker);
+ DCHECK(it != stack_.end());
+ DCHECK_NE(TimeTicks(), (*it)->GetLastUsedTime());
+ stack_.erase(it);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/scheduler_worker_stack.h b/base/task_scheduler/scheduler_worker_stack.h
new file mode 100644
index 0000000000..e9cedec336
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_stack.h
@@ -0,0 +1,67 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorker;
+
+// A stack of SchedulerWorkers which has custom logic to treat the worker on top
+// of the stack as being "in-use" (so its time in that position doesn't count
+// towards being inactive / reclaimable). Supports removal of arbitrary
+// SchedulerWorkers. DCHECKs when a SchedulerWorker is inserted multiple times.
+// SchedulerWorkers are not owned by the stack. Push() is amortized O(1).
+// Pop(), Peek(), Size() and IsEmpty() are O(1). Contains() and Remove() are
+// O(n). This class is NOT thread-safe.
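+//
+// Example (illustrative):
+//   SchedulerWorkerStack stack;
+//   stack.Push(worker_a);  // |worker_a| is on top and considered in-use.
+//   stack.Push(worker_b);  // |worker_a| is now flagged as unused.
+//   SchedulerWorker* top = stack.Pop();  // Returns |worker_b|; |worker_a| is
+//                                        // back on top and in-use.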
+class BASE_EXPORT SchedulerWorkerStack {
+ public:
+ SchedulerWorkerStack();
+ ~SchedulerWorkerStack();
+
+ // Inserts |worker| at the top of the stack. |worker| must not already be on
+ // the stack. Flags the SchedulerWorker previously on top of the stack, if
+ // any, as unused.
+ void Push(SchedulerWorker* worker);
+
+ // Removes the top SchedulerWorker from the stack and returns it. Returns
+ // nullptr if the stack is empty. Flags the SchedulerWorker now on top of the
+ // stack, if any, as being in-use.
+ SchedulerWorker* Pop();
+
+ // Returns the top SchedulerWorker from the stack, nullptr if empty.
+ SchedulerWorker* Peek() const;
+
+ // Returns true if |worker| is already on the stack.
+ bool Contains(const SchedulerWorker* worker) const;
+
+ // Removes |worker| from the stack. Must not be invoked for the first worker
+ // on the stack.
+ void Remove(const SchedulerWorker* worker);
+
+ // Returns the number of SchedulerWorkers on the stack.
+ size_t Size() const { return stack_.size(); }
+
+ // Returns true if the stack is empty.
+ bool IsEmpty() const { return stack_.empty(); }
+
+ private:
+ std::vector<SchedulerWorker*> stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerStack);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
diff --git a/base/task_scheduler/task_scheduler.cc b/base/task_scheduler/task_scheduler.cc
new file mode 100644
index 0000000000..6d20ead4a5
--- /dev/null
+++ b/base/task_scheduler/task_scheduler.cc
@@ -0,0 +1,86 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_scheduler.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/sys_info.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_scheduler_impl.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace {
+
+// |g_task_scheduler| is intentionally leaked on shutdown.
+TaskScheduler* g_task_scheduler = nullptr;
+
+} // namespace
+
+TaskScheduler::InitParams::InitParams(
+ const SchedulerWorkerPoolParams& background_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& background_blocking_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& foreground_blocking_worker_pool_params_in,
+ SharedWorkerPoolEnvironment shared_worker_pool_environment_in)
+ : background_worker_pool_params(background_worker_pool_params_in),
+ background_blocking_worker_pool_params(
+ background_blocking_worker_pool_params_in),
+ foreground_worker_pool_params(foreground_worker_pool_params_in),
+ foreground_blocking_worker_pool_params(
+ foreground_blocking_worker_pool_params_in),
+ shared_worker_pool_environment(shared_worker_pool_environment_in) {}
+
+TaskScheduler::InitParams::~InitParams() = default;
+
+#if !defined(OS_NACL)
+// static
+void TaskScheduler::CreateAndStartWithDefaultParams(StringPiece name) {
+ Create(name);
+ GetInstance()->StartWithDefaultParams();
+}
+
+void TaskScheduler::StartWithDefaultParams() {
+ // Values were chosen so that:
+ // * There are few background threads.
+ // * Background threads never outnumber foreground threads.
+ // * The system is utilized maximally by foreground threads.
+  //   * The main thread is assumed to be busy; foreground workers are capped
+  //     at |num_cores - 1|.
+ const int num_cores = SysInfo::NumberOfProcessors();
+ constexpr int kBackgroundMaxThreads = 1;
+ constexpr int kBackgroundBlockingMaxThreads = 2;
+ const int kForegroundMaxThreads = std::max(1, num_cores - 1);
+ const int kForegroundBlockingMaxThreads = std::max(2, num_cores - 1);
+
+ constexpr TimeDelta kSuggestedReclaimTime = TimeDelta::FromSeconds(30);
+
+ Start({{kBackgroundMaxThreads, kSuggestedReclaimTime},
+ {kBackgroundBlockingMaxThreads, kSuggestedReclaimTime},
+ {kForegroundMaxThreads, kSuggestedReclaimTime},
+ {kForegroundBlockingMaxThreads, kSuggestedReclaimTime}});
+}
+#endif // !defined(OS_NACL)
+
+// static
+void TaskScheduler::Create(StringPiece name) {
+ SetInstance(std::make_unique<internal::TaskSchedulerImpl>(name));
+}
+
+// static
+void TaskScheduler::SetInstance(std::unique_ptr<TaskScheduler> task_scheduler) {
+ delete g_task_scheduler;
+ g_task_scheduler = task_scheduler.release();
+}
+
+// static
+TaskScheduler* TaskScheduler::GetInstance() {
+ return g_task_scheduler;
+}
+
+} // namespace base
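For concreteness, the default thread counts chosen by StartWithDefaultParams() above, evaluated for a hypothetical 8-core machine; kNumCores stands in for SysInfo::NumberOfProcessors():

#include <algorithm>

constexpr int kNumCores = 8;  // Stand-in for SysInfo::NumberOfProcessors().

// One background thread, two background-blocking threads; foreground threads
// are capped at |num_cores - 1| so the (busy) main thread keeps a core.
constexpr int kBackgroundMaxThreads = 1;
constexpr int kBackgroundBlockingMaxThreads = 2;
constexpr int kForegroundMaxThreads = std::max(1, kNumCores - 1);          // 7
constexpr int kForegroundBlockingMaxThreads = std::max(2, kNumCores - 1);  // 7

static_assert(kBackgroundMaxThreads <= kForegroundMaxThreads,
              "Background threads never outnumber foreground threads.");
static_assert(kBackgroundBlockingMaxThreads <= kForegroundBlockingMaxThreads,
              "The blocking pools follow the same rule.");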
diff --git a/base/task_scheduler/task_scheduler_impl.cc b/base/task_scheduler/task_scheduler_impl.cc
new file mode 100644
index 0000000000..605c1e88ca
--- /dev/null
+++ b/base/task_scheduler/task_scheduler_impl.cc
@@ -0,0 +1,281 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_scheduler_impl.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/service_thread.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+TaskSchedulerImpl::TaskSchedulerImpl(StringPiece histogram_label)
+ : TaskSchedulerImpl(histogram_label,
+ std::make_unique<TaskTrackerImpl>(histogram_label)) {}
+
+TaskSchedulerImpl::TaskSchedulerImpl(
+ StringPiece histogram_label,
+ std::unique_ptr<TaskTrackerImpl> task_tracker)
+ : task_tracker_(std::move(task_tracker)),
+ service_thread_(std::make_unique<ServiceThread>(task_tracker_.get())),
+ single_thread_task_runner_manager_(task_tracker_->GetTrackedRef(),
+ &delayed_task_manager_) {
+ DCHECK(!histogram_label.empty());
+
+ static_assert(arraysize(environment_to_worker_pool_) == ENVIRONMENT_COUNT,
+ "The size of |environment_to_worker_pool_| must match "
+ "ENVIRONMENT_COUNT.");
+ static_assert(
+ size(kEnvironmentParams) == ENVIRONMENT_COUNT,
+ "The size of |kEnvironmentParams| must match ENVIRONMENT_COUNT.");
+
+ int num_pools_to_create = CanUseBackgroundPriorityForSchedulerWorker()
+ ? ENVIRONMENT_COUNT
+ : ENVIRONMENT_COUNT_WITHOUT_BACKGROUND_PRIORITY;
+ for (int environment_type = 0; environment_type < num_pools_to_create;
+ ++environment_type) {
+ worker_pools_.emplace_back(std::make_unique<SchedulerWorkerPoolImpl>(
+ JoinString(
+ {histogram_label, kEnvironmentParams[environment_type].name_suffix},
+ "."),
+ kEnvironmentParams[environment_type].name_suffix,
+ kEnvironmentParams[environment_type].priority_hint,
+ task_tracker_->GetTrackedRef(), &delayed_task_manager_));
+ }
+
+ // Map environment indexes to pools.
+ environment_to_worker_pool_[FOREGROUND] = worker_pools_[FOREGROUND].get();
+ environment_to_worker_pool_[FOREGROUND_BLOCKING] =
+ worker_pools_[FOREGROUND_BLOCKING].get();
+
+ if (CanUseBackgroundPriorityForSchedulerWorker()) {
+ environment_to_worker_pool_[BACKGROUND] = worker_pools_[BACKGROUND].get();
+ environment_to_worker_pool_[BACKGROUND_BLOCKING] =
+ worker_pools_[BACKGROUND_BLOCKING].get();
+ } else {
+ // On platforms without background thread priority, tasks posted to the
+ // background environment are run by foreground pools.
+ environment_to_worker_pool_[BACKGROUND] = worker_pools_[FOREGROUND].get();
+ environment_to_worker_pool_[BACKGROUND_BLOCKING] =
+ worker_pools_[FOREGROUND_BLOCKING].get();
+ }
+}
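A reduced sketch of the mapping set up above, with simplified stand-ins for the environment enum and SchedulerWorkerPoolImpl: on platforms without a background thread priority, the BACKGROUND entries simply alias the foreground pools, so lookups never reach a pool that was not created.

#include <array>

enum Environment {
  FOREGROUND,
  FOREGROUND_BLOCKING,
  BACKGROUND,
  BACKGROUND_BLOCKING,
  ENVIRONMENT_COUNT
};

struct Pool {};  // Stand-in for SchedulerWorkerPoolImpl.

std::array<Pool*, ENVIRONMENT_COUNT> MapEnvironmentsToPools(
    Pool* foreground, Pool* foreground_blocking,
    Pool* background, Pool* background_blocking,
    bool can_use_background_priority) {
  std::array<Pool*, ENVIRONMENT_COUNT> map = {};
  map[FOREGROUND] = foreground;
  map[FOREGROUND_BLOCKING] = foreground_blocking;
  // Without a background priority, background work aliases foreground pools.
  map[BACKGROUND] = can_use_background_priority ? background : foreground;
  map[BACKGROUND_BLOCKING] =
      can_use_background_priority ? background_blocking : foreground_blocking;
  return map;
}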
+
+TaskSchedulerImpl::~TaskSchedulerImpl() {
+#if DCHECK_IS_ON()
+ DCHECK(join_for_testing_returned_.IsSet());
+#endif
+}
+
+void TaskSchedulerImpl::Start(
+ const TaskScheduler::InitParams& init_params,
+ SchedulerWorkerObserver* scheduler_worker_observer) {
+ // This is set in Start() and not in the constructor because variation params
+ // are usually not ready when TaskSchedulerImpl is instantiated in a process.
+ if (base::GetFieldTrialParamValue("BrowserScheduler",
+ "AllTasksUserBlocking") == "true") {
+ all_tasks_user_blocking_.Set();
+ }
+
+  // Start the service thread. On platforms that support it (POSIX except NaCl
+  // SFI), the service thread runs a MessageLoopForIO which is used to support
+ // FileDescriptorWatcher in the scope in which tasks run.
+ ServiceThread::Options service_thread_options;
+ service_thread_options.message_loop_type =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+ MessageLoop::TYPE_IO;
+#else
+ MessageLoop::TYPE_DEFAULT;
+#endif
+ service_thread_options.timer_slack = TIMER_SLACK_MAXIMUM;
+ CHECK(service_thread_->StartWithOptions(service_thread_options));
+
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+ // Needs to happen after starting the service thread to get its
+ // message_loop().
+ task_tracker_->set_watch_file_descriptor_message_loop(
+ static_cast<MessageLoopForIO*>(service_thread_->message_loop()));
+
+#if DCHECK_IS_ON()
+ task_tracker_->set_service_thread_handle(service_thread_->GetThreadHandle());
+#endif // DCHECK_IS_ON()
+#endif // defined(OS_POSIX) && !defined(OS_NACL_SFI)
+
+ // Needs to happen after starting the service thread to get its task_runner().
+ scoped_refptr<TaskRunner> service_thread_task_runner =
+ service_thread_->task_runner();
+ delayed_task_manager_.Start(service_thread_task_runner);
+
+ single_thread_task_runner_manager_.Start(scheduler_worker_observer);
+
+ const SchedulerWorkerPoolImpl::WorkerEnvironment worker_environment =
+#if defined(OS_WIN)
+ init_params.shared_worker_pool_environment ==
+ InitParams::SharedWorkerPoolEnvironment::COM_MTA
+ ? SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA
+ : SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#else
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#endif
+
+ // On platforms that can't use the background thread priority, background
+ // tasks run in foreground pools. A cap is set on the number of background
+ // tasks that can run in foreground pools to ensure that there is always room
+ // for incoming foreground tasks and to minimize the performance impact of
+ // background tasks.
+ const int max_background_tasks_in_foreground_pool = std::max(
+ 1, std::min(init_params.background_worker_pool_params.max_tasks(),
+ init_params.foreground_worker_pool_params.max_tasks() / 2));
+ worker_pools_[FOREGROUND]->Start(
+ init_params.foreground_worker_pool_params,
+ max_background_tasks_in_foreground_pool, service_thread_task_runner,
+ scheduler_worker_observer, worker_environment);
+ const int max_background_tasks_in_foreground_blocking_pool = std::max(
+ 1,
+ std::min(
+ init_params.background_blocking_worker_pool_params.max_tasks(),
+ init_params.foreground_blocking_worker_pool_params.max_tasks() / 2));
+ worker_pools_[FOREGROUND_BLOCKING]->Start(
+ init_params.foreground_blocking_worker_pool_params,
+ max_background_tasks_in_foreground_blocking_pool,
+ service_thread_task_runner, scheduler_worker_observer,
+ worker_environment);
+
+ if (CanUseBackgroundPriorityForSchedulerWorker()) {
+ worker_pools_[BACKGROUND]->Start(
+ init_params.background_worker_pool_params,
+ init_params.background_worker_pool_params.max_tasks(),
+ service_thread_task_runner, scheduler_worker_observer,
+ worker_environment);
+ worker_pools_[BACKGROUND_BLOCKING]->Start(
+ init_params.background_blocking_worker_pool_params,
+ init_params.background_blocking_worker_pool_params.max_tasks(),
+ service_thread_task_runner, scheduler_worker_observer,
+ worker_environment);
+ }
+}
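The caps computed above both follow max(1, min(background_max_tasks, foreground_max_tasks / 2)). A self-checking sketch of that arithmetic (the helper name is ours, for illustration):

#include <algorithm>

constexpr int MaxBackgroundTasksInForegroundPool(int background_max,
                                                 int foreground_max) {
  // At least one background task may run, but never more than the background
  // pool would allow or half of the foreground pool's capacity.
  return std::max(1, std::min(background_max, foreground_max / 2));
}

static_assert(MaxBackgroundTasksInForegroundPool(3, 8) == 3, "min(3, 4)");
static_assert(MaxBackgroundTasksInForegroundPool(6, 8) == 4, "min(6, 4)");
static_assert(MaxBackgroundTasksInForegroundPool(3, 1) == 1, "floor of 1");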
+
+void TaskSchedulerImpl::PostDelayedTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) {
+ // Post |task| as part of a one-off single-task Sequence.
+ const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+ GetWorkerPoolForTraits(new_traits)
+ ->PostTaskWithSequence(
+ Task(from_here, std::move(task), new_traits, delay),
+ MakeRefCounted<Sequence>());
+}
+
+scoped_refptr<TaskRunner> TaskSchedulerImpl::CreateTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+ return GetWorkerPoolForTraits(new_traits)
+ ->CreateTaskRunnerWithTraits(new_traits);
+}
+
+scoped_refptr<SequencedTaskRunner>
+TaskSchedulerImpl::CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+ return GetWorkerPoolForTraits(new_traits)
+ ->CreateSequencedTaskRunnerWithTraits(new_traits);
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return single_thread_task_runner_manager_
+ .CreateSingleThreadTaskRunnerWithTraits(
+ SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return single_thread_task_runner_manager_.CreateCOMSTATaskRunnerWithTraits(
+ SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+}
+#endif // defined(OS_WIN)
+
+std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
+ std::vector<const HistogramBase*> histograms;
+ for (const auto& worker_pool : worker_pools_)
+ worker_pool->GetHistograms(&histograms);
+
+ return histograms;
+}
+
+int TaskSchedulerImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ const TaskTraits& traits) const {
+ // This method does not support getting the maximum number of BACKGROUND tasks
+ // that can run concurrently in a pool.
+ DCHECK_NE(traits.priority(), TaskPriority::BACKGROUND);
+ return GetWorkerPoolForTraits(traits)
+ ->GetMaxConcurrentNonBlockedTasksDeprecated();
+}
+
+void TaskSchedulerImpl::Shutdown() {
+ // TODO(fdoray): Increase the priority of BACKGROUND tasks blocking shutdown.
+ task_tracker_->Shutdown();
+}
+
+void TaskSchedulerImpl::FlushForTesting() {
+ task_tracker_->FlushForTesting();
+}
+
+void TaskSchedulerImpl::FlushAsyncForTesting(OnceClosure flush_callback) {
+ task_tracker_->FlushAsyncForTesting(std::move(flush_callback));
+}
+
+void TaskSchedulerImpl::JoinForTesting() {
+#if DCHECK_IS_ON()
+ DCHECK(!join_for_testing_returned_.IsSet());
+#endif
+  // The service thread must be stopped before the workers are joined;
+  // otherwise, tasks scheduled by the DelayedTaskManager might be posted
+  // between joining those workers and stopping the service thread, which
+  // would cause a CHECK. See https://crbug.com/771701.
+ service_thread_->Stop();
+ single_thread_task_runner_manager_.JoinForTesting();
+ for (const auto& worker_pool : worker_pools_)
+ worker_pool->JoinForTesting();
+#if DCHECK_IS_ON()
+ join_for_testing_returned_.Set();
+#endif
+}
+
+SchedulerWorkerPoolImpl* TaskSchedulerImpl::GetWorkerPoolForTraits(
+ const TaskTraits& traits) const {
+ return environment_to_worker_pool_[GetEnvironmentIndexForTraits(traits)];
+}
+
+TaskTraits TaskSchedulerImpl::SetUserBlockingPriorityIfNeeded(
+ const TaskTraits& traits) const {
+ return all_tasks_user_blocking_.IsSet()
+ ? TaskTraits::Override(traits, {TaskPriority::USER_BLOCKING})
+ : traits;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/task_scheduler_impl.h b/base/task_scheduler/task_scheduler_impl.h
new file mode 100644
index 0000000000..598079d57b
--- /dev/null
+++ b/base/task_scheduler/task_scheduler_impl.h
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
+#define BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+#include "base/task_scheduler/task_tracker_posix.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#endif
+
+namespace base {
+
+class HistogramBase;
+class Thread;
+
+namespace internal {
+
+// Default TaskScheduler implementation. This class is thread-safe.
+class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
+ public:
+ using TaskTrackerImpl =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+ TaskTrackerPosix;
+#else
+ TaskTracker;
+#endif
+
+  // Creates a TaskSchedulerImpl with a production TaskTracker.
+  // |histogram_label| is used to label histograms; it must not be empty.
+ explicit TaskSchedulerImpl(StringPiece histogram_label);
+
+ // For testing only. Creates a TaskSchedulerImpl with a custom TaskTracker.
+ TaskSchedulerImpl(StringPiece histogram_label,
+ std::unique_ptr<TaskTrackerImpl> task_tracker);
+
+ ~TaskSchedulerImpl() override;
+
+ // TaskScheduler:
+ void Start(const TaskScheduler::InitParams& init_params,
+ SchedulerWorkerObserver* scheduler_worker_observer) override;
+ void PostDelayedTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) override;
+ scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) override;
+#if defined(OS_WIN)
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) override;
+#endif // defined(OS_WIN)
+ std::vector<const HistogramBase*> GetHistograms() const override;
+ int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ const TaskTraits& traits) const override;
+ void Shutdown() override;
+ void FlushForTesting() override;
+ void FlushAsyncForTesting(OnceClosure flush_callback) override;
+ void JoinForTesting() override;
+
+ private:
+ // Returns the worker pool that runs Tasks with |traits|.
+ SchedulerWorkerPoolImpl* GetWorkerPoolForTraits(
+ const TaskTraits& traits) const;
+
+ // Returns |traits|, with priority set to TaskPriority::USER_BLOCKING if
+ // |all_tasks_user_blocking_| is set.
+ TaskTraits SetUserBlockingPriorityIfNeeded(const TaskTraits& traits) const;
+
+ const std::unique_ptr<TaskTrackerImpl> task_tracker_;
+ std::unique_ptr<Thread> service_thread_;
+ DelayedTaskManager delayed_task_manager_;
+ SchedulerSingleThreadTaskRunnerManager single_thread_task_runner_manager_;
+
+ // Indicates that all tasks are handled as if they had been posted with
+ // TaskPriority::USER_BLOCKING. Since this is set in Start(), it doesn't apply
+ // to tasks posted before Start() or to tasks posted to TaskRunners created
+ // before Start().
+ //
+ // TODO(fdoray): Remove after experiment. https://crbug.com/757022
+ AtomicFlag all_tasks_user_blocking_;
+
+ // Owns all the pools managed by this TaskScheduler.
+ std::vector<std::unique_ptr<SchedulerWorkerPoolImpl>> worker_pools_;
+
+ // Maps an environment from EnvironmentType to a pool in |worker_pools_|.
+ SchedulerWorkerPoolImpl* environment_to_worker_pool_[static_cast<int>(
+ EnvironmentType::ENVIRONMENT_COUNT)];
+
+#if DCHECK_IS_ON()
+ // Set once JoinForTesting() has returned.
+ AtomicFlag join_for_testing_returned_;
+#endif
+
+#if defined(OS_WIN) && defined(COM_INIT_CHECK_HOOK_ENABLED)
+ // Provides COM initialization verification for supported builds.
+ base::win::ComInitCheckHook com_init_check_hook_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerImpl);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
diff --git a/base/task_scheduler/task_tracker.cc b/base/task_scheduler/task_tracker.cc
new file mode 100644
index 0000000000..e4955fd729
--- /dev/null
+++ b/base/task_scheduler/task_tracker.cc
@@ -0,0 +1,837 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker.h"
+
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "base/base_switches.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/sequence_token.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+#include "base/threading/sequence_local_storage_map.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/values.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+constexpr char kParallelExecutionMode[] = "parallel";
+constexpr char kSequencedExecutionMode[] = "sequenced";
+constexpr char kSingleThreadExecutionMode[] = "single thread";
+
+// An immutable copy of a scheduler task's info required by tracing.
+class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
+ public:
+ TaskTracingInfo(const TaskTraits& task_traits,
+ const char* execution_mode,
+ const SequenceToken& sequence_token)
+ : task_traits_(task_traits),
+ execution_mode_(execution_mode),
+ sequence_token_(sequence_token) {}
+
+ // trace_event::ConvertableToTraceFormat implementation.
+ void AppendAsTraceFormat(std::string* out) const override;
+
+ private:
+ const TaskTraits task_traits_;
+ const char* const execution_mode_;
+ const SequenceToken sequence_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
+};
+
+void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
+ DictionaryValue dict;
+
+ dict.SetString("task_priority",
+ base::TaskPriorityToString(task_traits_.priority()));
+ dict.SetString("execution_mode", execution_mode_);
+ if (execution_mode_ != kParallelExecutionMode)
+ dict.SetInteger("sequence_token", sequence_token_.ToInternalValue());
+
+ std::string tmp;
+ JSONWriter::Write(dict, &tmp);
+ out->append(tmp);
+}
+
+// These names convey that a Task is posted to/run by the task scheduler
+// without revealing its implementation details.
+constexpr char kQueueFunctionName[] = "TaskScheduler PostTask";
+constexpr char kRunFunctionName[] = "TaskScheduler RunTask";
+
+constexpr char kTaskSchedulerFlowTracingCategory[] =
+ TRACE_DISABLED_BY_DEFAULT("task_scheduler.flow");
+
+// Constructs a histogram to track latency which is logged to
+// "TaskScheduler.{histogram_name}.{histogram_label}.{task_type_suffix}".
+HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
+ StringPiece histogram_label,
+ StringPiece task_type_suffix) {
+ DCHECK(!histogram_name.empty());
+ DCHECK(!histogram_label.empty());
+ DCHECK(!task_type_suffix.empty());
+  // Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimum
+  // and maximum were chosen to place the 1ms mark at around 70% of the bucket
+  // range. This gives good resolution for tasks with a latency below 1ms
+  // (most of them) and enough information to assess how bad the latency is
+  // for tasks that exceed this threshold.
+ const std::string histogram = JoinString(
+ {"TaskScheduler", histogram_name, histogram_label, task_type_suffix},
+ ".");
+ return Histogram::FactoryMicrosecondsTimeGet(
+ histogram, TimeDelta::FromMicroseconds(1),
+ TimeDelta::FromMilliseconds(20), 50,
+ HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+// Upper bound for the
+// TaskScheduler.BlockShutdownTasksPostedDuringShutdown histogram.
+constexpr HistogramBase::Sample kMaxBlockShutdownTasksPostedDuringShutdown =
+ 1000;
+
+void RecordNumBlockShutdownTasksPostedDuringShutdown(
+ HistogramBase::Sample value) {
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "TaskScheduler.BlockShutdownTasksPostedDuringShutdown", value, 1,
+ kMaxBlockShutdownTasksPostedDuringShutdown, 50);
+}
+
+// Returns the maximum number of TaskPriority::BACKGROUND sequences that can be
+// scheduled concurrently based on command line flags.
+int GetMaxNumScheduledBackgroundSequences() {
+ // The CommandLine might not be initialized if TaskScheduler is initialized
+ // in a dynamic library which doesn't have access to argc/argv.
+ if (CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableBackgroundTasks)) {
+ return 0;
+ }
+ return std::numeric_limits<int>::max();
+}
+
+} // namespace
+
+// Atomic internal state used by TaskTracker. Sequential consistency shouldn't
+// be assumed from these calls (i.e. a thread reading
+// |HasShutdownStarted() == true| isn't guaranteed to see all writes made before
+// |StartShutdown()| on the thread that invoked it).
+class TaskTracker::State {
+ public:
+ State() = default;
+
+ // Sets a flag indicating that shutdown has started. Returns true if there are
+ // tasks blocking shutdown. Can only be called once.
+ bool StartShutdown() {
+ const auto new_value =
+ subtle::NoBarrier_AtomicIncrement(&bits_, kShutdownHasStartedMask);
+
+ // Check that the "shutdown has started" bit isn't zero. This would happen
+ // if it was incremented twice.
+ DCHECK(new_value & kShutdownHasStartedMask);
+
+ const auto num_tasks_blocking_shutdown =
+ new_value >> kNumTasksBlockingShutdownBitOffset;
+ return num_tasks_blocking_shutdown != 0;
+ }
+
+ // Returns true if shutdown has started.
+ bool HasShutdownStarted() const {
+ return subtle::NoBarrier_Load(&bits_) & kShutdownHasStartedMask;
+ }
+
+ // Returns true if there are tasks blocking shutdown.
+ bool AreTasksBlockingShutdown() const {
+ const auto num_tasks_blocking_shutdown =
+ subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
+ DCHECK_GE(num_tasks_blocking_shutdown, 0);
+ return num_tasks_blocking_shutdown != 0;
+ }
+
+ // Increments the number of tasks blocking shutdown. Returns true if shutdown
+ // has started.
+ bool IncrementNumTasksBlockingShutdown() {
+#if DCHECK_IS_ON()
+ // Verify that no overflow will occur.
+ const auto num_tasks_blocking_shutdown =
+ subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
+ DCHECK_LT(num_tasks_blocking_shutdown,
+ std::numeric_limits<subtle::Atomic32>::max() -
+ kNumTasksBlockingShutdownIncrement);
+#endif
+
+ const auto new_bits = subtle::NoBarrier_AtomicIncrement(
+ &bits_, kNumTasksBlockingShutdownIncrement);
+ return new_bits & kShutdownHasStartedMask;
+ }
+
+ // Decrements the number of tasks blocking shutdown. Returns true if shutdown
+ // has started and the number of tasks blocking shutdown becomes zero.
+ bool DecrementNumTasksBlockingShutdown() {
+ const auto new_bits = subtle::NoBarrier_AtomicIncrement(
+ &bits_, -kNumTasksBlockingShutdownIncrement);
+ const bool shutdown_has_started = new_bits & kShutdownHasStartedMask;
+ const auto num_tasks_blocking_shutdown =
+ new_bits >> kNumTasksBlockingShutdownBitOffset;
+ DCHECK_GE(num_tasks_blocking_shutdown, 0);
+ return shutdown_has_started && num_tasks_blocking_shutdown == 0;
+ }
+
+ private:
+ static constexpr subtle::Atomic32 kShutdownHasStartedMask = 1;
+ static constexpr subtle::Atomic32 kNumTasksBlockingShutdownBitOffset = 1;
+ static constexpr subtle::Atomic32 kNumTasksBlockingShutdownIncrement =
+ 1 << kNumTasksBlockingShutdownBitOffset;
+
+ // The LSB indicates whether shutdown has started. The other bits count the
+ // number of tasks blocking shutdown.
+ // No barriers are required to read/write |bits_| as this class is only used
+ // as an atomic state checker, it doesn't provide sequential consistency
+ // guarantees w.r.t. external state. Sequencing of the TaskTracker::State
+ // operations themselves is guaranteed by the AtomicIncrement RMW (read-
+ // modify-write) semantics however. For example, if two threads are racing to
+ // call IncrementNumTasksBlockingShutdown() and StartShutdown() respectively,
+ // either the first thread will win and the StartShutdown() call will see the
+ // blocking task or the second thread will win and
+ // IncrementNumTasksBlockingShutdown() will know that shutdown has started.
+ subtle::Atomic32 bits_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(State);
+};
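A self-contained model of the bit-packing described above, assuming std::atomic<int32_t> in place of base's subtle::Atomic32; the relaxed memory order mirrors the "no barrier" semantics the comment describes:

#include <atomic>
#include <cassert>
#include <cstdint>

class PackedShutdownState {
 public:
  // Bit 0: "shutdown has started". Bits 1..31: tasks blocking shutdown.
  static constexpr int32_t kShutdownMask = 1;
  static constexpr int32_t kTaskIncrement = 1 << 1;

  // Returns true if tasks are blocking shutdown. Can only be called once.
  bool StartShutdown() {
    const int32_t new_value =
        bits_.fetch_add(kShutdownMask, std::memory_order_relaxed) +
        kShutdownMask;
    assert(new_value & kShutdownMask);  // The bit would be 0 if called twice.
    return (new_value >> 1) != 0;
  }

  // Returns true if shutdown has started.
  bool IncrementNumTasksBlockingShutdown() {
    const int32_t new_value =
        bits_.fetch_add(kTaskIncrement, std::memory_order_relaxed) +
        kTaskIncrement;
    return new_value & kShutdownMask;
  }

  // Returns true if shutdown has started and the count reached zero.
  bool DecrementNumTasksBlockingShutdown() {
    const int32_t new_value =
        bits_.fetch_sub(kTaskIncrement, std::memory_order_relaxed) -
        kTaskIncrement;
    return (new_value & kShutdownMask) && (new_value >> 1) == 0;
  }

 private:
  std::atomic<int32_t> bits_{0};
};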
+
+struct TaskTracker::PreemptedBackgroundSequence {
+ PreemptedBackgroundSequence() = default;
+ PreemptedBackgroundSequence(scoped_refptr<Sequence> sequence_in,
+ TimeTicks next_task_sequenced_time_in,
+ CanScheduleSequenceObserver* observer_in)
+ : sequence(std::move(sequence_in)),
+ next_task_sequenced_time(next_task_sequenced_time_in),
+ observer(observer_in) {}
+ PreemptedBackgroundSequence(PreemptedBackgroundSequence&& other) = default;
+ ~PreemptedBackgroundSequence() = default;
+ PreemptedBackgroundSequence& operator=(PreemptedBackgroundSequence&& other) =
+ default;
+ bool operator<(const PreemptedBackgroundSequence& other) const {
+ return next_task_sequenced_time < other.next_task_sequenced_time;
+ }
+ bool operator>(const PreemptedBackgroundSequence& other) const {
+ return next_task_sequenced_time > other.next_task_sequenced_time;
+ }
+
+ // A background sequence waiting to be scheduled.
+ scoped_refptr<Sequence> sequence;
+
+ // The sequenced time of the next task in |sequence|.
+ TimeTicks next_task_sequenced_time;
+
+ // An observer to notify when |sequence| can be scheduled.
+ CanScheduleSequenceObserver* observer = nullptr;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PreemptedBackgroundSequence);
+};
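The comparators above exist to order the queue of preempted sequences. A sketch of how such a struct is typically consumed (a plain int stands in for TimeTicks): pairing operator> with std::greater makes std::priority_queue a min-heap whose top() is the sequence whose next task was posted earliest.

#include <functional>
#include <queue>
#include <vector>

struct Preempted {
  int next_task_sequenced_time = 0;  // Stand-in for TimeTicks.
  bool operator>(const Preempted& other) const {
    return next_task_sequenced_time > other.next_task_sequenced_time;
  }
};

// std::greater + operator> turn the default max-heap into a min-heap.
using PreemptedQueue = std::priority_queue<Preempted,
                                           std::vector<Preempted>,
                                           std::greater<Preempted>>;

// PreemptedQueue q;
// q.push({20});
// q.push({10});
// q.top().next_task_sequenced_time == 10: the earliest is scheduled first.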
+
+TaskTracker::TaskTracker(StringPiece histogram_label)
+ : TaskTracker(histogram_label, GetMaxNumScheduledBackgroundSequences()) {}
+
+TaskTracker::TaskTracker(StringPiece histogram_label,
+ int max_num_scheduled_background_sequences)
+ : state_(new State),
+ flush_cv_(flush_lock_.CreateConditionVariable()),
+ shutdown_lock_(&flush_lock_),
+ max_num_scheduled_background_sequences_(
+ max_num_scheduled_background_sequences),
+ task_latency_histograms_{
+ {GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "BackgroundTaskPriority"),
+ GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "BackgroundTaskPriority_MayBlock")},
+ {GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "UserVisibleTaskPriority"),
+ GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "UserVisibleTaskPriority_MayBlock")},
+ {GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "UserBlockingTaskPriority"),
+ GetLatencyHistogram("TaskLatencyMicroseconds",
+ histogram_label,
+ "UserBlockingTaskPriority_MayBlock")}},
+ heartbeat_latency_histograms_{
+ {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "BackgroundTaskPriority"),
+ GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "BackgroundTaskPriority_MayBlock")},
+ {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "UserVisibleTaskPriority"),
+ GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "UserVisibleTaskPriority_MayBlock")},
+ {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "UserBlockingTaskPriority"),
+ GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+ histogram_label,
+ "UserBlockingTaskPriority_MayBlock")}},
+ tracked_ref_factory_(this) {
+ // Confirm that all |task_latency_histograms_| have been initialized above.
+ DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
+ 1][0] -
+ 1));
+}
+
+TaskTracker::~TaskTracker() = default;
+
+void TaskTracker::Shutdown() {
+ PerformShutdown();
+ DCHECK(IsShutdownComplete());
+
+ // Unblock FlushForTesting() and perform the FlushAsyncForTesting callback
+ // when shutdown completes.
+ {
+ AutoSchedulerLock auto_lock(flush_lock_);
+ flush_cv_->Signal();
+ }
+ CallFlushCallbackForTesting();
+}
+
+void TaskTracker::FlushForTesting() {
+ AutoSchedulerLock auto_lock(flush_lock_);
+ while (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0 &&
+ !IsShutdownComplete()) {
+ flush_cv_->Wait();
+ }
+}
+
+void TaskTracker::FlushAsyncForTesting(OnceClosure flush_callback) {
+ DCHECK(flush_callback);
+ {
+ AutoSchedulerLock auto_lock(flush_lock_);
+ DCHECK(!flush_callback_for_testing_)
+ << "Only one FlushAsyncForTesting() may be pending at any time.";
+ flush_callback_for_testing_ = std::move(flush_callback);
+ }
+
+ if (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) == 0 ||
+ IsShutdownComplete()) {
+ CallFlushCallbackForTesting();
+ }
+}
+
+bool TaskTracker::WillPostTask(Task* task) {
+ DCHECK(task->task);
+
+ if (!BeforePostTask(task->traits.shutdown_behavior()))
+ return false;
+
+ if (task->delayed_run_time.is_null())
+ subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
+
+ {
+ TRACE_EVENT_WITH_FLOW0(
+ kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
+ TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(*task)),
+ TRACE_EVENT_FLAG_FLOW_OUT);
+ }
+
+ task_annotator_.WillQueueTask(nullptr, task);
+
+ return true;
+}
+
+scoped_refptr<Sequence> TaskTracker::WillScheduleSequence(
+ scoped_refptr<Sequence> sequence,
+ CanScheduleSequenceObserver* observer) {
+ const SequenceSortKey sort_key = sequence->GetSortKey();
+
+ // A foreground sequence can always be scheduled.
+ if (sort_key.priority() != TaskPriority::BACKGROUND)
+ return sequence;
+
+ // It is convenient not to have to specify an observer when scheduling
+ // foreground sequences in tests.
+ DCHECK(observer);
+
+ AutoSchedulerLock auto_lock(background_lock_);
+
+ if (num_scheduled_background_sequences_ <
+ max_num_scheduled_background_sequences_) {
+ ++num_scheduled_background_sequences_;
+ return sequence;
+ }
+
+ preempted_background_sequences_.emplace(
+ std::move(sequence), sort_key.next_task_sequenced_time(), observer);
+ return nullptr;
+}
+
+scoped_refptr<Sequence> TaskTracker::RunAndPopNextTask(
+ scoped_refptr<Sequence> sequence,
+ CanScheduleSequenceObserver* observer) {
+ DCHECK(sequence);
+
+ // Run the next task in |sequence|.
+ Optional<Task> task = sequence->TakeTask();
+ // TODO(fdoray): Support TakeTask() returning null. https://crbug.com/783309
+ DCHECK(task);
+
+ const TaskShutdownBehavior shutdown_behavior =
+ task->traits.shutdown_behavior();
+ const TaskPriority task_priority = task->traits.priority();
+ const bool can_run_task = BeforeRunTask(shutdown_behavior);
+ const bool is_delayed = !task->delayed_run_time.is_null();
+
+ RunOrSkipTask(std::move(task.value()), sequence.get(), can_run_task);
+ if (can_run_task)
+ AfterRunTask(shutdown_behavior);
+
+ if (!is_delayed)
+ DecrementNumIncompleteUndelayedTasks();
+
+ const bool sequence_is_empty_after_pop = sequence->Pop();
+
+  // Never reschedule a Sequence emptied by Pop(). The contract is that the
+  // next poster to make it non-empty is responsible for scheduling it.
+ if (sequence_is_empty_after_pop)
+ sequence = nullptr;
+
+ if (task_priority == TaskPriority::BACKGROUND) {
+    // Allow |sequence| to be rescheduled only if its next task is set to run
+    // earlier than the earliest currently preempted sequence.
+ return ManageBackgroundSequencesAfterRunningTask(std::move(sequence),
+ observer);
+ }
+
+ return sequence;
+}
+
+bool TaskTracker::HasShutdownStarted() const {
+ return state_->HasShutdownStarted();
+}
+
+bool TaskTracker::IsShutdownComplete() const {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+ return shutdown_event_ && shutdown_event_->IsSignaled();
+}
+
+void TaskTracker::SetHasShutdownStartedForTesting() {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // Create a dummy |shutdown_event_| to satisfy TaskTracker's expectation of
+ // its existence during shutdown (e.g. in OnBlockingShutdownTasksComplete()).
+ shutdown_event_ = std::make_unique<WaitableEvent>();
+
+ state_->StartShutdown();
+}
+
+void TaskTracker::RecordLatencyHistogram(
+ LatencyHistogramType latency_histogram_type,
+ TaskTraits task_traits,
+ TimeTicks posted_time) const {
+ const TimeDelta task_latency = TimeTicks::Now() - posted_time;
+
+ DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
+ latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
+ const auto& histograms =
+ latency_histogram_type == LatencyHistogramType::TASK_LATENCY
+ ? task_latency_histograms_
+ : heartbeat_latency_histograms_;
+ histograms[static_cast<int>(task_traits.priority())]
+ [task_traits.may_block() || task_traits.with_base_sync_primitives()
+ ? 1
+ : 0]
+ ->AddTimeMicrosecondsGranularity(task_latency);
+}
+
+void TaskTracker::RunOrSkipTask(Task task,
+ Sequence* sequence,
+ bool can_run_task) {
+ RecordLatencyHistogram(LatencyHistogramType::TASK_LATENCY, task.traits,
+ task.sequenced_time);
+
+ const bool previous_singleton_allowed =
+ ThreadRestrictions::SetSingletonAllowed(
+ task.traits.shutdown_behavior() !=
+ TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+ const bool previous_io_allowed =
+ ThreadRestrictions::SetIOAllowed(task.traits.may_block());
+ const bool previous_wait_allowed = ThreadRestrictions::SetWaitAllowed(
+ task.traits.with_base_sync_primitives());
+
+ {
+ const SequenceToken& sequence_token = sequence->token();
+ DCHECK(sequence_token.IsValid());
+ ScopedSetSequenceTokenForCurrentThread
+ scoped_set_sequence_token_for_current_thread(sequence_token);
+ ScopedSetTaskPriorityForCurrentThread
+ scoped_set_task_priority_for_current_thread(task.traits.priority());
+ ScopedSetSequenceLocalStorageMapForCurrentThread
+ scoped_set_sequence_local_storage_map_for_current_thread(
+ sequence->sequence_local_storage());
+
+ // Set up TaskRunnerHandle as expected for the scope of the task.
+ std::unique_ptr<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
+ std::unique_ptr<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
+ DCHECK(!task.sequenced_task_runner_ref ||
+ !task.single_thread_task_runner_ref);
+ if (task.sequenced_task_runner_ref) {
+ sequenced_task_runner_handle.reset(
+ new SequencedTaskRunnerHandle(task.sequenced_task_runner_ref));
+ } else if (task.single_thread_task_runner_ref) {
+ single_thread_task_runner_handle.reset(
+ new ThreadTaskRunnerHandle(task.single_thread_task_runner_ref));
+ }
+
+ if (can_run_task) {
+ TRACE_TASK_EXECUTION(kRunFunctionName, task);
+
+ const char* const execution_mode =
+ task.single_thread_task_runner_ref
+ ? kSingleThreadExecutionMode
+ : (task.sequenced_task_runner_ref ? kSequencedExecutionMode
+ : kParallelExecutionMode);
+ // TODO(gab): In a better world this would be tacked on as an extra arg
+ // to the trace event generated above. This is not possible however until
+ // http://crbug.com/652692 is resolved.
+ TRACE_EVENT1("task_scheduler", "TaskTracker::RunTask", "task_info",
+ std::make_unique<TaskTracingInfo>(
+ task.traits, execution_mode, sequence_token));
+
+ {
+        // Put this in its own scope so it precedes rather than overlaps with
+ // RunTask() in the trace view.
+ TRACE_EVENT_WITH_FLOW0(
+ kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
+ TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(task)),
+ TRACE_EVENT_FLAG_FLOW_IN);
+ }
+
+ task_annotator_.RunTask(nullptr, &task);
+ }
+
+ // Make sure the arguments bound to the callback are deleted within the
+ // scope in which the callback runs.
+ task.task = OnceClosure();
+ }
+
+ ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
+ ThreadRestrictions::SetIOAllowed(previous_io_allowed);
+ ThreadRestrictions::SetSingletonAllowed(previous_singleton_allowed);
+}
+
+void TaskTracker::PerformShutdown() {
+ {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // This method can only be called once.
+ DCHECK(!shutdown_event_);
+ DCHECK(!num_block_shutdown_tasks_posted_during_shutdown_);
+ DCHECK(!state_->HasShutdownStarted());
+
+ shutdown_event_ = std::make_unique<WaitableEvent>();
+
+ const bool tasks_are_blocking_shutdown = state_->StartShutdown();
+
+ // From now, if a thread causes the number of tasks blocking shutdown to
+ // become zero, it will call OnBlockingShutdownTasksComplete().
+
+ if (!tasks_are_blocking_shutdown) {
+ // If another thread posts a BLOCK_SHUTDOWN task at this moment, it will
+ // block until this method releases |shutdown_lock_|. Then, it will fail
+ // DCHECK(!shutdown_event_->IsSignaled()). This is the desired behavior
+ // because posting a BLOCK_SHUTDOWN task when TaskTracker::Shutdown() has
+ // started and no tasks are blocking shutdown isn't allowed.
+ shutdown_event_->Signal();
+ return;
+ }
+ }
+
+ // Remove the cap on the maximum number of background sequences that can be
+ // scheduled concurrently. Done after starting shutdown to ensure that non-
+ // BLOCK_SHUTDOWN sequences don't get a chance to run and that BLOCK_SHUTDOWN
+ // sequences run on threads running with a normal priority.
+ SetMaxNumScheduledBackgroundSequences(std::numeric_limits<int>::max());
+
+ // It is safe to access |shutdown_event_| without holding |lock_| because the
+ // pointer never changes after being set above.
+ {
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ shutdown_event_->Wait();
+ }
+
+ {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // Record TaskScheduler.BlockShutdownTasksPostedDuringShutdown if less than
+ // |kMaxBlockShutdownTasksPostedDuringShutdown| BLOCK_SHUTDOWN tasks were
+ // posted during shutdown. Otherwise, the histogram has already been
+ // recorded in BeforePostTask().
+ if (num_block_shutdown_tasks_posted_during_shutdown_ <
+ kMaxBlockShutdownTasksPostedDuringShutdown) {
+ RecordNumBlockShutdownTasksPostedDuringShutdown(
+ num_block_shutdown_tasks_posted_during_shutdown_);
+ }
+ }
+}
+
+void TaskTracker::SetMaxNumScheduledBackgroundSequences(
+ int max_num_scheduled_background_sequences) {
+ std::vector<PreemptedBackgroundSequence> sequences_to_schedule;
+
+ {
+ AutoSchedulerLock auto_lock(background_lock_);
+ max_num_scheduled_background_sequences_ =
+ max_num_scheduled_background_sequences;
+
+ while (num_scheduled_background_sequences_ <
+ max_num_scheduled_background_sequences &&
+ !preempted_background_sequences_.empty()) {
+ sequences_to_schedule.push_back(
+ GetPreemptedBackgroundSequenceToScheduleLockRequired());
+ }
+ }
+
+ for (auto& sequence_to_schedule : sequences_to_schedule)
+ SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));
+}
+
+TaskTracker::PreemptedBackgroundSequence
+TaskTracker::GetPreemptedBackgroundSequenceToScheduleLockRequired() {
+ background_lock_.AssertAcquired();
+ DCHECK(!preempted_background_sequences_.empty());
+
+ ++num_scheduled_background_sequences_;
+ DCHECK_LE(num_scheduled_background_sequences_,
+ max_num_scheduled_background_sequences_);
+
+  // The const_cast on top is okay since the PreemptedBackgroundSequence is
+  // transactionally being popped from |preempted_background_sequences_| right
+ // after and the move doesn't alter the sort order (a requirement for the
+ // Windows STL's consistency debug-checks for std::priority_queue::top()).
+ PreemptedBackgroundSequence popped_sequence =
+ std::move(const_cast<PreemptedBackgroundSequence&>(
+ preempted_background_sequences_.top()));
+ preempted_background_sequences_.pop();
+ return popped_sequence;
+}
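The const_cast pattern above generalizes to any std::priority_queue. A sketch of the pop-by-move idiom (the helper name is ours, for illustration):

#include <queue>
#include <utility>

// top() only exposes a const reference, so moving the element out requires a
// const_cast. This is safe only because pop() runs immediately afterwards and
// the move leaves the element's sort order untouched until then.
template <typename T, typename Container, typename Compare>
T PopTop(std::priority_queue<T, Container, Compare>* queue) {
  T popped = std::move(const_cast<T&>(queue->top()));
  queue->pop();
  return popped;
}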
+
+void TaskTracker::SchedulePreemptedBackgroundSequence(
+ PreemptedBackgroundSequence sequence_to_schedule) {
+ DCHECK(sequence_to_schedule.observer);
+ sequence_to_schedule.observer->OnCanScheduleSequence(
+ std::move(sequence_to_schedule.sequence));
+}
+
+#if DCHECK_IS_ON()
+bool TaskTracker::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
+ return false;
+}
+#endif
+
+bool TaskTracker::HasIncompleteUndelayedTasksForTesting() const {
+ return subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0;
+}
+
+bool TaskTracker::BeforePostTask(TaskShutdownBehavior shutdown_behavior) {
+ if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
+ // BLOCK_SHUTDOWN tasks block shutdown between the moment they are posted
+ // and the moment they complete their execution.
+ const bool shutdown_started = state_->IncrementNumTasksBlockingShutdown();
+
+ if (shutdown_started) {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // A BLOCK_SHUTDOWN task posted after shutdown has completed is an
+ // ordering bug. This aims to catch those early.
+ DCHECK(shutdown_event_);
+ if (shutdown_event_->IsSignaled()) {
+#if DCHECK_IS_ON()
+// clang-format off
+ // TODO(robliao): http://crbug.com/698140. Since the service thread
+ // doesn't stop processing its own tasks at shutdown, we may still
+ // attempt to post a BLOCK_SHUTDOWN task in response to a
+ // FileDescriptorWatcher. Same is true for FilePathWatcher
+ // (http://crbug.com/728235). Until it's possible for such services to
+ // post to non-BLOCK_SHUTDOWN sequences which are themselves funneled to
+ // the main execution sequence (a future plan for the post_task.h API),
+ // this DCHECK will be flaky and must be disabled.
+ // DCHECK(IsPostingBlockShutdownTaskAfterShutdownAllowed());
+// clang-format on
+#endif
+ state_->DecrementNumTasksBlockingShutdown();
+ return false;
+ }
+
+ ++num_block_shutdown_tasks_posted_during_shutdown_;
+
+ if (num_block_shutdown_tasks_posted_during_shutdown_ ==
+ kMaxBlockShutdownTasksPostedDuringShutdown) {
+ // Record the TaskScheduler.BlockShutdownTasksPostedDuringShutdown
+ // histogram as soon as its upper bound is hit. That way, a value will
+ // be recorded even if an infinite number of BLOCK_SHUTDOWN tasks are
+        // posted, preventing shutdown from completing.
+ RecordNumBlockShutdownTasksPostedDuringShutdown(
+ num_block_shutdown_tasks_posted_during_shutdown_);
+ }
+ }
+
+ return true;
+ }
+
+  // A non-BLOCK_SHUTDOWN task is allowed to be posted iff shutdown hasn't
+  // started.
+ return !state_->HasShutdownStarted();
+}
+
+bool TaskTracker::BeforeRunTask(TaskShutdownBehavior shutdown_behavior) {
+ switch (shutdown_behavior) {
+ case TaskShutdownBehavior::BLOCK_SHUTDOWN: {
+ // The number of tasks blocking shutdown has been incremented when the
+ // task was posted.
+ DCHECK(state_->AreTasksBlockingShutdown());
+
+ // Trying to run a BLOCK_SHUTDOWN task after shutdown has completed is
+ // unexpected as it either shouldn't have been posted if shutdown
+ // completed or should be blocking shutdown if it was posted before it
+ // did.
+ DCHECK(!state_->HasShutdownStarted() || !IsShutdownComplete());
+
+ return true;
+ }
+
+ case TaskShutdownBehavior::SKIP_ON_SHUTDOWN: {
+ // SKIP_ON_SHUTDOWN tasks block shutdown while they are running.
+ const bool shutdown_started = state_->IncrementNumTasksBlockingShutdown();
+
+ if (shutdown_started) {
+ // The SKIP_ON_SHUTDOWN task isn't allowed to run during shutdown.
+ // Decrement the number of tasks blocking shutdown that was wrongly
+ // incremented.
+ const bool shutdown_started_and_no_tasks_block_shutdown =
+ state_->DecrementNumTasksBlockingShutdown();
+ if (shutdown_started_and_no_tasks_block_shutdown)
+ OnBlockingShutdownTasksComplete();
+
+ return false;
+ }
+
+ return true;
+ }
+
+ case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN: {
+ return !state_->HasShutdownStarted();
+ }
+ }
+
+ NOTREACHED();
+ return false;
+}
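Stripped of the blocking-count bookkeeping, the decision above reduces to a function of the shutdown behavior and whether shutdown has started; a condensed sketch:

enum class ShutdownBehavior {
  BLOCK_SHUTDOWN,
  SKIP_ON_SHUTDOWN,
  CONTINUE_ON_SHUTDOWN,
};

bool MayRunTask(ShutdownBehavior behavior, bool shutdown_started) {
  switch (behavior) {
    case ShutdownBehavior::BLOCK_SHUTDOWN:
      return true;  // Counted as blocking shutdown ever since it was posted.
    case ShutdownBehavior::SKIP_ON_SHUTDOWN:
    case ShutdownBehavior::CONTINUE_ON_SHUTDOWN:
      return !shutdown_started;  // Never starts once shutdown has begun.
  }
  return false;
}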
+
+void TaskTracker::AfterRunTask(TaskShutdownBehavior shutdown_behavior) {
+ if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN ||
+ shutdown_behavior == TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {
+ const bool shutdown_started_and_no_tasks_block_shutdown =
+ state_->DecrementNumTasksBlockingShutdown();
+ if (shutdown_started_and_no_tasks_block_shutdown)
+ OnBlockingShutdownTasksComplete();
+ }
+}
+
+void TaskTracker::OnBlockingShutdownTasksComplete() {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // This method can only be called after shutdown has started.
+ DCHECK(state_->HasShutdownStarted());
+ DCHECK(shutdown_event_);
+
+ shutdown_event_->Signal();
+}
+
+void TaskTracker::DecrementNumIncompleteUndelayedTasks() {
+ const auto new_num_incomplete_undelayed_tasks =
+ subtle::Barrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, -1);
+ DCHECK_GE(new_num_incomplete_undelayed_tasks, 0);
+ if (new_num_incomplete_undelayed_tasks == 0) {
+ {
+ AutoSchedulerLock auto_lock(flush_lock_);
+ flush_cv_->Signal();
+ }
+ CallFlushCallbackForTesting();
+ }
+}
+
+scoped_refptr<Sequence> TaskTracker::ManageBackgroundSequencesAfterRunningTask(
+ scoped_refptr<Sequence> just_ran_sequence,
+ CanScheduleSequenceObserver* observer) {
+ const TimeTicks next_task_sequenced_time =
+ just_ran_sequence
+ ? just_ran_sequence->GetSortKey().next_task_sequenced_time()
+ : TimeTicks();
+ PreemptedBackgroundSequence sequence_to_schedule;
+
+ {
+ AutoSchedulerLock auto_lock(background_lock_);
+
+ DCHECK(preempted_background_sequences_.empty() ||
+ num_scheduled_background_sequences_ ==
+ max_num_scheduled_background_sequences_);
+ --num_scheduled_background_sequences_;
+
+ if (just_ran_sequence) {
+ if (preempted_background_sequences_.empty() ||
+ preempted_background_sequences_.top().next_task_sequenced_time >
+ next_task_sequenced_time) {
+ ++num_scheduled_background_sequences_;
+ return just_ran_sequence;
+ }
+
+ preempted_background_sequences_.emplace(
+ std::move(just_ran_sequence), next_task_sequenced_time, observer);
+ }
+
+ if (!preempted_background_sequences_.empty()) {
+ sequence_to_schedule =
+ GetPreemptedBackgroundSequenceToScheduleLockRequired();
+ }
+ }
+
+ // |sequence_to_schedule.sequence| may be null if there was no preempted
+ // background sequence.
+ if (sequence_to_schedule.sequence)
+ SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));
+
+ return nullptr;
+}
+
+void TaskTracker::CallFlushCallbackForTesting() {
+ OnceClosure flush_callback;
+ {
+ AutoSchedulerLock auto_lock(flush_lock_);
+ flush_callback = std::move(flush_callback_for_testing_);
+ }
+ if (flush_callback)
+ std::move(flush_callback).Run();
+}
+
+} // namespace internal
+} // namespace base
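CallFlushCallbackForTesting() above uses a common lock idiom: take ownership of the callback while holding the lock, then invoke it after releasing the lock, so a callback that re-enters the tracker cannot deadlock. A minimal sketch, assuming std::mutex and std::function as stand-ins for SchedulerLock and OnceClosure:

#include <functional>
#include <mutex>
#include <utility>

class FlushNotifier {
 public:
  void SetCallback(std::function<void()> callback) {
    std::lock_guard<std::mutex> lock(lock_);
    callback_ = std::move(callback);
  }

  void Notify() {
    std::function<void()> callback;
    {
      std::lock_guard<std::mutex> lock(lock_);
      callback = std::move(callback_);  // Take ownership under the lock...
      callback_ = nullptr;
    }
    if (callback)
      callback();  // ...but invoke outside of it.
  }

 private:
  std::mutex lock_;
  std::function<void()> callback_;
};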
diff --git a/base/task_scheduler/task_tracker.h b/base/task_scheduler/task_tracker.h
new file mode 100644
index 0000000000..3596eca7c4
--- /dev/null
+++ b/base/task_scheduler/task_tracker.h
@@ -0,0 +1,365 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_H_
+#define BASE_TASK_SCHEDULER_TASK_TRACKER_H_
+
+#include <functional>
+#include <memory>
+#include <queue>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/debug/task_annotator.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/tracked_ref.h"
+
+namespace base {
+
+class ConditionVariable;
+class HistogramBase;
+
+namespace internal {
+
+// TaskTracker enforces policies that determine whether:
+// - A task can be added to a sequence (WillPostTask).
+// - A sequence can be scheduled (WillScheduleSequence).
+// - The next task in a scheduled sequence can run (RunAndPopNextTask).
+// TaskTracker also sets up the environment to run a task (RunAndPopNextTask)
+// and records metrics and trace events. This class is thread-safe.
+//
+// Life of a sequence:
+// (possible states: IDLE, PREEMPTED, SCHEDULED, RUNNING)
+//
+// Create a sequence
+// |
+// ------------------------> Sequence is IDLE
+// | |
+// | Add a task to the sequence
+// | (allowed by TaskTracker::WillPostTask)
+// | |
+// |                TaskTracker::WillScheduleSequence
+// | _____________________|_____________________
+// | | |
+// | Returns true Returns false
+// | | |
+// | | Sequence is PREEMPTED <----
+// | | | |
+// | | Eventually, |
+// | | CanScheduleSequenceObserver |
+// | | is notified that the |
+// | | sequence can be scheduled. |
+// | |__________________________________________| |
+// | | |
+// | (*) Sequence is SCHEDULED |
+// | | |
+// | A thread is ready to run the next |
+// | task in the sequence |
+// | | |
+// | TaskTracker::RunAndPopNextTask |
+// | A task from the sequence is run |
+// | Sequence is RUNNING |
+// | | |
+// | ______________________|____ |
+// | | | |
+// | Sequence is empty Sequence has more tasks |
+// |_________| _____________|_______________ |
+// | | |
+// Sequence can be Sequence cannot be |
+// scheduled scheduled at this |
+// | moment |
+// Go back to (*) |_________________|
+//
+//
+// Note: A background task is a task posted with TaskPriority::BACKGROUND. A
+// foreground task is a task posted with TaskPriority::USER_VISIBLE or
+// TaskPriority::USER_BLOCKING.
+//
+// TODO(fdoray): We want to allow disabling TaskPriority::BACKGROUND tasks in a
+// scope (e.g. during startup or page load), but we don't need a dynamic maximum
+// number of background tasks. The code could probably be simplified if it
+// didn't support that. https://crbug.com/831835
+class BASE_EXPORT TaskTracker {
+ public:
+  // |histogram_label| is used as a suffix for histograms; it must not be empty.
+ // The first constructor sets the maximum number of TaskPriority::BACKGROUND
+ // sequences that can be scheduled concurrently to 0 if the
+ // --disable-background-tasks flag is specified, max() otherwise. The second
+ // constructor sets it to |max_num_scheduled_background_sequences|.
+ TaskTracker(StringPiece histogram_label);
+ TaskTracker(StringPiece histogram_label,
+ int max_num_scheduled_background_sequences);
+
+ virtual ~TaskTracker();
+
+ // Synchronously shuts down the scheduler. Once this is called, only tasks
+ // posted with the BLOCK_SHUTDOWN behavior will be run. Returns when:
+ // - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
+ // execution.
+ // - All posted BLOCK_SHUTDOWN tasks have completed their execution.
+ // CONTINUE_ON_SHUTDOWN tasks still may be running after Shutdown returns.
+ // This can only be called once.
+ void Shutdown();
+
+ // Waits until there are no incomplete undelayed tasks. May be called in tests
+ // to validate that a condition is met after all undelayed tasks have run.
+ //
+ // Does not wait for delayed tasks. Waits for undelayed tasks posted from
+ // other threads during the call. Returns immediately when shutdown completes.
+ void FlushForTesting();
+
+  // Returns immediately and runs |flush_callback| once there are no incomplete
+  // undelayed tasks. |flush_callback| may be called back on any thread and
+  // should not perform a lot of work. May be used when additional work on the
+  // current thread needs to be performed during a flush. Only one
+  // FlushAsyncForTesting() may be pending at any given time.
+ void FlushAsyncForTesting(OnceClosure flush_callback);
+
+ // Informs this TaskTracker that |task| is about to be posted. Returns true if
+ // this operation is allowed (|task| should be posted if-and-only-if it is).
+ // This method may also modify metadata on |task| if desired.
+ bool WillPostTask(Task* task);
+
+ // Informs this TaskTracker that |sequence| is about to be scheduled. If this
+ // returns |sequence|, it is expected that RunAndPopNextTask() will soon be
+ // called with |sequence| as argument. Otherwise, RunAndPopNextTask() must not
+ // be called with |sequence| as argument until |observer| is notified that
+ // |sequence| can be scheduled (the caller doesn't need to keep a pointer to
+ // |sequence|; it will be included in the notification to |observer|).
+ // WillPostTask() must have allowed the task in front of |sequence| to be
+  // posted before this is called. |observer| is only required if the priority
+  // of |sequence| is TaskPriority::BACKGROUND.
+ scoped_refptr<Sequence> WillScheduleSequence(
+ scoped_refptr<Sequence> sequence,
+ CanScheduleSequenceObserver* observer);
+
+ // Runs the next task in |sequence| unless the current shutdown state prevents
+ // that. Then, pops the task from |sequence| (even if it didn't run). Returns
+ // |sequence| if it can be rescheduled immediately. If |sequence| is non-empty
+ // after popping a task from it but it can't be rescheduled immediately, it
+ // will be handed back to |observer| when it can be rescheduled.
+ // WillPostTask() must have allowed the task in front of |sequence| to be
+ // posted before this is called. Also, WillScheduleSequence(),
+ // RunAndPopNextTask() or CanScheduleSequenceObserver::OnCanScheduleSequence()
+ // must have allowed |sequence| to be (re)scheduled.
+ scoped_refptr<Sequence> RunAndPopNextTask(
+ scoped_refptr<Sequence> sequence,
+ CanScheduleSequenceObserver* observer);
+
+ // Returns true once shutdown has started (Shutdown() has been called but
+ // might not have returned). Note: sequential consistency with the thread
+ // calling Shutdown() (or SetHasShutdownStartedForTesting()) isn't guaranteed
+ // by this call.
+ bool HasShutdownStarted() const;
+
+ // Returns true if shutdown has completed (Shutdown() has returned).
+ bool IsShutdownComplete() const;
+
+ enum class LatencyHistogramType {
+ // Records the latency of each individual task posted through TaskTracker.
+ TASK_LATENCY,
+ // Records the latency of heartbeat tasks which are independent of current
+ // workload. These avoid a bias towards TASK_LATENCY reporting that high-
+ // priority tasks are "slower" than regular tasks because high-priority
+ // tasks tend to be correlated with heavy workloads.
+ HEARTBEAT_LATENCY,
+ };
+
+ // Causes HasShutdownStarted() to return true. Unlike when Shutdown() returns,
+ // IsShutdownComplete() won't return true after this returns. Shutdown()
+ // cannot be called after this.
+ void SetHasShutdownStartedForTesting();
+
+ // Records |Now() - posted_time| to the appropriate |latency_histogram_type|
+ // based on |task_traits|.
+ void RecordLatencyHistogram(LatencyHistogramType latency_histogram_type,
+ TaskTraits task_traits,
+ TimeTicks posted_time) const;
+
+ TrackedRef<TaskTracker> GetTrackedRef() {
+ return tracked_ref_factory_.GetTrackedRef();
+ }
+
+ protected:
+ // Runs and deletes |task| if |can_run_task| is true. Otherwise, just deletes
+ // |task|. |task| is always deleted in the environment where it runs or would
+ // have run. |sequence| is the sequence from which |task| was extracted. An
+ // override is expected to call its parent's implementation but is free to
+ // perform extra work before and after doing so.
+ virtual void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task);
+
+#if DCHECK_IS_ON()
+ // Returns true if this context should be exempt from blocking shutdown
+ // DCHECKs.
+ // TODO(robliao): Remove when http://crbug.com/698140 is fixed.
+ virtual bool IsPostingBlockShutdownTaskAfterShutdownAllowed();
+#endif
+
+ // Returns true if there are undelayed tasks that haven't completed their
+ // execution (still queued or in progress). If it returns false: the side-
+ // effects of all completed tasks are guaranteed to be visible to the caller.
+ bool HasIncompleteUndelayedTasksForTesting() const;
+
+ private:
+ class State;
+ struct PreemptedBackgroundSequence;
+
+ void PerformShutdown();
+
+ // Updates the maximum number of background sequences that can be scheduled
+ // concurrently to |max_num_scheduled_background_sequences|. Then, schedules
+ // as many preempted background sequences as allowed by the new value.
+ void SetMaxNumScheduledBackgroundSequences(
+ int max_num_scheduled_background_sequences);
+
+ // Pops the next sequence in |preempted_background_sequences_| and increments
+ // |num_scheduled_background_sequences_|. Must only be called in the scope of
+ // |background_lock_|, with |preempted_background_sequences_| non-empty. The
+ // caller must forward the returned sequence to the associated
+ // CanScheduleSequenceObserver as soon as |background_lock_| is released.
+ PreemptedBackgroundSequence
+ GetPreemptedBackgroundSequenceToScheduleLockRequired();
+
+ // Schedules |sequence_to_schedule.sequence| using
+ // |sequence_to_schedule.observer|. Does not verify that the sequence is
+ // allowed to be scheduled.
+ void SchedulePreemptedBackgroundSequence(
+ PreemptedBackgroundSequence sequence_to_schedule);
+
+ // Called before WillPostTask() informs the tracing system that a task has
+ // been posted. Updates |num_tasks_blocking_shutdown_| if necessary and
+ // returns true if the current shutdown state allows the task to be posted.
+ bool BeforePostTask(TaskShutdownBehavior shutdown_behavior);
+
+ // Called before a task with |shutdown_behavior| is run by RunTask(). Updates
+ // |num_tasks_blocking_shutdown_| if necessary and returns true if the current
+ // shutdown state allows the task to be run.
+ bool BeforeRunTask(TaskShutdownBehavior shutdown_behavior);
+
+ // Called after a task with |shutdown_behavior| has been run by RunTask().
+ // Updates |num_tasks_blocking_shutdown_| and signals |shutdown_cv_| if
+ // necessary.
+ void AfterRunTask(TaskShutdownBehavior shutdown_behavior);
+
+ // Called when the number of tasks blocking shutdown becomes zero after
+ // shutdown has started.
+ void OnBlockingShutdownTasksComplete();
+
+ // Decrements the number of incomplete undelayed tasks and signals |flush_cv_|
+ // if it reaches zero.
+ void DecrementNumIncompleteUndelayedTasks();
+
+ // To be called after running a background task from |just_ran_sequence|.
+ // Performs the following actions:
+ // - If |just_ran_sequence| is non-null:
+ // - returns it if it should be rescheduled by the caller of
+ // RunAndPopNextTask(), i.e. its next task is set to run earlier than the
+ // earliest currently preempted sequence.
+ // - Otherwise |just_ran_sequence| is preempted and the next preempted
+ // sequence is scheduled (|observer| will be notified when
+ // |just_ran_sequence| should be scheduled again).
+ // - If |just_ran_sequence| is null (RunAndPopNextTask() just popped the last
+ // task from it):
+  //     - the next preempted sequence (if any) is scheduled.
+ // - In all cases: adjusts the number of scheduled background sequences
+ // accordingly.
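+  //
+  // Rough pseudocode of the above (illustrative only):
+  //   if (just_ran_sequence && it runs before every preempted sequence)
+  //     return just_ran_sequence;  // caller reschedules it immediately
+  //   preempt |just_ran_sequence| (if non-null), schedule the earliest
+  //   preempted sequence (if any), and return null.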
+ scoped_refptr<Sequence> ManageBackgroundSequencesAfterRunningTask(
+ scoped_refptr<Sequence> just_ran_sequence,
+ CanScheduleSequenceObserver* observer);
+
+ // Calls |flush_callback_for_testing_| if one is available in a lock-safe
+ // manner.
+ void CallFlushCallbackForTesting();
+
+ debug::TaskAnnotator task_annotator_;
+
+ // Number of tasks blocking shutdown and boolean indicating whether shutdown
+ // has started.
+ const std::unique_ptr<State> state_;
+
+ // Number of undelayed tasks that haven't completed their execution. Is
+ // decremented with a memory barrier after a task runs. Is accessed with an
+ // acquire memory barrier in FlushForTesting(). The memory barriers ensure
+ // that the memory written by flushed tasks is visible when FlushForTesting()
+ // returns.
+ subtle::Atomic32 num_incomplete_undelayed_tasks_ = 0;
+
+ // Lock associated with |flush_cv_|. Partially synchronizes access to
+ // |num_incomplete_undelayed_tasks_|. Full synchronization isn't needed
+ // because it's atomic, but synchronization is needed to coordinate waking and
+ // sleeping at the right time. Fully synchronizes access to
+ // |flush_callback_for_testing_|.
+ mutable SchedulerLock flush_lock_;
+
+ // Signaled when |num_incomplete_undelayed_tasks_| is or reaches zero or when
+ // shutdown completes.
+ const std::unique_ptr<ConditionVariable> flush_cv_;
+
+ // Invoked if non-null when |num_incomplete_undelayed_tasks_| is zero or when
+ // shutdown completes.
+ OnceClosure flush_callback_for_testing_;
+
+ // Synchronizes access to shutdown related members below.
+ mutable SchedulerLock shutdown_lock_;
+
+ // Event instantiated when shutdown starts and signaled when shutdown
+ // completes.
+ std::unique_ptr<WaitableEvent> shutdown_event_;
+
+ // Synchronizes accesses to |preempted_background_sequences_|,
+ // |max_num_scheduled_background_sequences_| and
+ // |num_scheduled_background_sequences_|.
+ SchedulerLock background_lock_;
+
+ // A priority queue of sequences that are waiting to be scheduled. Use
+ // std::greater so that the sequence which contains the task that has been
+ // posted the earliest is on top of the priority queue.
+ std::priority_queue<PreemptedBackgroundSequence,
+ std::vector<PreemptedBackgroundSequence>,
+ std::greater<PreemptedBackgroundSequence>>
+ preempted_background_sequences_;
+
+  // Maximum number of background sequences that can be scheduled
+ // concurrently.
+ int max_num_scheduled_background_sequences_;
+
+ // Number of currently scheduled background sequences.
+ int num_scheduled_background_sequences_ = 0;
+
+ // TaskScheduler.TaskLatencyMicroseconds.* and
+ // TaskScheduler.HeartbeatLatencyMicroseconds.* histograms. The first index is
+ // a TaskPriority. The second index is 0 for non-blocking tasks, 1 for
+ // blocking tasks. Intentionally leaked.
+ // TODO(scheduler-dev): Consider using STATIC_HISTOGRAM_POINTER_GROUP for
+ // these.
+ static constexpr int kNumTaskPriorities =
+ static_cast<int>(TaskPriority::HIGHEST) + 1;
+ HistogramBase* const task_latency_histograms_[kNumTaskPriorities][2];
+ HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities][2];
+
+ // Number of BLOCK_SHUTDOWN tasks posted during shutdown.
+ HistogramBase::Sample num_block_shutdown_tasks_posted_during_shutdown_ = 0;
+
+ // Ensures all state (e.g. dangling cleaned up workers) is coalesced before
+ // destroying the TaskTracker (e.g. in test environments).
+ // Ref. https://crbug.com/827615.
+ TrackedRefFactory<TaskTracker> tracked_ref_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskTracker);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_TRACKER_H_
diff --git a/base/task_scheduler/task_tracker_posix.cc b/base/task_scheduler/task_tracker_posix.cc
new file mode 100644
index 0000000000..8289d909dc
--- /dev/null
+++ b/base/task_scheduler/task_tracker_posix.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker_posix.h"
+
+#include <utility>
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+namespace base {
+namespace internal {
+
+TaskTrackerPosix::TaskTrackerPosix(StringPiece name) : TaskTracker(name) {}
+TaskTrackerPosix::~TaskTrackerPosix() = default;
+
+void TaskTrackerPosix::RunOrSkipTask(Task task,
+ Sequence* sequence,
+ bool can_run_task) {
+ DCHECK(watch_file_descriptor_message_loop_);
+ FileDescriptorWatcher file_descriptor_watcher(
+ watch_file_descriptor_message_loop_);
+ TaskTracker::RunOrSkipTask(std::move(task), sequence, can_run_task);
+}
+
+#if DCHECK_IS_ON()
+bool TaskTrackerPosix::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
+ return service_thread_handle_.is_equal(PlatformThread::CurrentHandle());
+}
+#endif
+
+} // namespace internal
+} // namespace base
diff --git a/base/task_scheduler/task_tracker_posix.h b/base/task_scheduler/task_tracker_posix.h
new file mode 100644
index 0000000000..4689f7a13e
--- /dev/null
+++ b/base/task_scheduler/task_tracker_posix.h
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+#define BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+class MessageLoopForIO;
+
+namespace internal {
+
+struct Task;
+
+// A TaskTracker that instantiates a FileDescriptorWatcher in the scope in which
+// a task runs. Used on all POSIX platforms except NaCl SFI.
+// set_watch_file_descriptor_message_loop() must be called before the
+// TaskTracker can run tasks.
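+//
+// A minimal usage sketch (|io_message_loop| is a hypothetical
+// MessageLoopForIO owned by the caller):
+//
+//   TaskTrackerPosix tracker("MyTracker");
+//   tracker.set_watch_file_descriptor_message_loop(io_message_loop);
+//   // The tracker may run tasks from this point on.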
+class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
+ public:
+ TaskTrackerPosix(StringPiece name);
+ ~TaskTrackerPosix() override;
+
+  // Sets the MessageLoopForIO with which to set up FileDescriptorWatcher in the
+ // scope in which tasks run. Must be called before starting to run tasks.
+ // External synchronization is required between a call to this and a call to
+ // RunTask().
+ void set_watch_file_descriptor_message_loop(
+ MessageLoopForIO* watch_file_descriptor_message_loop) {
+ watch_file_descriptor_message_loop_ = watch_file_descriptor_message_loop;
+ }
+
+#if DCHECK_IS_ON()
+ // TODO(robliao): http://crbug.com/698140. This addresses service thread tasks
+ // that could run after the task scheduler has shut down. Anything from the
+ // service thread is exempted from the task scheduler shutdown DCHECKs.
+ void set_service_thread_handle(
+ const PlatformThreadHandle& service_thread_handle) {
+ DCHECK(!service_thread_handle.is_null());
+ service_thread_handle_ = service_thread_handle;
+ }
+#endif
+
+ protected:
+ // TaskTracker:
+ void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task) override;
+
+ private:
+#if DCHECK_IS_ON()
+ bool IsPostingBlockShutdownTaskAfterShutdownAllowed() override;
+#endif
+
+ MessageLoopForIO* watch_file_descriptor_message_loop_ = nullptr;
+
+#if DCHECK_IS_ON()
+ PlatformThreadHandle service_thread_handle_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(TaskTrackerPosix);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
diff --git a/base/test/fuzzed_data_provider.cc b/base/test/fuzzed_data_provider.cc
deleted file mode 100644
index b2d443a9b9..0000000000
--- a/base/test/fuzzed_data_provider.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/fuzzed_data_provider.h"
-
-#include <algorithm>
-#include <limits>
-
-#include "base/logging.h"
-
-namespace base {
-
-FuzzedDataProvider::FuzzedDataProvider(const uint8_t* data, size_t size)
- : remaining_data_(reinterpret_cast<const char*>(data), size) {}
-
-FuzzedDataProvider::~FuzzedDataProvider() = default;
-
-std::string FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
- num_bytes = std::min(num_bytes, remaining_data_.length());
- StringPiece result(remaining_data_.data(), num_bytes);
- remaining_data_ = remaining_data_.substr(num_bytes);
- return result.as_string();
-}
-
-std::string FuzzedDataProvider::ConsumeRemainingBytes() {
- return ConsumeBytes(remaining_data_.length());
-}
-
-uint32_t FuzzedDataProvider::ConsumeUint32InRange(uint32_t min, uint32_t max) {
- CHECK_LE(min, max);
-
- uint32_t range = max - min;
- uint32_t offset = 0;
- uint32_t result = 0;
-
- while (offset < 32 && (range >> offset) > 0 && !remaining_data_.empty()) {
- // Pull bytes off the end of the seed data. Experimentally, this seems to
- // allow the fuzzer to more easily explore the input space. This makes
- // sense, since it works by modifying inputs that caused new code to run,
- // and this data is often used to encode length of data read by
-    // ConsumeBytes. Separating out read lengths makes it easier to modify the
- // contents of the data that is actually read.
- uint8_t next_byte = remaining_data_.back();
- remaining_data_.remove_suffix(1);
- result = (result << 8) | next_byte;
- offset += 8;
- }
-
-  // Avoid division by 0, in the case where |range + 1| results in overflow.
- if (range == std::numeric_limits<uint32_t>::max())
- return result;
-
- return min + result % (range + 1);
-}
-
-std::string FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
- // Reads bytes from start of |remaining_data_|. Maps "\\" to "\", and maps "\"
- // followed by anything else to the end of the string. As a result of this
- // logic, a fuzzer can insert characters into the string, and the string will
- // be lengthened to include those new characters, resulting in a more stable
- // fuzzer than picking the length of a string independently from picking its
- // contents.
- std::string out;
- for (size_t i = 0; i < max_length && !remaining_data_.empty(); ++i) {
- char next = remaining_data_[0];
- remaining_data_.remove_prefix(1);
- if (next == '\\' && !remaining_data_.empty()) {
- next = remaining_data_[0];
- remaining_data_.remove_prefix(1);
- if (next != '\\')
- return out;
- }
- out += next;
- }
- return out;
-}
-
-int FuzzedDataProvider::ConsumeInt32InRange(int min, int max) {
- CHECK_LE(min, max);
-
- uint32_t range = max - min;
- return min + ConsumeUint32InRange(0, range);
-}
-
-bool FuzzedDataProvider::ConsumeBool() {
- return (ConsumeUint8() & 0x01) == 0x01;
-}
-
-uint8_t FuzzedDataProvider::ConsumeUint8() {
- return ConsumeUint32InRange(0, 0xFF);
-}
-
-uint16_t FuzzedDataProvider::ConsumeUint16() {
- return ConsumeUint32InRange(0, 0xFFFF);
-}
-
-} // namespace base
diff --git a/base/test/fuzzed_data_provider.h b/base/test/fuzzed_data_provider.h
deleted file mode 100644
index 425c820a21..0000000000
--- a/base/test/fuzzed_data_provider.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_FUZZED_DATA_PROVIDER_H_
-#define BASE_TEST_FUZZED_DATA_PROVIDER_H_
-
-#include <stdint.h>
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/strings/string_piece.h"
-
-namespace base {
-
-// Utility class to break up fuzzer input for multiple consumers. Whenever run
-// on the same input, provides the same output, as long as its methods are
-// called in the same order, with the same arguments.
-class FuzzedDataProvider {
- public:
- // |data| is an array of length |size| that the FuzzedDataProvider wraps to
- // provide more granular access. |data| must outlive the FuzzedDataProvider.
- FuzzedDataProvider(const uint8_t* data, size_t size);
- ~FuzzedDataProvider();
-
- // Returns a std::string containing |num_bytes| of input data. If fewer than
- // |num_bytes| of data remain, returns a shorter std::string containing all
- // of the data that's left.
- std::string ConsumeBytes(size_t num_bytes);
-
- // Returns a std::string containing all remaining bytes of the input data.
- std::string ConsumeRemainingBytes();
-
- // Returns a std::string of length from 0 to |max_length|. When it runs out of
- // input data, returns what remains of the input. Designed to be more stable
- // with respect to a fuzzer inserting characters than just picking a random
- // length and then consuming that many bytes with ConsumeBytes().
- std::string ConsumeRandomLengthString(size_t max_length);
-
- // Returns a number in the range [min, max] by consuming bytes from the input
- // data. The value might not be uniformly distributed in the given range. If
- // there's no input data left, always returns |min|. |min| must be less than
- // or equal to |max|.
- uint32_t ConsumeUint32InRange(uint32_t min, uint32_t max);
- int ConsumeInt32InRange(int min, int max);
-
- // Returns a bool, or false when no data remains.
- bool ConsumeBool();
-
- // Returns a uint8_t from the input or 0 if nothing remains. This is
- // equivalent to ConsumeUint32InRange(0, 0xFF).
- uint8_t ConsumeUint8();
-
- // Returns a uint16_t from the input. If fewer than 2 bytes of data remain
- // will fill the most significant bytes with 0. This is equivalent to
- // ConsumeUint32InRange(0, 0xFFFF).
- uint16_t ConsumeUint16();
-
- // Returns a value from |array|, consuming as many bytes as needed to do so.
- // |array| must be a fixed-size array. Equivalent to
-  // array[ConsumeUint32InRange(0, size - 1)];
- template <typename Type, size_t size>
- Type PickValueInArray(Type (&array)[size]) {
- return array[ConsumeUint32InRange(0, size - 1)];
- }
-
- // Reports the remaining bytes available for fuzzed input.
- size_t remaining_bytes() { return remaining_data_.length(); }
-
- private:
- StringPiece remaining_data_;
-
- DISALLOW_COPY_AND_ASSIGN(FuzzedDataProvider);
-};
-
-} // namespace base
-
-#endif // BASE_TEST_FUZZED_DATA_PROVIDER_H_
diff --git a/components/json_schema/json_schema_constants.cc b/components/json_schema/json_schema_constants.cc
new file mode 100644
index 0000000000..79c5eaf415
--- /dev/null
+++ b/components/json_schema/json_schema_constants.cc
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/json_schema/json_schema_constants.h"
+
+namespace json_schema_constants {
+
+const char kAdditionalProperties[] = "additionalProperties";
+const char kAny[] = "any";
+const char kArray[] = "array";
+const char kBoolean[] = "boolean";
+const char kChoices[] = "choices";
+const char kDescription[] = "description";
+const char kEnum[] = "enum";
+const char kId[] = "id";
+const char kInteger[] = "integer";
+const char kItems[] = "items";
+const char kMaximum[] = "maximum";
+const char kMaxItems[] = "maxItems";
+const char kMaxLength[] = "maxLength";
+const char kMinimum[] = "minimum";
+const char kMinItems[] = "minItems";
+const char kMinLength[] = "minLength";
+const char kNull[] = "null";
+const char kNumber[] = "number";
+const char kObject[] = "object";
+const char kOptional[] = "optional";
+const char kPattern[] = "pattern";
+const char kPatternProperties[] = "patternProperties";
+const char kProperties[] = "properties";
+const char kRef[] = "$ref";
+const char kRequired[] = "required";
+const char kSchema[] = "$schema";
+const char kString[] = "string";
+const char kTitle[] = "title";
+const char kType[] = "type";
+
+} // namespace json_schema_constants
diff --git a/components/json_schema/json_schema_constants.h b/components/json_schema/json_schema_constants.h
new file mode 100644
index 0000000000..19394a915f
--- /dev/null
+++ b/components/json_schema/json_schema_constants.h
@@ -0,0 +1,43 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_CONSTANTS_H_
+#define COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_CONSTANTS_H_
+
+// These constants are shared by code that uses JSON schemas.
+namespace json_schema_constants {
+
+extern const char kAdditionalProperties[];
+extern const char kAny[];
+extern const char kArray[];
+extern const char kBoolean[];
+extern const char kChoices[];
+extern const char kDescription[];
+extern const char kEnum[];
+extern const char kId[];
+extern const char kInteger[];
+extern const char kItems[];
+extern const char kMaximum[];
+extern const char kMaxItems[];
+extern const char kMaxLength[];
+extern const char kMinimum[];
+extern const char kMinItems[];
+extern const char kMinLength[];
+extern const char kNull[];
+extern const char kNumber[];
+extern const char kObject[];
+extern const char kOptional[];
+extern const char kPattern[];
+extern const char kPatternProperties[];
+extern const char kProperties[];
+extern const char kRef[];
+extern const char kRequired[];
+extern const char kSchema[];
+extern const char kString[];
+extern const char kTitle[];
+extern const char kType[];
+
+} // namespace json_schema_constants
+
+#endif // COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_CONSTANTS_H_
diff --git a/components/json_schema/json_schema_validator.cc b/components/json_schema/json_schema_validator.cc
new file mode 100644
index 0000000000..7a6daba014
--- /dev/null
+++ b/components/json_schema/json_schema_validator.cc
@@ -0,0 +1,862 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/json_schema/json_schema_validator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "base/json/json_reader.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+#include "components/json_schema/json_schema_constants.h"
+#include "third_party/re2/src/re2/re2.h"
+
+namespace schema = json_schema_constants;
+
+namespace {
+
+double GetNumberValue(const base::Value* value) {
+ double result = 0;
+ CHECK(value->GetAsDouble(&result))
+ << "Unexpected value type: " << value->type();
+ return result;
+}
+
+bool IsValidType(const std::string& type) {
+ static const char* kValidTypes[] = {
+ schema::kAny,
+ schema::kArray,
+ schema::kBoolean,
+ schema::kInteger,
+ schema::kNull,
+ schema::kNumber,
+ schema::kObject,
+ schema::kString,
+ };
+ const char** end = kValidTypes + arraysize(kValidTypes);
+ return std::find(kValidTypes, end, type) != end;
+}
+
+// Maps a schema attribute name to its expected type.
+struct ExpectedType {
+ const char* key;
+ base::Value::Type type;
+};
+
+// Helper for std::lower_bound.
+bool CompareToString(const ExpectedType& entry, const std::string& key) {
+ return entry.key < key;
+}
+
+// If |value| is a dictionary, returns the "name" attribute of |value| or NULL
+// if |value| does not contain a "name" attribute. Otherwise, returns |value|.
+const base::Value* ExtractNameFromDictionary(const base::Value* value) {
+ const base::DictionaryValue* value_dict = nullptr;
+ const base::Value* name_value = nullptr;
+ if (value->GetAsDictionary(&value_dict)) {
+ value_dict->Get("name", &name_value);
+ return name_value;
+ }
+ return value;
+}
+
+bool IsValidSchema(const base::DictionaryValue* dict,
+ int options,
+ std::string* error) {
+ // This array must be sorted, so that std::lower_bound can perform a
+ // binary search.
+ static const ExpectedType kExpectedTypes[] = {
+ // Note: kRef == "$ref", kSchema == "$schema"
+ { schema::kRef, base::Value::Type::STRING },
+ { schema::kSchema, base::Value::Type::STRING },
+
+ { schema::kAdditionalProperties, base::Value::Type::DICTIONARY },
+ { schema::kChoices, base::Value::Type::LIST },
+ { schema::kDescription, base::Value::Type::STRING },
+ { schema::kEnum, base::Value::Type::LIST },
+ { schema::kId, base::Value::Type::STRING },
+ { schema::kMaxItems, base::Value::Type::INTEGER },
+ { schema::kMaxLength, base::Value::Type::INTEGER },
+ { schema::kMaximum, base::Value::Type::DOUBLE },
+ { schema::kMinItems, base::Value::Type::INTEGER },
+ { schema::kMinLength, base::Value::Type::INTEGER },
+ { schema::kMinimum, base::Value::Type::DOUBLE },
+ { schema::kOptional, base::Value::Type::BOOLEAN },
+ { schema::kPattern, base::Value::Type::STRING },
+ { schema::kPatternProperties, base::Value::Type::DICTIONARY },
+ { schema::kProperties, base::Value::Type::DICTIONARY },
+ { schema::kRequired, base::Value::Type::LIST },
+ { schema::kTitle, base::Value::Type::STRING },
+ };
+
+ bool has_type_or_ref = false;
+ const base::ListValue* list_value = nullptr;
+ const base::DictionaryValue* dictionary_value = nullptr;
+ std::string string_value;
+
+ for (base::DictionaryValue::Iterator it(*dict); !it.IsAtEnd(); it.Advance()) {
+ // Validate the "type" attribute, which may be a string or a list.
+ if (it.key() == schema::kType) {
+ switch (it.value().type()) {
+ case base::Value::Type::STRING:
+ it.value().GetAsString(&string_value);
+ if (!IsValidType(string_value)) {
+ *error = "Invalid value for type attribute";
+ return false;
+ }
+ break;
+ case base::Value::Type::LIST:
+ it.value().GetAsList(&list_value);
+ for (size_t i = 0; i < list_value->GetSize(); ++i) {
+ if (!list_value->GetString(i, &string_value) ||
+ !IsValidType(string_value)) {
+ *error = "Invalid value for type attribute";
+ return false;
+ }
+ }
+ break;
+ default:
+ *error = "Invalid value for type attribute";
+ return false;
+ }
+ has_type_or_ref = true;
+ continue;
+ }
+
+ // Validate the "items" attribute, which is a schema or a list of schemas.
+ if (it.key() == schema::kItems) {
+ if (it.value().GetAsDictionary(&dictionary_value)) {
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ } else if (it.value().GetAsList(&list_value)) {
+ for (size_t i = 0; i < list_value->GetSize(); ++i) {
+ if (!list_value->GetDictionary(i, &dictionary_value)) {
+ *error = base::StringPrintf(
+ "Invalid entry in items attribute at index %d",
+ static_cast<int>(i));
+ return false;
+ }
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ }
+ } else {
+ *error = "Invalid value for items attribute";
+ return false;
+ }
+ continue;
+ }
+
+ // All the other attributes have a single valid type.
+ const ExpectedType* end = kExpectedTypes + arraysize(kExpectedTypes);
+ const ExpectedType* entry = std::lower_bound(
+ kExpectedTypes, end, it.key(), CompareToString);
+ if (entry == end || entry->key != it.key()) {
+ if (options & JSONSchemaValidator::OPTIONS_IGNORE_UNKNOWN_ATTRIBUTES)
+ continue;
+ *error = base::StringPrintf("Invalid attribute %s", it.key().c_str());
+ return false;
+ }
+
+ // Integer can be converted to double.
+ if (!(it.value().type() == entry->type ||
+ (it.value().is_int() && entry->type == base::Value::Type::DOUBLE))) {
+ *error = base::StringPrintf("Invalid value for %s attribute",
+ it.key().c_str());
+ return false;
+ }
+
+ // base::Value::Type::INTEGER attributes must be >= 0.
+ // This applies to "minItems", "maxItems", "minLength" and "maxLength".
+ if (it.value().is_int()) {
+ int integer_value;
+ it.value().GetAsInteger(&integer_value);
+ if (integer_value < 0) {
+ *error = base::StringPrintf("Value of %s must be >= 0, got %d",
+ it.key().c_str(), integer_value);
+ return false;
+ }
+ }
+
+ // Validate the "properties" attribute. Each entry maps a key to a schema.
+ if (it.key() == schema::kProperties) {
+ it.value().GetAsDictionary(&dictionary_value);
+ for (base::DictionaryValue::Iterator iter(*dictionary_value);
+ !iter.IsAtEnd(); iter.Advance()) {
+ if (!iter.value().GetAsDictionary(&dictionary_value)) {
+ *error = "properties must be a dictionary";
+ return false;
+ }
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ }
+ }
+
+ // Validate the "patternProperties" attribute. Each entry maps a regular
+    // expression to a schema. The validity of the regular expression
+ // won't be checked here for performance reasons. Instead, invalid regular
+ // expressions will be caught as validation errors in Validate().
+ if (it.key() == schema::kPatternProperties) {
+ it.value().GetAsDictionary(&dictionary_value);
+ for (base::DictionaryValue::Iterator iter(*dictionary_value);
+ !iter.IsAtEnd(); iter.Advance()) {
+ if (!iter.value().GetAsDictionary(&dictionary_value)) {
+ *error = "patternProperties must be a dictionary";
+ return false;
+ }
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ }
+ }
+
+ // Validate "additionalProperties" attribute, which is a schema.
+ if (it.key() == schema::kAdditionalProperties) {
+ it.value().GetAsDictionary(&dictionary_value);
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ }
+
+ // Validate "required" attribute.
+ if (it.key() == schema::kRequired) {
+ it.value().GetAsList(&list_value);
+ for (const base::Value& value : *list_value) {
+ if (value.type() != base::Value::Type::STRING) {
+ *error = "Invalid value in 'required' attribute";
+ return false;
+ }
+ // TODO(crbug.com/856903): Check that |value| is a key in
+ // schema::kProperties
+ }
+ }
+
+ // Validate the values contained in an "enum" attribute.
+ if (it.key() == schema::kEnum) {
+ it.value().GetAsList(&list_value);
+ for (size_t i = 0; i < list_value->GetSize(); ++i) {
+ const base::Value* value = nullptr;
+ list_value->Get(i, &value);
+ // Sometimes the enum declaration is a dictionary with the enum value
+ // under "name".
+ value = ExtractNameFromDictionary(value);
+ if (!value) {
+ *error = "Invalid value in enum attribute";
+ return false;
+ }
+ switch (value->type()) {
+ case base::Value::Type::NONE:
+ case base::Value::Type::BOOLEAN:
+ case base::Value::Type::INTEGER:
+ case base::Value::Type::DOUBLE:
+ case base::Value::Type::STRING:
+ break;
+ default:
+ *error = "Invalid value in enum attribute";
+ return false;
+ }
+ }
+ }
+
+ // Validate the schemas contained in a "choices" attribute.
+ if (it.key() == schema::kChoices) {
+ it.value().GetAsList(&list_value);
+ for (size_t i = 0; i < list_value->GetSize(); ++i) {
+ if (!list_value->GetDictionary(i, &dictionary_value)) {
+ *error = "Invalid choices attribute";
+ return false;
+ }
+ if (!IsValidSchema(dictionary_value, options, error)) {
+ DCHECK(!error->empty());
+ return false;
+ }
+ }
+ }
+
+ if (it.key() == schema::kRef)
+ has_type_or_ref = true;
+ }
+
+ if (!has_type_or_ref) {
+ *error = "Schema must have a type or a $ref attribute";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+
+JSONSchemaValidator::Error::Error() {
+}
+
+JSONSchemaValidator::Error::Error(const std::string& message)
+ : path(message) {
+}
+
+JSONSchemaValidator::Error::Error(const std::string& path,
+ const std::string& message)
+ : path(path), message(message) {
+}
+
+
+const char JSONSchemaValidator::kUnknownTypeReference[] =
+ "Unknown schema reference: *.";
+const char JSONSchemaValidator::kInvalidChoice[] =
+ "Value does not match any valid type choices.";
+const char JSONSchemaValidator::kInvalidEnum[] =
+ "Value does not match any valid enum choices.";
+const char JSONSchemaValidator::kObjectPropertyIsRequired[] =
+ "Property is required.";
+const char JSONSchemaValidator::kUnexpectedProperty[] =
+ "Unexpected property.";
+const char JSONSchemaValidator::kArrayMinItems[] =
+ "Array must have at least * items.";
+const char JSONSchemaValidator::kArrayMaxItems[] =
+ "Array must not have more than * items.";
+const char JSONSchemaValidator::kArrayItemRequired[] =
+ "Item is required.";
+const char JSONSchemaValidator::kStringMinLength[] =
+ "String must be at least * characters long.";
+const char JSONSchemaValidator::kStringMaxLength[] =
+ "String must not be more than * characters long.";
+const char JSONSchemaValidator::kStringPattern[] =
+ "String must match the pattern: *.";
+const char JSONSchemaValidator::kNumberMinimum[] =
+ "Value must not be less than *.";
+const char JSONSchemaValidator::kNumberMaximum[] =
+ "Value must not be greater than *.";
+const char JSONSchemaValidator::kInvalidType[] =
+ "Expected '*' but got '*'.";
+const char JSONSchemaValidator::kInvalidTypeIntegerNumber[] =
+ "Expected 'integer' but got 'number', consider using Math.round().";
+const char JSONSchemaValidator::kInvalidRegex[] =
+ "Regular expression /*/ is invalid: *";
+
+
+// static
+std::string JSONSchemaValidator::GetJSONSchemaType(const base::Value* value) {
+ switch (value->type()) {
+ case base::Value::Type::NONE:
+ return schema::kNull;
+ case base::Value::Type::BOOLEAN:
+ return schema::kBoolean;
+ case base::Value::Type::INTEGER:
+ return schema::kInteger;
+ case base::Value::Type::DOUBLE: {
+ double double_value = 0;
+ value->GetAsDouble(&double_value);
+ if (std::abs(double_value) <= std::pow(2.0, DBL_MANT_DIG) &&
+ double_value == floor(double_value)) {
+ return schema::kInteger;
+ }
+ return schema::kNumber;
+ }
+ case base::Value::Type::STRING:
+ return schema::kString;
+ case base::Value::Type::DICTIONARY:
+ return schema::kObject;
+ case base::Value::Type::LIST:
+ return schema::kArray;
+ default:
+ NOTREACHED() << "Unexpected value type: " << value->type();
+ return std::string();
+ }
+}
+
+// static
+std::string JSONSchemaValidator::FormatErrorMessage(const std::string& format,
+ const std::string& s1) {
+ std::string ret_val = format;
+ base::ReplaceFirstSubstringAfterOffset(&ret_val, 0, "*", s1);
+ return ret_val;
+}
+
+// static
+std::string JSONSchemaValidator::FormatErrorMessage(const std::string& format,
+ const std::string& s1,
+ const std::string& s2) {
+ std::string ret_val = format;
+ base::ReplaceFirstSubstringAfterOffset(&ret_val, 0, "*", s1);
+ base::ReplaceFirstSubstringAfterOffset(&ret_val, 0, "*", s2);
+ return ret_val;
+}
+
+// static
+std::unique_ptr<base::DictionaryValue> JSONSchemaValidator::IsValidSchema(
+ const std::string& schema,
+ std::string* error) {
+ return JSONSchemaValidator::IsValidSchema(schema, 0, error);
+}
+
+// static
+std::unique_ptr<base::DictionaryValue> JSONSchemaValidator::IsValidSchema(
+ const std::string& schema,
+ int validator_options,
+ std::string* error) {
+ base::JSONParserOptions json_options = base::JSON_PARSE_RFC;
+ std::unique_ptr<base::Value> json = base::JSONReader::ReadAndReturnError(
+ schema, json_options, nullptr, error);
+ if (!json)
+ return std::unique_ptr<base::DictionaryValue>();
+ base::DictionaryValue* dict = nullptr;
+ if (!json->GetAsDictionary(&dict)) {
+ *error = "Schema must be a JSON object";
+ return std::unique_ptr<base::DictionaryValue>();
+ }
+ if (!::IsValidSchema(dict, validator_options, error))
+ return std::unique_ptr<base::DictionaryValue>();
+ ignore_result(json.release());
+ return base::WrapUnique(dict);
+}
+
+JSONSchemaValidator::JSONSchemaValidator(base::DictionaryValue* schema)
+ : schema_root_(schema), default_allow_additional_properties_(false) {
+}
+
+JSONSchemaValidator::JSONSchemaValidator(base::DictionaryValue* schema,
+ base::ListValue* types)
+ : schema_root_(schema), default_allow_additional_properties_(false) {
+ if (!types)
+ return;
+
+ for (size_t i = 0; i < types->GetSize(); ++i) {
+ base::DictionaryValue* type = nullptr;
+ CHECK(types->GetDictionary(i, &type));
+
+ std::string id;
+ CHECK(type->GetString(schema::kId, &id));
+
+ CHECK(types_.find(id) == types_.end());
+ types_[id] = type;
+ }
+}
+
+JSONSchemaValidator::~JSONSchemaValidator() {}
+
+bool JSONSchemaValidator::Validate(const base::Value* instance) {
+ errors_.clear();
+ Validate(instance, schema_root_, std::string());
+ return errors_.empty();
+}
+
+void JSONSchemaValidator::Validate(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+  // If this schema defines itself as a reference type, save it in |types_|.
+ std::string id;
+ if (schema->GetString(schema::kId, &id)) {
+ TypeMap::iterator iter = types_.find(id);
+ if (iter == types_.end())
+ types_[id] = schema;
+ else
+ DCHECK(iter->second == schema);
+ }
+
+ // If the schema has a $ref property, the instance must validate against
+ // that schema. It must be present in types_ to be referenced.
+ std::string ref;
+ if (schema->GetString(schema::kRef, &ref)) {
+ TypeMap::iterator type = types_.find(ref);
+ if (type == types_.end()) {
+ errors_.push_back(
+ Error(path, FormatErrorMessage(kUnknownTypeReference, ref)));
+ } else {
+ Validate(instance, type->second, path);
+ }
+ return;
+ }
+
+ // If the schema has a choices property, the instance must validate against at
+ // least one of the items in that array.
+ const base::ListValue* choices = nullptr;
+ if (schema->GetList(schema::kChoices, &choices)) {
+ ValidateChoices(instance, choices, path);
+ return;
+ }
+
+ // If the schema has an enum property, the instance must be one of those
+ // values.
+ const base::ListValue* enumeration = nullptr;
+ if (schema->GetList(schema::kEnum, &enumeration)) {
+ ValidateEnum(instance, enumeration, path);
+ return;
+ }
+
+ std::string type;
+ schema->GetString(schema::kType, &type);
+ CHECK(!type.empty());
+ if (type != schema::kAny) {
+ if (!ValidateType(instance, type, path))
+ return;
+
+ // These casts are safe because of checks in ValidateType().
+ if (type == schema::kObject) {
+ ValidateObject(static_cast<const base::DictionaryValue*>(instance),
+ schema,
+ path);
+ } else if (type == schema::kArray) {
+ ValidateArray(static_cast<const base::ListValue*>(instance),
+ schema, path);
+ } else if (type == schema::kString) {
+ // Intentionally NOT downcasting to StringValue*. Type::STRING only
+ // implies GetAsString() can safely be carried out, not that it's a
+ // StringValue.
+ ValidateString(instance, schema, path);
+ } else if (type == schema::kNumber || type == schema::kInteger) {
+ ValidateNumber(instance, schema, path);
+ } else if (type != schema::kBoolean && type != schema::kNull) {
+ NOTREACHED() << "Unexpected type: " << type;
+ }
+ }
+}
+
+void JSONSchemaValidator::ValidateChoices(const base::Value* instance,
+ const base::ListValue* choices,
+ const std::string& path) {
+ size_t original_num_errors = errors_.size();
+
+ for (size_t i = 0; i < choices->GetSize(); ++i) {
+ const base::DictionaryValue* choice = nullptr;
+ CHECK(choices->GetDictionary(i, &choice));
+
+ Validate(instance, choice, path);
+ if (errors_.size() == original_num_errors)
+ return;
+
+ // We discard the error from each choice. We only want to know if any of the
+ // validations succeeded.
+ errors_.resize(original_num_errors);
+ }
+
+ // Now add a generic error that no choices matched.
+ errors_.push_back(Error(path, kInvalidChoice));
+ return;
+}
+
+void JSONSchemaValidator::ValidateEnum(const base::Value* instance,
+ const base::ListValue* choices,
+ const std::string& path) {
+ for (size_t i = 0; i < choices->GetSize(); ++i) {
+ const base::Value* choice = nullptr;
+ CHECK(choices->Get(i, &choice));
+ // Sometimes the enum declaration is a dictionary with the enum value under
+ // "name".
+ choice = ExtractNameFromDictionary(choice);
+ if (!choice) {
+ NOTREACHED();
+ }
+ switch (choice->type()) {
+ case base::Value::Type::NONE:
+ case base::Value::Type::BOOLEAN:
+ case base::Value::Type::STRING:
+ if (instance->Equals(choice))
+ return;
+ break;
+
+ case base::Value::Type::INTEGER:
+ case base::Value::Type::DOUBLE:
+ if (instance->is_int() || instance->is_double()) {
+ if (GetNumberValue(choice) == GetNumberValue(instance))
+ return;
+ }
+ break;
+
+ default:
+ NOTREACHED() << "Unexpected type in enum: " << choice->type();
+ }
+ }
+
+ errors_.push_back(Error(path, kInvalidEnum));
+}
+
+void JSONSchemaValidator::ValidateObject(const base::DictionaryValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+ const base::DictionaryValue* properties = nullptr;
+ if (schema->GetDictionary(schema::kProperties, &properties)) {
+ for (base::DictionaryValue::Iterator it(*properties); !it.IsAtEnd();
+ it.Advance()) {
+ std::string prop_path = path.empty() ? it.key() : (path + "." + it.key());
+ const base::DictionaryValue* prop_schema = nullptr;
+ CHECK(it.value().GetAsDictionary(&prop_schema));
+
+ const base::Value* prop_value = nullptr;
+ if (instance->Get(it.key(), &prop_value)) {
+ Validate(prop_value, prop_schema, prop_path);
+ } else {
+ // Properties are required unless there is an optional field set to
+ // 'true'.
+ bool is_optional = false;
+ prop_schema->GetBoolean(schema::kOptional, &is_optional);
+ if (!is_optional) {
+ errors_.push_back(Error(prop_path, kObjectPropertyIsRequired));
+ }
+ }
+ }
+ }
+
+ const base::DictionaryValue* additional_properties_schema = nullptr;
+ bool allow_any_additional_properties =
+ SchemaAllowsAnyAdditionalItems(schema, &additional_properties_schema);
+
+ const base::DictionaryValue* pattern_properties = nullptr;
+ std::vector<std::unique_ptr<re2::RE2>> pattern_properties_pattern;
+ std::vector<const base::DictionaryValue*> pattern_properties_schema;
+
+ if (schema->GetDictionary(schema::kPatternProperties, &pattern_properties)) {
+ for (base::DictionaryValue::Iterator it(*pattern_properties); !it.IsAtEnd();
+ it.Advance()) {
+ auto prop_pattern = std::make_unique<re2::RE2>(it.key());
+ if (!prop_pattern->ok()) {
+ LOG(WARNING) << "Regular expression /" << it.key()
+ << "/ is invalid: " << prop_pattern->error() << ".";
+ errors_.push_back(
+ Error(path,
+ FormatErrorMessage(
+ kInvalidRegex, it.key(), prop_pattern->error())));
+ continue;
+ }
+ const base::DictionaryValue* prop_schema = nullptr;
+ CHECK(it.value().GetAsDictionary(&prop_schema));
+ pattern_properties_pattern.push_back(std::move(prop_pattern));
+ pattern_properties_schema.push_back(prop_schema);
+ }
+ }
+
+ // Validate pattern properties and additional properties.
+ for (base::DictionaryValue::Iterator it(*instance); !it.IsAtEnd();
+ it.Advance()) {
+ std::string prop_path = path.empty() ? it.key() : path + "." + it.key();
+
+ bool found_matching_pattern = false;
+ for (size_t index = 0; index < pattern_properties_pattern.size(); ++index) {
+ if (re2::RE2::PartialMatch(it.key(),
+ *pattern_properties_pattern[index])) {
+ found_matching_pattern = true;
+ Validate(&it.value(), pattern_properties_schema[index], prop_path);
+ break;
+ }
+ }
+
+ if (found_matching_pattern || allow_any_additional_properties ||
+ (properties && properties->HasKey(it.key())))
+ continue;
+
+ if (!additional_properties_schema) {
+ errors_.push_back(Error(prop_path, kUnexpectedProperty));
+ } else {
+ Validate(&it.value(), additional_properties_schema, prop_path);
+ }
+ }
+}
+
+void JSONSchemaValidator::ValidateArray(const base::ListValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+ const base::DictionaryValue* single_type = nullptr;
+ size_t instance_size = instance->GetSize();
+ if (schema->GetDictionary(schema::kItems, &single_type)) {
+ int min_items = 0;
+ if (schema->GetInteger(schema::kMinItems, &min_items)) {
+ CHECK(min_items >= 0);
+ if (instance_size < static_cast<size_t>(min_items)) {
+ errors_.push_back(Error(path, FormatErrorMessage(
+ kArrayMinItems, base::IntToString(min_items))));
+ }
+ }
+
+ int max_items = 0;
+ if (schema->GetInteger(schema::kMaxItems, &max_items)) {
+ CHECK(max_items >= 0);
+ if (instance_size > static_cast<size_t>(max_items)) {
+ errors_.push_back(Error(path, FormatErrorMessage(
+ kArrayMaxItems, base::IntToString(max_items))));
+ }
+ }
+
+ // If the items property is a single schema, each item in the array must
+ // validate against that schema.
+ for (size_t i = 0; i < instance_size; ++i) {
+ const base::Value* item = nullptr;
+ CHECK(instance->Get(i, &item));
+ std::string i_str = base::NumberToString(i);
+ std::string item_path = path.empty() ? i_str : (path + "." + i_str);
+ Validate(item, single_type, item_path);
+ }
+
+ return;
+ }
+
+ // Otherwise, the list must be a tuple type, where each item in the list has a
+ // particular schema.
+ ValidateTuple(instance, schema, path);
+}
+
+void JSONSchemaValidator::ValidateTuple(const base::ListValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+ const base::ListValue* tuple_type = nullptr;
+ schema->GetList(schema::kItems, &tuple_type);
+ size_t tuple_size = tuple_type ? tuple_type->GetSize() : 0;
+ if (tuple_type) {
+ for (size_t i = 0; i < tuple_size; ++i) {
+ std::string i_str = base::NumberToString(i);
+ std::string item_path = path.empty() ? i_str : (path + "." + i_str);
+ const base::DictionaryValue* item_schema = nullptr;
+ CHECK(tuple_type->GetDictionary(i, &item_schema));
+ const base::Value* item_value = nullptr;
+ instance->Get(i, &item_value);
+ if (item_value && item_value->type() != base::Value::Type::NONE) {
+ Validate(item_value, item_schema, item_path);
+ } else {
+ bool is_optional = false;
+ item_schema->GetBoolean(schema::kOptional, &is_optional);
+ if (!is_optional) {
+ errors_.push_back(Error(item_path, kArrayItemRequired));
+ return;
+ }
+ }
+ }
+ }
+
+ const base::DictionaryValue* additional_properties_schema = nullptr;
+ if (SchemaAllowsAnyAdditionalItems(schema, &additional_properties_schema))
+ return;
+
+ size_t instance_size = instance->GetSize();
+ if (additional_properties_schema) {
+ // Any additional properties must validate against the additionalProperties
+ // schema.
+ for (size_t i = tuple_size; i < instance_size; ++i) {
+ std::string i_str = base::NumberToString(i);
+ std::string item_path = path.empty() ? i_str : (path + "." + i_str);
+ const base::Value* item_value = nullptr;
+ CHECK(instance->Get(i, &item_value));
+ Validate(item_value, additional_properties_schema, item_path);
+ }
+ } else if (instance_size > tuple_size) {
+ errors_.push_back(Error(
+ path,
+ FormatErrorMessage(kArrayMaxItems, base::NumberToString(tuple_size))));
+ }
+}
+
+void JSONSchemaValidator::ValidateString(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+ std::string value;
+ CHECK(instance->GetAsString(&value));
+
+ int min_length = 0;
+ if (schema->GetInteger(schema::kMinLength, &min_length)) {
+ CHECK(min_length >= 0);
+ if (value.size() < static_cast<size_t>(min_length)) {
+ errors_.push_back(Error(path, FormatErrorMessage(
+ kStringMinLength, base::IntToString(min_length))));
+ }
+ }
+
+ int max_length = 0;
+ if (schema->GetInteger(schema::kMaxLength, &max_length)) {
+ CHECK(max_length >= 0);
+ if (value.size() > static_cast<size_t>(max_length)) {
+ errors_.push_back(Error(path, FormatErrorMessage(
+ kStringMaxLength, base::IntToString(max_length))));
+ }
+ }
+
+ std::string pattern;
+ if (schema->GetString(schema::kPattern, &pattern)) {
+ re2::RE2 compiled_regex(pattern);
+ if (!compiled_regex.ok()) {
+ LOG(WARNING) << "Regular expression /" << pattern
+ << "/ is invalid: " << compiled_regex.error() << ".";
+ errors_.push_back(Error(
+ path,
+ FormatErrorMessage(kInvalidRegex, pattern, compiled_regex.error())));
+ } else if (!re2::RE2::PartialMatch(value, compiled_regex)) {
+ errors_.push_back(
+ Error(path, FormatErrorMessage(kStringPattern, pattern)));
+ }
+ }
+}
+
+void JSONSchemaValidator::ValidateNumber(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path) {
+ double value = GetNumberValue(instance);
+
+ // TODO(aa): It would be good to test that the double is not infinity or nan,
+ // but isnan and isinf aren't defined on Windows.
+
+ double minimum = 0;
+ if (schema->GetDouble(schema::kMinimum, &minimum)) {
+ if (value < minimum)
+ errors_.push_back(Error(
+ path,
+ FormatErrorMessage(kNumberMinimum, base::NumberToString(minimum))));
+ }
+
+ double maximum = 0;
+ if (schema->GetDouble(schema::kMaximum, &maximum)) {
+ if (value > maximum)
+ errors_.push_back(Error(
+ path,
+ FormatErrorMessage(kNumberMaximum, base::NumberToString(maximum))));
+ }
+}
+
+bool JSONSchemaValidator::ValidateType(const base::Value* instance,
+ const std::string& expected_type,
+ const std::string& path) {
+ std::string actual_type = GetJSONSchemaType(instance);
+ if (expected_type == actual_type ||
+ (expected_type == schema::kNumber && actual_type == schema::kInteger)) {
+ return true;
+ }
+ if (expected_type == schema::kInteger && actual_type == schema::kNumber) {
+ errors_.push_back(Error(path, kInvalidTypeIntegerNumber));
+ return false;
+ }
+ errors_.push_back(Error(
+ path, FormatErrorMessage(kInvalidType, expected_type, actual_type)));
+ return false;
+}
+
+bool JSONSchemaValidator::SchemaAllowsAnyAdditionalItems(
+ const base::DictionaryValue* schema,
+ const base::DictionaryValue** additional_properties_schema) {
+ // If the validator allows additional properties globally, and this schema
+ // doesn't override, then we can exit early.
+ schema->GetDictionary(schema::kAdditionalProperties,
+ additional_properties_schema);
+
+ if (*additional_properties_schema) {
+ std::string additional_properties_type(schema::kAny);
+ CHECK((*additional_properties_schema)->GetString(
+ schema::kType, &additional_properties_type));
+ return additional_properties_type == schema::kAny;
+ }
+ return default_allow_additional_properties_;
+}
diff --git a/components/json_schema/json_schema_validator.h b/components/json_schema/json_schema_validator.h
new file mode 100644
index 0000000000..f2cb841ff2
--- /dev/null
+++ b/components/json_schema/json_schema_validator.h
@@ -0,0 +1,251 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_VALIDATOR_H_
+#define COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_VALIDATOR_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+
+namespace base {
+class DictionaryValue;
+class ListValue;
+class Value;
+}
+
+//==============================================================================
+// This class implements a subset of JSON Schema.
+// See: http://www.json.com/json-schema-proposal/ for more details.
+//
+// There is also an older JavaScript implementation of the same functionality in
+// chrome/renderer/resources/json_schema.js.
+//
+// The following features of JSON Schema are not implemented:
+// - requires
+// - unique
+// - disallow
+// - union types (but replaced with 'choices')
+// - number.maxDecimal
+//
+// The following properties are not applicable to the interface exposed by
+// this class:
+// - options
+// - readonly
+// - title
+// - description
+// - format
+// - default
+// - transient
+// - hidden
+//
+// There are also these departures from the JSON Schema proposal:
+// - null counts as 'unspecified' for optional values
+// - added the 'choices' property, to allow specifying a list of possible types
+// for a value
+// - by default an "object" typed schema does not allow additional properties.
+//   If present, "additionalProperties" is to be a schema against which all
+// additional properties will be validated.
+// - regular expressions support all syntaxes that re2 accepts.
+// See https://github.com/google/re2/blob/master/doc/syntax.txt for details.
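+//
+// Example usage (an illustrative sketch; error handling elided):
+//
+//   std::string error;
+//   std::unique_ptr<base::DictionaryValue> schema =
+//       JSONSchemaValidator::IsValidSchema("{\"type\": \"object\"}", &error);
+//   if (schema) {
+//     JSONSchemaValidator validator(schema.get());
+//     bool ok = validator.Validate(instance);  // |instance| is a base::Value*.
+//   }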
+//==============================================================================
+class JSONSchemaValidator {
+ public:
+ // Details about a validation error.
+ struct Error {
+ Error();
+
+ explicit Error(const std::string& message);
+
+ Error(const std::string& path, const std::string& message);
+
+ // The path to the location of the error in the JSON structure.
+ std::string path;
+
+    // An English message describing the error.
+ std::string message;
+ };
+
+ enum Options {
+ // Ignore unknown attributes. If this option is not set then unknown
+ // attributes will make the schema validation fail.
+ OPTIONS_IGNORE_UNKNOWN_ATTRIBUTES = 1 << 0,
+ };
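+  //
+  // Illustrative use with the overload of IsValidSchema() below
+  // (|schema_json| and |error| are hypothetical):
+  //   JSONSchemaValidator::IsValidSchema(
+  //       schema_json, JSONSchemaValidator::OPTIONS_IGNORE_UNKNOWN_ATTRIBUTES,
+  //       &error);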
+
+ // Error messages.
+ static const char kUnknownTypeReference[];
+ static const char kInvalidChoice[];
+ static const char kInvalidEnum[];
+ static const char kObjectPropertyIsRequired[];
+ static const char kUnexpectedProperty[];
+ static const char kArrayMinItems[];
+ static const char kArrayMaxItems[];
+ static const char kArrayItemRequired[];
+ static const char kStringMinLength[];
+ static const char kStringMaxLength[];
+ static const char kStringPattern[];
+ static const char kNumberMinimum[];
+ static const char kNumberMaximum[];
+ static const char kInvalidType[];
+ static const char kInvalidTypeIntegerNumber[];
+ static const char kInvalidRegex[];
+
+ // Classifies a Value as one of the JSON schema primitive types.
+ static std::string GetJSONSchemaType(const base::Value* value);
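+  //
+  // e.g. a base::Value holding 4.0 is classified as "integer", while one
+  // holding 4.5 is classified as "number".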
+
+ // Utility methods to format error messages. The first method can have one
+ // wildcard represented by '*', which is replaced with s1. The second method
+ // can have two, which are replaced by s1 and s2.
+ static std::string FormatErrorMessage(const std::string& format,
+ const std::string& s1);
+ static std::string FormatErrorMessage(const std::string& format,
+ const std::string& s1,
+ const std::string& s2);
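+  //
+  // e.g. FormatErrorMessage(kStringPattern, "[a-z]+") returns
+  // "String must match the pattern: [a-z]+.".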
+
+ // Verifies if |schema| is a valid JSON v3 schema. When this validation passes
+ // then |schema| is valid JSON that can be parsed into a DictionaryValue,
+ // and that DictionaryValue can be used to build a JSONSchemaValidator.
+  // Returns the parsed DictionaryValue when |schema| is valid; otherwise
+ // returns NULL. In that case, |error| contains an error description.
+  // For performance reasons, IsValidSchema() currently doesn't check the
+  // correctness of regular expressions used in "pattern" and
+  // "patternProperties"; in Validate(), invalid regular expressions don't
+  // accept any strings.
+ static std::unique_ptr<base::DictionaryValue> IsValidSchema(
+ const std::string& schema,
+ std::string* error);
+
+ // Same as above but with |options|, which is a bitwise-OR combination of the
+ // Options above.
+ static std::unique_ptr<base::DictionaryValue>
+ IsValidSchema(const std::string& schema, int options, std::string* error);
+
+ // Creates a validator for the specified schema.
+ //
+ // NOTE: This constructor assumes that |schema| is well formed and valid.
+ // Errors will result in CHECK at runtime; this constructor should not be used
+ // with untrusted schemas.
+ explicit JSONSchemaValidator(base::DictionaryValue* schema);
+
+ // Creates a validator for the specified schema and user-defined types. Each
+ // type must be a valid JSONSchema type description with an additional "id"
+ // field. Schema objects in |schema| can refer to these types with the "$ref"
+ // property.
+ //
+ // NOTE: This constructor assumes that |schema| and |types| are well-formed
+ // and valid. Errors will result in CHECK at runtime; this constructor should
+ // not be used with untrusted schemas.
+ JSONSchemaValidator(base::DictionaryValue* schema, base::ListValue* types);
+
+ ~JSONSchemaValidator();
+
+ // Whether the validator allows additional items for objects and lists, beyond
+ // those defined by their schema, by default.
+ //
+ // This setting defaults to false: all items in an instance list or object
+ // must be defined by the corresponding schema.
+ //
+ // This setting can be overridden on individual object and list schemas by
+ // setting the "additionalProperties" field.
+ bool default_allow_additional_properties() const {
+ return default_allow_additional_properties_;
+ }
+
+ void set_default_allow_additional_properties(bool val) {
+ default_allow_additional_properties_ = val;
+ }
+
+  // Returns any errors from the last call to Validate().
+ const std::vector<Error>& errors() const {
+ return errors_;
+ }
+
+ // Validates a JSON value. Returns true if the instance is valid, false
+ // otherwise. If false is returned any errors are available from the errors()
+ // getter.
+ bool Validate(const base::Value* instance);
+
+ private:
+ typedef std::map<std::string, const base::DictionaryValue*> TypeMap;
+
+ // Each of the below methods handle a subset of the validation process. The
+  // path parameter is the path to |instance| from the root of the instance tree
+ // and is used in error messages.
+
+ // Validates any instance node against any schema node. This is called for
+ // every node in the instance tree, and it just decides which of the more
+ // detailed methods to call.
+ void Validate(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+  // Validates a node against a list of possible schemas. If any one of the
+  // schemas matches, the node is valid.
+ void ValidateChoices(const base::Value* instance,
+ const base::ListValue* choices,
+ const std::string& path);
+
+  // Validates a node against a list of exact primitive values, e.g. 42,
+  // "foobar".
+ void ValidateEnum(const base::Value* instance,
+ const base::ListValue* choices,
+ const std::string& path);
+
+ // Validates a JSON object against an object schema node.
+ void ValidateObject(const base::DictionaryValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+ // Validates a JSON array against an array schema node.
+ void ValidateArray(const base::ListValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+ // Validates a JSON array against an array schema node configured to be a
+ // tuple. In a tuple, there is one schema node for each item expected in the
+ // array.
+ void ValidateTuple(const base::ListValue* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+ // Validate a JSON string against a string schema node.
+ void ValidateString(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+ // Validate a JSON number against a number schema node.
+ void ValidateNumber(const base::Value* instance,
+ const base::DictionaryValue* schema,
+ const std::string& path);
+
+ // Validates that the JSON node |instance| has |expected_type|.
+ bool ValidateType(const base::Value* instance,
+ const std::string& expected_type,
+ const std::string& path);
+
+ // Returns true if |schema| will allow additional items of any type.
+ bool SchemaAllowsAnyAdditionalItems(
+ const base::DictionaryValue* schema,
+      const base::DictionaryValue** additional_items_schema);
+
+ // The root schema node.
+ base::DictionaryValue* schema_root_;
+
+ // Map of user-defined name to type.
+ TypeMap types_;
+
+ // Whether we allow additional properties on objects by default. This can be
+ // overridden by the allow_additional_properties flag on an Object schema.
+ bool default_allow_additional_properties_;
+
+ // Errors accumulated since the last call to Validate().
+ std::vector<Error> errors_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONSchemaValidator);
+};
+
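+// A minimal usage sketch (|schema_json| and |instance| are hypothetical
+// stand-ins for the caller's schema string and the value to check):
+//
+//   std::string error;
+//   std::unique_ptr<base::DictionaryValue> schema =
+//       JSONSchemaValidator::IsValidSchema(schema_json, &error);
+//   if (!schema) {
+//     LOG(ERROR) << "Bad schema: " << error;
+//   } else {
+//     JSONSchemaValidator validator(schema.get());
+//     if (!validator.Validate(instance))
+//       LOG(ERROR) << validator.errors().size() << " validation error(s)";
+//   }
+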
+#endif // COMPONENTS_JSON_SCHEMA_JSON_SCHEMA_VALIDATOR_H_
diff --git a/components/policy/core/common/policy_load_status.cc b/components/policy/core/common/policy_load_status.cc
new file mode 100644
index 0000000000..1495d7c545
--- /dev/null
+++ b/components/policy/core/common/policy_load_status.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/policy/core/common/policy_load_status.h"
+
+#include "base/bind.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/stringprintf.h"
+#include "components/policy/core/common/policy_types.h"
+
+namespace policy {
+
+namespace {
+
+const char kHistogramName[] = "Enterprise.PolicyLoadStatus";
+
+} // namespace
+
+PolicyLoadStatusSampler::PolicyLoadStatusSampler() {
+ Add(POLICY_LOAD_STATUS_STARTED);
+}
+
+PolicyLoadStatusSampler::~PolicyLoadStatusSampler() {}
+
+void PolicyLoadStatusSampler::Add(PolicyLoadStatus status) {
+ status_bits_[status] = true;
+}
+
+PolicyLoadStatusUmaReporter::PolicyLoadStatusUmaReporter() {}
+
+PolicyLoadStatusUmaReporter::~PolicyLoadStatusUmaReporter() {
+ base::HistogramBase* histogram(base::LinearHistogram::FactoryGet(
+ kHistogramName, 1, POLICY_LOAD_STATUS_SIZE, POLICY_LOAD_STATUS_SIZE + 1,
+ base::Histogram::kUmaTargetedHistogramFlag));
+
+ for (int i = 0; i < POLICY_LOAD_STATUS_SIZE; ++i) {
+ if (GetStatusSet()[i])
+ histogram->Add(i);
+ }
+}
+
+} // namespace policy
diff --git a/components/policy/core/common/policy_load_status.h b/components/policy/core/common/policy_load_status.h
new file mode 100644
index 0000000000..dc3979ef1e
--- /dev/null
+++ b/components/policy/core/common/policy_load_status.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_CORE_COMMON_POLICY_LOAD_STATUS_H_
+#define COMPONENTS_POLICY_CORE_COMMON_POLICY_LOAD_STATUS_H_
+
+#include <bitset>
+
+#include "base/macros.h"
+#include "components/policy/policy_export.h"
+
+namespace policy {
+
+// UMA histogram enum for policy load status. Don't change existing constants;
+// append additional constants to the end if needed.
+enum PolicyLoadStatus {
+  // Policy load attempt started. This gets logged for each policy load
+  // attempt to establish a baseline on the number of requests; any number of
+  // the status codes below may be added in addition.
+ POLICY_LOAD_STATUS_STARTED = 0,
+ // System failed to determine whether there's policy.
+ POLICY_LOAD_STATUS_QUERY_FAILED = 1,
+ // No policy present.
+ POLICY_LOAD_STATUS_NO_POLICY = 2,
+ // Data inaccessible, such as non-local policy file.
+ POLICY_LOAD_STATUS_INACCCESSIBLE = 3,
+ // Data missing, such as policy file not present.
+ POLICY_LOAD_STATUS_MISSING = 4,
+ // Trying with Wow64 redirection disabled.
+ POLICY_LOAD_STATUS_WOW64_REDIRECTION_DISABLED = 5,
+ // Data read error, for example file reading errors.
+ POLICY_LOAD_STATUS_READ_ERROR = 6,
+ // Data too large to process.
+ POLICY_LOAD_STATUS_TOO_BIG = 7,
+ // Parse error.
+ POLICY_LOAD_STATUS_PARSE_ERROR = 8,
+
+ // This must stay last.
+ POLICY_LOAD_STATUS_SIZE
+};
+
+// A helper for collecting statuses for a policy load operation.
+class POLICY_EXPORT PolicyLoadStatusSampler {
+ public:
+ using StatusSet = std::bitset<POLICY_LOAD_STATUS_SIZE>;
+
+ PolicyLoadStatusSampler();
+ virtual ~PolicyLoadStatusSampler();
+
+ // Adds a status code.
+ void Add(PolicyLoadStatus status);
+
+ // Returns a set with all statuses.
+ const StatusSet& GetStatusSet() const { return status_bits_; }
+
+ private:
+ StatusSet status_bits_;
+ DISALLOW_COPY_AND_ASSIGN(PolicyLoadStatusSampler);
+};
+
+// A helper for generating policy load status UMA statistics. On destruction,
+// records histogram samples for the collected status codes.
+class POLICY_EXPORT PolicyLoadStatusUmaReporter
+ : public PolicyLoadStatusSampler {
+ public:
+ PolicyLoadStatusUmaReporter();
+ ~PolicyLoadStatusUmaReporter() override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PolicyLoadStatusUmaReporter);
+};
+
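+// A minimal usage sketch: keep a reporter alive for the duration of one load
+// attempt; the UMA sample is recorded when it goes out of scope
+// (|file_missing| is a hypothetical condition):
+//
+//   {
+//     PolicyLoadStatusUmaReporter status;
+//     // ... attempt to load policy ...
+//     if (file_missing)
+//       status.Add(POLICY_LOAD_STATUS_MISSING);
+//   }  // Destructor records STARTED plus any statuses added above.
+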
+} // namespace policy
+
+#endif // COMPONENTS_POLICY_CORE_COMMON_POLICY_LOAD_STATUS_H_
diff --git a/components/policy/core/common/policy_types.h b/components/policy/core/common/policy_types.h
new file mode 100644
index 0000000000..7595f8697c
--- /dev/null
+++ b/components/policy/core/common/policy_types.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_CORE_COMMON_POLICY_TYPES_H_
+#define COMPONENTS_POLICY_CORE_COMMON_POLICY_TYPES_H_
+
+namespace policy {
+
+// The scope of a policy flags whether it is meant to be applied to the current
+// user or to the machine. Note that this property pertains to the source of
+// the policy and has no direct correspondence to the distinction between User
+// Policy and Device Policy.
+enum PolicyScope {
+ // USER policies apply to sessions of the current user.
+ POLICY_SCOPE_USER,
+
+ // MACHINE policies apply to any users of the current machine.
+ POLICY_SCOPE_MACHINE,
+};
+
+// The level of a policy determines its enforceability and whether users can
+// override it or not. The values are listed in increasing order of priority.
+enum PolicyLevel {
+ // RECOMMENDED policies can be overridden by users. They are meant as a
+ // default value configured by admins, that users can customize.
+ POLICY_LEVEL_RECOMMENDED,
+
+ // MANDATORY policies must be enforced and users can't circumvent them.
+ POLICY_LEVEL_MANDATORY,
+};
+
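+// Since the enumerators above are declared in increasing order of priority,
+// the winning level can be picked with a plain comparison; a minimal sketch:
+//
+//   PolicyLevel StrongerLevel(PolicyLevel a, PolicyLevel b) {
+//     return a >= b ? a : b;  // MANDATORY beats RECOMMENDED.
+//   }
+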
+// The source of a policy indicates where its value is originating from. The
+// sources are ordered by priority (with weakest policy first).
+enum PolicySource {
+ // The policy was set because we are running in an enterprise environment.
+ POLICY_SOURCE_ENTERPRISE_DEFAULT,
+
+ // The policy was set by a cloud source.
+ POLICY_SOURCE_CLOUD,
+
+ // The policy was set by an Active Directory source.
+ POLICY_SOURCE_ACTIVE_DIRECTORY,
+
+ // Any non-platform policy was overridden because we are running in a
+ // public session.
+ POLICY_SOURCE_PUBLIC_SESSION_OVERRIDE,
+
+ // The policy was set by a platform source.
+ POLICY_SOURCE_PLATFORM,
+
+ // Number of source types. Has to be the last element.
+ POLICY_SOURCE_COUNT
+};
+
+} // namespace policy
+
+#endif // COMPONENTS_POLICY_CORE_COMMON_POLICY_TYPES_H_
diff --git a/components/policy/core/common/registry_dict.cc b/components/policy/core/common/registry_dict.cc
new file mode 100644
index 0000000000..6ab00f9a14
--- /dev/null
+++ b/components/policy/core/common/registry_dict.cc
@@ -0,0 +1,356 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/policy/core/common/registry_dict.h"
+
+#include <utility>
+
+#include "base/json/json_reader.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_byteorder.h"
+#include "base/values.h"
+#include "components/policy/core/common/schema.h"
+
+#if defined(OS_WIN)
+#include "base/win/registry.h"
+
+using base::win::RegistryKeyIterator;
+using base::win::RegistryValueIterator;
+#endif // #if defined(OS_WIN)
+
+namespace policy {
+
+namespace {
+
+// Validates that a key is numerical. Used for lists below.
+bool IsKeyNumerical(const std::string& key) {
+ int temp = 0;
+ return base::StringToInt(key, &temp);
+}
+
+} // namespace
+
+std::unique_ptr<base::Value> ConvertRegistryValue(const base::Value& value,
+ const Schema& schema) {
+ if (!schema.valid())
+ return value.CreateDeepCopy();
+
+ // If the type is good already, go with it.
+ if (value.type() == schema.type()) {
+ // Recurse for complex types.
+ const base::DictionaryValue* dict = nullptr;
+ const base::ListValue* list = nullptr;
+ if (value.GetAsDictionary(&dict)) {
+ std::unique_ptr<base::DictionaryValue> result(
+ new base::DictionaryValue());
+ for (base::DictionaryValue::Iterator entry(*dict); !entry.IsAtEnd();
+ entry.Advance()) {
+ std::unique_ptr<base::Value> converted = ConvertRegistryValue(
+ entry.value(), schema.GetProperty(entry.key()));
+ if (converted)
+ result->SetWithoutPathExpansion(entry.key(), std::move(converted));
+ }
+ return std::move(result);
+ } else if (value.GetAsList(&list)) {
+ std::unique_ptr<base::ListValue> result(new base::ListValue());
+ for (base::ListValue::const_iterator entry(list->begin());
+ entry != list->end(); ++entry) {
+ std::unique_ptr<base::Value> converted =
+ ConvertRegistryValue(*entry, schema.GetItems());
+ if (converted)
+ result->Append(std::move(converted));
+ }
+ return std::move(result);
+ }
+ return value.CreateDeepCopy();
+ }
+
+  // Otherwise, do some conversions to map Windows registry data types to
+  // JSON types.
+ std::string string_value;
+ int int_value = 0;
+ switch (schema.type()) {
+ case base::Value::Type::NONE: {
+ return std::make_unique<base::Value>();
+ }
+ case base::Value::Type::BOOLEAN: {
+ // Accept booleans encoded as either string or integer.
+ if (value.GetAsInteger(&int_value) ||
+ (value.GetAsString(&string_value) &&
+ base::StringToInt(string_value, &int_value))) {
+ return std::unique_ptr<base::Value>(new base::Value(int_value != 0));
+ }
+ break;
+ }
+ case base::Value::Type::INTEGER: {
+ // Integers may be string-encoded.
+ if (value.GetAsString(&string_value) &&
+ base::StringToInt(string_value, &int_value)) {
+ return std::unique_ptr<base::Value>(new base::Value(int_value));
+ }
+ break;
+ }
+ case base::Value::Type::DOUBLE: {
+ // Doubles may be string-encoded or integer-encoded.
+ double double_value = 0;
+ if (value.GetAsDouble(&double_value) ||
+ (value.GetAsString(&string_value) &&
+ base::StringToDouble(string_value, &double_value))) {
+ return std::unique_ptr<base::Value>(new base::Value(double_value));
+ }
+ break;
+ }
+ case base::Value::Type::LIST: {
+      // Lists are encoded as subkeys with numbered values in the registry
+      // (non-numerical keys are ignored).
+ const base::DictionaryValue* dict = nullptr;
+ if (value.GetAsDictionary(&dict)) {
+ std::unique_ptr<base::ListValue> result(new base::ListValue());
+ for (base::DictionaryValue::Iterator it(*dict); !it.IsAtEnd();
+ it.Advance()) {
+ if (!IsKeyNumerical(it.key()))
+ continue;
+ std::unique_ptr<base::Value> converted =
+ ConvertRegistryValue(it.value(), schema.GetItems());
+ if (converted)
+ result->Append(std::move(converted));
+ }
+ return std::move(result);
+ }
+ // Fall through in order to accept lists encoded as JSON strings.
+ FALLTHROUGH;
+ }
+ case base::Value::Type::DICTIONARY: {
+ // Dictionaries may be encoded as JSON strings.
+ if (value.GetAsString(&string_value)) {
+ std::unique_ptr<base::Value> result =
+ base::JSONReader::Read(string_value);
+ if (result && result->type() == schema.type())
+ return result;
+ }
+ break;
+ }
+ case base::Value::Type::STRING:
+ case base::Value::Type::BINARY:
+ // No conversion possible.
+ break;
+ }
+
+ LOG(WARNING) << "Failed to convert " << value.type() << " to "
+ << schema.type();
+ return nullptr;
+}
+
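+// A sketch of the conversions above, assuming |schema| was parsed from
+// { "type": "integer" }: the registry may hold the value as a string, and it
+// still converts to the same JSON integer.
+//
+//   std::unique_ptr<base::Value> v =
+//       ConvertRegistryValue(base::Value("42"), schema);
+//   // |v| now holds base::Value(42); an unconvertible input yields nullptr.
+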
+bool CaseInsensitiveStringCompare::operator()(const std::string& a,
+ const std::string& b) const {
+ return base::CompareCaseInsensitiveASCII(a, b) < 0;
+}
+
+RegistryDict::RegistryDict() {}
+
+RegistryDict::~RegistryDict() {
+ ClearKeys();
+ ClearValues();
+}
+
+RegistryDict* RegistryDict::GetKey(const std::string& name) {
+ KeyMap::iterator entry = keys_.find(name);
+ return entry != keys_.end() ? entry->second.get() : nullptr;
+}
+
+const RegistryDict* RegistryDict::GetKey(const std::string& name) const {
+ KeyMap::const_iterator entry = keys_.find(name);
+ return entry != keys_.end() ? entry->second.get() : nullptr;
+}
+
+void RegistryDict::SetKey(const std::string& name,
+ std::unique_ptr<RegistryDict> dict) {
+ if (!dict) {
+ RemoveKey(name);
+ return;
+ }
+
+ keys_[name] = std::move(dict);
+}
+
+std::unique_ptr<RegistryDict> RegistryDict::RemoveKey(const std::string& name) {
+ std::unique_ptr<RegistryDict> result;
+ KeyMap::iterator entry = keys_.find(name);
+ if (entry != keys_.end()) {
+ result = std::move(entry->second);
+ keys_.erase(entry);
+ }
+ return result;
+}
+
+void RegistryDict::ClearKeys() {
+ keys_.clear();
+}
+
+base::Value* RegistryDict::GetValue(const std::string& name) {
+ ValueMap::iterator entry = values_.find(name);
+ return entry != values_.end() ? entry->second.get() : nullptr;
+}
+
+const base::Value* RegistryDict::GetValue(const std::string& name) const {
+ ValueMap::const_iterator entry = values_.find(name);
+ return entry != values_.end() ? entry->second.get() : nullptr;
+}
+
+void RegistryDict::SetValue(const std::string& name,
+ std::unique_ptr<base::Value> dict) {
+ if (!dict) {
+ RemoveValue(name);
+ return;
+ }
+
+ values_[name] = std::move(dict);
+}
+
+std::unique_ptr<base::Value> RegistryDict::RemoveValue(
+ const std::string& name) {
+ std::unique_ptr<base::Value> result;
+ ValueMap::iterator entry = values_.find(name);
+ if (entry != values_.end()) {
+ result = std::move(entry->second);
+ values_.erase(entry);
+ }
+ return result;
+}
+
+void RegistryDict::ClearValues() {
+ values_.clear();
+}
+
+void RegistryDict::Merge(const RegistryDict& other) {
+ for (KeyMap::const_iterator entry(other.keys_.begin());
+ entry != other.keys_.end(); ++entry) {
+ std::unique_ptr<RegistryDict>& subdict = keys_[entry->first];
+ if (!subdict)
+ subdict = std::make_unique<RegistryDict>();
+ subdict->Merge(*entry->second);
+ }
+
+ for (ValueMap::const_iterator entry(other.values_.begin());
+ entry != other.values_.end(); ++entry) {
+ SetValue(entry->first, entry->second->CreateDeepCopy());
+ }
+}
+
+void RegistryDict::Swap(RegistryDict* other) {
+ keys_.swap(other->keys_);
+ values_.swap(other->values_);
+}
+
+#if defined(OS_WIN)
+void RegistryDict::ReadRegistry(HKEY hive, const base::string16& root) {
+ ClearKeys();
+ ClearValues();
+
+ // First, read all the values of the key.
+ for (RegistryValueIterator it(hive, root.c_str()); it.Valid(); ++it) {
+ const std::string name = base::UTF16ToUTF8(it.Name());
+ switch (it.Type()) {
+ case REG_SZ:
+ case REG_EXPAND_SZ:
+ SetValue(name, std::unique_ptr<base::Value>(
+ new base::Value(base::UTF16ToUTF8(it.Value()))));
+ continue;
+ case REG_DWORD_LITTLE_ENDIAN:
+ case REG_DWORD_BIG_ENDIAN:
+ if (it.ValueSize() == sizeof(DWORD)) {
+ DWORD dword_value = *(reinterpret_cast<const DWORD*>(it.Value()));
+ if (it.Type() == REG_DWORD_BIG_ENDIAN)
+ dword_value = base::NetToHost32(dword_value);
+ else
+ dword_value = base::ByteSwapToLE32(dword_value);
+ SetValue(name, std::unique_ptr<base::Value>(
+ new base::Value(static_cast<int>(dword_value))));
+ continue;
+ }
+ FALLTHROUGH;
+ case REG_NONE:
+ case REG_LINK:
+ case REG_MULTI_SZ:
+ case REG_RESOURCE_LIST:
+ case REG_FULL_RESOURCE_DESCRIPTOR:
+ case REG_RESOURCE_REQUIREMENTS_LIST:
+ case REG_QWORD_LITTLE_ENDIAN:
+ // Unsupported type, message gets logged below.
+ break;
+ }
+
+ LOG(WARNING) << "Failed to read hive " << hive << " at " << root << "\\"
+ << name << " type " << it.Type();
+ }
+
+ // Recurse for all subkeys.
+ for (RegistryKeyIterator it(hive, root.c_str()); it.Valid(); ++it) {
+ std::string name(base::UTF16ToUTF8(it.Name()));
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->ReadRegistry(hive, root + L"\\" + it.Name());
+ SetKey(name, std::move(subdict));
+ }
+}
+
+std::unique_ptr<base::Value> RegistryDict::ConvertToJSON(
+ const Schema& schema) const {
+ base::Value::Type type =
+ schema.valid() ? schema.type() : base::Value::Type::DICTIONARY;
+ switch (type) {
+ case base::Value::Type::DICTIONARY: {
+ std::unique_ptr<base::DictionaryValue> result(
+ new base::DictionaryValue());
+ for (RegistryDict::ValueMap::const_iterator entry(values_.begin());
+ entry != values_.end(); ++entry) {
+ Schema subschema =
+ schema.valid() ? schema.GetProperty(entry->first) : Schema();
+ std::unique_ptr<base::Value> converted =
+ ConvertRegistryValue(*entry->second, subschema);
+ if (converted)
+ result->SetWithoutPathExpansion(entry->first, std::move(converted));
+ }
+ for (RegistryDict::KeyMap::const_iterator entry(keys_.begin());
+ entry != keys_.end(); ++entry) {
+ Schema subschema =
+ schema.valid() ? schema.GetProperty(entry->first) : Schema();
+ std::unique_ptr<base::Value> converted =
+ entry->second->ConvertToJSON(subschema);
+ if (converted)
+ result->SetWithoutPathExpansion(entry->first, std::move(converted));
+ }
+ return std::move(result);
+ }
+ case base::Value::Type::LIST: {
+ std::unique_ptr<base::ListValue> result(new base::ListValue());
+ Schema item_schema = schema.valid() ? schema.GetItems() : Schema();
+ for (RegistryDict::KeyMap::const_iterator entry(keys_.begin());
+ entry != keys_.end(); ++entry) {
+ if (!IsKeyNumerical(entry->first))
+ continue;
+ std::unique_ptr<base::Value> converted =
+ entry->second->ConvertToJSON(item_schema);
+ if (converted)
+ result->Append(std::move(converted));
+ }
+ for (RegistryDict::ValueMap::const_iterator entry(values_.begin());
+ entry != values_.end(); ++entry) {
+ if (!IsKeyNumerical(entry->first))
+ continue;
+ std::unique_ptr<base::Value> converted =
+ ConvertRegistryValue(*entry->second, item_schema);
+ if (converted)
+ result->Append(std::move(converted));
+ }
+ return std::move(result);
+ }
+ default:
+ LOG(WARNING) << "Can't convert registry key to schema type " << type;
+ }
+
+ return nullptr;
+}
+#endif // #if defined(OS_WIN)
+} // namespace policy
diff --git a/components/policy/core/common/registry_dict.h b/components/policy/core/common/registry_dict.h
new file mode 100644
index 0000000000..871cd7d7d5
--- /dev/null
+++ b/components/policy/core/common/registry_dict.h
@@ -0,0 +1,103 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_CORE_COMMON_REGISTRY_DICT_H_
+#define COMPONENTS_POLICY_CORE_COMMON_REGISTRY_DICT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "components/policy/policy_export.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+class Value;
+}
+
+namespace policy {
+
+class Schema;
+
+// Converts a value (as read from the registry) to meet |schema|, converting
+// types as necessary. Returns NULL for values that can't be converted, and
+// callers omit those from the result.
+std::unique_ptr<base::Value> POLICY_EXPORT
+ConvertRegistryValue(const base::Value& value, const Schema& schema);
+
+// A case-insensitive string comparison functor.
+struct POLICY_EXPORT CaseInsensitiveStringCompare {
+ bool operator()(const std::string& a, const std::string& b) const;
+};
+
+// In-memory representation of a registry subtree. Using a
+// base::DictionaryValue directly seems tempting, but that doesn't handle the
+// registry's case-insensitive-but-case-preserving semantics properly.
+class POLICY_EXPORT RegistryDict {
+ public:
+ using KeyMap = std::map<std::string,
+ std::unique_ptr<RegistryDict>,
+ CaseInsensitiveStringCompare>;
+ using ValueMap = std::map<std::string,
+ std::unique_ptr<base::Value>,
+ CaseInsensitiveStringCompare>;
+
+ RegistryDict();
+ ~RegistryDict();
+
+ // Returns a pointer to an existing key, NULL if not present.
+ RegistryDict* GetKey(const std::string& name);
+ const RegistryDict* GetKey(const std::string& name) const;
+ // Sets a key. If |dict| is NULL, clears that key.
+ void SetKey(const std::string& name, std::unique_ptr<RegistryDict> dict);
+ // Removes a key. If the key doesn't exist, NULL is returned.
+ std::unique_ptr<RegistryDict> RemoveKey(const std::string& name);
+ // Clears all keys.
+ void ClearKeys();
+
+ // Returns a pointer to a value, NULL if not present.
+ base::Value* GetValue(const std::string& name);
+ const base::Value* GetValue(const std::string& name) const;
+ // Sets a value. If |value| is NULL, removes the value.
+ void SetValue(const std::string& name, std::unique_ptr<base::Value> value);
+ // Removes a value. If the value doesn't exist, NULL is returned.
+ std::unique_ptr<base::Value> RemoveValue(const std::string& name);
+ // Clears all values.
+ void ClearValues();
+
+ // Merge keys and values from |other|, giving precedence to |other|.
+ void Merge(const RegistryDict& other);
+
+ // Swap with |other|.
+ void Swap(RegistryDict* other);
+
+#if defined(OS_WIN)
+ // Read a Windows registry subtree into this registry dictionary object.
+ void ReadRegistry(HKEY hive, const base::string16& root);
+
+ // Converts the dictionary to base::Value representation. For key/value name
+ // collisions, the key wins. |schema| is used to determine the expected type
+ // for each policy.
+ // The returned object is either a base::DictionaryValue or a base::ListValue.
+  std::unique_ptr<base::Value> ConvertToJSON(const Schema& schema) const;
+#endif
+
+ const KeyMap& keys() const { return keys_; }
+ const ValueMap& values() const { return values_; }
+
+ private:
+ KeyMap keys_;
+ ValueMap values_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegistryDict);
+};
+
+} // namespace policy
+
+#endif // COMPONENTS_POLICY_CORE_COMMON_REGISTRY_DICT_H_
diff --git a/components/policy/core/common/registry_dict_unittest.cc b/components/policy/core/common/registry_dict_unittest.cc
new file mode 100644
index 0000000000..1a18bb6c62
--- /dev/null
+++ b/components/policy/core/common/registry_dict_unittest.cc
@@ -0,0 +1,304 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/policy/core/common/registry_dict.h"
+
+#include <string>
+#include <utility>
+
+#include "base/values.h"
+#include "components/policy/core/common/schema.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace policy {
+namespace {
+
+TEST(RegistryDictTest, SetAndGetValue) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ test_dict.SetValue("one", int_value.CreateDeepCopy());
+ EXPECT_EQ(1u, test_dict.values().size());
+ EXPECT_EQ(int_value, *test_dict.GetValue("one"));
+ EXPECT_FALSE(test_dict.GetValue("two"));
+
+ test_dict.SetValue("two", string_value.CreateDeepCopy());
+ EXPECT_EQ(2u, test_dict.values().size());
+ EXPECT_EQ(int_value, *test_dict.GetValue("one"));
+ EXPECT_EQ(string_value, *test_dict.GetValue("two"));
+
+ std::unique_ptr<base::Value> one(test_dict.RemoveValue("one"));
+ EXPECT_EQ(1u, test_dict.values().size());
+ EXPECT_EQ(int_value, *one);
+ EXPECT_FALSE(test_dict.GetValue("one"));
+ EXPECT_EQ(string_value, *test_dict.GetValue("two"));
+
+ test_dict.ClearValues();
+ EXPECT_FALSE(test_dict.GetValue("one"));
+ EXPECT_FALSE(test_dict.GetValue("two"));
+ EXPECT_TRUE(test_dict.values().empty());
+}
+
+TEST(RegistryDictTest, CaseInsensitiveButPreservingValueNames) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ test_dict.SetValue("One", int_value.CreateDeepCopy());
+ EXPECT_EQ(1u, test_dict.values().size());
+ EXPECT_EQ(int_value, *test_dict.GetValue("oNe"));
+
+ RegistryDict::ValueMap::const_iterator entry = test_dict.values().begin();
+ ASSERT_NE(entry, test_dict.values().end());
+ EXPECT_EQ("One", entry->first);
+
+ test_dict.SetValue("ONE", string_value.CreateDeepCopy());
+ EXPECT_EQ(1u, test_dict.values().size());
+ EXPECT_EQ(string_value, *test_dict.GetValue("one"));
+
+ std::unique_ptr<base::Value> removed_value(test_dict.RemoveValue("onE"));
+ EXPECT_EQ(string_value, *removed_value);
+ EXPECT_TRUE(test_dict.values().empty());
+}
+
+TEST(RegistryDictTest, SetAndGetKeys) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->SetValue("one", int_value.CreateDeepCopy());
+ test_dict.SetKey("two", std::move(subdict));
+ EXPECT_EQ(1u, test_dict.keys().size());
+ RegistryDict* actual_subdict = test_dict.GetKey("two");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(int_value, *actual_subdict->GetValue("one"));
+
+ subdict.reset(new RegistryDict());
+ subdict->SetValue("three", string_value.CreateDeepCopy());
+ test_dict.SetKey("four", std::move(subdict));
+ EXPECT_EQ(2u, test_dict.keys().size());
+ actual_subdict = test_dict.GetKey("two");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(int_value, *actual_subdict->GetValue("one"));
+ actual_subdict = test_dict.GetKey("four");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(string_value, *actual_subdict->GetValue("three"));
+
+ test_dict.ClearKeys();
+ EXPECT_FALSE(test_dict.GetKey("one"));
+ EXPECT_FALSE(test_dict.GetKey("three"));
+ EXPECT_TRUE(test_dict.keys().empty());
+}
+
+TEST(RegistryDictTest, CaseInsensitiveButPreservingKeyNames) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+
+ test_dict.SetKey("One", std::make_unique<RegistryDict>());
+ EXPECT_EQ(1u, test_dict.keys().size());
+ RegistryDict* actual_subdict = test_dict.GetKey("One");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_TRUE(actual_subdict->values().empty());
+
+ RegistryDict::KeyMap::const_iterator entry = test_dict.keys().begin();
+ ASSERT_NE(entry, test_dict.keys().end());
+ EXPECT_EQ("One", entry->first);
+
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->SetValue("two", int_value.CreateDeepCopy());
+ test_dict.SetKey("ONE", std::move(subdict));
+ EXPECT_EQ(1u, test_dict.keys().size());
+ actual_subdict = test_dict.GetKey("One");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(int_value, *actual_subdict->GetValue("two"));
+
+ std::unique_ptr<RegistryDict> removed_key(test_dict.RemoveKey("one"));
+ ASSERT_TRUE(removed_key);
+ EXPECT_EQ(int_value, *removed_key->GetValue("two"));
+ EXPECT_TRUE(test_dict.keys().empty());
+}
+
+TEST(RegistryDictTest, Merge) {
+ RegistryDict dict_a;
+ RegistryDict dict_b;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ dict_a.SetValue("one", int_value.CreateDeepCopy());
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->SetValue("two", string_value.CreateDeepCopy());
+ dict_a.SetKey("three", std::move(subdict));
+
+ dict_b.SetValue("four", string_value.CreateDeepCopy());
+ subdict.reset(new RegistryDict());
+ subdict->SetValue("two", int_value.CreateDeepCopy());
+ dict_b.SetKey("three", std::move(subdict));
+ subdict.reset(new RegistryDict());
+ subdict->SetValue("five", int_value.CreateDeepCopy());
+ dict_b.SetKey("six", std::move(subdict));
+
+ dict_a.Merge(dict_b);
+
+ EXPECT_EQ(int_value, *dict_a.GetValue("one"));
+ EXPECT_EQ(string_value, *dict_b.GetValue("four"));
+ RegistryDict* actual_subdict = dict_a.GetKey("three");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(int_value, *actual_subdict->GetValue("two"));
+ actual_subdict = dict_a.GetKey("six");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(int_value, *actual_subdict->GetValue("five"));
+}
+
+TEST(RegistryDictTest, Swap) {
+ RegistryDict dict_a;
+ RegistryDict dict_b;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ dict_a.SetValue("one", int_value.CreateDeepCopy());
+ dict_a.SetKey("two", std::make_unique<RegistryDict>());
+ dict_b.SetValue("three", string_value.CreateDeepCopy());
+
+ dict_a.Swap(&dict_b);
+
+ EXPECT_EQ(int_value, *dict_b.GetValue("one"));
+ EXPECT_TRUE(dict_b.GetKey("two"));
+ EXPECT_FALSE(dict_b.GetValue("two"));
+
+ EXPECT_EQ(string_value, *dict_a.GetValue("three"));
+ EXPECT_FALSE(dict_a.GetValue("one"));
+ EXPECT_FALSE(dict_a.GetKey("two"));
+}
+
+#if defined(OS_WIN)
+TEST(RegistryDictTest, ConvertToJSON) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+ base::Value string_zero("0");
+ base::Value string_dict("{ \"key\": [ \"value\" ] }");
+
+ test_dict.SetValue("one", int_value.CreateDeepCopy());
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->SetValue("two", string_value.CreateDeepCopy());
+ test_dict.SetKey("three", std::move(subdict));
+ std::unique_ptr<RegistryDict> list(new RegistryDict());
+ list->SetValue("1", string_value.CreateDeepCopy());
+ test_dict.SetKey("dict-to-list", std::move(list));
+ test_dict.SetValue("int-to-bool", int_value.CreateDeepCopy());
+ test_dict.SetValue("int-to-double", int_value.CreateDeepCopy());
+ test_dict.SetValue("string-to-bool", string_zero.CreateDeepCopy());
+ test_dict.SetValue("string-to-double", string_zero.CreateDeepCopy());
+ test_dict.SetValue("string-to-int", string_zero.CreateDeepCopy());
+ test_dict.SetValue("string-to-dict", string_dict.CreateDeepCopy());
+
+ std::string error;
+ Schema schema = Schema::Parse(
+ "{"
+ " \"type\": \"object\","
+ " \"properties\": {"
+ " \"dict-to-list\": {"
+ " \"type\": \"array\","
+ " \"items\": { \"type\": \"string\" }"
+ " },"
+ " \"int-to-bool\": { \"type\": \"boolean\" },"
+ " \"int-to-double\": { \"type\": \"number\" },"
+ " \"string-to-bool\": { \"type\": \"boolean\" },"
+ " \"string-to-double\": { \"type\": \"number\" },"
+ " \"string-to-int\": { \"type\": \"integer\" },"
+ " \"string-to-dict\": { \"type\": \"object\" }"
+ " }"
+ "}",
+ &error);
+ ASSERT_TRUE(schema.valid()) << error;
+
+ std::unique_ptr<base::Value> actual(test_dict.ConvertToJSON(schema));
+
+ base::DictionaryValue expected;
+ expected.SetKey("one", int_value.Clone());
+ auto expected_subdict = std::make_unique<base::DictionaryValue>();
+ expected_subdict->SetKey("two", string_value.Clone());
+ expected.Set("three", std::move(expected_subdict));
+ auto expected_list = std::make_unique<base::ListValue>();
+ expected_list->Append(std::make_unique<base::Value>(string_value.Clone()));
+ expected.Set("dict-to-list", std::move(expected_list));
+ expected.SetBoolean("int-to-bool", true);
+ expected.SetDouble("int-to-double", 42.0);
+ expected.SetBoolean("string-to-bool", false);
+ expected.SetDouble("string-to-double", 0.0);
+ expected.SetInteger("string-to-int", static_cast<int>(0));
+ expected_list = std::make_unique<base::ListValue>();
+ expected_list->Append(std::make_unique<base::Value>("value"));
+ expected_subdict = std::make_unique<base::DictionaryValue>();
+ expected_subdict->Set("key", std::move(expected_list));
+ expected.Set("string-to-dict", std::move(expected_subdict));
+
+ EXPECT_EQ(expected, *actual);
+}
+
+TEST(RegistryDictTest, NonSequentialConvertToJSON) {
+ RegistryDict test_dict;
+
+ std::unique_ptr<RegistryDict> list(new RegistryDict());
+ list->SetValue("1", base::Value("1").CreateDeepCopy());
+ list->SetValue("2", base::Value("2").CreateDeepCopy());
+ list->SetValue("THREE", base::Value("3").CreateDeepCopy());
+ list->SetValue("4", base::Value("4").CreateDeepCopy());
+ test_dict.SetKey("dict-to-list", std::move(list));
+
+ std::string error;
+ Schema schema = Schema::Parse(
+ "{"
+ " \"type\": \"object\","
+ " \"properties\": {"
+ " \"dict-to-list\": {"
+ " \"type\": \"array\","
+ " \"items\": { \"type\": \"string\" }"
+ " }"
+ " }"
+ "}",
+ &error);
+ ASSERT_TRUE(schema.valid()) << error;
+
+ std::unique_ptr<base::Value> actual(test_dict.ConvertToJSON(schema));
+
+ base::DictionaryValue expected;
+ std::unique_ptr<base::ListValue> expected_list(new base::ListValue());
+ expected_list->Append(base::Value("1").CreateDeepCopy());
+ expected_list->Append(base::Value("2").CreateDeepCopy());
+ expected_list->Append(base::Value("4").CreateDeepCopy());
+ expected.Set("dict-to-list", std::move(expected_list));
+
+ EXPECT_EQ(expected, *actual);
+}
+#endif
+
+TEST(RegistryDictTest, KeyValueNameClashes) {
+ RegistryDict test_dict;
+
+ base::Value int_value(42);
+ base::Value string_value("fortytwo");
+
+ test_dict.SetValue("one", int_value.CreateDeepCopy());
+ std::unique_ptr<RegistryDict> subdict(new RegistryDict());
+ subdict->SetValue("two", string_value.CreateDeepCopy());
+ test_dict.SetKey("one", std::move(subdict));
+
+ EXPECT_EQ(int_value, *test_dict.GetValue("one"));
+ RegistryDict* actual_subdict = test_dict.GetKey("one");
+ ASSERT_TRUE(actual_subdict);
+ EXPECT_EQ(string_value, *actual_subdict->GetValue("two"));
+}
+
+} // namespace
+} // namespace policy
diff --git a/components/policy/core/common/schema.cc b/components/policy/core/common/schema.cc
new file mode 100644
index 0000000000..e212deff37
--- /dev/null
+++ b/components/policy/core/common/schema.cc
@@ -0,0 +1,1206 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/policy/core/common/schema.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <climits>
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/containers/flat_set.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "components/json_schema/json_schema_constants.h"
+#include "components/json_schema/json_schema_validator.h"
+#include "components/policy/core/common/schema_internal.h"
+#include "third_party/re2/src/re2/re2.h"
+
+namespace schema = json_schema_constants;
+
+namespace policy {
+
+using internal::PropertiesNode;
+using internal::PropertyNode;
+using internal::RestrictionNode;
+using internal::SchemaData;
+using internal::SchemaNode;
+
+namespace {
+
+// Maps schema "id" attributes to the corresponding SchemaNode index.
+typedef std::map<std::string, int> IdMap;
+
+// List of pairs of references to be assigned later. The string is the "id"
+// whose corresponding index should be stored in the pointer, once all the IDs
+// are available.
+typedef std::vector<std::pair<std::string, int*> > ReferenceList;
+
+// Sizes for the storage arrays. These are calculated in advance so that the
+// arrays don't have to be resized during parsing, which would invalidate
+// pointers into their contents (i.e. the strings' c_str() pointers and the
+// addresses of indices for "$ref" attributes).
+struct StorageSizes {
+ StorageSizes()
+ : strings(0),
+ schema_nodes(0),
+ property_nodes(0),
+ properties_nodes(0),
+ restriction_nodes(0),
+ required_properties(0),
+ int_enums(0),
+ string_enums(0) {}
+ size_t strings;
+ size_t schema_nodes;
+ size_t property_nodes;
+ size_t properties_nodes;
+ size_t restriction_nodes;
+ size_t required_properties;
+ size_t int_enums;
+ size_t string_enums;
+};
+
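+// Why the sizes matter, in brief: parsing stores raw pointers into these
+// arrays (e.g. the c_str() of elements of |strings_|), so the vectors must
+// never reallocate once parsing has started. A sketch of the failure mode
+// that reserving with the precomputed totals rules out:
+//
+//   std::vector<std::string> strings;
+//   strings.push_back("key");
+//   const char* key = strings.back().c_str();
+//   strings.push_back("more");  // May reallocate; |key| can now dangle.
+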
+// An invalid index, indicating that a node is not present; similar to a NULL
+// pointer.
+const int kInvalid = -1;
+
+bool SchemaTypeToValueType(const std::string& type_string,
+ base::Value::Type* type) {
+ // Note: "any" is not an accepted type.
+ static const struct {
+ const char* schema_type;
+ base::Value::Type value_type;
+ } kSchemaToValueTypeMap[] = {
+ { schema::kArray, base::Value::Type::LIST },
+ { schema::kBoolean, base::Value::Type::BOOLEAN },
+ { schema::kInteger, base::Value::Type::INTEGER },
+ { schema::kNull, base::Value::Type::NONE },
+ { schema::kNumber, base::Value::Type::DOUBLE },
+ { schema::kObject, base::Value::Type::DICTIONARY },
+ { schema::kString, base::Value::Type::STRING },
+ };
+ for (size_t i = 0; i < arraysize(kSchemaToValueTypeMap); ++i) {
+ if (kSchemaToValueTypeMap[i].schema_type == type_string) {
+ *type = kSchemaToValueTypeMap[i].value_type;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool StrategyAllowInvalidOnTopLevel(SchemaOnErrorStrategy strategy) {
+ return strategy == SCHEMA_ALLOW_INVALID ||
+ strategy == SCHEMA_ALLOW_INVALID_TOPLEVEL ||
+ strategy == SCHEMA_ALLOW_INVALID_TOPLEVEL_AND_ALLOW_UNKNOWN;
+}
+
+bool StrategyAllowUnknownOnTopLevel(SchemaOnErrorStrategy strategy) {
+ return strategy != SCHEMA_STRICT;
+}
+
+SchemaOnErrorStrategy StrategyForNextLevel(SchemaOnErrorStrategy strategy) {
+ static SchemaOnErrorStrategy next_level_strategy[] = {
+ SCHEMA_STRICT, // SCHEMA_STRICT
+ SCHEMA_STRICT, // SCHEMA_ALLOW_UNKNOWN_TOPLEVEL
+ SCHEMA_ALLOW_UNKNOWN, // SCHEMA_ALLOW_UNKNOWN
+ SCHEMA_STRICT, // SCHEMA_ALLOW_INVALID_TOPLEVEL
+ SCHEMA_ALLOW_UNKNOWN, // SCHEMA_ALLOW_INVALID_TOPLEVEL_AND_ALLOW_UNKNOWN
+ SCHEMA_ALLOW_INVALID, // SCHEMA_ALLOW_INVALID
+ };
+ return next_level_strategy[static_cast<int>(strategy)];
+}
+
+void SchemaErrorFound(std::string* error_path,
+ std::string* error,
+ const std::string& msg) {
+ if (error_path)
+ *error_path = "";
+ *error = msg;
+}
+
+void AddListIndexPrefixToPath(int index, std::string* path) {
+ if (path) {
+ if (path->empty())
+ *path = base::StringPrintf("items[%d]", index);
+ else
+ *path = base::StringPrintf("items[%d].", index) + *path;
+ }
+}
+
+void AddDictKeyPrefixToPath(const std::string& key, std::string* path) {
+ if (path) {
+ if (path->empty())
+ *path = key;
+ else
+ *path = key + "." + *path;
+ }
+}
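+
+// Taken together, the two helpers above build dotted error paths from the
+// inside out; e.g. starting from an empty |path|:
+//
+//   AddDictKeyPrefixToPath("foo", &path);   // path == "foo"
+//   AddListIndexPrefixToPath(2, &path);     // path == "items[2].foo"
+//   AddDictKeyPrefixToPath("bar", &path);   // path == "bar.items[2].foo"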
+
+} // namespace
+
+// Contains the internal data representation of a Schema. This can either wrap
+// a SchemaData owned elsewhere (currently used to wrap the Chrome schema, which
+// is generated at compile time), or it can own its own SchemaData.
+class Schema::InternalStorage
+ : public base::RefCountedThreadSafe<InternalStorage> {
+ public:
+ static scoped_refptr<const InternalStorage> Wrap(const SchemaData* data);
+
+ static scoped_refptr<const InternalStorage> ParseSchema(
+ const base::DictionaryValue& schema,
+ std::string* error);
+
+ const SchemaData* data() const { return &schema_data_; }
+
+ const SchemaNode* root_node() const {
+ return schema(0);
+ }
+
+ // Returns the validation_schema root node if one was generated, or nullptr.
+ const SchemaNode* validation_schema_root_node() const {
+ return schema_data_.validation_schema_root_index >= 0
+ ? schema(schema_data_.validation_schema_root_index)
+ : nullptr;
+ }
+
+ const SchemaNode* schema(int index) const {
+ return schema_data_.schema_nodes + index;
+ }
+
+ const PropertiesNode* properties(int index) const {
+ return schema_data_.properties_nodes + index;
+ }
+
+ const PropertyNode* property(int index) const {
+ return schema_data_.property_nodes + index;
+ }
+
+ const RestrictionNode* restriction(int index) const {
+ return schema_data_.restriction_nodes + index;
+ }
+
+ const char* const* required_property(int index) const {
+ return schema_data_.required_properties + index;
+ }
+
+ const int* int_enums(int index) const {
+ return schema_data_.int_enums + index;
+ }
+
+ const char* const* string_enums(int index) const {
+ return schema_data_.string_enums + index;
+ }
+
+  // Compiles the regular expression |pattern|. The result is cached and
+  // returned directly on subsequent calls.
+ re2::RE2* CompileRegex(const std::string& pattern) const;
+
+ private:
+ friend class base::RefCountedThreadSafe<InternalStorage>;
+
+ InternalStorage();
+ ~InternalStorage();
+
+ // Determines the expected |sizes| of the storage for the representation
+ // of |schema|.
+ static void DetermineStorageSizes(const base::DictionaryValue& schema,
+ StorageSizes* sizes);
+
+ // Parses the JSON schema in |schema|.
+ //
+ // If |schema| has a "$ref" attribute then a pending reference is appended
+ // to the |reference_list|, and nothing else is done.
+ //
+ // Otherwise, |index| gets assigned the index of the corresponding SchemaNode
+ // in |schema_nodes_|. If the |schema| contains an "id" then that ID is mapped
+ // to the |index| in the |id_map|.
+ //
+ // If |schema| is invalid then |error| gets the error reason and false is
+ // returned. Otherwise returns true.
+ bool Parse(const base::DictionaryValue& schema,
+ int* index,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error);
+
+ // Helper for Parse() that gets an already assigned |schema_node| instead of
+ // an |index| pointer.
+ bool ParseDictionary(const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error);
+
+ // Helper for Parse() that gets an already assigned |schema_node| instead of
+ // an |index| pointer.
+ bool ParseList(const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error);
+
+ bool ParseEnum(const base::DictionaryValue& schema,
+ base::Value::Type type,
+ SchemaNode* schema_node,
+ std::string* error);
+
+ bool ParseRangedInt(const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ std::string* error);
+
+ bool ParseStringPattern(const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ std::string* error);
+
+ // Assigns the IDs in |id_map| to the pending references in the
+ // |reference_list|. If an ID is missing then |error| is set and false is
+ // returned; otherwise returns true.
+ static bool ResolveReferences(const IdMap& id_map,
+ const ReferenceList& reference_list,
+ std::string* error);
+
+  // Cache for CompileRegex(). Memoizes the result of every call to
+  // CompileRegex() so that repeated calls return the cached regex directly.
+ mutable std::map<std::string, std::unique_ptr<re2::RE2>> regex_cache_;
+
+ SchemaData schema_data_;
+ std::vector<std::string> strings_;
+ std::vector<SchemaNode> schema_nodes_;
+ std::vector<PropertyNode> property_nodes_;
+ std::vector<PropertiesNode> properties_nodes_;
+ std::vector<RestrictionNode> restriction_nodes_;
+ std::vector<const char*> required_properties_;
+ std::vector<int> int_enums_;
+ std::vector<const char*> string_enums_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalStorage);
+};
+
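+// For reference, the "id"/"$ref" mechanism that Parse() and
+// ResolveReferences() implement lets one subtree of a schema reuse another,
+// including recursively; a sketch of such a schema:
+//
+//   {
+//     "type": "object",
+//     "properties": {
+//       "node": {
+//         "type": "object", "id": "tree",
+//         "properties": { "child": { "$ref": "tree" } }
+//       }
+//     }
+//   }
+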
+Schema::InternalStorage::InternalStorage() {
+}
+
+Schema::InternalStorage::~InternalStorage() {
+}
+
+// static
+scoped_refptr<const Schema::InternalStorage> Schema::InternalStorage::Wrap(
+ const SchemaData* data) {
+ InternalStorage* storage = new InternalStorage();
+ storage->schema_data_.schema_nodes = data->schema_nodes;
+ storage->schema_data_.property_nodes = data->property_nodes;
+ storage->schema_data_.properties_nodes = data->properties_nodes;
+ storage->schema_data_.restriction_nodes = data->restriction_nodes;
+ storage->schema_data_.required_properties = data->required_properties;
+ storage->schema_data_.int_enums = data->int_enums;
+ storage->schema_data_.string_enums = data->string_enums;
+ storage->schema_data_.validation_schema_root_index =
+ data->validation_schema_root_index;
+ return storage;
+}
+
+// static
+scoped_refptr<const Schema::InternalStorage>
+Schema::InternalStorage::ParseSchema(const base::DictionaryValue& schema,
+ std::string* error) {
+ // Determine the sizes of the storage arrays and reserve the capacity before
+ // starting to append nodes and strings. This is important to prevent the
+ // arrays from being reallocated, which would invalidate the c_str() pointers
+ // and the addresses of indices to fix.
+ StorageSizes sizes;
+ DetermineStorageSizes(schema, &sizes);
+
+ scoped_refptr<InternalStorage> storage = new InternalStorage();
+ storage->strings_.reserve(sizes.strings);
+ storage->schema_nodes_.reserve(sizes.schema_nodes);
+ storage->property_nodes_.reserve(sizes.property_nodes);
+ storage->properties_nodes_.reserve(sizes.properties_nodes);
+ storage->restriction_nodes_.reserve(sizes.restriction_nodes);
+ storage->required_properties_.reserve(sizes.required_properties);
+ storage->int_enums_.reserve(sizes.int_enums);
+ storage->string_enums_.reserve(sizes.string_enums);
+
+ int root_index = kInvalid;
+ IdMap id_map;
+ ReferenceList reference_list;
+ if (!storage->Parse(schema, &root_index, &id_map, &reference_list, error))
+ return nullptr;
+
+ if (root_index == kInvalid) {
+ *error = "The main schema can't have a $ref";
+ return nullptr;
+ }
+
+  // None of this should ever happen without having been detected earlier. But
+  // if it does happen, it would lead to corrupted memory; drop everything in
+  // that case.
+ if (root_index != 0 || sizes.strings != storage->strings_.size() ||
+ sizes.schema_nodes != storage->schema_nodes_.size() ||
+ sizes.property_nodes != storage->property_nodes_.size() ||
+ sizes.properties_nodes != storage->properties_nodes_.size() ||
+ sizes.restriction_nodes != storage->restriction_nodes_.size() ||
+ sizes.required_properties != storage->required_properties_.size() ||
+ sizes.int_enums != storage->int_enums_.size() ||
+ sizes.string_enums != storage->string_enums_.size()) {
+ *error = "Failed to parse the schema due to a Chrome bug. Please file a "
+ "new issue at http://crbug.com";
+ return nullptr;
+ }
+
+ if (!ResolveReferences(id_map, reference_list, error))
+ return nullptr;
+
+ SchemaData* data = &storage->schema_data_;
+ data->schema_nodes = storage->schema_nodes_.data();
+ data->property_nodes = storage->property_nodes_.data();
+ data->properties_nodes = storage->properties_nodes_.data();
+ data->restriction_nodes = storage->restriction_nodes_.data();
+ data->required_properties = storage->required_properties_.data();
+ data->int_enums = storage->int_enums_.data();
+ data->string_enums = storage->string_enums_.data();
+ data->validation_schema_root_index = -1;
+ return storage;
+}
+
+re2::RE2* Schema::InternalStorage::CompileRegex(
+ const std::string& pattern) const {
+ auto it = regex_cache_.find(pattern);
+ if (it == regex_cache_.end()) {
+ std::unique_ptr<re2::RE2> compiled(new re2::RE2(pattern));
+ re2::RE2* compiled_ptr = compiled.get();
+ regex_cache_.insert(std::make_pair(pattern, std::move(compiled)));
+ return compiled_ptr;
+ }
+ return it->second.get();
+}
+
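+// Note that failed compilations are cached as well, so callers must still
+// check ok() on the returned regex; a sketch of the calling pattern used in
+// this file:
+//
+//   re2::RE2* regex = CompileRegex(pattern);
+//   if (!regex->ok()) {
+//     *error = regex->error();  // Same broken instance on every later call.
+//     return false;
+//   }
+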
+// static
+void Schema::InternalStorage::DetermineStorageSizes(
+ const base::DictionaryValue& schema,
+ StorageSizes* sizes) {
+ std::string ref_string;
+ if (schema.GetString(schema::kRef, &ref_string)) {
+ // Schemas with a "$ref" attribute don't take additional storage.
+ return;
+ }
+
+ std::string type_string;
+ base::Value::Type type = base::Value::Type::NONE;
+ if (!schema.GetString(schema::kType, &type_string) ||
+ !SchemaTypeToValueType(type_string, &type)) {
+ // This schema is invalid.
+ return;
+ }
+
+ sizes->schema_nodes++;
+
+ if (type == base::Value::Type::LIST) {
+ const base::DictionaryValue* items = nullptr;
+ if (schema.GetDictionary(schema::kItems, &items))
+ DetermineStorageSizes(*items, sizes);
+ } else if (type == base::Value::Type::DICTIONARY) {
+ sizes->properties_nodes++;
+
+ const base::DictionaryValue* dict = nullptr;
+ if (schema.GetDictionary(schema::kAdditionalProperties, &dict))
+ DetermineStorageSizes(*dict, sizes);
+
+ const base::DictionaryValue* properties = nullptr;
+ if (schema.GetDictionary(schema::kProperties, &properties)) {
+ for (base::DictionaryValue::Iterator it(*properties);
+ !it.IsAtEnd(); it.Advance()) {
+ // This should have been verified by the JSONSchemaValidator.
+ CHECK(it.value().GetAsDictionary(&dict));
+ DetermineStorageSizes(*dict, sizes);
+ sizes->strings++;
+ sizes->property_nodes++;
+ }
+ }
+
+ const base::DictionaryValue* pattern_properties = nullptr;
+ if (schema.GetDictionary(schema::kPatternProperties, &pattern_properties)) {
+ for (base::DictionaryValue::Iterator it(*pattern_properties);
+ !it.IsAtEnd(); it.Advance()) {
+ CHECK(it.value().GetAsDictionary(&dict));
+ DetermineStorageSizes(*dict, sizes);
+ sizes->strings++;
+ sizes->property_nodes++;
+ }
+ }
+
+ const base::Value* required_properties = schema.FindKey(schema::kRequired);
+ if (required_properties) {
+ // This should have been verified by the JSONSchemaValidator.
+ CHECK(required_properties->is_list());
+ sizes->strings += required_properties->GetList().size();
+ sizes->required_properties += required_properties->GetList().size();
+ }
+ } else if (schema.HasKey(schema::kEnum)) {
+ const base::ListValue* possible_values = nullptr;
+ if (schema.GetList(schema::kEnum, &possible_values)) {
+ if (type == base::Value::Type::INTEGER) {
+ sizes->int_enums += possible_values->GetSize();
+ } else if (type == base::Value::Type::STRING) {
+ sizes->string_enums += possible_values->GetSize();
+ sizes->strings += possible_values->GetSize();
+ }
+ sizes->restriction_nodes++;
+ }
+ } else if (type == base::Value::Type::INTEGER) {
+ if (schema.HasKey(schema::kMinimum) || schema.HasKey(schema::kMaximum))
+ sizes->restriction_nodes++;
+ } else if (type == base::Value::Type::STRING) {
+ if (schema.HasKey(schema::kPattern)) {
+ sizes->strings++;
+ sizes->string_enums++;
+ sizes->restriction_nodes++;
+ }
+ }
+}
+
+bool Schema::InternalStorage::Parse(const base::DictionaryValue& schema,
+ int* index,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error) {
+ std::string ref_string;
+ if (schema.GetString(schema::kRef, &ref_string)) {
+ std::string id_string;
+ if (schema.GetString(schema::kId, &id_string)) {
+ *error = "Schemas with a $ref can't have an id";
+ return false;
+ }
+ reference_list->push_back(std::make_pair(ref_string, index));
+ return true;
+ }
+
+ std::string type_string;
+ if (!schema.GetString(schema::kType, &type_string)) {
+ *error = "The schema type must be declared.";
+ return false;
+ }
+
+ base::Value::Type type = base::Value::Type::NONE;
+ if (!SchemaTypeToValueType(type_string, &type)) {
+ *error = "Type not supported: " + type_string;
+ return false;
+ }
+
+ *index = static_cast<int>(schema_nodes_.size());
+ schema_nodes_.push_back(SchemaNode());
+ SchemaNode* schema_node = &schema_nodes_.back();
+ schema_node->type = type;
+ schema_node->extra = kInvalid;
+
+ if (type == base::Value::Type::DICTIONARY) {
+ if (!ParseDictionary(schema, schema_node, id_map, reference_list, error))
+ return false;
+ } else if (type == base::Value::Type::LIST) {
+ if (!ParseList(schema, schema_node, id_map, reference_list, error))
+ return false;
+ } else if (schema.HasKey(schema::kEnum)) {
+ if (!ParseEnum(schema, type, schema_node, error))
+ return false;
+ } else if (schema.HasKey(schema::kPattern)) {
+ if (!ParseStringPattern(schema, schema_node, error))
+ return false;
+ } else if (schema.HasKey(schema::kMinimum) ||
+ schema.HasKey(schema::kMaximum)) {
+ if (type != base::Value::Type::INTEGER) {
+ *error = "Only integers can have minimum and maximum";
+ return false;
+ }
+ if (!ParseRangedInt(schema, schema_node, error))
+ return false;
+ }
+ std::string id_string;
+ if (schema.GetString(schema::kId, &id_string)) {
+ if (base::ContainsKey(*id_map, id_string)) {
+ *error = "Duplicated id: " + id_string;
+ return false;
+ }
+ (*id_map)[id_string] = *index;
+ }
+
+ return true;
+}
+
+bool Schema::InternalStorage::ParseDictionary(
+ const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error) {
+ int extra = static_cast<int>(properties_nodes_.size());
+ properties_nodes_.push_back(PropertiesNode());
+ properties_nodes_[extra].additional = kInvalid;
+ schema_node->extra = extra;
+
+ const base::DictionaryValue* dict = nullptr;
+ if (schema.GetDictionary(schema::kAdditionalProperties, &dict)) {
+ if (!Parse(*dict, &properties_nodes_[extra].additional,
+ id_map, reference_list, error)) {
+ return false;
+ }
+ }
+
+ properties_nodes_[extra].begin = static_cast<int>(property_nodes_.size());
+
+ const base::DictionaryValue* properties = nullptr;
+ if (schema.GetDictionary(schema::kProperties, &properties)) {
+    // This resize (and the one below for pattern properties) reserves nodes
+    // for all of the |properties| and makes sure they are contiguous.
+    // Recursive calls to Parse() will append after these elements.
+ property_nodes_.resize(property_nodes_.size() + properties->size());
+ }
+
+ properties_nodes_[extra].end = static_cast<int>(property_nodes_.size());
+
+ const base::DictionaryValue* pattern_properties = nullptr;
+ if (schema.GetDictionary(schema::kPatternProperties, &pattern_properties))
+ property_nodes_.resize(property_nodes_.size() + pattern_properties->size());
+
+ properties_nodes_[extra].pattern_end =
+ static_cast<int>(property_nodes_.size());
+
+ if (properties != nullptr) {
+ int base_index = properties_nodes_[extra].begin;
+ int index = base_index;
+
+ for (base::DictionaryValue::Iterator it(*properties);
+ !it.IsAtEnd(); it.Advance(), ++index) {
+ // This should have been verified by the JSONSchemaValidator.
+ CHECK(it.value().GetAsDictionary(&dict));
+ strings_.push_back(it.key());
+ property_nodes_[index].key = strings_.back().c_str();
+ if (!Parse(*dict, &property_nodes_[index].schema,
+ id_map, reference_list, error)) {
+ return false;
+ }
+ }
+ CHECK_EQ(static_cast<int>(properties->size()), index - base_index);
+ }
+
+ if (pattern_properties != nullptr) {
+ int base_index = properties_nodes_[extra].end;
+ int index = base_index;
+
+ for (base::DictionaryValue::Iterator it(*pattern_properties);
+ !it.IsAtEnd(); it.Advance(), ++index) {
+ CHECK(it.value().GetAsDictionary(&dict));
+ re2::RE2* compiled_regex = CompileRegex(it.key());
+ if (!compiled_regex->ok()) {
+ *error =
+ "/" + it.key() + "/ is a invalid regex: " + compiled_regex->error();
+ return false;
+ }
+ strings_.push_back(it.key());
+ property_nodes_[index].key = strings_.back().c_str();
+ if (!Parse(*dict, &property_nodes_[index].schema,
+ id_map, reference_list, error)) {
+ return false;
+ }
+ }
+ CHECK_EQ(static_cast<int>(pattern_properties->size()), index - base_index);
+ }
+
+ properties_nodes_[extra].required_begin = required_properties_.size();
+ const base::Value* required_properties = schema.FindKey(schema::kRequired);
+ if (required_properties) {
+ for (const base::Value& val : required_properties->GetList()) {
+ strings_.push_back(val.GetString());
+ required_properties_.push_back(strings_.back().c_str());
+ }
+ }
+ properties_nodes_[extra].required_end = required_properties_.size();
+
+ if (properties_nodes_[extra].begin == properties_nodes_[extra].pattern_end) {
+ properties_nodes_[extra].begin = kInvalid;
+ properties_nodes_[extra].end = kInvalid;
+ properties_nodes_[extra].pattern_end = kInvalid;
+ properties_nodes_[extra].required_begin = kInvalid;
+ properties_nodes_[extra].required_end = kInvalid;
+ }
+
+ return true;
+}
+
+bool Schema::InternalStorage::ParseList(const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ IdMap* id_map,
+ ReferenceList* reference_list,
+ std::string* error) {
+ const base::DictionaryValue* dict = nullptr;
+ if (!schema.GetDictionary(schema::kItems, &dict)) {
+ *error = "Arrays must declare a single schema for their items.";
+ return false;
+ }
+ return Parse(*dict, &schema_node->extra, id_map, reference_list, error);
+}
+
+bool Schema::InternalStorage::ParseEnum(const base::DictionaryValue& schema,
+ base::Value::Type type,
+ SchemaNode* schema_node,
+ std::string* error) {
+ const base::ListValue* possible_values = nullptr;
+ if (!schema.GetList(schema::kEnum, &possible_values)) {
+ *error = "Enum attribute must be a list value";
+ return false;
+ }
+ if (possible_values->empty()) {
+ *error = "Enum attribute must be non-empty";
+ return false;
+ }
+ int offset_begin;
+ int offset_end;
+ if (type == base::Value::Type::INTEGER) {
+ offset_begin = static_cast<int>(int_enums_.size());
+ int value;
+ for (base::ListValue::const_iterator it = possible_values->begin();
+ it != possible_values->end(); ++it) {
+ if (!it->GetAsInteger(&value)) {
+ *error = "Invalid enumeration member type";
+ return false;
+ }
+ int_enums_.push_back(value);
+ }
+ offset_end = static_cast<int>(int_enums_.size());
+ } else if (type == base::Value::Type::STRING) {
+ offset_begin = static_cast<int>(string_enums_.size());
+ std::string value;
+ for (base::ListValue::const_iterator it = possible_values->begin();
+ it != possible_values->end(); ++it) {
+ if (!it->GetAsString(&value)) {
+ *error = "Invalid enumeration member type";
+ return false;
+ }
+ strings_.push_back(value);
+ string_enums_.push_back(strings_.back().c_str());
+ }
+ offset_end = static_cast<int>(string_enums_.size());
+ } else {
+ *error = "Enumeration is only supported for integer and string.";
+ return false;
+ }
+ schema_node->extra = static_cast<int>(restriction_nodes_.size());
+ restriction_nodes_.push_back(RestrictionNode());
+ restriction_nodes_.back().enumeration_restriction.offset_begin = offset_begin;
+ restriction_nodes_.back().enumeration_restriction.offset_end = offset_end;
+ return true;
+}
+
+bool Schema::InternalStorage::ParseRangedInt(
+ const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ std::string* error) {
+ int min_value = INT_MIN;
+ int max_value = INT_MAX;
+ int value;
+ if (schema.GetInteger(schema::kMinimum, &value))
+ min_value = value;
+ if (schema.GetInteger(schema::kMaximum, &value))
+ max_value = value;
+ if (min_value > max_value) {
+ *error = "Invalid range restriction for int type.";
+ return false;
+ }
+ schema_node->extra = static_cast<int>(restriction_nodes_.size());
+ restriction_nodes_.push_back(RestrictionNode());
+ restriction_nodes_.back().ranged_restriction.max_value = max_value;
+ restriction_nodes_.back().ranged_restriction.min_value = min_value;
+ return true;
+}
+
+bool Schema::InternalStorage::ParseStringPattern(
+ const base::DictionaryValue& schema,
+ SchemaNode* schema_node,
+ std::string* error) {
+ std::string pattern;
+ if (!schema.GetString(schema::kPattern, &pattern)) {
+ *error = "Schema pattern must be a string.";
+ return false;
+ }
+ re2::RE2* compiled_regex = CompileRegex(pattern);
+ if (!compiled_regex->ok()) {
+ *error = "/" + pattern + "/ is invalid regex: " + compiled_regex->error();
+ return false;
+ }
+ int index = static_cast<int>(string_enums_.size());
+ strings_.push_back(pattern);
+ string_enums_.push_back(strings_.back().c_str());
+ schema_node->extra = static_cast<int>(restriction_nodes_.size());
+ restriction_nodes_.push_back(RestrictionNode());
+ restriction_nodes_.back().string_pattern_restriction.pattern_index = index;
+ restriction_nodes_.back().string_pattern_restriction.pattern_index_backup =
+ index;
+ return true;
+}
+
+// static
+bool Schema::InternalStorage::ResolveReferences(
+ const IdMap& id_map,
+ const ReferenceList& reference_list,
+ std::string* error) {
+ for (ReferenceList::const_iterator ref = reference_list.begin();
+ ref != reference_list.end(); ++ref) {
+ IdMap::const_iterator id = id_map.find(ref->first);
+ if (id == id_map.end()) {
+ *error = "Invalid $ref: " + ref->first;
+ return false;
+ }
+ *ref->second = id->second;
+ }
+ return true;
+}
+
+Schema::Iterator::Iterator(const scoped_refptr<const InternalStorage>& storage,
+ const PropertiesNode* node)
+ : storage_(storage),
+ it_(storage->property(node->begin)),
+ end_(storage->property(node->end)) {}
+
+Schema::Iterator::Iterator(const Iterator& iterator)
+ : storage_(iterator.storage_),
+ it_(iterator.it_),
+ end_(iterator.end_) {}
+
+Schema::Iterator::~Iterator() {}
+
+Schema::Iterator& Schema::Iterator::operator=(const Iterator& iterator) {
+ storage_ = iterator.storage_;
+ it_ = iterator.it_;
+ end_ = iterator.end_;
+ return *this;
+}
+
+bool Schema::Iterator::IsAtEnd() const {
+ return it_ == end_;
+}
+
+void Schema::Iterator::Advance() {
+ ++it_;
+}
+
+const char* Schema::Iterator::key() const {
+ return it_->key;
+}
+
+Schema Schema::Iterator::schema() const {
+ return Schema(storage_, storage_->schema(it_->schema));
+}
+
+Schema::Schema() : node_(nullptr) {}
+
+Schema::Schema(const scoped_refptr<const InternalStorage>& storage,
+ const SchemaNode* node)
+ : storage_(storage), node_(node) {}
+
+Schema::Schema(const Schema& schema)
+ : storage_(schema.storage_), node_(schema.node_) {}
+
+Schema::~Schema() {}
+
+Schema& Schema::operator=(const Schema& schema) {
+ storage_ = schema.storage_;
+ node_ = schema.node_;
+ return *this;
+}
+
+// static
+Schema Schema::Wrap(const SchemaData* data) {
+ scoped_refptr<const InternalStorage> storage = InternalStorage::Wrap(data);
+ return Schema(storage, storage->root_node());
+}
+
+bool Schema::Validate(const base::Value& value,
+ SchemaOnErrorStrategy strategy,
+ std::string* error_path,
+ std::string* error) const {
+ if (!valid()) {
+ SchemaErrorFound(error_path, error, "The schema is invalid.");
+ return false;
+ }
+
+ if (value.type() != type()) {
+    // Allow promotion from integer to double. Note that range restrictions
+    // on double values are not supported yet.
+ if (value.is_int() && type() == base::Value::Type::DOUBLE) {
+ return true;
+ }
+
+ SchemaErrorFound(
+ error_path, error, "The value type doesn't match the schema type.");
+ return false;
+ }
+
+ const base::DictionaryValue* dict = nullptr;
+ const base::ListValue* list = nullptr;
+ int int_value;
+ std::string str_value;
+ if (value.GetAsDictionary(&dict)) {
+ base::flat_set<std::string> present_properties;
+ for (base::DictionaryValue::Iterator it(*dict); !it.IsAtEnd();
+ it.Advance()) {
+ SchemaList schema_list = GetMatchingProperties(it.key());
+ if (schema_list.empty()) {
+ // Unknown property was detected.
+ SchemaErrorFound(error_path, error, "Unknown property: " + it.key());
+ if (!StrategyAllowUnknownOnTopLevel(strategy))
+ return false;
+ } else {
+ bool all_subschemas_are_valid = true;
+ for (SchemaList::iterator subschema = schema_list.begin();
+ subschema != schema_list.end(); ++subschema) {
+ if (!subschema->Validate(it.value(),
+ StrategyForNextLevel(strategy),
+ error_path,
+ error)) {
+ // Invalid property was detected.
+ all_subschemas_are_valid = false;
+ AddDictKeyPrefixToPath(it.key(), error_path);
+ if (!StrategyAllowInvalidOnTopLevel(strategy))
+ return false;
+ }
+ }
+ if (all_subschemas_are_valid)
+ present_properties.insert(it.key());
+ }
+ }
+
+ for (const auto& required_property : GetRequiredProperties()) {
+ if (base::ContainsKey(present_properties, required_property))
+ continue;
+
+ SchemaErrorFound(
+ error_path, error,
+ "Missing or invalid required property: " + required_property);
+ return false;
+ }
+ } else if (value.GetAsList(&list)) {
+ for (base::ListValue::const_iterator it = list->begin(); it != list->end();
+ ++it) {
+ if (!GetItems().Validate(*it, StrategyForNextLevel(strategy), error_path,
+ error)) {
+ // Invalid list item was detected.
+ AddListIndexPrefixToPath(it - list->begin(), error_path);
+ if (!StrategyAllowInvalidOnTopLevel(strategy))
+ return false;
+ }
+ }
+ } else if (value.GetAsInteger(&int_value)) {
+ if (node_->extra != kInvalid &&
+ !ValidateIntegerRestriction(node_->extra, int_value)) {
+ SchemaErrorFound(error_path, error, "Invalid value for integer");
+ return false;
+ }
+ } else if (value.GetAsString(&str_value)) {
+ if (node_->extra != kInvalid &&
+ !ValidateStringRestriction(node_->extra, str_value.c_str())) {
+ SchemaErrorFound(error_path, error, "Invalid value for string");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Schema::Normalize(base::Value* value,
+ SchemaOnErrorStrategy strategy,
+ std::string* error_path,
+ std::string* error,
+ bool* changed) const {
+ if (!valid()) {
+ SchemaErrorFound(error_path, error, "The schema is invalid.");
+ return false;
+ }
+
+ if (value->type() != type()) {
+    // Allow promotion from integer to double. Note that range restrictions
+    // on double values are not supported yet.
+ if (value->is_int() && type() == base::Value::Type::DOUBLE) {
+ return true;
+ }
+
+ SchemaErrorFound(
+ error_path, error, "The value type doesn't match the schema type.");
+ return false;
+ }
+
+ base::DictionaryValue* dict = nullptr;
+ base::ListValue* list = nullptr;
+ if (value->GetAsDictionary(&dict)) {
+ base::flat_set<std::string> present_properties;
+ std::vector<std::string> drop_list; // Contains the keys to drop.
+ for (base::DictionaryValue::Iterator it(*dict); !it.IsAtEnd();
+ it.Advance()) {
+ SchemaList schema_list = GetMatchingProperties(it.key());
+ if (schema_list.empty()) {
+ // Unknown property was detected.
+ SchemaErrorFound(error_path, error, "Unknown property: " + it.key());
+ if (StrategyAllowUnknownOnTopLevel(strategy))
+ drop_list.push_back(it.key());
+ else
+ return false;
+ } else {
+ bool all_subschemas_are_valid = true;
+ for (SchemaList::iterator subschema = schema_list.begin();
+ subschema != schema_list.end(); ++subschema) {
+ base::Value* sub_value = nullptr;
+ dict->GetWithoutPathExpansion(it.key(), &sub_value);
+ if (!subschema->Normalize(sub_value,
+ StrategyForNextLevel(strategy),
+ error_path,
+ error,
+ changed)) {
+ // Invalid property was detected.
+ all_subschemas_are_valid = false;
+ AddDictKeyPrefixToPath(it.key(), error_path);
+ if (StrategyAllowInvalidOnTopLevel(strategy)) {
+ drop_list.push_back(it.key());
+ break;
+ } else {
+ return false;
+ }
+ }
+ }
+ if (all_subschemas_are_valid)
+ present_properties.insert(it.key());
+ }
+ }
+
+ for (const auto& required_property : GetRequiredProperties()) {
+ if (base::ContainsKey(present_properties, required_property))
+ continue;
+
+ SchemaErrorFound(
+ error_path, error,
+ "Missing or invalid required property: " + required_property);
+ return false;
+ }
+
+ if (changed && !drop_list.empty())
+ *changed = true;
+ for (std::vector<std::string>::const_iterator it = drop_list.begin();
+ it != drop_list.end();
+ ++it) {
+ dict->RemoveWithoutPathExpansion(*it, nullptr);
+ }
+ return true;
+ } else if (value->GetAsList(&list)) {
+ std::vector<size_t> drop_list; // Contains the indexes to drop.
+ for (size_t index = 0; index < list->GetSize(); index++) {
+ base::Value* sub_value = nullptr;
+ list->Get(index, &sub_value);
+ if (!sub_value || !GetItems().Normalize(sub_value,
+ StrategyForNextLevel(strategy),
+ error_path,
+ error,
+ changed)) {
+ // Invalid list item was detected.
+ AddListIndexPrefixToPath(index, error_path);
+ if (StrategyAllowInvalidOnTopLevel(strategy))
+ drop_list.push_back(index);
+ else
+ return false;
+ }
+ }
+ if (changed && !drop_list.empty())
+ *changed = true;
+ for (std::vector<size_t>::reverse_iterator it = drop_list.rbegin();
+ it != drop_list.rend(); ++it) {
+ list->Remove(*it, nullptr);
+ }
+ return true;
+ }
+
+ return Validate(*value, strategy, error_path, error);
+}
+
+// static
+Schema Schema::Parse(const std::string& content, std::string* error) {
+  // Validate as a generic JSON schema, and ignore unknown attributes; they
+  // may be used in a future version of the schema format.
+ std::unique_ptr<base::DictionaryValue> dict =
+ JSONSchemaValidator::IsValidSchema(
+ content, JSONSchemaValidator::OPTIONS_IGNORE_UNKNOWN_ATTRIBUTES,
+ error);
+ if (!dict)
+ return Schema();
+
+ // Validate the main type.
+ std::string string_value;
+ if (!dict->GetString(schema::kType, &string_value) ||
+ string_value != schema::kObject) {
+ *error =
+ "The main schema must have a type attribute with \"object\" value.";
+ return Schema();
+ }
+
+ // Checks for invalid attributes at the top-level.
+ if (dict->HasKey(schema::kAdditionalProperties) ||
+ dict->HasKey(schema::kPatternProperties)) {
+ *error = "\"additionalProperties\" and \"patternProperties\" are not "
+ "supported at the main schema.";
+ return Schema();
+ }
+
+ scoped_refptr<const InternalStorage> storage =
+ InternalStorage::ParseSchema(*dict, error);
+ if (!storage)
+ return Schema();
+ return Schema(storage, storage->root_node());
+}
+
+base::Value::Type Schema::type() const {
+ CHECK(valid());
+ return node_->type;
+}
+
+Schema::Iterator Schema::GetPropertiesIterator() const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::DICTIONARY, type());
+ return Iterator(storage_, storage_->properties(node_->extra));
+}
+
+namespace {
+
+bool CompareKeys(const PropertyNode& node, const std::string& key) {
+ return node.key < key;
+}
+
+} // namespace
+
+Schema Schema::GetKnownProperty(const std::string& key) const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::DICTIONARY, type());
+ const PropertiesNode* node = storage_->properties(node_->extra);
+ const PropertyNode* begin = storage_->property(node->begin);
+ const PropertyNode* end = storage_->property(node->end);
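+  // [begin, end) is sorted by key (see PropertiesNode in schema_internal.h),
+  // so the property can be located with a binary search.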
+ const PropertyNode* it = std::lower_bound(begin, end, key, CompareKeys);
+ if (it != end && it->key == key)
+ return Schema(storage_, storage_->schema(it->schema));
+ return Schema();
+}
+
+Schema Schema::GetAdditionalProperties() const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::DICTIONARY, type());
+ const PropertiesNode* node = storage_->properties(node_->extra);
+ if (node->additional == kInvalid)
+ return Schema();
+ return Schema(storage_, storage_->schema(node->additional));
+}
+
+SchemaList Schema::GetPatternProperties(const std::string& key) const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::DICTIONARY, type());
+ const PropertiesNode* node = storage_->properties(node_->extra);
+ const PropertyNode* begin = storage_->property(node->end);
+ const PropertyNode* end = storage_->property(node->pattern_end);
+ SchemaList matching_properties;
+ for (const PropertyNode* it = begin; it != end; ++it) {
+ if (re2::RE2::PartialMatch(key, *storage_->CompileRegex(it->key))) {
+ matching_properties.push_back(
+ Schema(storage_, storage_->schema(it->schema)));
+ }
+ }
+ return matching_properties;
+}
+
+std::vector<std::string> Schema::GetRequiredProperties() const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::DICTIONARY, type());
+ const PropertiesNode* node = storage_->properties(node_->extra);
+ const size_t begin = node->required_begin;
+ const size_t end = node->required_end;
+
+ return std::vector<std::string>(storage_->required_property(begin),
+ storage_->required_property(end));
+}
+
+Schema Schema::GetProperty(const std::string& key) const {
+ Schema schema = GetKnownProperty(key);
+ if (schema.valid())
+ return schema;
+ return GetAdditionalProperties();
+}
+
+SchemaList Schema::GetMatchingProperties(const std::string& key) const {
+ SchemaList schema_list;
+
+ Schema known_property = GetKnownProperty(key);
+ if (known_property.valid())
+ schema_list.push_back(known_property);
+
+ SchemaList pattern_properties = GetPatternProperties(key);
+ schema_list.insert(
+ schema_list.end(), pattern_properties.begin(), pattern_properties.end());
+
+ if (schema_list.empty()) {
+ Schema additional_property = GetAdditionalProperties();
+ if (additional_property.valid())
+ schema_list.push_back(additional_property);
+ }
+
+ return schema_list;
+}
+
+Schema Schema::GetItems() const {
+ CHECK(valid());
+ CHECK_EQ(base::Value::Type::LIST, type());
+ if (node_->extra == kInvalid)
+ return Schema();
+ return Schema(storage_, storage_->schema(node_->extra));
+}
+
+bool Schema::ValidateIntegerRestriction(int index, int value) const {
+ const RestrictionNode* rnode = storage_->restriction(index);
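+  // RestrictionNode is a union: a ranged restriction has
+  // min_value <= max_value, while an enumeration stores
+  // offset_begin < offset_end in the same fields, so this comparison tells
+  // the two apart.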
+ if (rnode->ranged_restriction.min_value <=
+ rnode->ranged_restriction.max_value) {
+ return rnode->ranged_restriction.min_value <= value &&
+ rnode->ranged_restriction.max_value >= value;
+ } else {
+ for (int i = rnode->enumeration_restriction.offset_begin;
+ i < rnode->enumeration_restriction.offset_end; ++i) {
+ if (*storage_->int_enums(i) == value)
+ return true;
+ }
+ return false;
+ }
+}
+
+bool Schema::ValidateStringRestriction(int index, const char* str) const {
+ const RestrictionNode* rnode = storage_->restriction(index);
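+  // Enumerations have offset_begin < offset_end; for a string pattern
+  // restriction the same fields hold the (equal) pattern indices, so the
+  // else branch is taken.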
+ if (rnode->enumeration_restriction.offset_begin <
+ rnode->enumeration_restriction.offset_end) {
+ for (int i = rnode->enumeration_restriction.offset_begin;
+ i < rnode->enumeration_restriction.offset_end; ++i) {
+ if (strcmp(*storage_->string_enums(i), str) == 0)
+ return true;
+ }
+ return false;
+ } else {
+    int pattern_index = rnode->string_pattern_restriction.pattern_index;
+    DCHECK(pattern_index ==
+           rnode->string_pattern_restriction.pattern_index_backup);
+    re2::RE2* regex =
+        storage_->CompileRegex(*storage_->string_enums(pattern_index));
+ return re2::RE2::PartialMatch(str, *regex);
+ }
+}
+
+Schema Schema::GetValidationSchema() const {
+ CHECK(valid());
+ const SchemaNode* validation_schema_root_node =
+ storage_->validation_schema_root_node();
+ if (!validation_schema_root_node)
+ return Schema();
+ return Schema(storage_, validation_schema_root_node);
+}
+
+} // namespace policy
diff --git a/components/policy/core/common/schema.h b/components/policy/core/common/schema.h
new file mode 100644
index 0000000000..5be02a8634
--- /dev/null
+++ b/components/policy/core/common/schema.h
@@ -0,0 +1,211 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_CORE_COMMON_SCHEMA_H_
+#define COMPONENTS_POLICY_CORE_COMMON_SCHEMA_H_
+
+#include <string>
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "base/values.h"
+#include "components/policy/policy_export.h"
+
+namespace policy {
+namespace internal {
+
+struct POLICY_EXPORT SchemaData;
+struct POLICY_EXPORT SchemaNode;
+struct POLICY_EXPORT PropertyNode;
+struct POLICY_EXPORT PropertiesNode;
+
+} // namespace internal
+
+// Option flags passed to Schema::Validate() and Schema::Normalize() that
+// describe the strategy for handling unknown properties and invalid values
+// in dictionaries. Note that in Schema::Normalize() allowed errors are
+// dropped and thus ignored.
+// An "unknown" error means that a dictionary value (which may or may not be
+// the root) has a property name that the schema does not recognize.
+// An "invalid" error means a validation failure against the schema. Since
+// validation is done recursively, a validation failure of a dict property or
+// a list item may either be ignored (or dropped in Normalize()) or trigger a
+// validation failure for the whole dictionary/list.
+enum SchemaOnErrorStrategy {
+ // No errors will be allowed.
+ SCHEMA_STRICT = 0,
+ // Unknown properties in the top-level dictionary will be ignored.
+ SCHEMA_ALLOW_UNKNOWN_TOPLEVEL,
+ // Unknown properties in any dictionary will be ignored.
+ SCHEMA_ALLOW_UNKNOWN,
+  // Mismatched values will be ignored at the top level.
+  SCHEMA_ALLOW_INVALID_TOPLEVEL,
+  // Mismatched values will be ignored at the top level, and unknown
+  // properties in any dictionary will be ignored.
+ SCHEMA_ALLOW_INVALID_TOPLEVEL_AND_ALLOW_UNKNOWN,
+ // Mismatched values will be ignored.
+ SCHEMA_ALLOW_INVALID,
+};
+
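+// Illustrative example (an assumption, not from the original source): for the
+// schema { "type": "object", "properties": { "foo": { "type": "integer" } } },
+// validating { "foo": 1, "bar": true } fails under SCHEMA_STRICT because of
+// the unknown property "bar", but is accepted under SCHEMA_ALLOW_UNKNOWN;
+// Normalize() with the same strategy drops "bar" instead.
+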
+class Schema;
+
+typedef std::vector<Schema> SchemaList;
+
+// Describes the expected type of one policy. Also recursively describes the
+// types of inner elements, for structured types.
+// Objects of this class refer to external, immutable data and are cheap to
+// copy.
+//
+// Schema validation is based on a subset of the JSON Schema standard.
+// TODO(crbug.com/856901): Document the supported subset of the JSON Schema
+// standard.
+class POLICY_EXPORT Schema {
+ public:
+ // Used internally to store shared data.
+ class InternalStorage;
+
+ // Builds an empty, invalid schema.
+ Schema();
+
+ // Makes a copy of |schema| that shares the same internal storage.
+ Schema(const Schema& schema);
+
+ ~Schema();
+
+ Schema& operator=(const Schema& schema);
+
+ // Returns a Schema that references static data. This can be used by
+ // the embedder to pass structures generated at compile time, which can then
+ // be quickly loaded at runtime.
+ static Schema Wrap(const internal::SchemaData* data);
+
+ // Parses the JSON schema in |schema| and returns a Schema that owns
+ // the internal representation. If |schema| is invalid then an invalid Schema
+ // is returned and |error| contains a reason for the failure.
+ static Schema Parse(const std::string& schema, std::string* error);
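+  //
+  // A minimal usage sketch (illustrative, not part of the original header):
+  //   std::string error;
+  //   Schema schema = Schema::Parse(
+  //       "{ \"type\": \"object\","
+  //       "  \"properties\": { \"port\": { \"type\": \"integer\" } } }",
+  //       &error);
+  //   if (!schema.valid())
+  //     LOG(ERROR) << "Bad schema: " << error;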
+
+ // Returns true if this Schema is valid. Schemas returned by the methods below
+ // may be invalid, and in those cases the other methods must not be used.
+  bool valid() const { return node_ != nullptr; }
+
+ base::Value::Type type() const;
+
+  // Validates |value| against the current schema; |strategy| selects how
+  // unknown properties and invalid values are handled, and allowed errors are
+  // ignored. If |value| doesn't strictly conform to the schema, |error_path|
+  // and |error| will contain the last error location and a detailed message.
+  // If |value| doesn't conform even within the allowance of |strategy|, false
+  // is returned and |error_path| and |error| describe the error that caused
+  // the failure. |error_path| may be null, in which case no error path is
+  // returned.
+ bool Validate(const base::Value& value,
+ SchemaOnErrorStrategy strategy,
+ std::string* error_path,
+ std::string* error) const;
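+  //
+  // Example call (an illustrative sketch; |value| is assumed to be the
+  // base::Value under test):
+  //   std::string error_path, error;
+  //   bool ok = schema.Validate(value, SCHEMA_ALLOW_UNKNOWN,
+  //                             &error_path, &error);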
+
+  // Similar to Validate(), but drops values with errors instead of ignoring
+  // them. |changed| points to a boolean that indicates whether |value| was
+  // modified (e.g. properties or items were dropped). Be sure to set the
+  // boolean that |changed| points to to false before calling Normalize().
+  // |changed| may be null, in which case no boolean is set.
+  // This function also takes ownership of the dropped base::Values and
+  // destroys them.
+ bool Normalize(base::Value* value,
+ SchemaOnErrorStrategy strategy,
+ std::string* error_path,
+ std::string* error,
+ bool* changed) const;
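+  //
+  // Example call (illustrative): dropping offending entries in place.
+  //   bool changed = false;
+  //   std::string error_path, error;
+  //   if (schema.Normalize(&value, SCHEMA_ALLOW_UNKNOWN, &error_path, &error,
+  //                        &changed) && changed) {
+  //     // |value| no longer contains the dropped properties or items.
+  //   }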
+
+ // Used to iterate over the known properties of Type::DICTIONARY schemas.
+ class POLICY_EXPORT Iterator {
+ public:
+ Iterator(const scoped_refptr<const InternalStorage>& storage,
+ const internal::PropertiesNode* node);
+ Iterator(const Iterator& iterator);
+ ~Iterator();
+
+ Iterator& operator=(const Iterator& iterator);
+
+    // Returns whether the iterator is past the last property. The methods
+    // below must not be called once the end has been reached.
+    bool IsAtEnd() const;
+
+ // Advances the iterator to the next property.
+ void Advance();
+
+ // Returns the name of the current property.
+ const char* key() const;
+
+ // Returns the Schema for the current property. This Schema is always valid.
+ Schema schema() const;
+
+ private:
+ scoped_refptr<const InternalStorage> storage_;
+ const internal::PropertyNode* it_;
+ const internal::PropertyNode* end_;
+ };
+
+  // These methods should be called only if type() == Type::DICTIONARY;
+  // otherwise invalid memory will be read. A CHECK currently enforces this.
+
+ // Returns an iterator that goes over the named properties of this schema.
+ // The returned iterator is at the beginning.
+ Iterator GetPropertiesIterator() const;
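+  //
+  // Typical iteration pattern (illustrative; UseProperty is hypothetical):
+  //   for (Schema::Iterator it = schema.GetPropertiesIterator();
+  //        !it.IsAtEnd(); it.Advance()) {
+  //     UseProperty(it.key(), it.schema());
+  //   }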
+
+ // Returns the Schema for the property named |key|. If |key| is not a known
+ // property name then the returned Schema is not valid.
+ Schema GetKnownProperty(const std::string& key) const;
+
+ // Returns all Schemas from pattern properties that match |key|. May be empty.
+ SchemaList GetPatternProperties(const std::string& key) const;
+
+ // Returns this Schema's required properties. May be empty if the Schema has
+ // no required properties.
+ std::vector<std::string> GetRequiredProperties() const;
+
+ // Returns the Schema for additional properties. If additional properties are
+ // not allowed for this Schema then the Schema returned is not valid.
+ Schema GetAdditionalProperties() const;
+
+ // Returns the Schema for |key| if it is a known property, otherwise returns
+ // the Schema for additional properties.
+  // DEPRECATED: This function doesn't consider patternProperties; use
+  // GetMatchingProperties() instead.
+ // TODO(binjin): Replace calls to this function with GetKnownProperty() or
+ // GetMatchingProperties() and remove this later.
+ Schema GetProperty(const std::string& key) const;
+
+ // Returns all Schemas that are supposed to be validated against for |key|.
+ // May be empty.
+ SchemaList GetMatchingProperties(const std::string& key) const;
+
+ // Returns the Schema for items of an array.
+  // This method should be called only if type() == Type::LIST;
+  // otherwise invalid memory will be read. A CHECK currently enforces this.
+ Schema GetItems() const;
+
+  // Gets the validation schema associated with this schema, or an empty,
+  // invalid schema if there isn't one. A few policies contain embedded JSON;
+  // those policies have a schema for validating that JSON which is more
+  // detailed than the regular schema. For other policies it is not defined.
+  // To get the validation schema for a policy, call
+  // |chrome_schema.GetValidationSchema().GetKnownProperty(policy_name)|, where
+  // |chrome_schema| is the root schema that has all policies as children.
+ Schema GetValidationSchema() const;
+
+ private:
+ // Builds a schema pointing to the inner structure of |storage|,
+ // rooted at |node|.
+ Schema(const scoped_refptr<const InternalStorage>& storage,
+ const internal::SchemaNode* node);
+
+ bool ValidateIntegerRestriction(int index, int value) const;
+ bool ValidateStringRestriction(int index, const char* str) const;
+
+ scoped_refptr<const InternalStorage> storage_;
+ const internal::SchemaNode* node_;
+};
+
+} // namespace policy
+
+#endif // COMPONENTS_POLICY_CORE_COMMON_SCHEMA_H_
diff --git a/components/policy/core/common/schema_internal.h b/components/policy/core/common/schema_internal.h
new file mode 100644
index 0000000000..8cda371612
--- /dev/null
+++ b/components/policy/core/common/schema_internal.h
@@ -0,0 +1,143 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_CORE_COMMON_SCHEMA_INTERNAL_H_
+#define COMPONENTS_POLICY_CORE_COMMON_SCHEMA_INTERNAL_H_
+
+#include "base/values.h"
+#include "components/policy/policy_export.h"
+
+namespace policy {
+namespace internal {
+
+// These types are used internally by the Schema parser and by the
+// compile-time code generator. They shouldn't be used directly.
+
+// Represents the type of one policy, or an item of a list policy, or a
+// property of a map policy.
+struct POLICY_EXPORT SchemaNode {
+ // The policy type.
+ base::Value::Type type;
+
+ // If |type| is Type::DICTIONARY then |extra| is an offset into
+ // SchemaData::properties_nodes that indexes the PropertiesNode describing
+ // the entries of this dictionary.
+ //
+ // If |type| is Type::LIST then |extra| is an offset into
+ // SchemaData::schema_nodes that indexes the SchemaNode describing the items
+ // of this list.
+ //
+ // If |type| is Type::INTEGER or Type::STRING, and contains corresponding
+ // restriction (enumeration of possible values, or range for integer), then
+ // |extra| is an offset into SchemaData::restriction_nodes that indexes the
+ // RestrictionNode describing the restriction on the value.
+ //
+  // Otherwise |extra| is -1 and is invalid.
+ int extra;
+};
+
+// Represents an entry of a map policy.
+struct POLICY_EXPORT PropertyNode {
+ // The entry key.
+ const char* key;
+
+ // An offset into SchemaData::schema_nodes that indexes the SchemaNode
+ // describing the structure of this key.
+ int schema;
+};
+
+// Represents the list of keys of a map policy.
+struct POLICY_EXPORT PropertiesNode {
+ // An offset into SchemaData::property_nodes that indexes the PropertyNode
+ // describing the first known property of this map policy.
+ int begin;
+
+ // An offset into SchemaData::property_nodes that indexes the PropertyNode
+ // right beyond the last known property of this map policy.
+ //
+ // If |begin == end| then the map policy that this PropertiesNode corresponds
+ // to does not have known properties.
+ //
+ // Note that the range [begin, end) is sorted by PropertyNode::key, so that
+ // properties can be looked up by binary searching in the range.
+ int end;
+
+ // An offset into SchemaData::property_nodes that indexes the PropertyNode
+ // right beyond the last known pattern property.
+ //
+ // [end, pattern_end) is the range that covers all pattern properties
+ // defined. It's not required to be sorted.
+ int pattern_end;
+
+ // An offset into SchemaData::required_properties that indexes the first
+ // required property of this map policy.
+ int required_begin;
+
+ // An offset into SchemaData::required_properties that indexes the property
+ // right beyond the last required property.
+ //
+ // If |required_begin == required_end|, then the map policy that this
+ // PropertiesNode corresponds to does not have any required properties.
+ //
+ // Note that the range [required_begin, required_end) is not sorted.
+ int required_end;
+
+  // If this map policy supports keys beyond the well-known properties
+  // described in the range [begin, end), then |additional| is an offset into
+  // SchemaData::schema_nodes that indexes the SchemaNode describing the
+  // structure of the values for those keys. Otherwise |additional| is -1 and
+  // is invalid.
+ int additional;
+};
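+
+// Illustrative layout (an assumed example, for exposition only): the schema
+// { "type": "object", "properties": { "x": { "type": "integer" } } } could be
+// encoded with a single PropertiesNode { begin = 0, end = 1, pattern_end = 1,
+// required_begin = 0, required_end = 0, additional = -1 } and one
+// PropertyNode { "x", <index of the integer SchemaNode> }.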
+
+// Represents the restriction on Type::INTEGER or Type::STRING instance of
+// base::Value.
+union POLICY_EXPORT RestrictionNode {
+  // Offsets into SchemaData::int_enums or SchemaData::string_enums; the
+  // entries in [offset_begin, offset_end) enumerate all possible values of
+  // the corresponding integer or string value. |offset_begin| is assumed to
+  // be strictly less than |offset_end|.
+ struct EnumerationRestriction {
+ int offset_begin;
+ int offset_end;
+ } enumeration_restriction;
+
+  // For the integer type only: all values between |min_value| and
+  // |max_value| (inclusive) can be chosen. Note that the integer type in
+  // base::Value is bounded, so this can also be used if only one of
+  // |min_value| and |max_value| is stated. |max_value| is assumed to be
+  // greater than or equal to |min_value|.
+ struct RangedRestriction {
+ int max_value;
+ int min_value;
+ } ranged_restriction;
+
+  // For the string type only: |pattern_index| and |pattern_index_backup| are
+  // required to be exactly the same. Both are an offset into
+  // SchemaData::string_enums, which contains the regular expression that the
+  // target string must match.
+ struct StringPatternRestriction {
+ int pattern_index;
+ int pattern_index_backup;
+ } string_pattern_restriction;
+};
+
+// Contains arrays of related nodes. All of the offsets in these nodes reference
+// other nodes in these arrays.
+struct POLICY_EXPORT SchemaData {
+ const SchemaNode* schema_nodes;
+ const PropertyNode* property_nodes;
+ const PropertiesNode* properties_nodes;
+ const RestrictionNode* restriction_nodes;
+ const char* const* required_properties;
+
+ const int* int_enums;
+ const char* const* string_enums;
+ int validation_schema_root_index;
+};
+
+} // namespace internal
+} // namespace policy
+
+#endif // COMPONENTS_POLICY_CORE_COMMON_SCHEMA_INTERNAL_H_
diff --git a/components/policy/policy_export.h b/components/policy/policy_export.h
new file mode 100644
index 0000000000..b6030aa178
--- /dev/null
+++ b/components/policy/policy_export.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_POLICY_POLICY_EXPORT_H_
+#define COMPONENTS_POLICY_POLICY_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+
+#if defined(WIN32)
+
+#if defined(POLICY_COMPONENT_IMPLEMENTATION)
+#define POLICY_EXPORT __declspec(dllexport)
+#else
+#define POLICY_EXPORT __declspec(dllimport)
+#endif // defined(POLICY_COMPONENT_IMPLEMENTATION)
+
+#else // defined(WIN32)
+
+#if defined(POLICY_COMPONENT_IMPLEMENTATION)
+#define POLICY_EXPORT __attribute__((visibility("default")))
+#else
+#define POLICY_EXPORT
+#endif // defined(POLICY_COMPONENT_IMPLEMENTATION)
+
+#endif // defined(WIN32)
+
+#else // defined(COMPONENT_BUILD)
+
+#define POLICY_EXPORT
+
+#endif // defined(COMPONENT_BUILD)
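+
+// Usage sketch (illustrative): annotate symbols that should be exported from
+// the component, e.g.
+//   class POLICY_EXPORT Schema { ... };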
+
+#endif // COMPONENTS_POLICY_POLICY_EXPORT_H_
diff --git a/dbus/mock_object_manager.cc b/dbus/mock_object_manager.cc
new file mode 100644
index 0000000000..3f0c0a1744
--- /dev/null
+++ b/dbus/mock_object_manager.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/mock_object_manager.h"
+
+namespace dbus {
+
+MockObjectManager::MockObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path)
+ : ObjectManager(bus, service_name, object_path) {
+}
+
+MockObjectManager::~MockObjectManager() = default;
+
+} // namespace dbus
diff --git a/dbus/mock_object_manager.h b/dbus/mock_object_manager.h
new file mode 100644
index 0000000000..6eb95de9a4
--- /dev/null
+++ b/dbus/mock_object_manager.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MOCK_OBJECT_MANAGER_H_
+#define DBUS_MOCK_OBJECT_MANAGER_H_
+
+#include <string>
+
+#include "dbus/message.h"
+#include "dbus/object_manager.h"
+#include "dbus/object_path.h"
+#include "dbus/object_proxy.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace dbus {
+
+// Mock for ObjectManager.
+class MockObjectManager : public ObjectManager {
+ public:
+ MockObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path);
+
+ MOCK_METHOD2(RegisterInterface, void(const std::string&,
+ Interface*));
+ MOCK_METHOD1(UnregisterInterface, void(const std::string&));
+ MOCK_METHOD0(GetObjects, std::vector<ObjectPath>());
+ MOCK_METHOD1(GetObjectsWithInterface,
+ std::vector<ObjectPath>(const std::string&));
+ MOCK_METHOD1(GetObjectProxy, ObjectProxy*(const ObjectPath&));
+ MOCK_METHOD2(GetProperties, PropertySet*(const ObjectPath&,
+ const std::string&));
+
+ protected:
+ ~MockObjectManager() override;
+};
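+
+// Example test usage (an illustrative sketch; |bus| and the names below are
+// assumptions):
+//   scoped_refptr<MockObjectManager> manager = new MockObjectManager(
+//       bus.get(), "org.example.Service", ObjectPath("/org/example/Object"));
+//   EXPECT_CALL(*manager, GetObjects())
+//       .WillOnce(testing::Return(std::vector<ObjectPath>()));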
+
+} // namespace dbus
+
+#endif // DBUS_MOCK_OBJECT_MANAGER_H_
diff --git a/third_party/re2/src/re2/re2.h b/third_party/re2/src/re2/re2.h
new file mode 100644
index 0000000000..9f0f186a49
--- /dev/null
+++ b/third_party/re2/src/re2/re2.h
@@ -0,0 +1,6 @@
+// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Redirect to system header.
+#include <re2/re2.h>