author     Torne (Richard Coles) <torne@google.com>    2013-10-18 15:46:22 +0100
committer  Torne (Richard Coles) <torne@google.com>    2013-10-18 15:46:22 +0100
commit     4e180b6a0b4720a9b8e9e959a882386f690f08ff (patch)
tree       788435d09362885908ba5ba9ef868b852ca82c0b /sync
parent     1179b92b08db0c652a0cf003ab4d89b31ce3610f (diff)
download   chromium_org-4e180b6a0b4720a9b8e9e959a882386f690f08ff.tar.gz
Merge from Chromium at DEPS revision 228962
This commit was generated by merge_to_master.py.

Change-Id: I23bd7d7766f213fd52f28ae5e1ecc6ae9df905ea
Diffstat (limited to 'sync')
-rw-r--r--  sync/android/java/src/org/chromium/sync/notifier/SyncStatusHelper.java  4
-rw-r--r--  sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java  2
-rw-r--r--  sync/engine/build_commit_command.cc  225
-rw-r--r--  sync/engine/build_commit_command.h  82
-rw-r--r--  sync/engine/commit.cc  277
-rw-r--r--  sync/engine/commit.h  73
-rw-r--r--  sync/engine/commit_util.cc (renamed from sync/engine/process_commit_response_command.cc)  437
-rw-r--r--  sync/engine/commit_util.h  64
-rw-r--r--  sync/engine/get_commit_ids.cc  21
-rw-r--r--  sync/engine/get_commit_ids.h  22
-rw-r--r--  sync/engine/process_commit_response_command.h  123
-rw-r--r--  sync/engine/process_commit_response_command_unittest.cc  365
-rw-r--r--  sync/engine/process_updates_command.cc  63
-rw-r--r--  sync/engine/process_updates_command.h  16
-rw-r--r--  sync/engine/process_updates_command_unittest.cc  25
-rw-r--r--  sync/engine/sync_directory_commit_contribution.cc  164
-rw-r--r--  sync/engine/sync_directory_commit_contribution.h  102
-rw-r--r--  sync/engine/sync_directory_commit_contribution_unittest.cc  235
-rw-r--r--  sync/engine/sync_directory_commit_contributor.cc  24
-rw-r--r--  sync/engine/sync_directory_commit_contributor.h  45
-rw-r--r--  sync/engine/sync_scheduler_impl.cc  18
-rw-r--r--  sync/engine/sync_scheduler_impl.h  14
-rw-r--r--  sync/engine/sync_scheduler_unittest.cc  4
-rw-r--r--  sync/engine/syncer.cc  36
-rw-r--r--  sync/engine/syncer.h  8
-rw-r--r--  sync/engine/syncer_unittest.cc  87
-rw-r--r--  sync/engine/syncer_util.cc  45
-rw-r--r--  sync/engine/syncer_util.h  23
-rw-r--r--  sync/internal_api/debug_info_event_listener.cc  18
-rw-r--r--  sync/internal_api/debug_info_event_listener.h  4
-rw-r--r--  sync/internal_api/http_bridge.cc  6
-rw-r--r--  sync/internal_api/http_bridge_unittest.cc  29
-rw-r--r--  sync/internal_api/js_mutation_event_observer.h  3
-rw-r--r--  sync/internal_api/public/base/DEPS  4
-rw-r--r--  sync/internal_api/public/base/ack_handle.cc  67
-rw-r--r--  sync/internal_api/public/base/ack_handle.h  47
-rw-r--r--  sync/internal_api/public/base/invalidation.cc  162
-rw-r--r--  sync/internal_api/public/base/invalidation.h  88
-rw-r--r--  sync/internal_api/public/base/invalidation_test_util.cc  21
-rw-r--r--  sync/internal_api/public/base/invalidation_test_util.h  2
-rw-r--r--  sync/internal_api/public/base/model_type.h  2
-rw-r--r--  sync/internal_api/public/base/model_type_test_util.cc  11
-rw-r--r--  sync/internal_api/public/base/object_id_invalidation_map_test_util.cc (renamed from sync/notifier/object_id_invalidation_map_test_util.cc)  86
-rw-r--r--  sync/internal_api/public/base/object_id_invalidation_map_test_util.h (renamed from sync/notifier/object_id_invalidation_map_test_util.h)  9
-rw-r--r--  sync/internal_api/sync_encryption_handler_impl.cc  6
-rw-r--r--  sync/internal_api/sync_encryption_handler_impl.h  4
-rw-r--r--  sync/internal_api/sync_manager_impl.cc  30
-rw-r--r--  sync/internal_api/sync_manager_impl.h  4
-rw-r--r--  sync/internal_api/sync_manager_impl_unittest.cc  4
-rw-r--r--  sync/notifier/invalidation_notifier_unittest.cc  1
-rw-r--r--  sync/notifier/invalidator_registrar.cc  93
-rw-r--r--  sync/notifier/invalidator_registrar.h  11
-rw-r--r--  sync/notifier/invalidator_registrar_unittest.cc  1
-rw-r--r--  sync/notifier/invalidator_test_template.h  72
-rw-r--r--  sync/notifier/non_blocking_invalidator.cc  13
-rw-r--r--  sync/notifier/non_blocking_invalidator.h  4
-rw-r--r--  sync/notifier/non_blocking_invalidator_unittest.cc  1
-rw-r--r--  sync/notifier/object_id_invalidation_map.cc  143
-rw-r--r--  sync/notifier/object_id_invalidation_map.h  75
-rw-r--r--  sync/notifier/object_id_invalidation_map_unittest.cc  104
-rw-r--r--  sync/notifier/p2p_invalidator.cc  42
-rw-r--r--  sync/notifier/p2p_invalidator.h  4
-rw-r--r--  sync/notifier/p2p_invalidator_unittest.cc  69
-rw-r--r--  sync/notifier/single_object_invalidation_set.cc  130
-rw-r--r--  sync/notifier/single_object_invalidation_set.h  66
-rw-r--r--  sync/notifier/single_object_invalidation_set_unittest.cc  110
-rw-r--r--  sync/notifier/sync_invalidation_listener.cc  45
-rw-r--r--  sync/notifier/sync_invalidation_listener.h  3
-rw-r--r--  sync/notifier/sync_invalidation_listener_unittest.cc  81
-rw-r--r--  sync/notifier/sync_system_resources.cc  6
-rw-r--r--  sync/notifier/sync_system_resources.h  7
-rw-r--r--  sync/protocol/article_specifics.proto  29
-rw-r--r--  sync/protocol/nigori_specifics.proto  3
-rw-r--r--  sync/protocol/proto_value_conversions.cc  75
-rw-r--r--  sync/protocol/proto_value_conversions.h  40
-rw-r--r--  sync/protocol/proto_value_conversions_unittest.cc  7
-rw-r--r--  sync/protocol/sync.proto  2
-rw-r--r--  sync/sessions/data_type_tracker.cc  23
-rw-r--r--  sync/sessions/data_type_tracker.h  9
-rw-r--r--  sync/sessions/nudge_tracker.cc  11
-rw-r--r--  sync/sessions/ordered_commit_set.cc  131
-rw-r--r--  sync/sessions/ordered_commit_set.h  128
-rw-r--r--  sync/sessions/ordered_commit_set_unittest.cc  134
-rw-r--r--  sync/sessions/status_controller.h  9
-rw-r--r--  sync/sessions/sync_session.h  1
-rw-r--r--  sync/sessions/sync_session_context.cc  16
-rw-r--r--  sync/sessions/sync_session_context.h  16
-rw-r--r--  sync/sync_core.gypi  16
-rw-r--r--  sync/sync_internal_api.gypi  4
-rw-r--r--  sync/sync_notifier.gypi  2
-rw-r--r--  sync/sync_proto.gypi  1
-rw-r--r--  sync/sync_tests.gypi  13
-rw-r--r--  sync/syncable/directory.cc  40
-rw-r--r--  sync/syncable/directory.h  11
-rw-r--r--  sync/syncable/metahandle_set.h  2
-rw-r--r--  sync/syncable/model_neutral_mutable_entry.cc  94
-rw-r--r--  sync/syncable/model_neutral_mutable_entry.h  25
-rw-r--r--  sync/syncable/model_type.cc  23
-rw-r--r--  sync/syncable/mutable_entry.cc  57
-rw-r--r--  sync/syncable/mutable_entry.h  16
-rw-r--r--  sync/syncable/nigori_util.cc  7
-rw-r--r--  sync/syncable/syncable_base_write_transaction.cc  22
-rw-r--r--  sync/syncable/syncable_base_write_transaction.h  35
-rw-r--r--  sync/syncable/syncable_model_neutral_write_transaction.cc  33
-rw-r--r--  sync/syncable/syncable_model_neutral_write_transaction.h  44
-rw-r--r--  sync/syncable/syncable_util.cc  6
-rw-r--r--  sync/syncable/syncable_util.h  11
-rw-r--r--  sync/syncable/syncable_write_transaction.cc  14
-rw-r--r--  sync/syncable/syncable_write_transaction.h  6
-rw-r--r--  sync/tools/sync_listen_notifications.cc  10
-rw-r--r--  sync/tools/testserver/chromiumsync.py  7
-rw-r--r--  sync/util/data_type_histogram.h  3
-rw-r--r--  sync/util/get_session_name.cc  2
113 files changed, 2929 insertions, 2501 deletions
diff --git a/sync/android/java/src/org/chromium/sync/notifier/SyncStatusHelper.java b/sync/android/java/src/org/chromium/sync/notifier/SyncStatusHelper.java
index 346639dbd4..cde85e2d58 100644
--- a/sync/android/java/src/org/chromium/sync/notifier/SyncStatusHelper.java
+++ b/sync/android/java/src/org/chromium/sync/notifier/SyncStatusHelper.java
@@ -195,14 +195,14 @@ public class SyncStatusHelper {
* Wrapper method for the ContentResolver.addStatusChangeListener(...) when we are only
* interested in the settings type.
*/
- public void registerContentResolverObserver(SyncSettingsChangedObserver observer) {
+ public void registerSyncSettingsChangedObserver(SyncSettingsChangedObserver observer) {
mObservers.addObserver(observer);
}
/**
* Wrapper method for the ContentResolver.removeStatusChangeListener(...).
*/
- public void unregisterContentResolverObserver(SyncSettingsChangedObserver observer) {
+ public void unregisterSyncSettingsChangedObserver(SyncSettingsChangedObserver observer) {
mObservers.removeObserver(observer);
}
diff --git a/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java b/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
index fe7eb092d0..f38eebf0fc 100644
--- a/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
+++ b/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
@@ -145,7 +145,7 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
assertFalse(listenerCallbackCalled.get());
// Ensure we get a callback, which means we have registered for them.
- ActivityStatus.onStateChange(new Activity(), ActivityStatus.RESUMED);
+ ActivityStatus.onStateChangeForTesting(new Activity(), ActivityStatus.RESUMED);
assertTrue(listenerCallbackCalled.get());
}
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
deleted file mode 100644
index 65f1cdfb9c..0000000000
--- a/sync/engine/build_commit_command.cc
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/build_commit_command.h"
-
-#include <limits>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/strings/string_util.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/ordered_commit_set.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/util/time.h"
-
-using std::set;
-using std::string;
-using std::vector;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::Entry;
-using syncable::IS_DEL;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::SPECIFICS;
-using syncable::UNIQUE_POSITION;
-
-BuildCommitCommand::BuildCommitCommand(
- syncable::BaseTransaction* trans,
- const sessions::OrderedCommitSet& batch_commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer)
- : trans_(trans),
- batch_commit_set_(batch_commit_set),
- commit_message_(commit_message),
- extensions_activity_buffer_(extensions_activity_buffer) {
-}
-
-BuildCommitCommand::~BuildCommitCommand() {}
-
-void BuildCommitCommand::AddExtensionsActivityToMessage(
- SyncSession* session, sync_pb::CommitMessage* message) {
- // We only send ExtensionsActivity to the server if bookmarks are being
- // committed.
- ExtensionsActivity* activity = session->context()->extensions_activity();
- if (batch_commit_set_.HasBookmarkCommitId()) {
- // This isn't perfect, since the set of extensions activity may not
- // correlate exactly with the items being committed. That's OK as
- // long as we're looking for a rough estimate of extensions activity,
- // not an precise mapping of which commits were triggered by which
- // extension.
- //
- // We will push this list of extensions activity back into the
- // ExtensionsActivityMonitor if this commit fails. That's why we must keep
- // a copy of these records in the session.
- activity->GetAndClearRecords(extensions_activity_buffer_);
-
- const ExtensionsActivity::Records& records =
- *extensions_activity_buffer_;
- for (ExtensionsActivity::Records::const_iterator it =
- records.begin();
- it != records.end(); ++it) {
- sync_pb::ChromiumExtensionsActivity* activity_message =
- message->add_extensions_activity();
- activity_message->set_extension_id(it->second.extension_id);
- activity_message->set_bookmark_writes_since_last_commit(
- it->second.bookmark_write_count);
- }
- }
-}
-
-void BuildCommitCommand::AddClientConfigParamsToMessage(
- SyncSession* session, sync_pb::CommitMessage* message) {
- const ModelSafeRoutingInfo& routing_info = session->context()->routing_info();
- sync_pb::ClientConfigParams* config_params = message->mutable_config_params();
- for (std::map<ModelType, ModelSafeGroup>::const_iterator iter =
- routing_info.begin(); iter != routing_info.end(); ++iter) {
- if (ProxyTypes().Has(iter->first))
- continue;
- int field_number = GetSpecificsFieldNumberFromModelType(iter->first);
- config_params->mutable_enabled_type_ids()->Add(field_number);
- }
- config_params->set_tabs_datatype_enabled(
- routing_info.count(syncer::PROXY_TABS) > 0);
-}
-
-namespace {
-void SetEntrySpecifics(const Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- // Add the new style extension and the folder bit.
- sync_entry->mutable_specifics()->CopyFrom(meta_entry.GetSpecifics());
- sync_entry->set_folder(meta_entry.GetIsDir());
-
- CHECK(!sync_entry->specifics().password().has_client_only_encrypted_data());
- DCHECK_EQ(meta_entry.GetModelType(), GetModelType(*sync_entry));
-}
-} // namespace
-
-SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
- commit_message_->set_share(session->context()->account_name());
- commit_message_->set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
-
- sync_pb::CommitMessage* commit_message = commit_message_->mutable_commit();
- commit_message->set_cache_guid(trans_->directory()->cache_guid());
- AddExtensionsActivityToMessage(session, commit_message);
- AddClientConfigParamsToMessage(session, commit_message);
-
- for (size_t i = 0; i < batch_commit_set_.Size(); i++) {
- int64 handle = batch_commit_set_.GetCommitHandleAt(i);
- sync_pb::SyncEntity* sync_entry = commit_message->add_entries();
-
- Entry meta_entry(trans_, syncable::GET_BY_HANDLE, handle);
- CHECK(meta_entry.good());
-
- DCHECK_NE(0UL,
- session->context()->routing_info().count(
- meta_entry.GetModelType()))
- << "Committing change to datatype that's not actively enabled.";
-
- BuildCommitItem(meta_entry, sync_entry);
- }
-
-
- return SYNCER_OK;
-}
-
-// static.
-void BuildCommitCommand::BuildCommitItem(
- const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- syncable::Id id = meta_entry.GetId();
- sync_entry->set_id_string(SyncableIdToProto(id));
-
- string name = meta_entry.GetNonUniqueName();
- CHECK(!name.empty()); // Make sure this isn't an update.
- // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
- // call is still necessary to handle any title changes that might originate
- // elsewhere, or already be persisted in the directory.
- TruncateUTF8ToByteSize(name, 255, &name);
- sync_entry->set_name(name);
-
- // Set the non_unique_name. If we do, the server ignores
- // the |name| value (using |non_unique_name| instead), and will return
- // in the CommitResponse a unique name if one is generated.
- // We send both because it may aid in logging.
- sync_entry->set_non_unique_name(name);
-
- if (!meta_entry.GetUniqueClientTag().empty()) {
- sync_entry->set_client_defined_unique_tag(
- meta_entry.GetUniqueClientTag());
- }
-
- // Deleted items with server-unknown parent ids can be a problem so we set
- // the parent to 0. (TODO(sync): Still true in protocol?).
- Id new_parent_id;
- if (meta_entry.GetIsDel() &&
- !meta_entry.GetParentId().ServerKnows()) {
- new_parent_id = syncable::BaseTransaction::root_id();
- } else {
- new_parent_id = meta_entry.GetParentId();
- }
- sync_entry->set_parent_id_string(SyncableIdToProto(new_parent_id));
-
- // If our parent has changed, send up the old one so the server
- // can correctly deal with multiple parents.
- // TODO(nick): With the server keeping track of the primary sync parent,
- // it should not be necessary to provide the old_parent_id: the version
- // number should suffice.
- if (new_parent_id != meta_entry.GetServerParentId() &&
- 0 != meta_entry.GetBaseVersion() &&
- syncable::CHANGES_VERSION != meta_entry.GetBaseVersion()) {
- sync_entry->set_old_parent_id(
- SyncableIdToProto(meta_entry.GetServerParentId()));
- }
-
- int64 version = meta_entry.GetBaseVersion();
- if (syncable::CHANGES_VERSION == version || 0 == version) {
- // Undeletions are only supported for items that have a client tag.
- DCHECK(!id.ServerKnows() ||
- !meta_entry.GetUniqueClientTag().empty())
- << meta_entry;
-
- // Version 0 means to create or undelete an object.
- sync_entry->set_version(0);
- } else {
- DCHECK(id.ServerKnows()) << meta_entry;
- sync_entry->set_version(meta_entry.GetBaseVersion());
- }
- sync_entry->set_ctime(TimeToProtoTime(meta_entry.GetCtime()));
- sync_entry->set_mtime(TimeToProtoTime(meta_entry.GetMtime()));
-
- // Deletion is final on the server, let's move things and then delete them.
- if (meta_entry.GetIsDel()) {
- sync_entry->set_deleted(true);
- } else {
- if (meta_entry.GetSpecifics().has_bookmark()) {
- // Both insert_after_item_id and position_in_parent fields are set only
- // for legacy reasons. See comments in sync.proto for more information.
- const Id& prev_id = meta_entry.GetPredecessorId();
- string prev_id_string =
- prev_id.IsRoot() ? string() : prev_id.GetServerId();
- sync_entry->set_insert_after_item_id(prev_id_string);
- sync_entry->set_position_in_parent(
- meta_entry.GetUniquePosition().ToInt64());
- meta_entry.GetUniquePosition().ToProto(
- sync_entry->mutable_unique_position());
- }
- SetEntrySpecifics(meta_entry, sync_entry);
- }
-}
-
-} // namespace syncer
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
deleted file mode 100644
index a47c62afe0..0000000000
--- a/sync/engine/build_commit_command.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
-#define SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/syncer_command.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-
-namespace sessions {
-class OrderedCommitSet;
-}
-
-namespace syncable {
-class Entry;
-class BaseTransaction;
-}
-
-// A class that contains the code used to serialize a set of sync items into a
-// protobuf commit message. This conversion process references the
-// syncable::Directory, which is why it must be called within the same
-// transaction as the GetCommitIdsCommand that fetched the set of items to be
-// committed.
-//
-// See SyncerCommand documentation for more info.
-class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
- public:
- // The batch_commit_set parameter contains a set of references to the items
- // that should be committed.
- //
- // The commit_message parameter is an output parameter which will contain the
- // fully initialized commit message once ExecuteImpl() has been called.
- BuildCommitCommand(
- syncable::BaseTransaction* trans,
- const sessions::OrderedCommitSet& batch_commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer);
- virtual ~BuildCommitCommand();
-
- // SyncerCommand implementation.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- // Helper function that takes a snapshot of |meta_entry| and puts it into a
- // protobuf suitable for use in a commit request message.
- static void BuildCommitItem(const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry);
-
- private:
- FRIEND_TEST_ALL_PREFIXES(BuildCommitCommandTest, InterpolatePosition);
-
- void AddExtensionsActivityToMessage(sessions::SyncSession* session,
- sync_pb::CommitMessage* message);
-
- // Fills the config_params field of |message|.
- void AddClientConfigParamsToMessage(sessions::SyncSession* session,
- sync_pb::CommitMessage* message);
-
- DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
-
- // A pointer to a valid transaction not owned by this class.
- syncable::BaseTransaction* trans_;
-
- // Input parameter; see constructor comment.
- const sessions::OrderedCommitSet& batch_commit_set_;
-
- // Output parameter; see constructor comment.
- sync_pb::ClientToServerMessage* commit_message_;
-
- ExtensionsActivity::Records* extensions_activity_buffer_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
diff --git a/sync/engine/commit.cc b/sync/engine/commit.cc
index 415c608def..c7af4491f1 100644
--- a/sync/engine/commit.cc
+++ b/sync/engine/commit.cc
@@ -5,184 +5,165 @@
#include "sync/engine/commit.h"
#include "base/debug/trace_event.h"
-#include "sync/engine/build_commit_command.h"
-#include "sync/engine/get_commit_ids.h"
-#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/commit_util.h"
+#include "sync/engine/sync_directory_commit_contribution.h"
#include "sync/engine/syncer.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/sessions/sync_session.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_write_transaction.h"
namespace syncer {
-using sessions::SyncSession;
-using sessions::StatusController;
-using syncable::SYNCER;
-using syncable::WriteTransaction;
-
-namespace {
-
-// Sets the SYNCING bits of all items in the commit set to value_to_set.
-void SetAllSyncingBitsToValue(WriteTransaction* trans,
- const sessions::OrderedCommitSet& commit_set,
- bool value_to_set) {
- const std::vector<int64>& commit_handles = commit_set.GetAllCommitHandles();
- for (std::vector<int64>::const_iterator it = commit_handles.begin();
- it != commit_handles.end(); ++it) {
- syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *it);
- if (entry.good()) {
- entry.PutSyncing(value_to_set);
- }
- }
-}
-
-// Sets the SYNCING bits for all items in the OrderedCommitSet.
-void SetSyncingBits(WriteTransaction* trans,
- const sessions::OrderedCommitSet& commit_set) {
- SetAllSyncingBitsToValue(trans, commit_set, true);
+Commit::Commit(
+ const std::map<ModelType, SyncDirectoryCommitContribution*>& contributions,
+ const sync_pb::ClientToServerMessage& message,
+ ExtensionsActivity::Records extensions_activity_buffer)
+ : contributions_(contributions),
+ deleter_(&contributions_),
+ message_(message),
+ extensions_activity_buffer_(extensions_activity_buffer),
+ cleaned_up_(false) {
}
-// Clears the SYNCING bits for all items in the OrderedCommitSet.
-void ClearSyncingBits(syncable::Directory* dir,
- const sessions::OrderedCommitSet& commit_set) {
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- SetAllSyncingBitsToValue(&trans, commit_set, false);
+Commit::~Commit() {
+ DCHECK(cleaned_up_);
}
-// Helper function that finds sync items that are ready to be committed to the
-// server and serializes them into a commit message protobuf. It will return
-// false iff there are no entries ready to be committed at this time.
-//
-// The OrderedCommitSet parameter is an output parameter which will contain
-// the set of all items which are to be committed. The number of items in
-// the set shall not exceed the maximum batch size. (The default batch size
-// is currently 25, though it can be overwritten by the server.)
-//
-// The ClientToServerMessage parameter is an output parameter which will contain
-// the commit message which should be sent to the server. It is valid iff the
-// return value of this function is true.
-bool PrepareCommitMessage(
- sessions::SyncSession* session,
+Commit* Commit::Init(
ModelTypeSet requested_types,
- sessions::OrderedCommitSet* commit_set,
- sync_pb::ClientToServerMessage* commit_message,
- ExtensionsActivity::Records* extensions_activity_buffer) {
- TRACE_EVENT0("sync", "PrepareCommitMessage");
-
- commit_set->Clear();
- commit_message->Clear();
-
- WriteTransaction trans(FROM_HERE, SYNCER, session->context()->directory());
+ size_t max_entries,
+ const std::string& account_name,
+ const std::string& cache_guid,
+ CommitContributorMap* contributor_map,
+ ExtensionsActivity* extensions_activity) {
+ // Gather per-type contributions.
+ ContributionMap contributions;
+ size_t num_entries = 0;
+ for (ModelTypeSet::Iterator it = requested_types.First();
+ it.Good(); it.Inc()) {
+ CommitContributorMap::iterator cm_it = contributor_map->find(it.Get());
+ if (cm_it == contributor_map->end()) {
+ NOTREACHED()
+ << "Could not find requested type " << ModelTypeToString(it.Get())
+ << " in contributor map.";
+ continue;
+ }
+ size_t spaces_remaining = max_entries - num_entries;
+ SyncDirectoryCommitContribution* contribution =
+ cm_it->second->GetContribution(spaces_remaining);
+ if (contribution) {
+ num_entries += contribution->GetNumEntries();
+ contributions.insert(std::make_pair(it.Get(), contribution));
+ }
+ if (num_entries == max_entries) {
+ break; // No point in continuting to iterate in this case.
+ }
+ }
- // Fetch the items to commit.
- const size_t batch_size = session->context()->max_commit_batch_size();
- GetCommitIds(&trans, requested_types, batch_size, commit_set);
+ // Give up if no one had anything to commit.
+ if (contributions.empty())
+ return NULL;
+
+ sync_pb::ClientToServerMessage message;
+ message.set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
+ message.set_share(account_name);
+
+ sync_pb::CommitMessage* commit_message = message.mutable_commit();
+ commit_message->set_cache_guid(cache_guid);
+
+ // Set extensions activity if bookmark commits are present.
+ ExtensionsActivity::Records extensions_activity_buffer;
+ ContributionMap::iterator it = contributions.find(syncer::BOOKMARKS);
+ if (it != contributions.end() && it->second->GetNumEntries() != 0) {
+ commit_util::AddExtensionsActivityToMessage(
+ extensions_activity,
+ &extensions_activity_buffer,
+ commit_message);
+ }
- DVLOG(1) << "Commit message will contain " << commit_set->Size() << " items.";
- if (commit_set->Empty()) {
- return false;
+ // Set the client config params.
+ ModelTypeSet enabled_types;
+ for (CommitContributorMap::iterator it = contributor_map->begin();
+ it != contributor_map->end(); ++it) {
+ enabled_types.Put(it->first);
}
+ commit_util::AddClientConfigParamsToMessage(enabled_types,
+ commit_message);
- // Serialize the message.
- BuildCommitCommand build_commit_command(&trans,
- *commit_set,
- commit_message,
- extensions_activity_buffer);
- build_commit_command.Execute(session);
+ // Finally, serialize all our contributions.
+ for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
+ contributions.begin(); it != contributions.end(); ++it) {
+ it->second->AddToCommitMessage(&message);
+ }
- SetSyncingBits(&trans, *commit_set);
- return true;
+ // If we made it this far, then we've successfully prepared a commit message.
+ return new Commit(contributions, message, extensions_activity_buffer);
}
-SyncerError BuildAndPostCommitsImpl(ModelTypeSet requested_types,
- Syncer* syncer,
- sessions::SyncSession* session,
- sessions::OrderedCommitSet* commit_set) {
- ModelTypeSet commit_request_types;
- while (!syncer->ExitRequested()) {
- sync_pb::ClientToServerMessage commit_message;
- ExtensionsActivity::Records extensions_activity_buffer;
-
- if (!PrepareCommitMessage(session,
- requested_types,
- commit_set,
- &commit_message,
- &extensions_activity_buffer)) {
- break;
- }
-
- commit_request_types.PutAll(commit_set->Types());
- session->mutable_status_controller()->set_commit_request_types(
- commit_request_types);
-
- sync_pb::ClientToServerResponse commit_response;
-
- DVLOG(1) << "Sending commit message.";
- TRACE_EVENT_BEGIN0("sync", "PostCommit");
- const SyncerError post_result = SyncerProtoUtil::PostClientToServerMessage(
- &commit_message, &commit_response, session);
- TRACE_EVENT_END0("sync", "PostCommit");
+SyncerError Commit::PostAndProcessResponse(
+ sessions::SyncSession* session,
+ sessions::StatusController* status,
+ ExtensionsActivity* extensions_activity) {
+ ModelTypeSet request_types;
+ for (ContributionMap::const_iterator it = contributions_.begin();
+ it != contributions_.end(); ++it) {
+ request_types.Put(it->first);
+ }
+ session->mutable_status_controller()->set_commit_request_types(request_types);
- // TODO(rlarocque): Put all the post-commit logic in one place.
- // See crbug.com/196338.
+ DVLOG(1) << "Sending commit message.";
+ TRACE_EVENT_BEGIN0("sync", "PostCommit");
+ const SyncerError post_result = SyncerProtoUtil::PostClientToServerMessage(
+ &message_, &response_, session);
+ TRACE_EVENT_END0("sync", "PostCommit");
- if (post_result != SYNCER_OK) {
- LOG(WARNING) << "Post commit failed";
- return post_result;
- }
+ if (post_result != SYNCER_OK) {
+ LOG(WARNING) << "Post commit failed";
+ return post_result;
+ }
- if (!commit_response.has_commit()) {
- LOG(WARNING) << "Commit response has no commit body!";
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
+ if (!response_.has_commit()) {
+ LOG(WARNING) << "Commit response has no commit body!";
+ return SERVER_RESPONSE_VALIDATION_FAILED;
+ }
- const size_t num_responses = commit_response.commit().entryresponse_size();
- if (num_responses != commit_set->Size()) {
- LOG(ERROR)
- << "Commit response has wrong number of entries! "
- << "Expected: " << commit_set->Size() << ", "
- << "Got: " << num_responses;
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
+ size_t message_entries = message_.commit().entries_size();
+ size_t response_entries = response_.commit().entryresponse_size();
+ if (message_entries != response_entries) {
+ LOG(ERROR)
+ << "Commit response has wrong number of entries! "
+ << "Expected: " << message_entries << ", "
+ << "Got: " << response_entries;
+ return SERVER_RESPONSE_VALIDATION_FAILED;
+ }
- TRACE_EVENT_BEGIN0("sync", "ProcessCommitResponse");
- ProcessCommitResponseCommand process_response_command(
- *commit_set, commit_message, commit_response);
- const SyncerError processing_result =
- process_response_command.Execute(session);
- TRACE_EVENT_END0("sync", "ProcessCommitResponse");
-
- // If the commit failed, return the data to the ExtensionsActivityMonitor.
- if (session->status_controller().
- model_neutral_state().num_successful_bookmark_commits == 0) {
- ExtensionsActivity* extensions_activity =
- session->context()->extensions_activity();
- extensions_activity->PutRecords(extensions_activity_buffer);
+ // Let the contributors process the responses to each of their requests.
+ SyncerError processing_result = SYNCER_OK;
+ for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
+ contributions_.begin(); it != contributions_.end(); ++it) {
+ TRACE_EVENT1("sync", "ProcessCommitResponse",
+ "type", ModelTypeToString(it->first));
+ SyncerError type_result =
+ it->second->ProcessCommitResponse(response_, status);
+ if (processing_result == SYNCER_OK && type_result != SYNCER_OK) {
+ processing_result = type_result;
}
+ }
- if (processing_result != SYNCER_OK) {
- return processing_result;
- }
- session->SendEventNotification(SyncEngineEvent::STATUS_CHANGED);
+ // Handle bookmarks' special extensions activity stats.
+ if (session->status_controller().
+ model_neutral_state().num_successful_bookmark_commits == 0) {
+ extensions_activity->PutRecords(extensions_activity_buffer_);
}
- return SYNCER_OK;
+ return processing_result;
}
-} // namespace
-
-
-SyncerError BuildAndPostCommits(ModelTypeSet requested_types,
- Syncer* syncer,
- sessions::SyncSession* session) {
- sessions::OrderedCommitSet commit_set(session->context()->routing_info());
- SyncerError result =
- BuildAndPostCommitsImpl(requested_types, syncer, session, &commit_set);
- if (result != SYNCER_OK) {
- ClearSyncingBits(session->context()->directory(), commit_set);
+void Commit::CleanUp() {
+ for (ContributionMap::iterator it = contributions_.begin();
+ it != contributions_.end(); ++it) {
+ it->second->CleanUp();
}
- return result;
+ cleaned_up_ = true;
}
} // namespace syncer
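The Init() function in the new commit.cc above fills a single commit message by walking the requested types and asking each type's contributor for at most the space still left in the batch. A minimal sketch of that budgeting loop in isolation, using hypothetical stand-in types rather than the real SyncDirectoryCommitContributor:

#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>

// Stand-in for a per-type contributor; the real code asks a
// SyncDirectoryCommitContributor for a contribution instead.
struct FakeContributor {
  std::vector<int> pending;  // Handles of items waiting to be committed.

  // Moves at most |max_entries| pending handles into |out|, mirroring
  // GetContribution(spaces_remaining) above.
  void Contribute(size_t max_entries, std::vector<int>* out) {
    size_t count = std::min(max_entries, pending.size());
    out->insert(out->end(), pending.begin(), pending.begin() + count);
    pending.erase(pending.begin(), pending.begin() + count);
  }
};

// Mirrors the budgeting in Commit::Init(): each type may use only the space
// left over by earlier types, and gathering stops once the batch is full.
std::vector<int> GatherContributions(std::map<int, FakeContributor>* by_type,
                                     size_t max_entries) {
  std::vector<int> batch;
  for (std::map<int, FakeContributor>::iterator it = by_type->begin();
       it != by_type->end() && batch.size() < max_entries; ++it) {
    it->second.Contribute(max_entries - batch.size(), &batch);
  }
  return batch;
}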
diff --git a/sync/engine/commit.h b/sync/engine/commit.h
index 168d950c27..4750971bc7 100644
--- a/sync/engine/commit.h
+++ b/sync/engine/commit.h
@@ -5,33 +5,76 @@
#ifndef SYNC_ENGINE_COMMIT_H_
#define SYNC_ENGINE_COMMIT_H_
+#include <map>
+
+#include "base/stl_util.h"
+#include "sync/base/sync_export.h"
+#include "sync/engine/sync_directory_commit_contribution.h"
+#include "sync/engine/sync_directory_commit_contributor.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/util/syncer_error.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/extensions_activity.h"
namespace syncer {
namespace sessions {
+class StatusController;
class SyncSession;
}
class Syncer;
-// This function will commit batches of unsynced items to the server until the
-// number of unsynced and ready to commit items reaches zero or an error is
-// encountered. A request to exit early will be treated as an error and will
-// abort any blocking operations.
-//
-// The Syncer parameter is provided only for access to its ExitRequested()
-// method. This is technically unnecessary since an early exit request should
-// be detected as we attempt to contact the sync server.
+// This class wraps the actions related to building and executing a single
+// commit operation.
//
-// The SyncSession parameter contains pointers to various bits of state,
-// including the syncable::Directory that contains all sync items and the
-// ServerConnectionManager used to contact the server.
-SyncerError BuildAndPostCommits(
- ModelTypeSet request_types,
- Syncer* syncer,
- sessions::SyncSession* session);
+// This class' most important responsibility is to manage the ContributionsMap.
+// This class serves as a container for those objects. Although it would have
+// been acceptable to let this class be a dumb container object, it turns out
+// that there was no other convenient place to put the Init() and
+// PostAndProcessCommitResponse() functions. So they ended up here.
+class SYNC_EXPORT_PRIVATE Commit {
+ public:
+ Commit(
+ const std::map<ModelType, SyncDirectoryCommitContribution*>&
+ contributions,
+ const sync_pb::ClientToServerMessage& message,
+ ExtensionsActivity::Records extensions_activity_buffer);
+
+ // This destructor will DCHECK if CleanUp() has not been called.
+ ~Commit();
+
+ static Commit* Init(
+ ModelTypeSet requested_types,
+ size_t max_entries,
+ const std::string& account_name,
+ const std::string& cache_guid,
+ CommitContributorMap* contributor_map,
+ ExtensionsActivity* extensions_activity);
+
+ SyncerError PostAndProcessResponse(
+ sessions::SyncSession* session,
+ sessions::StatusController* status,
+ ExtensionsActivity* extensions_activity);
+
+ // Cleans up state associated with this commit. Must be called before the
+ // destructor.
+ void CleanUp();
+
+ private:
+ typedef std::map<ModelType, SyncDirectoryCommitContribution*> ContributionMap;
+
+ ContributionMap contributions_;
+ STLValueDeleter<ContributionMap> deleter_;
+
+ sync_pb::ClientToServerMessage message_;
+ sync_pb::ClientToServerResponse response_;
+ ExtensionsActivity::Records extensions_activity_buffer_;
+
+ // Debug only flag used to indicate if it's safe to destruct the object.
+ bool cleaned_up_;
+};
} // namespace syncer
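Taken together with commit.cc above, the expected call pattern is Init(), then PostAndProcessResponse(), then CleanUp() before destruction (the destructor DCHECKs that CleanUp() ran). A rough usage sketch from a hypothetical caller: the commit_contributor_map() accessor is assumed for this sketch, while max_commit_batch_size(), account_name(), directory(), extensions_activity(), and mutable_status_controller() all appear elsewhere in this patch.

#include "base/memory/scoped_ptr.h"
#include "sync/engine/commit.h"
#include "sync/sessions/sync_session.h"

namespace syncer {

SyncerError BuildAndPostOneCommit(ModelTypeSet requested_types,
                                  sessions::SyncSession* session) {
  sessions::SyncSessionContext* context = session->context();
  scoped_ptr<Commit> commit(Commit::Init(
      requested_types,
      context->max_commit_batch_size(),
      context->account_name(),
      context->directory()->cache_guid(),
      context->commit_contributor_map(),  // Assumed accessor for this sketch.
      context->extensions_activity()));
  if (!commit)
    return SYNCER_OK;  // Nothing was ready to commit.

  SyncerError result = commit->PostAndProcessResponse(
      session,
      session->mutable_status_controller(),
      context->extensions_activity());
  commit->CleanUp();  // Required before the destructor runs.
  return result;
}

}  // namespace syncer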
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/commit_util.cc
index 2c38e98615..701e33532b 100644
--- a/sync/engine/process_commit_response_command.cc
+++ b/sync/engine/commit_util.cc
@@ -1,162 +1,187 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/commit_util.h"
-#include <cstddef>
+#include <limits>
#include <set>
#include <string>
#include <vector>
-#include "base/basictypes.h"
-#include "base/location.h"
+#include "base/strings/string_util.h"
#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncer_util.h"
#include "sync/internal_api/public/base/unique_position.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
#include "sync/sessions/sync_session.h"
+#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_base_transaction.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
+#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/time.h"
using std::set;
using std::string;
using std::vector;
-using sync_pb::CommitResponse;
namespace syncer {
-using sessions::OrderedCommitSet;
-using sessions::StatusController;
using sessions::SyncSession;
-using syncable::WriteTransaction;
-using syncable::MutableEntry;
using syncable::Entry;
-using syncable::BASE_VERSION;
-using syncable::GET_BY_ID;
-using syncable::GET_BY_HANDLE;
-using syncable::ID;
using syncable::IS_DEL;
-using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
-using syncable::PARENT_ID;
-using syncable::SERVER_IS_DEL;
-using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_VERSION;
-using syncable::SYNCER;
-using syncable::SYNCING;
-
-ProcessCommitResponseCommand::ProcessCommitResponseCommand(
- const sessions::OrderedCommitSet& commit_set,
- const sync_pb::ClientToServerMessage& commit_message,
- const sync_pb::ClientToServerResponse& commit_response)
- : commit_set_(commit_set),
- commit_message_(commit_message),
- commit_response_(commit_response) {
+using syncable::Id;
+using syncable::SPECIFICS;
+using syncable::UNIQUE_POSITION;
+
+namespace commit_util {
+
+void AddExtensionsActivityToMessage(
+ ExtensionsActivity* activity,
+ ExtensionsActivity::Records* extensions_activity_buffer,
+ sync_pb::CommitMessage* message) {
+ // This isn't perfect, since the set of extensions activity may not correlate
+ // exactly with the items being committed. That's OK as long as we're looking
+ // for a rough estimate of extensions activity, not an precise mapping of
+ // which commits were triggered by which extension.
+ //
+ // We will push this list of extensions activity back into the
+ // ExtensionsActivityMonitor if this commit fails. That's why we must keep a
+ // copy of these records in the session.
+ activity->GetAndClearRecords(extensions_activity_buffer);
+
+ const ExtensionsActivity::Records& records = *extensions_activity_buffer;
+ for (ExtensionsActivity::Records::const_iterator it =
+ records.begin();
+ it != records.end(); ++it) {
+ sync_pb::ChromiumExtensionsActivity* activity_message =
+ message->add_extensions_activity();
+ activity_message->set_extension_id(it->second.extension_id);
+ activity_message->set_bookmark_writes_since_last_commit(
+ it->second.bookmark_write_count);
+ }
}
-ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
+void AddClientConfigParamsToMessage(
+ ModelTypeSet enabled_types,
+ sync_pb::CommitMessage* message) {
+ sync_pb::ClientConfigParams* config_params = message->mutable_config_params();
+ for (ModelTypeSet::Iterator it = enabled_types.First(); it.Good(); it.Inc()) {
+ if (ProxyTypes().Has(it.Get()))
+ continue;
+ int field_number = GetSpecificsFieldNumberFromModelType(it.Get());
+ config_params->mutable_enabled_type_ids()->Add(field_number);
+ }
+ config_params->set_tabs_datatype_enabled(
+ enabled_types.Has(syncer::PROXY_TABS));
+}
-std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
- const sessions::SyncSession& session) const {
- std::set<ModelSafeGroup> groups_with_commits;
+namespace {
+void SetEntrySpecifics(const Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry) {
+ // Add the new style extension and the folder bit.
+ sync_entry->mutable_specifics()->CopyFrom(meta_entry.GetSpecifics());
+ sync_entry->set_folder(meta_entry.GetIsDir());
- syncable::Directory* dir = session.context()->directory();
- syncable::ReadTransaction trans(FROM_HERE, dir);
- for (size_t i = 0; i < commit_set_.Size(); ++i) {
- groups_with_commits.insert(
- GetGroupForModelType(commit_set_.GetModelTypeAt(i),
- session.context()->routing_info()));
+ CHECK(!sync_entry->specifics().password().has_client_only_encrypted_data());
+ DCHECK_EQ(meta_entry.GetModelType(), GetModelType(*sync_entry));
+}
+} // namespace
+
+void BuildCommitItem(
+ const syncable::Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry) {
+ syncable::Id id = meta_entry.GetId();
+ sync_entry->set_id_string(SyncableIdToProto(id));
+
+ string name = meta_entry.GetNonUniqueName();
+ CHECK(!name.empty()); // Make sure this isn't an update.
+ // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
+ // call is still necessary to handle any title changes that might originate
+ // elsewhere, or already be persisted in the directory.
+ TruncateUTF8ToByteSize(name, 255, &name);
+ sync_entry->set_name(name);
+
+ // Set the non_unique_name. If we do, the server ignores
+ // the |name| value (using |non_unique_name| instead), and will return
+ // in the CommitResponse a unique name if one is generated.
+ // We send both because it may aid in logging.
+ sync_entry->set_non_unique_name(name);
+
+ if (!meta_entry.GetUniqueClientTag().empty()) {
+ sync_entry->set_client_defined_unique_tag(
+ meta_entry.GetUniqueClientTag());
}
- return groups_with_commits;
-}
+ // Deleted items with server-unknown parent ids can be a problem so we set
+ // the parent to 0. (TODO(sync): Still true in protocol?).
+ Id new_parent_id;
+ if (meta_entry.GetIsDel() &&
+ !meta_entry.GetParentId().ServerKnows()) {
+ new_parent_id = syncable::BaseTransaction::root_id();
+ } else {
+ new_parent_id = meta_entry.GetParentId();
+ }
+ sync_entry->set_parent_id_string(SyncableIdToProto(new_parent_id));
+
+ // If our parent has changed, send up the old one so the server
+ // can correctly deal with multiple parents.
+ // TODO(nick): With the server keeping track of the primary sync parent,
+ // it should not be necessary to provide the old_parent_id: the version
+ // number should suffice.
+ if (new_parent_id != meta_entry.GetServerParentId() &&
+ 0 != meta_entry.GetBaseVersion() &&
+ syncable::CHANGES_VERSION != meta_entry.GetBaseVersion()) {
+ sync_entry->set_old_parent_id(
+ SyncableIdToProto(meta_entry.GetServerParentId()));
+ }
+ int64 version = meta_entry.GetBaseVersion();
+ if (syncable::CHANGES_VERSION == version || 0 == version) {
+ // Undeletions are only supported for items that have a client tag.
+ DCHECK(!id.ServerKnows() ||
+ !meta_entry.GetUniqueClientTag().empty())
+ << meta_entry;
-SyncerError ProcessCommitResponseCommand::ModelChangingExecuteImpl(
- SyncSession* session) {
- syncable::Directory* dir = session->context()->directory();
- StatusController* status = session->mutable_status_controller();
- const CommitResponse& cr = commit_response_.commit();
- const sync_pb::CommitMessage& commit_message = commit_message_.commit();
-
- int transient_error_commits = 0;
- int conflicting_commits = 0;
- int error_commits = 0;
- int successes = 0;
-
- set<syncable::Id> deleted_folders;
- OrderedCommitSet::Projection proj = status->commit_id_projection(
- commit_set_);
-
- if (!proj.empty()) { // Scope for WriteTransaction.
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- for (size_t i = 0; i < proj.size(); i++) {
- CommitResponse::ResponseType response_type = ProcessSingleCommitResponse(
- &trans,
- cr.entryresponse(proj[i]),
- commit_message.entries(proj[i]),
- commit_set_.GetCommitHandleAt(proj[i]),
- &deleted_folders);
- switch (response_type) {
- case CommitResponse::INVALID_MESSAGE:
- ++error_commits;
- break;
- case CommitResponse::CONFLICT:
- ++conflicting_commits;
- status->increment_num_server_conflicts();
- break;
- case CommitResponse::SUCCESS:
- // TODO(sync): worry about sync_rate_ rate calc?
- ++successes;
- if (commit_set_.GetModelTypeAt(proj[i]) == BOOKMARKS)
- status->increment_num_successful_bookmark_commits();
- status->increment_num_successful_commits();
- break;
- case CommitResponse::OVER_QUOTA:
- // We handle over quota like a retry, which is same as transient.
- case CommitResponse::RETRY:
- case CommitResponse::TRANSIENT_ERROR:
- ++transient_error_commits;
- break;
- default:
- LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
- }
- }
+ // Version 0 means to create or undelete an object.
+ sync_entry->set_version(0);
+ } else {
+ DCHECK(id.ServerKnows()) << meta_entry;
+ sync_entry->set_version(meta_entry.GetBaseVersion());
}
+ sync_entry->set_ctime(TimeToProtoTime(meta_entry.GetCtime()));
+ sync_entry->set_mtime(TimeToProtoTime(meta_entry.GetMtime()));
- MarkDeletedChildrenSynced(dir, &deleted_folders);
-
- int commit_count = static_cast<int>(proj.size());
- if (commit_count == successes) {
- return SYNCER_OK;
- } else if (error_commits > 0) {
- return SERVER_RETURN_UNKNOWN_ERROR;
- } else if (transient_error_commits > 0) {
- return SERVER_RETURN_TRANSIENT_ERROR;
- } else if (conflicting_commits > 0) {
- // This means that the server already has an item with this version, but
- // we haven't seen that update yet.
- //
- // A well-behaved client should respond to this by proceeding to the
- // download updates phase, fetching the conflicting items, then attempting
- // to resolve the conflict. That's not what this client does.
- //
- // We don't currently have any code to support that exceptional control
- // flow. Instead, we abort the current sync cycle and start a new one. The
- // end result is the same.
- return SERVER_RETURN_CONFLICT;
+ // Deletion is final on the server, let's move things and then delete them.
+ if (meta_entry.GetIsDel()) {
+ sync_entry->set_deleted(true);
} else {
- LOG(FATAL) << "Inconsistent counts when processing commit response";
- return SYNCER_OK;
+ if (meta_entry.GetSpecifics().has_bookmark()) {
+ // Both insert_after_item_id and position_in_parent fields are set only
+ // for legacy reasons. See comments in sync.proto for more information.
+ const Id& prev_id = meta_entry.GetPredecessorId();
+ string prev_id_string =
+ prev_id.IsRoot() ? string() : prev_id.GetServerId();
+ sync_entry->set_insert_after_item_id(prev_id_string);
+ sync_entry->set_position_in_parent(
+ meta_entry.GetUniquePosition().ToInt64());
+ meta_entry.GetUniquePosition().ToProto(
+ sync_entry->mutable_unique_position());
+ }
+ SetEntrySpecifics(meta_entry, sync_entry);
}
}
+
+// Helpers for ProcessSingleCommitResponse.
+namespace {
+
void LogServerError(const sync_pb::CommitResponse_EntryResponse& res) {
if (res.has_error_message())
LOG(WARNING) << " " << res.error_message();
@@ -164,79 +189,7 @@ void LogServerError(const sync_pb::CommitResponse_EntryResponse& res) {
LOG(WARNING) << " No detailed error message returned from server";
}
-CommitResponse::ResponseType
-ProcessCommitResponseCommand::ProcessSingleCommitResponse(
- syncable::WriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& server_entry,
- const sync_pb::SyncEntity& commit_request_entry,
- const int64 metahandle,
- set<syncable::Id>* deleted_folders) {
- MutableEntry local_entry(trans, GET_BY_HANDLE, metahandle);
- CHECK(local_entry.good());
- bool syncing_was_set = local_entry.GetSyncing();
- local_entry.PutSyncing(false);
-
- CommitResponse::ResponseType response = (CommitResponse::ResponseType)
- server_entry.response_type();
- if (!CommitResponse::ResponseType_IsValid(response)) {
- LOG(ERROR) << "Commit response has unknown response type! Possibly out "
- "of date client?";
- return CommitResponse::INVALID_MESSAGE;
- }
- if (CommitResponse::TRANSIENT_ERROR == response) {
- DVLOG(1) << "Transient Error Committing: " << local_entry;
- LogServerError(server_entry);
- return CommitResponse::TRANSIENT_ERROR;
- }
- if (CommitResponse::INVALID_MESSAGE == response) {
- LOG(ERROR) << "Error Commiting: " << local_entry;
- LogServerError(server_entry);
- return response;
- }
- if (CommitResponse::CONFLICT == response) {
- DVLOG(1) << "Conflict Committing: " << local_entry;
- return response;
- }
- if (CommitResponse::RETRY == response) {
- DVLOG(1) << "Retry Committing: " << local_entry;
- return response;
- }
- if (CommitResponse::OVER_QUOTA == response) {
- LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
- return response;
- }
- if (!server_entry.has_id_string()) {
- LOG(ERROR) << "Commit response has no id";
- return CommitResponse::INVALID_MESSAGE;
- }
-
- // Implied by the IsValid call above, but here for clarity.
- DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
- // Check to see if we've been given the ID of an existing entry. If so treat
- // it as an error response and retry later.
- const syncable::Id& server_entry_id =
- SyncableIdFromProto(server_entry.id_string());
- if (local_entry.GetId() != server_entry_id) {
- Entry e(trans, GET_BY_ID, server_entry_id);
- if (e.good()) {
- LOG(ERROR)
- << "Got duplicate id when commiting id: "
- << local_entry.GetId()
- << ". Treating as an error return";
- return CommitResponse::INVALID_MESSAGE;
- }
- }
-
- if (server_entry.version() == 0) {
- LOG(WARNING) << "Server returned a zero version on a commit response.";
- }
-
- ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
- local_entry.GetId(), &local_entry, syncing_was_set, deleted_folders);
- return response;
-}
-
-const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
+const string& GetResultingPostCommitName(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response) {
const string& response_name =
@@ -246,11 +199,11 @@ const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
return SyncerProtoUtil::NameFromSyncEntity(committed_entry);
}
-bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
+bool UpdateVersionAfterCommit(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
int64 old_version = local_entry->GetBaseVersion();
int64 new_version = entry_response.version();
bool bad_commit_version = false;
@@ -283,11 +236,11 @@ bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
return true;
}
-bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
+bool ChangeIdAfterCommit(
const sync_pb::CommitResponse_EntryResponse& entry_response,
const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry) {
- syncable::WriteTransaction* trans = local_entry->write_transaction();
+ syncable::ModelNeutralMutableEntry* local_entry) {
+ syncable::BaseWriteTransaction* trans = local_entry->base_write_transaction();
const syncable::Id& entry_response_id =
SyncableIdFromProto(entry_response.id_string());
if (entry_response_id != pre_commit_id) {
@@ -297,7 +250,10 @@ bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
DVLOG(1) << " ID changed while committing an old entry. "
<< pre_commit_id << " became " << entry_response_id << ".";
}
- MutableEntry same_id(trans, GET_BY_ID, entry_response_id);
+ syncable::ModelNeutralMutableEntry same_id(
+ trans,
+ syncable::GET_BY_ID,
+ entry_response_id);
// We should trap this before this function.
if (same_id.good()) {
LOG(ERROR) << "ID clash with id " << entry_response_id
@@ -310,10 +266,10 @@ bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
return true;
}
-void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
+void UpdateServerFieldsAfterCommit(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
- syncable::MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
// We just committed an entry successfully, and now we want to make our view
// of the server state consistent with the server state. We must be careful;
@@ -361,10 +317,11 @@ void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
}
}
-void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+void ProcessSuccessfulCommitResponse(
const sync_pb::SyncEntity& committed_entry,
const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+ const syncable::Id& pre_commit_id,
+ syncable::ModelNeutralMutableEntry* local_entry,
bool syncing_was_set, set<syncable::Id>* deleted_folders) {
DCHECK(local_entry->GetIsUnsynced());
@@ -402,4 +359,82 @@ void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
}
}
+} // namespace
+
+sync_pb::CommitResponse::ResponseType
+ProcessSingleCommitResponse(
+ syncable::BaseWriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& server_entry,
+ const sync_pb::SyncEntity& commit_request_entry,
+ int64 metahandle,
+ set<syncable::Id>* deleted_folders) {
+ syncable::ModelNeutralMutableEntry local_entry(
+ trans,
+ syncable::GET_BY_HANDLE,
+ metahandle);
+ CHECK(local_entry.good());
+ bool syncing_was_set = local_entry.GetSyncing();
+ local_entry.PutSyncing(false);
+
+ sync_pb::CommitResponse::ResponseType response = server_entry.response_type();
+ if (!sync_pb::CommitResponse::ResponseType_IsValid(response)) {
+ LOG(ERROR) << "Commit response has unknown response type! Possibly out "
+ "of date client?";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+ if (sync_pb::CommitResponse::TRANSIENT_ERROR == response) {
+ DVLOG(1) << "Transient Error Committing: " << local_entry;
+ LogServerError(server_entry);
+ return sync_pb::CommitResponse::TRANSIENT_ERROR;
+ }
+ if (sync_pb::CommitResponse::INVALID_MESSAGE == response) {
+ LOG(ERROR) << "Error Commiting: " << local_entry;
+ LogServerError(server_entry);
+ return response;
+ }
+ if (sync_pb::CommitResponse::CONFLICT == response) {
+ DVLOG(1) << "Conflict Committing: " << local_entry;
+ return response;
+ }
+ if (sync_pb::CommitResponse::RETRY == response) {
+ DVLOG(1) << "Retry Committing: " << local_entry;
+ return response;
+ }
+ if (sync_pb::CommitResponse::OVER_QUOTA == response) {
+ LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
+ return response;
+ }
+ if (!server_entry.has_id_string()) {
+ LOG(ERROR) << "Commit response has no id";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+
+ // Implied by the IsValid call above, but here for clarity.
+ DCHECK_EQ(sync_pb::CommitResponse::SUCCESS, response) << response;
+ // Check to see if we've been given the ID of an existing entry. If so treat
+ // it as an error response and retry later.
+ const syncable::Id& server_entry_id =
+ SyncableIdFromProto(server_entry.id_string());
+ if (local_entry.GetId() != server_entry_id) {
+ Entry e(trans, syncable::GET_BY_ID, server_entry_id);
+ if (e.good()) {
+ LOG(ERROR)
+ << "Got duplicate id when commiting id: "
+ << local_entry.GetId()
+ << ". Treating as an error return";
+ return sync_pb::CommitResponse::INVALID_MESSAGE;
+ }
+ }
+
+ if (server_entry.version() == 0) {
+ LOG(WARNING) << "Server returned a zero version on a commit response.";
+ }
+
+ ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
+ local_entry.GetId(), &local_entry, syncing_was_set, deleted_folders);
+ return response;
+}
+
+} // namespace commit_util
+
} // namespace syncer
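With ProcessCommitResponseCommand gone, the aggregation of per-entry results into a single SyncerError (which the removed ModelChangingExecuteImpl() did with its success, conflict, transient-error, and error counters) has to happen wherever ProcessSingleCommitResponse() is now called. A compact sketch of that aggregation, following the same precedence as the removed code; the function name is hypothetical:

#include <cstddef>
#include <vector>

#include "sync/internal_api/public/util/syncer_error.h"
#include "sync/protocol/sync.pb.h"

namespace syncer {

// Collapses per-entry response types into one SyncerError using the same
// precedence as the removed code: any INVALID_MESSAGE wins, then transient
// results (RETRY, the deprecated OVER_QUOTA, TRANSIENT_ERROR), then
// conflicts; only an all-SUCCESS list maps to SYNCER_OK.
SyncerError AggregateCommitResponses(
    const std::vector<sync_pb::CommitResponse::ResponseType>& responses) {
  int errors = 0, transient_errors = 0, conflicts = 0;
  for (size_t i = 0; i < responses.size(); ++i) {
    switch (responses[i]) {
      case sync_pb::CommitResponse::SUCCESS:
        break;
      case sync_pb::CommitResponse::CONFLICT:
        ++conflicts;
        break;
      case sync_pb::CommitResponse::OVER_QUOTA:
      case sync_pb::CommitResponse::RETRY:
      case sync_pb::CommitResponse::TRANSIENT_ERROR:
        ++transient_errors;
        break;
      default:  // INVALID_MESSAGE or anything unrecognized.
        ++errors;
        break;
    }
  }
  if (errors > 0)
    return SERVER_RETURN_UNKNOWN_ERROR;
  if (transient_errors > 0)
    return SERVER_RETURN_TRANSIENT_ERROR;
  if (conflicts > 0)
    return SERVER_RETURN_CONFLICT;
  return SYNCER_OK;
}

}  // namespace syncer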
diff --git a/sync/engine/commit_util.h b/sync/engine/commit_util.h
new file mode 100644
index 0000000000..387bdcf95e
--- /dev/null
+++ b/sync/engine/commit_util.h
@@ -0,0 +1,64 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
+#define SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/extensions_activity.h"
+
+namespace sync_pb {
+class CommitMessage;
+class SyncEntity;
+}
+
+namespace syncer {
+
+namespace syncable {
+class BaseTransaction;
+class Entry;
+class Id;
+class BaseWriteTransaction;
+}
+
+namespace commit_util {
+
+// Adds bookmark extensions activity report to |message|.
+SYNC_EXPORT_PRIVATE void AddExtensionsActivityToMessage(
+ ExtensionsActivity* activity,
+ ExtensionsActivity::Records* extensions_activity_buffer,
+ sync_pb::CommitMessage* message);
+
+// Fills the config_params field of |message|.
+SYNC_EXPORT_PRIVATE void AddClientConfigParamsToMessage(
+ ModelTypeSet enabled_types,
+ sync_pb::CommitMessage* message);
+
+// Takes a snapshot of |meta_entry| and puts it into a protobuf suitable for use
+// in a commit request message.
+SYNC_EXPORT_PRIVATE void BuildCommitItem(
+ const syncable::Entry& meta_entry,
+ sync_pb::SyncEntity* sync_entry);
+
+// Process a single commit response. Updates the entry's SERVER fields using
+// |pb_commit_response| and |pb_committed_entry|.
+//
+// The |deleted_folders| parameter is a set of IDs that represent deleted
+// folders. This function will add its entry's ID to this set if it finds
+// itself processing a folder deletion.
+SYNC_EXPORT_PRIVATE
+sync_pb::CommitResponse::ResponseType ProcessSingleCommitResponse(
+ syncable::BaseWriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& server_entry,
+ const sync_pb::SyncEntity& commit_request_entry,
+ int64 metahandle,
+ std::set<syncable::Id>* deleted_folders);
+
+} // namespace commit_util
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_COMMIT_UTIL_H_
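
For orientation, a caller-side sketch of how these commit_util helpers fit together. It closely mirrors SyncDirectoryCommitContribution and Commit later in this patch; the |dir|, |enabled_types|, |activity|, |activity_buffer|, |metahandles| and |response| values are assumed inputs rather than part of this change:

  // Illustrative only; not part of the patch.
  syncable::ModelNeutralWriteTransaction trans(FROM_HERE, syncable::SYNCER,
                                               dir);
  sync_pb::ClientToServerMessage message;
  message.set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
  sync_pb::CommitMessage* commit_message = message.mutable_commit();

  commit_util::AddClientConfigParamsToMessage(enabled_types, commit_message);
  commit_util::AddExtensionsActivityToMessage(
      activity, activity_buffer, commit_message);

  for (size_t i = 0; i < metahandles.size(); ++i) {
    syncable::ModelNeutralMutableEntry entry(
        &trans, syncable::GET_BY_HANDLE, metahandles[i]);
    commit_util::BuildCommitItem(entry, commit_message->add_entries());
    entry.PutSyncing(true);
  }

  // After posting |message| and receiving |response| from the server:
  std::set<syncable::Id> deleted_folders;
  for (size_t i = 0; i < metahandles.size(); ++i) {
    sync_pb::CommitResponse::ResponseType result =
        commit_util::ProcessSingleCommitResponse(
            &trans,
            response.commit().entryresponse(i),
            commit_message->entries(i),
            metahandles[i],
            &deleted_folders);
    // Tally |result| as SUCCESS, CONFLICT, TRANSIENT_ERROR, etc.
  }
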
diff --git a/sync/engine/get_commit_ids.cc b/sync/engine/get_commit_ids.cc
index cc437ce7d2..3a87383d1b 100644
--- a/sync/engine/get_commit_ids.cc
+++ b/sync/engine/get_commit_ids.cc
@@ -506,25 +506,4 @@ void OrderCommitIds(
} // namespace
-void GetCommitIds(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- size_t commit_batch_size,
- sessions::OrderedCommitSet* ordered_commit_set) {
- for (ModelTypeSet::Iterator it = requested_types.First();
- it.Good(); it.Inc()) {
- DCHECK_LE(ordered_commit_set->Size(), commit_batch_size);
- if (ordered_commit_set->Size() >= commit_batch_size)
- break;
- size_t space_remaining = commit_batch_size - ordered_commit_set->Size();
- syncable::Directory::Metahandles out;
- GetCommitIdsForType(
- trans,
- it.Get(),
- space_remaining,
- &out);
- ordered_commit_set->AddCommitItems(out, it.Get());
- }
-}
-
} // namespace syncer
diff --git a/sync/engine/get_commit_ids.h b/sync/engine/get_commit_ids.h
index 557853f36d..b435848e34 100644
--- a/sync/engine/get_commit_ids.h
+++ b/sync/engine/get_commit_ids.h
@@ -15,15 +15,14 @@ using std::vector;
namespace syncer {
-namespace sessions {
-class OrderedCommitSet;
-}
-
namespace syncable {
class BaseTransaction;
}
-// These functions return handles in "commit order". A valid commit ordering is
+// Returns up to |max_entries| metahandles of entries that belong to the
+// specified |type| and are ready for commit.
+//
+// This function returns handles in "commit order". A valid commit ordering is
// one where parents are placed before children, predecessors are placed before
// successors, and deletes appear after creates and moves.
//
@@ -32,25 +31,12 @@ class BaseTransaction;
// system can handle receiving the elements within a folder out of order, so we
// may be able to remove that functionality in the future.
// See crbug.com/287938.
-
-// Returns up to |max_entries| metahandles of entries that belong to the
-// specified |type| and are ready for commit. The returned handles will be
-// in a valid commit ordering.
SYNC_EXPORT_PRIVATE void GetCommitIdsForType(
syncable::BaseTransaction* trans,
ModelType type,
size_t max_entries,
std::vector<int64>* out);
-// Fills the specified |ordered_commit_set| with up to |commit_batch_size|
-// metahandles that belong to the set of types |requested_types| and are ready
-// for commit. The list will be in a valid commit ordering.
-SYNC_EXPORT_PRIVATE void GetCommitIds(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- size_t commit_batch_size,
- sessions::OrderedCommitSet* ordered_commit_set);
-
} // namespace syncer
#endif // SYNC_ENGINE_GET_COMMIT_IDS_H_
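
Since the multi-type GetCommitIds() wrapper is removed, callers now loop over types themselves and call GetCommitIdsForType() directly. A minimal sketch, patterned on SyncDirectoryCommitContribution::Build() further down in this patch, with |dir| and |max_entries| assumed:

  // Illustrative only; not part of the patch.
  syncable::ModelNeutralWriteTransaction trans(FROM_HERE, syncable::SYNCER,
                                               dir);
  std::vector<int64> metahandles;
  GetCommitIdsForType(&trans, BOOKMARKS, max_entries, &metahandles);
  // |metahandles| is in commit order: parents before children, predecessors
  // before successors, and deletes after creates and moves.
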
diff --git a/sync/engine/process_commit_response_command.h b/sync/engine/process_commit_response_command.h
deleted file mode 100644
index 72b5963438..0000000000
--- a/sync/engine/process_commit_response_command.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
-#define SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
-
-#include <set>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/model_changing_syncer_command.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-namespace sessions {
-class OrderedCommitSet;
-}
-
-namespace syncable {
-class Id;
-class WriteTransaction;
-class MutableEntry;
-class Directory;
-}
-
-// A class that processes the server's response to our commmit attempt. Note
-// that some of the preliminary processing is performed in
-// PostClientToServerMessage command.
-//
-// As part of processing the commit response, this command will modify sync
-// entries. It can rename items, update their versions, etc.
-//
-// This command will return a non-SYNCER_OK value if an error occurred while
-// processing the response, or if the server's response indicates that it had
-// trouble processing the request.
-//
-// See SyncerCommand documentation for more info.
-class SYNC_EXPORT_PRIVATE ProcessCommitResponseCommand
- : public ModelChangingSyncerCommand {
- public:
-
- // The commit_set parameter contains references to all the items which were
- // to be committed in this batch.
- //
- // The commmit_message parameter contains the message that was sent to the
- // server.
- //
- // The commit_response parameter contains the response received from the
- // server. This may be uninitialized if we were unable to contact the server
- // or a serious error was encountered.
- ProcessCommitResponseCommand(
- const sessions::OrderedCommitSet& commit_set,
- const sync_pb::ClientToServerMessage& commit_message,
- const sync_pb::ClientToServerResponse& commit_response);
- virtual ~ProcessCommitResponseCommand();
-
- protected:
- // ModelChangingSyncerCommand implementation.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE;
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE;
-
- private:
- sync_pb::CommitResponse::ResponseType ProcessSingleCommitResponse(
- syncable::WriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
- const sync_pb::SyncEntity& pb_committed_entry,
- int64 metahandle,
- std::set<syncable::Id>* deleted_folders);
-
- void ProcessSuccessfulCommitResponse(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
- bool syncing_was_set, std::set<syncable::Id>* deleted_folders);
-
- // Update the BASE_VERSION and SERVER_VERSION, post-commit.
- // Helper for ProcessSuccessfulCommitResponse.
- bool UpdateVersionAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry);
-
- // If the server generated an ID for us during a commit, apply the new ID.
- // Helper for ProcessSuccessfulCommitResponse.
- bool ChangeIdAfterCommit(
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::MutableEntry* local_entry);
-
- // Update the SERVER_ fields to reflect the server state after committing.
- // Helper for ProcessSuccessfulCommitResponse.
- void UpdateServerFieldsAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- syncable::MutableEntry* local_entry);
-
- // Helper to extract the final name from the protobufs.
- const std::string& GetResultingPostCommitName(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response);
-
- // Helper to clean up in case of failure.
- void ClearSyncingBits(
- syncable::Directory *dir,
- const std::vector<syncable::Id>& commit_ids);
-
- const sessions::OrderedCommitSet& commit_set_;
- const sync_pb::ClientToServerMessage& commit_message_;
- const sync_pb::ClientToServerResponse& commit_response_;
-
- DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
deleted file mode 100644
index de2f5ec47a..0000000000
--- a/sync/engine/process_commit_response_command_unittest.cc
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/process_commit_response_command.h"
-
-#include <vector>
-
-#include "base/location.h"
-#include "base/strings/stringprintf.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/syncer_command_test.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using std::string;
-using sync_pb::ClientToServerMessage;
-using sync_pb::CommitResponse;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::BASE_VERSION;
-using syncable::Entry;
-using syncable::ID;
-using syncable::IS_DIR;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::MutableEntry;
-using syncable::NON_UNIQUE_NAME;
-using syncable::UNIQUE_POSITION;
-using syncable::UNITTEST;
-using syncable::WriteTransaction;
-
-// A test fixture for tests exercising ProcessCommitResponseCommand.
-class ProcessCommitResponseCommandTest : public SyncerCommandTest {
- public:
- virtual void SetUp() {
- workers()->clear();
- mutable_routing_info()->clear();
-
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
- workers()->push_back(
- make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- (*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
- (*mutable_routing_info())[PREFERENCES] = GROUP_UI;
- (*mutable_routing_info())[AUTOFILL] = GROUP_DB;
-
- SyncerCommandTest::SetUp();
-
- test_entry_factory_.reset(new TestEntryFactory(directory()));
- }
-
- protected:
-
- ProcessCommitResponseCommandTest()
- : next_new_revision_(4000),
- next_server_position_(10000) {
- }
-
- void CheckEntry(Entry* e, const std::string& name,
- ModelType model_type, const Id& parent_id) {
- EXPECT_TRUE(e->good());
- ASSERT_EQ(name, e->GetNonUniqueName());
- ASSERT_EQ(model_type, e->GetModelType());
- ASSERT_EQ(parent_id, e->GetParentId());
- ASSERT_LT(0, e->GetBaseVersion())
- << "Item should have a valid (positive) server base revision";
- }
-
- // Create a new unsynced item in the database, and synthesize a commit record
- // and a commit response for it in the syncer session. If item_id is a local
- // ID, the item will be a create operation. Otherwise, it will be an edit.
- // Returns the metahandle of the newly created item.
- int CreateUnprocessedCommitResult(
- const Id& item_id,
- const Id& parent_id,
- const string& name,
- bool is_folder,
- ModelType model_type,
- sessions::OrderedCommitSet *commit_set,
- sync_pb::ClientToServerMessage *commit,
- sync_pb::ClientToServerResponse *response) {
- int64 metahandle = 0;
- test_entry_factory_->CreateUnsyncedItem(item_id, parent_id, name,
- is_folder, model_type, &metahandle);
-
- // ProcessCommitResponseCommand consumes commit_ids from the session
- // state, so we need to update that. O(n^2) because it's a test.
- commit_set->AddCommitItem(metahandle, model_type);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
- EXPECT_TRUE(entry.good());
- entry.PutSyncing(true);
-
- // Add to the commit message.
- // TODO(sync): Use the real commit-building code to construct this.
- commit->set_message_contents(ClientToServerMessage::COMMIT);
- sync_pb::SyncEntity* entity = commit->mutable_commit()->add_entries();
- entity->set_non_unique_name(entry.GetNonUniqueName());
- entity->set_folder(entry.GetIsDir());
- entity->set_parent_id_string(
- SyncableIdToProto(entry.GetParentId()));
- entity->set_version(entry.GetBaseVersion());
- entity->mutable_specifics()->CopyFrom(entry.GetSpecifics());
- entity->set_id_string(SyncableIdToProto(item_id));
-
- if (!entry.GetUniqueClientTag().empty()) {
- entity->set_client_defined_unique_tag(
- entry.GetUniqueClientTag());
- }
-
- // Add to the response message.
- response->set_error_code(sync_pb::SyncEnums::SUCCESS);
- sync_pb::CommitResponse_EntryResponse* entry_response =
- response->mutable_commit()->add_entryresponse();
- entry_response->set_response_type(CommitResponse::SUCCESS);
- entry_response->set_name("Garbage.");
- entry_response->set_non_unique_name(entity->name());
- if (item_id.ServerKnows())
- entry_response->set_id_string(entity->id_string());
- else
- entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
- entry_response->set_version(next_new_revision_++);
-
- // If the ID of our parent item committed earlier in the batch was
- // rewritten, rewrite it in the entry response. This matches
- // the server behavior.
- entry_response->set_parent_id_string(entity->parent_id_string());
- for (int i = 0; i < commit->commit().entries_size(); ++i) {
- if (commit->commit().entries(i).id_string() ==
- entity->parent_id_string()) {
- entry_response->set_parent_id_string(
- response->commit().entryresponse(i).id_string());
- }
- }
-
- return metahandle;
- }
-
- void SetLastErrorCode(sync_pb::CommitResponse::ResponseType error_code,
- sync_pb::ClientToServerResponse* response) {
- sync_pb::CommitResponse_EntryResponse* entry_response =
- response->mutable_commit()->mutable_entryresponse(
- response->mutable_commit()->entryresponse_size() - 1);
- entry_response->set_response_type(error_code);
- }
-
- TestIdFactory id_factory_;
- scoped_ptr<TestEntryFactory> test_entry_factory_;
- private:
- int64 next_new_revision_;
- int64 next_server_position_;
- DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommandTest);
-};
-
-TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
- sessions::OrderedCommitSet commit_set(session()->context()->routing_info());
- sync_pb::ClientToServerMessage request;
- sync_pb::ClientToServerResponse response;
-
- Id bookmark_folder_id = id_factory_.NewLocalId();
- int bookmark_folder_handle = CreateUnprocessedCommitResult(
- bookmark_folder_id, id_factory_.root(), "A bookmark folder", true,
- BOOKMARKS, &commit_set, &request, &response);
- int bookmark1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 1", false,
- BOOKMARKS, &commit_set, &request, &response);
- int bookmark2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 2", false,
- BOOKMARKS, &commit_set, &request, &response);
- int pref1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Pref 1", false,
- PREFERENCES, &commit_set, &request, &response);
- int pref2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Pref 2", false,
- PREFERENCES, &commit_set, &request, &response);
- int autofill1_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Autofill 1", false,
- AUTOFILL, &commit_set, &request, &response);
- int autofill2_handle = CreateUnprocessedCommitResult(
- id_factory_.NewLocalId(), id_factory_.root(), "Autofill 2", false,
- AUTOFILL, &commit_set, &request, &response);
-
- ProcessCommitResponseCommand command(commit_set, request, response);
- ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
- command.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry b_folder(&trans, syncable::GET_BY_HANDLE, bookmark_folder_handle);
- ASSERT_TRUE(b_folder.good());
-
- Id new_fid = b_folder.GetId();
- ASSERT_FALSE(new_fid.IsRoot());
- EXPECT_TRUE(new_fid.ServerKnows());
- EXPECT_FALSE(bookmark_folder_id.ServerKnows());
- EXPECT_FALSE(new_fid == bookmark_folder_id);
-
- ASSERT_EQ("A bookmark folder", b_folder.GetNonUniqueName())
- << "Name of bookmark folder should not change.";
- ASSERT_LT(0, b_folder.GetBaseVersion())
- << "Bookmark folder should have a valid (positive) server base revision";
-
- // Look at the two bookmarks in bookmark_folder.
- Entry b1(&trans, syncable::GET_BY_HANDLE, bookmark1_handle);
- Entry b2(&trans, syncable::GET_BY_HANDLE, bookmark2_handle);
- CheckEntry(&b1, "bookmark 1", BOOKMARKS, new_fid);
- CheckEntry(&b2, "bookmark 2", BOOKMARKS, new_fid);
-
- // Look at the prefs and autofill items.
- Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
- Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
- CheckEntry(&p1, "Pref 1", PREFERENCES, id_factory_.root());
- CheckEntry(&p2, "Pref 2", PREFERENCES, id_factory_.root());
-
- Entry a1(&trans, syncable::GET_BY_HANDLE, autofill1_handle);
- Entry a2(&trans, syncable::GET_BY_HANDLE, autofill2_handle);
- CheckEntry(&a1, "Autofill 1", AUTOFILL, id_factory_.root());
- CheckEntry(&a2, "Autofill 2", AUTOFILL, id_factory_.root());
-}
-
-// In this test, we test processing a commit response for a commit batch that
-// includes a newly created folder and some (but not all) of its children.
-// In particular, the folder has 50 children, which alternate between being
-// new items and preexisting items. This mixture of new and old is meant to
-// be a torture test of the code in ProcessCommitResponseCommand that changes
-// an item's ID from a local ID to a server-generated ID on the first commit.
-// We commit only the first 25 children in the sibling order, leaving the
-// second 25 children as unsynced items. http://crbug.com/33081 describes
-// how this scenario used to fail, reversing the order for the second half
-// of the children.
-TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
- sessions::OrderedCommitSet commit_set(session()->context()->routing_info());
- sync_pb::ClientToServerMessage request;
- sync_pb::ClientToServerResponse response;
-
- // Create the parent folder, a new item whose ID will change on commit.
- Id folder_id = id_factory_.NewLocalId();
- CreateUnprocessedCommitResult(folder_id, id_factory_.root(),
- "A", true, BOOKMARKS,
- &commit_set, &request, &response);
-
- // Verify that the item is reachable.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
- ASSERT_TRUE(root.good());
- Id child_id = root.GetFirstChildId();
- ASSERT_EQ(folder_id, child_id);
- }
-
- // The first 25 children of the parent folder will be part of the commit
- // batch. They will be placed left to right in order of creation.
- int batch_size = 25;
- int i = 0;
- Id prev_id = TestIdFactory::root();
- for (; i < batch_size; ++i) {
- // Alternate between new and old child items, just for kicks.
- Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- int64 handle = CreateUnprocessedCommitResult(
- id, folder_id, base::StringPrintf("Item %d", i), false,
- BOOKMARKS, &commit_set, &request, &response);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- e.PutPredecessor(prev_id);
- }
- prev_id = id;
- }
- // The second 25 children will be unsynced items but NOT part of the commit
- // batch. When the ID of the parent folder changes during the commit,
- // these items PARENT_ID should be updated, and their ordering should be
- // preserved.
- for (; i < 2*batch_size; ++i) {
- // Alternate between new and old child items, just for kicks.
- Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- int64 handle = -1;
- test_entry_factory_->CreateUnsyncedItem(
- id, folder_id, base::StringPrintf("Item %d", i),
- false, BOOKMARKS, &handle);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- e.PutPredecessor(prev_id);
- }
- prev_id = id;
- }
-
- // Process the commit response for the parent folder and the first
- // 25 items. This should apply the values indicated by
- // each CommitResponse_EntryResponse to the syncable Entries. All new
- // items in the commit batch should have their IDs changed to server IDs.
- ProcessCommitResponseCommand command(commit_set, request, response);
- ExpectGroupToChange(command, GROUP_UI);
- command.ExecuteImpl(session());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- // Lookup the parent folder by finding a child of the root. We can't use
- // folder_id here, because it changed during the commit.
- syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
- ASSERT_TRUE(root.good());
- Id new_fid = root.GetFirstChildId();
- ASSERT_FALSE(new_fid.IsRoot());
- EXPECT_TRUE(new_fid.ServerKnows());
- EXPECT_FALSE(folder_id.ServerKnows());
- EXPECT_TRUE(new_fid != folder_id);
- Entry parent(&trans, syncable::GET_BY_ID, new_fid);
- ASSERT_TRUE(parent.good());
- ASSERT_EQ("A", parent.GetNonUniqueName())
- << "Name of parent folder should not change.";
- ASSERT_LT(0, parent.GetBaseVersion())
- << "Parent should have a valid (positive) server base revision";
-
- Id cid = parent.GetFirstChildId();
-
- int child_count = 0;
- // Now loop over all the children of the parent folder, verifying
- // that they are in their original order by checking to see that their
- // names are still sequential.
- while (!cid.IsRoot()) {
- SCOPED_TRACE(::testing::Message("Examining item #") << child_count);
- Entry c(&trans, syncable::GET_BY_ID, cid);
- DCHECK(c.good());
- ASSERT_EQ(base::StringPrintf("Item %d", child_count),
- c.GetNonUniqueName());
- ASSERT_EQ(new_fid, c.GetParentId());
- if (child_count < batch_size) {
- ASSERT_FALSE(c.GetIsUnsynced()) << "Item should be committed";
- ASSERT_TRUE(cid.ServerKnows());
- ASSERT_LT(0, c.GetBaseVersion());
- } else {
- ASSERT_TRUE(c.GetIsUnsynced()) << "Item should be uncommitted";
- // We alternated between creates and edits; double check that these items
- // have been preserved.
- if (child_count % 4 < 2) {
- ASSERT_FALSE(cid.ServerKnows());
- ASSERT_GE(0, c.GetBaseVersion());
- } else {
- ASSERT_TRUE(cid.ServerKnows());
- ASSERT_LT(0, c.GetBaseVersion());
- }
- }
- cid = c.GetSuccessorId();
- child_count++;
- }
- ASSERT_EQ(batch_size*2, child_count)
- << "Too few or too many children in parent folder after commit.";
-}
-
-} // namespace syncer
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
index 4854852558..d761cfc71f 100644
--- a/sync/engine/process_updates_command.cc
+++ b/sync/engine/process_updates_command.cc
@@ -13,10 +13,10 @@
#include "sync/engine/syncer_util.h"
#include "sync/sessions/sync_session.h"
#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
using std::vector;
@@ -31,21 +31,6 @@ using syncable::GET_BY_ID;
ProcessUpdatesCommand::ProcessUpdatesCommand() {}
ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
-std::set<ModelSafeGroup> ProcessUpdatesCommand::GetGroupsToChange(
- const sessions::SyncSession& session) const {
- std::set<ModelSafeGroup> groups_with_updates;
-
- const sync_pb::GetUpdatesResponse& updates =
- session.status_controller().updates_response().get_updates();
- for (int i = 0; i < updates.entries().size(); i++) {
- groups_with_updates.insert(
- GetGroupForModelType(GetModelType(updates.entries(i)),
- session.context()->routing_info()));
- }
-
- return groups_with_updates;
-}
-
namespace {
// This function attempts to determine whether or not this update is genuinely
@@ -102,11 +87,11 @@ bool UpdateContainsNewVersion(syncable::BaseTransaction *trans,
} // namespace
-SyncerError ProcessUpdatesCommand::ModelChangingExecuteImpl(
- SyncSession* session) {
+SyncerError ProcessUpdatesCommand::ExecuteImpl(SyncSession* session) {
syncable::Directory* dir = session->context()->directory();
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+ syncable::ModelNeutralWriteTransaction trans(
+ FROM_HERE, syncable::SYNCER, dir);
sessions::StatusController* status = session->mutable_status_controller();
const sync_pb::GetUpdatesResponse& updates =
@@ -120,18 +105,6 @@ SyncerError ProcessUpdatesCommand::ModelChangingExecuteImpl(
for (int i = 0; i < update_count; i++) {
const sync_pb::SyncEntity& update = updates.entries(i);
- // The current function gets executed on several different threads, but
- // every call iterates over the same list of items that the server returned
- // to us. We're not allowed to process items unless we're on the right
- // thread for that type. This check will ensure we only touch the items
- // that live on our current thread.
- // TODO(tim): Don't allow access to objects in other ModelSafeGroups.
- // See crbug.com/121521 .
- ModelSafeGroup g = GetGroupForModelType(GetModelType(update),
- session->context()->routing_info());
- if (g != status->group_restriction())
- continue;
-
VerifyResult verify_result = VerifyUpdate(
&trans, update, requested_types, session->context()->routing_info());
status->increment_num_updates_downloaded_by(1);
@@ -159,8 +132,9 @@ namespace {
// will have refused to unify the update.
// We should not attempt to apply it at all since it violates consistency
// rules.
-VerifyResult VerifyTagConsistency(const sync_pb::SyncEntity& entry,
- const syncable::MutableEntry& same_id) {
+VerifyResult VerifyTagConsistency(
+ const sync_pb::SyncEntity& entry,
+ const syncable::ModelNeutralMutableEntry& same_id) {
if (entry.has_client_defined_unique_tag() &&
entry.client_defined_unique_tag() !=
same_id.GetUniqueClientTag()) {
@@ -172,7 +146,8 @@ VerifyResult VerifyTagConsistency(const sync_pb::SyncEntity& entry,
} // namespace
VerifyResult ProcessUpdatesCommand::VerifyUpdate(
- syncable::WriteTransaction* trans, const sync_pb::SyncEntity& entry,
+ syncable::ModelNeutralWriteTransaction* trans,
+ const sync_pb::SyncEntity& entry,
ModelTypeSet requested_types,
const ModelSafeRoutingInfo& routes) {
syncable::Id id = SyncableIdFromProto(entry.id_string());
@@ -194,7 +169,7 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
}
}
- syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+ syncable::ModelNeutralMutableEntry same_id(trans, GET_BY_ID, id);
result = VerifyNewEntry(entry, &same_id, deleted);
ModelType placement_type = !deleted ? GetModelType(entry)
@@ -220,8 +195,8 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
// If we have an existing entry, we check here for updates that break
// consistency rules.
if (VERIFY_UNDECIDED == result) {
- result = VerifyUpdateConsistency(trans, entry, &same_id,
- deleted, is_directory, model_type);
+ result = VerifyUpdateConsistency(trans, entry, deleted,
+ is_directory, model_type, &same_id);
}
if (VERIFY_UNDECIDED == result)
@@ -232,9 +207,9 @@ VerifyResult ProcessUpdatesCommand::VerifyUpdate(
namespace {
// Returns true if the entry is still ok to process.
-bool ReverifyEntry(syncable::WriteTransaction* trans,
+bool ReverifyEntry(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& entry,
- syncable::MutableEntry* same_id) {
+ syncable::ModelNeutralMutableEntry* same_id) {
const bool deleted = entry.has_deleted() && entry.deleted();
const bool is_directory = IsFolder(entry);
@@ -242,10 +217,10 @@ bool ReverifyEntry(syncable::WriteTransaction* trans,
return VERIFY_SUCCESS == VerifyUpdateConsistency(trans,
entry,
- same_id,
deleted,
is_directory,
- model_type);
+ model_type,
+ same_id);
}
} // namespace
@@ -253,7 +228,7 @@ bool ReverifyEntry(syncable::WriteTransaction* trans,
ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
const sync_pb::SyncEntity& update,
const Cryptographer* cryptographer,
- syncable::WriteTransaction* const trans) {
+ syncable::ModelNeutralWriteTransaction* const trans) {
const syncable::Id& server_id = SyncableIdFromProto(update.id_string());
const std::string name = SyncerProtoUtil::NameFromSyncEntity(update);
@@ -270,7 +245,7 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// We take a two step approach. First we store the entries data in the
// server fields of a local entry and then move the data to the local fields
- syncable::MutableEntry target_entry(trans, GET_BY_ID, local_id);
+ syncable::ModelNeutralMutableEntry target_entry(trans, GET_BY_ID, local_id);
// We need to run the Verify checks again; the world could have changed
// since we last verified.
diff --git a/sync/engine/process_updates_command.h b/sync/engine/process_updates_command.h
index ce793879b3..0868e9847a 100644
--- a/sync/engine/process_updates_command.h
+++ b/sync/engine/process_updates_command.h
@@ -17,7 +17,7 @@ class SyncEntity;
namespace syncer {
namespace syncable {
-class WriteTransaction;
+class ModelNeutralWriteTransaction;
}
class Cryptographer;
@@ -28,29 +28,25 @@ class Cryptographer;
//
// Postconditions - All of the verified SyncEntity data will be copied to
// the server fields of the corresponding syncable entries.
-class SYNC_EXPORT_PRIVATE ProcessUpdatesCommand
- : public ModelChangingSyncerCommand {
+class SYNC_EXPORT_PRIVATE ProcessUpdatesCommand : public SyncerCommand {
public:
ProcessUpdatesCommand();
virtual ~ProcessUpdatesCommand();
protected:
- // ModelChangingSyncerCommand implementation.
- virtual std::set<ModelSafeGroup> GetGroupsToChange(
- const sessions::SyncSession& session) const OVERRIDE;
- virtual SyncerError ModelChangingExecuteImpl(
- sessions::SyncSession* session) OVERRIDE;
+ // SyncerCommand implementation.
+ virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
private:
VerifyResult VerifyUpdate(
- syncable::WriteTransaction* trans,
+ syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& entry,
ModelTypeSet requested_types,
const ModelSafeRoutingInfo& routes);
ServerUpdateProcessingResult ProcessUpdate(
const sync_pb::SyncEntity& proto_update,
const Cryptographer* cryptographer,
- syncable::WriteTransaction* const trans);
+ syncable::ModelNeutralWriteTransaction* const trans);
DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
};
diff --git a/sync/engine/process_updates_command_unittest.cc b/sync/engine/process_updates_command_unittest.cc
index b46dc7846c..44b308a8e2 100644
--- a/sync/engine/process_updates_command_unittest.cc
+++ b/sync/engine/process_updates_command_unittest.cc
@@ -81,25 +81,6 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommandTest);
};
-TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
- std::string root = syncable::GetNullId().GetServerId();
-
- CreateLocalItem("p1", root, PREFERENCES);
- CreateLocalItem("a1", root, AUTOFILL);
-
- ExpectNoGroupsToChange(command_);
-
- sync_pb::GetUpdatesResponse* updates =
- session()->mutable_status_controller()->
- mutable_updates_response()->mutable_get_updates();
- AddUpdate(updates, "p1", root, PREFERENCES);
- AddUpdate(updates, "a1", root, AUTOFILL);
-
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
-
- command_.ExecuteImpl(session());
-}
-
static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
// Test that the bookmark tag is set on newly downloaded items.
@@ -118,7 +99,7 @@ TEST_F(ProcessUpdatesCommandTest, NewBookmarkTag) {
e->set_originator_client_item_id(client_id.GetServerId());
e->set_position_in_parent(0);
- command_.ExecuteImpl(session());
+ command_.Execute(session());
syncable::ReadTransaction trans(FROM_HERE, directory());
syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
@@ -150,7 +131,7 @@ TEST_F(ProcessUpdatesCommandTest, ReceiveServerCreatedBookmarkFolders) {
EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
- command_.ExecuteImpl(session());
+ command_.Execute(session());
syncable::ReadTransaction trans(FROM_HERE, directory());
syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
@@ -175,7 +156,7 @@ TEST_F(ProcessUpdatesCommandTest, ReceiveNonBookmarkItem) {
EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
- command_.ExecuteImpl(session());
+ command_.Execute(session());
syncable::ReadTransaction trans(FROM_HERE, directory());
syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
diff --git a/sync/engine/sync_directory_commit_contribution.cc b/sync/engine/sync_directory_commit_contribution.cc
new file mode 100644
index 0000000000..f43131e300
--- /dev/null
+++ b/sync/engine/sync_directory_commit_contribution.cc
@@ -0,0 +1,164 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+#include "sync/engine/commit_util.h"
+#include "sync/engine/get_commit_ids.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+
+namespace syncer {
+
+using syncable::GET_BY_HANDLE;
+using syncable::SYNCER;
+
+SyncDirectoryCommitContribution::~SyncDirectoryCommitContribution() {
+ DCHECK(!syncing_bits_set_);
+}
+
+// static.
+SyncDirectoryCommitContribution* SyncDirectoryCommitContribution::Build(
+ syncable::Directory* dir,
+ ModelType type,
+ size_t max_entries) {
+ std::vector<int64> metahandles;
+
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir);
+ GetCommitIdsForType(&trans, type, max_entries, &metahandles);
+
+ if (metahandles.empty())
+ return NULL;
+
+ google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities;
+ for (std::vector<int64>::iterator it = metahandles.begin();
+ it != metahandles.end(); ++it) {
+ sync_pb::SyncEntity* entity = entities.Add();
+ syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ commit_util::BuildCommitItem(entry, entity);
+ entry.PutSyncing(true);
+ }
+
+ return new SyncDirectoryCommitContribution(metahandles, entities, dir);
+}
+
+void SyncDirectoryCommitContribution::AddToCommitMessage(
+ sync_pb::ClientToServerMessage* msg) {
+ DCHECK(syncing_bits_set_);
+ sync_pb::CommitMessage* commit_message = msg->mutable_commit();
+ entries_start_index_ = commit_message->entries_size();
+ std::copy(entities_.begin(),
+ entities_.end(),
+ RepeatedPtrFieldBackInserter(commit_message->mutable_entries()));
+}
+
+SyncerError SyncDirectoryCommitContribution::ProcessCommitResponse(
+ const sync_pb::ClientToServerResponse& response,
+ sessions::StatusController* status) {
+ DCHECK(syncing_bits_set_);
+ const sync_pb::CommitResponse& commit_response = response.commit();
+
+ int transient_error_commits = 0;
+ int conflicting_commits = 0;
+ int error_commits = 0;
+ int successes = 0;
+
+ std::set<syncable::Id> deleted_folders;
+ {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
+ for (size_t i = 0; i < metahandles_.size(); ++i) {
+ sync_pb::CommitResponse::ResponseType response_type =
+ commit_util::ProcessSingleCommitResponse(
+ &trans,
+ commit_response.entryresponse(entries_start_index_ + i),
+ entities_.Get(i),
+ metahandles_[i],
+ &deleted_folders);
+ switch (response_type) {
+ case sync_pb::CommitResponse::INVALID_MESSAGE:
+ ++error_commits;
+ break;
+ case sync_pb::CommitResponse::CONFLICT:
+ ++conflicting_commits;
+ status->increment_num_server_conflicts();
+ break;
+ case sync_pb::CommitResponse::SUCCESS:
+ ++successes;
+ {
+ syncable::Entry e(&trans, GET_BY_HANDLE, metahandles_[i]);
+ if (e.GetModelType() == BOOKMARKS)
+ status->increment_num_successful_bookmark_commits();
+ }
+ status->increment_num_successful_commits();
+ break;
+ case sync_pb::CommitResponse::OVER_QUOTA:
+ // We handle over quota like a retry, which is the same as transient.
+ case sync_pb::CommitResponse::RETRY:
+ case sync_pb::CommitResponse::TRANSIENT_ERROR:
+ ++transient_error_commits;
+ break;
+ default:
+ LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
+ }
+ }
+ MarkDeletedChildrenSynced(dir_, &trans, &deleted_folders);
+ }
+
+ int commit_count = static_cast<int>(metahandles_.size());
+ if (commit_count == successes) {
+ return SYNCER_OK;
+ } else if (error_commits > 0) {
+ return SERVER_RETURN_UNKNOWN_ERROR;
+ } else if (transient_error_commits > 0) {
+ return SERVER_RETURN_TRANSIENT_ERROR;
+ } else if (conflicting_commits > 0) {
+ // This means that the server already has an item with this version, but
+ // we haven't seen that update yet.
+ //
+ // A well-behaved client should respond to this by proceeding to the
+ // download updates phase, fetching the conflicting items, then attempting
+ // to resolve the conflict. That's not what this client does.
+ //
+ // We don't currently have any code to support that exceptional control
+ // flow. Instead, we abort the current sync cycle and start a new one. The
+ // end result is the same.
+ return SERVER_RETURN_CONFLICT;
+ } else {
+ LOG(FATAL) << "Inconsistent counts when processing commit response";
+ return SYNCER_OK;
+ }
+}
+
+void SyncDirectoryCommitContribution::CleanUp() {
+ DCHECK(syncing_bits_set_);
+ UnsetSyncingBits();
+}
+
+size_t SyncDirectoryCommitContribution::GetNumEntries() const {
+ return metahandles_.size();
+}
+
+SyncDirectoryCommitContribution::SyncDirectoryCommitContribution(
+ const std::vector<int64>& metahandles,
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
+ syncable::Directory* dir)
+ : dir_(dir),
+ metahandles_(metahandles),
+ entities_(entities),
+ entries_start_index_(0xDEADBEEF),
+ syncing_bits_set_(true) {
+}
+
+void SyncDirectoryCommitContribution::UnsetSyncingBits() {
+ syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
+ for (std::vector<int64>::const_iterator it = metahandles_.begin();
+ it != metahandles_.end(); ++it) {
+ syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ entry.PutSyncing(false);
+ }
+ syncing_bits_set_ = false;
+}
+
+} // namespace syncer
diff --git a/sync/engine/sync_directory_commit_contribution.h b/sync/engine/sync_directory_commit_contribution.h
new file mode 100644
index 0000000000..8934056675
--- /dev/null
+++ b/sync/engine/sync_directory_commit_contribution.h
@@ -0,0 +1,102 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
+#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
+
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/util/syncer_error.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/sessions/status_controller.h"
+
+namespace syncer {
+
+namespace sessions {
+class StatusController;
+} // namespace sessions
+
+namespace syncable {
+class Directory;
+} // namespace syncable
+
+// This class represents a set of items belonging to a particular data type that
+// have been selected from the syncable Directory and prepared for commit.
+//
+// This class handles the bookkeeping related to the commit of these items,
+// including processing the commit response message and setting and unsetting
+// the SYNCING bits.
+class SYNC_EXPORT_PRIVATE SyncDirectoryCommitContribution {
+ public:
+ // This destructor will DCHECK if UnsetSyncingBits() has not been called yet.
+ ~SyncDirectoryCommitContribution();
+
+ // Build a CommitContribution from the IS_UNSYNCED items in |dir| with the
+ // given |type|. The contribution will include at most |max_items| entries.
+ //
+ // This function may return NULL if this type has no items ready for and
+ // requiring commit. This function may make model neutral changes to the
+ // directory.
+ static SyncDirectoryCommitContribution* Build(
+ syncable::Directory* dir,
+ ModelType type,
+ size_t max_items);
+
+ // Serialize this contribution's entries to the given commit request |msg|.
+ //
+ // This function is not const. It will update some state in this contribution
+ // that will be used when processing the associated commit response. This
+ // function should not be called more than once.
+ void AddToCommitMessage(sync_pb::ClientToServerMessage* msg);
+
+ // Updates this contribution's contents in accordance with the provided
+ // |response|.
+ //
+ // This function may make model-neutral changes to the directory. It is not
+ // valid to call this function unless AddToCommitMessage() was called earlier.
+ // This function should not be called more than once.
+ SyncerError ProcessCommitResponse(
+ const sync_pb::ClientToServerResponse& response,
+ sessions::StatusController* status);
+
+ // Cleans up any temporary state associated with the commit. Must be called
+ // before destruction.
+ void CleanUp();
+
+ // Returns the number of entries included in this contribution.
+ size_t GetNumEntries() const;
+
+ private:
+ class SyncDirectoryCommitContributionTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest, GatherByTypes);
+ FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest,
+ GatherAndTruncate);
+
+ SyncDirectoryCommitContribution(
+ const std::vector<int64>& metahandles,
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
+ syncable::Directory* directory);
+
+ void UnsetSyncingBits();
+
+ syncable::Directory* dir_;
+ const std::vector<int64> metahandles_;
+ const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities_;
+ size_t entries_start_index_;
+
+ // This flag tracks whether or not the directory entries associated with
+ // this commit still have their SYNCING bits set. These bits will be set when
+ // the CommitContribution is created with Build() and unset when CleanUp() is
+ // called. This flag must be unset by the time our destructor is called.
+ bool syncing_bits_set_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncDirectoryCommitContribution);
+};
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
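
The intended lifecycle is Build(), AddToCommitMessage(), ProcessCommitResponse(), then CleanUp(). A condensed sketch based on the unit tests that follow, with |dir|, the server |response| and a sessions::StatusController assumed to be provided by the caller:

  // Illustrative only; not part of the patch.
  sessions::StatusController status;
  scoped_ptr<SyncDirectoryCommitContribution> contribution(
      SyncDirectoryCommitContribution::Build(dir, PREFERENCES, 25));
  if (contribution) {  // NULL when the type has nothing to commit.
    sync_pb::ClientToServerMessage message;
    contribution->AddToCommitMessage(&message);
    // ... post |message| to the server and receive |response| ...
    SyncerError result =
        contribution->ProcessCommitResponse(response, &status);
    contribution->CleanUp();  // Clears the SYNCING bits; required before the
                              // destructor runs.
  }
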
diff --git a/sync/engine/sync_directory_commit_contribution_unittest.cc b/sync/engine/sync_directory_commit_contribution_unittest.cc
new file mode 100644
index 0000000000..75f88bd855
--- /dev/null
+++ b/sync/engine/sync_directory_commit_contribution_unittest.cc
@@ -0,0 +1,235 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+#include "base/message_loop/message_loop.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/entry.h"
+#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/syncable_read_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+class SyncDirectoryCommitContributionTest : public ::testing::Test {
+ public:
+ virtual void SetUp() OVERRIDE {
+ dir_maker_.SetUp();
+
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ CreateTypeRoot(&trans, dir(), PREFERENCES);
+ CreateTypeRoot(&trans, dir(), EXTENSIONS);
+ }
+
+ virtual void TearDown() OVERRIDE {
+ dir_maker_.TearDown();
+ }
+
+ protected:
+ int64 CreateUnsyncedItem(syncable::WriteTransaction* trans,
+ ModelType type,
+ const std::string& tag) {
+ syncable::Entry parent_entry(
+ trans,
+ syncable::GET_BY_SERVER_TAG,
+ ModelTypeToRootTag(type));
+ syncable::MutableEntry entry(
+ trans,
+ syncable::CREATE,
+ type,
+ parent_entry.GetId(),
+ tag);
+ entry.PutIsUnsynced(true);
+ return entry.GetMetahandle();
+ }
+
+ void CreateSuccessfulCommitResponse(
+ const sync_pb::SyncEntity& entity,
+ sync_pb::CommitResponse::EntryResponse* response) {
+ response->set_response_type(sync_pb::CommitResponse::SUCCESS);
+ response->set_non_unique_name(entity.name());
+ response->set_version(entity.version() + 1);
+ response->set_parent_id_string(entity.parent_id_string());
+
+ if (entity.id_string()[0] == '-') // Look for the - in 'c-1234' style IDs.
+ response->set_id_string(id_factory_.NewServerId().GetServerId());
+ else
+ response->set_id_string(entity.id_string());
+ }
+
+ syncable::Directory* dir() {
+ return dir_maker_.directory();
+ }
+
+ TestIdFactory id_factory_;
+
+ private:
+ base::MessageLoop loop_; // Needed to initialize the directory.
+ TestDirectorySetterUpper dir_maker_;
+};
+
+// Verify that the SyncDirectoryCommitContribution contains only entries of its
+// specified type.
+TEST_F(SyncDirectoryCommitContributionTest, GatherByTypes) {
+ int64 pref1, pref2;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2 = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 5));
+ ASSERT_EQ(2U, cc->GetNumEntries());
+
+ const std::vector<int64>& metahandles = cc->metahandles_;
+ EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref1) !=
+ metahandles.end());
+ EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref2) !=
+ metahandles.end());
+
+ cc->CleanUp();
+}
+
+// Verify that the SyncDirectoryCommitContributionTest builder function
+// truncates if necessary.
+TEST_F(SyncDirectoryCommitContributionTest, GatherAndTruncate) {
+ int64 pref1;
+ int64 pref2;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2 = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 1));
+ ASSERT_EQ(1U, cc->GetNumEntries());
+
+ int64 only_metahandle = cc->metahandles_[0];
+ EXPECT_TRUE(only_metahandle == pref1 || only_metahandle == pref2);
+
+ cc->CleanUp();
+}
+
+// Sanity check for building commits from SyncDirectoryCommitContributions.
+// This test makes two CommitContribution objects of different types and uses
+// them to initialize a commit message. Then it checks that the contents of the
+// commit message match those of the directory they came from.
+TEST_F(SyncDirectoryCommitContributionTest, PrepareCommit) {
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
+ scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
+ SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
+
+ sync_pb::ClientToServerMessage message;
+ pref_cc->AddToCommitMessage(&message);
+ ext_cc->AddToCommitMessage(&message);
+
+ const sync_pb::CommitMessage& commit_message = message.commit();
+
+ std::set<syncable::Id> ids_for_commit;
+ ASSERT_EQ(3, commit_message.entries_size());
+ for (int i = 0; i < commit_message.entries_size(); ++i) {
+ const sync_pb::SyncEntity& entity = commit_message.entries(i);
+ // The entities in this test have client-style IDs since they've never been
+ // committed before, so we must use CreateFromClientString to re-create them
+ // from the commit message.
+ ids_for_commit.insert(syncable::Id::CreateFromClientString(
+ entity.id_string()));
+ }
+
+ ASSERT_EQ(3U, ids_for_commit.size());
+ {
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ for (std::set<syncable::Id>::iterator it = ids_for_commit.begin();
+ it != ids_for_commit.end(); ++it) {
+ SCOPED_TRACE(it->value());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, *it);
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.GetSyncing());
+ }
+ }
+
+ pref_cc->CleanUp();
+ ext_cc->CleanUp();
+}
+
+// Creates some unsynced items, pretends to commit them, and hands back a
+// specially crafted response to the syncer in order to test commit response
+// processing. The response simulates a successful commit scenario.
+TEST_F(SyncDirectoryCommitContributionTest, ProcessCommitResponse) {
+ int64 pref1_handle;
+ int64 pref2_handle;
+ int64 ext1_handle;
+ {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
+ pref1_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
+ pref2_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
+ ext1_handle = CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
+ }
+
+ scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
+ SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
+ scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
+ SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
+
+ sync_pb::ClientToServerMessage message;
+ pref_cc->AddToCommitMessage(&message);
+ ext_cc->AddToCommitMessage(&message);
+
+ const sync_pb::CommitMessage& commit_message = message.commit();
+ ASSERT_EQ(3, commit_message.entries_size());
+
+ sync_pb::ClientToServerResponse response;
+ for (int i = 0; i < commit_message.entries_size(); ++i) {
+ sync_pb::SyncEntity entity = commit_message.entries(i);
+ sync_pb::CommitResponse_EntryResponse* entry_response =
+ response.mutable_commit()->add_entryresponse();
+ CreateSuccessfulCommitResponse(entity, entry_response);
+ }
+
+ sessions::StatusController status;
+
+ // Process these in reverse order. Just because we can.
+ ext_cc->ProcessCommitResponse(response, &status);
+ pref_cc->ProcessCommitResponse(response, &status);
+
+ {
+ syncable::ReadTransaction trans(FROM_HERE, dir());
+ syncable::Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
+ EXPECT_TRUE(p1.GetId().ServerKnows());
+ EXPECT_FALSE(p1.GetSyncing());
+ EXPECT_LT(0, p1.GetServerVersion());
+
+ syncable::Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
+ EXPECT_TRUE(p2.GetId().ServerKnows());
+ EXPECT_FALSE(p2.GetSyncing());
+ EXPECT_LT(0, p2.GetServerVersion());
+
+ syncable::Entry e1(&trans, syncable::GET_BY_HANDLE, ext1_handle);
+ EXPECT_TRUE(e1.GetId().ServerKnows());
+ EXPECT_FALSE(e1.GetSyncing());
+ EXPECT_LT(0, e1.GetServerVersion());
+ }
+
+ pref_cc->CleanUp();
+ ext_cc->CleanUp();
+}
+
+} // namespace syncer
diff --git a/sync/engine/sync_directory_commit_contributor.cc b/sync/engine/sync_directory_commit_contributor.cc
new file mode 100644
index 0000000000..c87c8eda87
--- /dev/null
+++ b/sync/engine/sync_directory_commit_contributor.cc
@@ -0,0 +1,24 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_directory_commit_contributor.h"
+
+#include "sync/engine/sync_directory_commit_contribution.h"
+
+namespace syncer {
+
+SyncDirectoryCommitContributor::SyncDirectoryCommitContributor(
+ syncable::Directory* dir,
+ ModelType type)
+ : dir_(dir),
+ type_(type) {}
+
+SyncDirectoryCommitContributor::~SyncDirectoryCommitContributor() {}
+
+SyncDirectoryCommitContribution*
+SyncDirectoryCommitContributor::GetContribution(size_t max_entries) {
+ return SyncDirectoryCommitContribution::Build(dir_, type_, max_entries);
+}
+
+} // namespace syncer
diff --git a/sync/engine/sync_directory_commit_contributor.h b/sync/engine/sync_directory_commit_contributor.h
new file mode 100644
index 0000000000..6ffaeb7761
--- /dev/null
+++ b/sync/engine/sync_directory_commit_contributor.h
@@ -0,0 +1,45 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
+#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
+
+#include <map>
+
+#include "sync/internal_api/public/base/model_type.h"
+
+namespace syncer {
+
+class SyncDirectoryCommitContribution;
+
+namespace syncable {
+class Directory;
+}
+
+// This class represents the syncable::Directory as a source of items to commit
+// to the sync server.
+//
+// Each instance of this class represents a particular type within the
+// syncable::Directory. When asked, it will iterate through the directory, grab
+// any items of its type that are ready for commit, and return them in the form
+// of a SyncDirectoryCommitContribution.
+class SyncDirectoryCommitContributor {
+ public:
+ SyncDirectoryCommitContributor(syncable::Directory* dir, ModelType type);
+ ~SyncDirectoryCommitContributor();
+
+ SyncDirectoryCommitContribution* GetContribution(size_t max_entries);
+
+ private:
+ syncable::Directory* dir_;
+ ModelType type_;
+};
+
+// TODO(rlarocque): Find a better place for this definition.
+typedef std::map<ModelType, SyncDirectoryCommitContributor*>
+ CommitContributorMap;
+
+} // namespace syncer
+
+#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
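
A sketch of how a CommitContributorMap might be populated and consulted, one contributor per directory-backed type. The registration site and ownership handling shown here are assumptions; in this patch the map actually lives in the session context and is consumed by Commit::Init():

  // Illustrative only; |enabled_types|, |directory| and |max_entries| are
  // assumed, and contributor ownership/cleanup is omitted.
  CommitContributorMap commit_contributor_map;
  for (ModelTypeSet::Iterator it = enabled_types.First();
       it.Good(); it.Inc()) {
    commit_contributor_map[it.Get()] =
        new SyncDirectoryCommitContributor(directory, it.Get());
  }

  // Later, when building a commit for one type:
  SyncDirectoryCommitContribution* contribution =
      commit_contributor_map[PREFERENCES]->GetContribution(max_entries);
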
diff --git a/sync/engine/sync_scheduler_impl.cc b/sync/engine/sync_scheduler_impl.cc
index 78010d75fe..3cb5619a93 100644
--- a/sync/engine/sync_scheduler_impl.cc
+++ b/sync/engine/sync_scheduler_impl.cc
@@ -152,11 +152,7 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
BackoffDelayProvider* delay_provider,
sessions::SyncSessionContext* context,
Syncer* syncer)
- : weak_ptr_factory_(this),
- weak_ptr_factory_for_weak_handle_(this),
- weak_handle_this_(MakeWeakHandle(
- weak_ptr_factory_for_weak_handle_.GetWeakPtr())),
- name_(name),
+ : name_(name),
started_(false),
syncer_short_poll_interval_seconds_(
TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds)),
@@ -169,7 +165,11 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
syncer_(syncer),
session_context_(context),
no_scheduling_allowed_(false),
- do_poll_after_credentials_updated_(false) {
+ do_poll_after_credentials_updated_(false),
+ weak_ptr_factory_(this),
+ weak_ptr_factory_for_weak_handle_(this) {
+ weak_handle_this_ = MakeWeakHandle(
+ weak_ptr_factory_for_weak_handle_.GetWeakPtr());
}
SyncSchedulerImpl::~SyncSchedulerImpl() {
@@ -393,12 +393,12 @@ void SyncSchedulerImpl::ScheduleInvalidationNudge(
const ObjectIdInvalidationMap& invalidation_map,
const tracked_objects::Location& nudge_location) {
DCHECK(CalledOnValidThread());
- DCHECK(!invalidation_map.empty());
+ DCHECK(!invalidation_map.Empty());
SDVLOG_LOC(nudge_location, 2)
<< "Scheduling sync because we received invalidation for "
- << ModelTypeSetToString(ObjectIdSetToModelTypeSet(
- ObjectIdInvalidationMapToSet(invalidation_map)));
+ << ModelTypeSetToString(
+ ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds()));
nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
ScheduleNudgeImpl(desired_delay, nudge_location);
}
diff --git a/sync/engine/sync_scheduler_impl.h b/sync/engine/sync_scheduler_impl.h
index 8492463b53..8cb5af76fd 100644
--- a/sync/engine/sync_scheduler_impl.h
+++ b/sync/engine/sync_scheduler_impl.h
@@ -239,14 +239,8 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
virtual void OnActionableError(const sessions::SyncSessionSnapshot& snapshot);
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_;
-
- // A second factory specially for weak_handle_this_, to allow the handle
- // to be const and alleviate threading concerns.
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_for_weak_handle_;
-
// For certain methods that need to worry about X-thread posting.
- const WeakHandle<SyncSchedulerImpl> weak_handle_this_;
+ WeakHandle<SyncSchedulerImpl> weak_handle_this_;
// Used for logging.
const std::string name_;
@@ -316,6 +310,12 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
// after credentials are updated.
bool do_poll_after_credentials_updated_;
+ base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_;
+
+ // A second factory specially for weak_handle_this_, to allow the handle
+ // to be const and alleviate threading concerns.
+ base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_for_weak_handle_;
+
DISALLOW_COPY_AND_ASSIGN(SyncSchedulerImpl);
};
diff --git a/sync/engine/sync_scheduler_unittest.cc b/sync/engine/sync_scheduler_unittest.cc
index a5a00e98bc..08e919a365 100644
--- a/sync/engine/sync_scheduler_unittest.cc
+++ b/sync/engine/sync_scheduler_unittest.cc
@@ -97,7 +97,7 @@ ModelSafeRoutingInfo TypesToRoutingInfo(ModelTypeSet types) {
static const size_t kMinNumSamples = 5;
class SyncSchedulerTest : public testing::Test {
public:
- SyncSchedulerTest() : weak_ptr_factory_(this), syncer_(NULL), delay_(NULL) {}
+ SyncSchedulerTest() : syncer_(NULL), delay_(NULL), weak_ptr_factory_(this) {}
class MockDelayProvider : public BackoffDelayProvider {
public:
@@ -222,7 +222,6 @@ class SyncSchedulerTest : public testing::Test {
}
base::MessageLoop loop_;
- base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
TestDirectorySetterUpper dir_maker_;
CancelationSignal cancelation_signal_;
scoped_ptr<MockConnectionManager> connection_;
@@ -233,6 +232,7 @@ class SyncSchedulerTest : public testing::Test {
std::vector<scoped_refptr<FakeModelWorker> > workers_;
scoped_refptr<ExtensionsActivity> extensions_activity_;
ModelSafeRoutingInfo routing_info_;
+ base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
};
void RecordSyncShareImpl(SyncShareTimes* times) {
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
index cc379d9d82..9f2c378b5c 100644
--- a/sync/engine/syncer.cc
+++ b/sync/engine/syncer.cc
@@ -12,17 +12,16 @@
#include "build/build_config.h"
#include "sync/engine/apply_control_data_updates.h"
#include "sync/engine/apply_updates_and_resolve_conflicts_command.h"
-#include "sync/engine/build_commit_command.h"
#include "sync/engine/commit.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/download.h"
#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/process_commit_response_command.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/cancelation_signal.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/syncer_error.h"
#include "sync/sessions/nudge_tracker.h"
+#include "sync/syncable/directory.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable-inl.h"
@@ -73,7 +72,7 @@ bool Syncer::NormalSyncShare(ModelTypeSet request_types,
}
VLOG(1) << "Committing from types " << ModelTypeSetToString(request_types);
- SyncerError commit_result = BuildAndPostCommits(request_types, this, session);
+ SyncerError commit_result = BuildAndPostCommits(request_types, session);
session->mutable_status_controller()->set_commit_result(commit_result);
return HandleCycleEnd(session, nudge_tracker.updates_source());
@@ -144,6 +143,37 @@ bool Syncer::DownloadAndApplyUpdates(
return true;
}
+SyncerError Syncer::BuildAndPostCommits(ModelTypeSet requested_types,
+ sessions::SyncSession* session) {
+ // The ExitRequested() check is unnecessary, since we should start getting
+ // errors from the ServerConnectionManager if an exit has been requested.
+ // However, it doesn't hurt to check it anyway.
+ while (!ExitRequested()) {
+ scoped_ptr<Commit> commit(
+ Commit::Init(
+ requested_types,
+ session->context()->max_commit_batch_size(),
+ session->context()->account_name(),
+ session->context()->directory()->cache_guid(),
+ session->context()->commit_contributor_map(),
+ session->context()->extensions_activity()));
+ if (!commit) {
+ break;
+ }
+
+ SyncerError error = commit->PostAndProcessResponse(
+ session,
+ session->mutable_status_controller(),
+ session->context()->extensions_activity());
+ commit->CleanUp();
+ if (error != SYNCER_OK) {
+ return error;
+ }
+ }
+
+ return SYNCER_OK;
+}
+
void Syncer::HandleCycleBegin(SyncSession* session) {
session->mutable_status_controller()->UpdateStartTime();
session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_BEGIN);
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
index 132f6ef7b0..d943e5f120 100644
--- a/sync/engine/syncer.h
+++ b/sync/engine/syncer.h
@@ -73,6 +73,14 @@ class SYNC_EXPORT_PRIVATE Syncer {
sessions::SyncSession* session,
base::Callback<void(sync_pb::ClientToServerMessage*)> build_fn);
+ // This function will commit batches of unsynced items to the server until the
+ // number of unsynced and ready to commit items reaches zero or an error is
+ // encountered. A request to exit early will be treated as an error and will
+ // abort any blocking operations.
+ SyncerError BuildAndPostCommits(
+ ModelTypeSet request_types,
+ sessions::SyncSession* session);
+
void HandleCycleBegin(sessions::SyncSession* session);
bool HandleCycleEnd(
sessions::SyncSession* session,
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index 970fed548c..a7825dc82b 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -395,35 +395,6 @@ class SyncerTest : public testing::Test,
}
}
- void DoTruncationTest(const vector<int64>& unsynced_handle_view,
- const vector<int64>& expected_handle_order) {
- for (size_t limit = expected_handle_order.size() + 2; limit > 0; --limit) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
-
- ModelSafeRoutingInfo routes;
- GetModelSafeRoutingInfo(&routes);
- ModelTypeSet types = GetRoutingInfoTypes(routes);
- sessions::OrderedCommitSet output_set(routes);
- GetCommitIds(&wtrans, types, limit, &output_set);
- size_t truncated_size = std::min(limit, expected_handle_order.size());
- ASSERT_EQ(truncated_size, output_set.Size());
- for (size_t i = 0; i < truncated_size; ++i) {
- ASSERT_EQ(expected_handle_order[i], output_set.GetCommitHandleAt(i))
- << "At index " << i << " with batch size limited to " << limit;
- }
- sessions::OrderedCommitSet::Projection proj;
- proj = output_set.GetCommitIdProjection(GROUP_PASSIVE);
- ASSERT_EQ(truncated_size, proj.size());
- for (size_t i = 0; i < truncated_size; ++i) {
- SCOPED_TRACE(::testing::Message("Projection mismatch with i = ") << i);
- int64 projected = output_set.GetCommitHandleAt(proj[i]);
- ASSERT_EQ(expected_handle_order[proj[i]], projected);
- // Since this projection is the identity, the following holds.
- ASSERT_EQ(expected_handle_order[i], projected);
- }
- }
- }
-
const StatusController& status() {
return session_->status_controller();
}
@@ -540,64 +511,6 @@ TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
// regression for a very old bug.
}
-TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
- syncable::Id root = ids_.root();
- // Create two server entries.
- mock_server_->AddUpdateDirectory(ids_.MakeServer("x"), root, "X", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(ids_.MakeServer("w"), root, "W", 10, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
-
- // Create some new client entries.
- CreateUnsyncedDirectory("C", ids_.MakeLocal("c"));
- CreateUnsyncedDirectory("B", ids_.MakeLocal("b"));
- CreateUnsyncedDirectory("D", ids_.MakeLocal("d"));
- CreateUnsyncedDirectory("E", ids_.MakeLocal("e"));
- CreateUnsyncedDirectory("J", ids_.MakeLocal("j"));
-
- vector<int64> expected_order;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry_x(&wtrans, GET_BY_ID, ids_.MakeServer("x"));
- MutableEntry entry_b(&wtrans, GET_BY_ID, ids_.MakeLocal("b"));
- MutableEntry entry_c(&wtrans, GET_BY_ID, ids_.MakeLocal("c"));
- MutableEntry entry_d(&wtrans, GET_BY_ID, ids_.MakeLocal("d"));
- MutableEntry entry_e(&wtrans, GET_BY_ID, ids_.MakeLocal("e"));
- MutableEntry entry_w(&wtrans, GET_BY_ID, ids_.MakeServer("w"));
- MutableEntry entry_j(&wtrans, GET_BY_ID, ids_.MakeLocal("j"));
- entry_x.PutIsUnsynced(true);
- entry_b.PutParentId(entry_x.GetId());
- entry_d.PutParentId(entry_b.GetId());
- entry_c.PutParentId(entry_x.GetId());
- entry_c.PutPredecessor(entry_b.GetId());
- entry_e.PutParentId(entry_c.GetId());
- entry_w.PutPredecessor(entry_x.GetId());
- entry_w.PutIsUnsynced(true);
- entry_w.PutServerVersion(20);
- entry_w.PutIsUnappliedUpdate(true); // Fake a conflict.
- entry_j.PutPredecessor(entry_w.GetId());
-
- // The expected order is "x", "b", "c", "d", "e", "j", truncated
- // appropriately.
- expected_order.push_back(entry_x.GetMetahandle());
- expected_order.push_back(entry_b.GetMetahandle());
- expected_order.push_back(entry_c.GetMetahandle());
- expected_order.push_back(entry_d.GetMetahandle());
- expected_order.push_back(entry_e.GetMetahandle());
- expected_order.push_back(entry_j.GetMetahandle());
- }
-
- // The arrangement is now: x (b (d) c (e)) w j
- // Entry "w" is in conflict, so it is not eligible for commit.
- vector<int64> unsynced_handle_view;
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- GetUnsyncedEntries(&rtrans, &unsynced_handle_view);
- }
- DoTruncationTest(unsynced_handle_view, expected_order);
-}
-
TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
const ModelTypeSet throttled_types(BOOKMARKS);
sync_pb::EntitySpecifics bookmark_data;
diff --git a/sync/engine/syncer_util.cc b/sync/engine/syncer_util.cc
index 3960baaa6f..2235734aed 100644
--- a/sync/engine/syncer_util.cc
+++ b/sync/engine/syncer_util.cc
@@ -23,8 +23,10 @@
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
+#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
@@ -305,7 +307,7 @@ namespace {
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
const std::string& url,
const std::string& favicon_bytes,
- MutableEntry* local_entry) {
+ syncable::ModelNeutralMutableEntry* local_entry) {
// In the new-style protocol, the server no longer sends bookmark info for
// the "google_chrome" folder. Mimic that here.
if (singleton_tag == "google_chrome")
@@ -319,8 +321,9 @@ void UpdateBookmarkSpecifics(const std::string& singleton_tag,
local_entry->PutServerSpecifics(pb);
}
-void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
- MutableEntry* local_entry) {
+void UpdateBookmarkPositioning(
+ const sync_pb::SyncEntity& update,
+ syncable::ModelNeutralMutableEntry* local_entry) {
// Update our unique bookmark tag. In many cases this will be identical to
// the tag we already have. However, clients that have recently upgraded to
// versions that support unique positions will have incorrect tags. See the
@@ -348,7 +351,7 @@ void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
} // namespace
void UpdateServerFieldsFromUpdate(
- MutableEntry* target,
+ syncable::ModelNeutralMutableEntry* target,
const sync_pb::SyncEntity& update,
const std::string& name) {
if (update.deleted()) {
@@ -418,12 +421,14 @@ void UpdateServerFieldsFromUpdate(
}
// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::WriteTransaction *trans,
+void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
const syncable::Id& id) {
- syncable::MutableEntry entry(trans, GET_BY_ID, id);
+ syncable::Entry entry(trans, GET_BY_ID, id);
if (!entry.good()) {
- syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
- id);
+ syncable::ModelNeutralMutableEntry new_entry(
+ trans,
+ syncable::CREATE_NEW_UPDATE_ITEM,
+ id);
}
}
@@ -481,6 +486,7 @@ VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
+ syncable::BaseWriteTransaction* trans,
std::set<syncable::Id>* deleted_folders) {
  // There are two options here.
// 1. Scan deleted unsynced entries looking up their pre-delete tree for any
@@ -492,27 +498,22 @@ void MarkDeletedChildrenSynced(
if (deleted_folders->empty())
return;
Directory::Metahandles handles;
- {
- syncable::ReadTransaction trans(FROM_HERE, dir);
- dir->GetUnsyncedMetaHandles(&trans, &handles);
- }
+ dir->GetUnsyncedMetaHandles(trans, &handles);
if (handles.empty())
return;
Directory::Metahandles::iterator it;
for (it = handles.begin() ; it != handles.end() ; ++it) {
- // Single transaction / entry we deal with.
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- MutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
if (!entry.GetIsUnsynced() || !entry.GetIsDel())
continue;
syncable::Id id = entry.GetParentId();
- while (id != trans.root_id()) {
+ while (id != trans->root_id()) {
if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
entry.PutIsUnsynced(false);
break;
}
- Entry parent(&trans, GET_BY_ID, id);
+ Entry parent(trans, GET_BY_ID, id);
if (!parent.good() || !parent.GetIsDel())
break;
id = parent.GetParentId();
@@ -539,12 +540,12 @@ VerifyResult VerifyNewEntry(
// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
- syncable::WriteTransaction* trans,
+ syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target,
const bool deleted,
const bool is_directory,
- ModelType model_type) {
+ ModelType model_type,
+ syncable::ModelNeutralMutableEntry* target) {
CHECK(target->good());
const syncable::Id& update_id = SyncableIdFromProto(update.id_string());
@@ -612,9 +613,9 @@ VerifyResult VerifyUpdateConsistency(
// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target) {
+ syncable::ModelNeutralMutableEntry* target) {
  // TODO(nick): We hit this path for deleted items that the server
// tells us to re-create; only deleted items with positive base versions
// will hit this path. However, it's not clear how such an undeletion
diff --git a/sync/engine/syncer_util.h b/sync/engine/syncer_util.h
index 45b3b46574..575ab11d37 100644
--- a/sync/engine/syncer_util.h
+++ b/sync/engine/syncer_util.h
@@ -27,6 +27,7 @@ namespace syncer {
namespace syncable {
class BaseTransaction;
+class ModelNeutralWriteTransaction;
} // namespace syncable
class Cryptographer;
@@ -66,12 +67,12 @@ std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update);
// Pass in name to avoid redundant UTF8 conversion.
void UpdateServerFieldsFromUpdate(
- syncable::MutableEntry* local_entry,
+ syncable::ModelNeutralMutableEntry* local_entry,
const sync_pb::SyncEntity& server_entry,
const std::string& name);
// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::WriteTransaction *trans,
+void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
const syncable::Id& id);
// This function is called on an entry when we can update the user-facing data
@@ -87,21 +88,23 @@ VerifyResult VerifyNewEntry(const sync_pb::SyncEntity& update,
// Assumes we have an existing entry; check here for updates that break
// consistency rules.
-VerifyResult VerifyUpdateConsistency(syncable::WriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target,
- const bool deleted,
- const bool is_directory,
- ModelType model_type);
+VerifyResult VerifyUpdateConsistency(
+ syncable::ModelNeutralWriteTransaction* trans,
+ const sync_pb::SyncEntity& update,
+ const bool deleted,
+ const bool is_directory,
+ ModelType model_type,
+ syncable::ModelNeutralMutableEntry* target);
// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& update,
- syncable::MutableEntry* target);
+ syncable::ModelNeutralMutableEntry* target);
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
+ syncable::BaseWriteTransaction* trans,
std::set<syncable::Id>* deleted_folders);
} // namespace syncer
diff --git a/sync/internal_api/debug_info_event_listener.cc b/sync/internal_api/debug_info_event_listener.cc
index 51e6c0f4d3..222fa23d17 100644
--- a/sync/internal_api/debug_info_event_listener.cc
+++ b/sync/internal_api/debug_info_event_listener.cc
@@ -135,19 +135,15 @@ void DebugInfoEventListener::OnNudgeFromDatatype(ModelType datatype) {
}
void DebugInfoEventListener::OnIncomingNotification(
- const ObjectIdInvalidationMap& invalidations) {
+ const ObjectIdInvalidationMap& invalidation_map) {
DCHECK(thread_checker_.CalledOnValidThread());
sync_pb::DebugEventInfo event_info;
- ModelTypeSet types = ObjectIdSetToModelTypeSet(ObjectIdInvalidationMapToSet(
- invalidations));
-
- for (ObjectIdInvalidationMap::const_iterator it = invalidations.begin();
- it != invalidations.end(); ++it) {
- ModelType type = UNSPECIFIED;
- if (ObjectIdToRealModelType(it->first, &type)) {
- event_info.add_datatypes_notified_from_server(
- GetSpecificsFieldNumberFromModelType(type));
- }
+ ModelTypeSet types =
+ ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds());
+
+ for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
+ event_info.add_datatypes_notified_from_server(
+ GetSpecificsFieldNumberFromModelType(it.Get()));
}
AddEventToQueue(event_info);
diff --git a/sync/internal_api/debug_info_event_listener.h b/sync/internal_api/debug_info_event_listener.h
index c3aa9d0929..b0c079e946 100644
--- a/sync/internal_api/debug_info_event_listener.h
+++ b/sync/internal_api/debug_info_event_listener.h
@@ -103,10 +103,10 @@ class SYNC_EXPORT_PRIVATE DebugInfoEventListener
// Cryptographer is initialized and does not have pending keys.
bool cryptographer_ready_;
- base::WeakPtrFactory<DebugInfoEventListener> weak_ptr_factory_;
-
base::ThreadChecker thread_checker_;
+ base::WeakPtrFactory<DebugInfoEventListener> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(DebugInfoEventListener);
};
diff --git a/sync/internal_api/http_bridge.cc b/sync/internal_api/http_bridge.cc
index 48305eb0c9..5827c9d77b 100644
--- a/sync/internal_api/http_bridge.cc
+++ b/sync/internal_api/http_bridge.cc
@@ -79,9 +79,9 @@ void HttpBridgeFactory::Init(const std::string& user_agent) {
base::AutoLock lock(context_getter_lock_);
if (!baseline_request_context_getter_.get()) {
- // Uh oh. We've been aborted before we finsihed initializing.
- // There's no point in initializating further; let's just return
- // right away.
+ // Uh oh. We've been aborted before we finished initializing. There's no
+ // point in initializing further; let's just return right away.
+ return;
}
request_context_getter_ =
diff --git a/sync/internal_api/http_bridge_unittest.cc b/sync/internal_api/http_bridge_unittest.cc
index e1dc6decb9..5dd39bb2fd 100644
--- a/sync/internal_api/http_bridge_unittest.cc
+++ b/sync/internal_api/http_bridge_unittest.cc
@@ -488,4 +488,33 @@ TEST_F(SyncHttpBridgeTest, RequestContextGetterReleaseOrder) {
sync_thread.Stop();
}
+// Attempt to release the URLRequestContextGetter before the HttpBridgeFactory
+// is initialized.
+TEST_F(SyncHttpBridgeTest, EarlyAbortFactory) {
+ // In a real scenario, the following would happen on many threads. For
+ // simplicity, this test uses only one thread.
+
+ scoped_refptr<net::URLRequestContextGetter> baseline_context_getter(
+ new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
+ CancelationSignal release_request_context_signal;
+
+ // UI Thread: Initialize the HttpBridgeFactory. The next step would be to
+ // post a task to SBH::Core to have it initialized.
+ scoped_ptr<syncer::HttpBridgeFactory> factory(new HttpBridgeFactory(
+ baseline_context_getter,
+ NetworkTimeUpdateCallback(),
+ &release_request_context_signal));
+
+ // UI Thread: A very early shutdown request arrives and executes on the UI
+ // thread before the posted sync thread task is run.
+ release_request_context_signal.Signal();
+
+ // Sync thread: Finally run the posted task, only to find that our
+ // HttpBridgeFactory has been neutered. Should not crash.
+ factory->Init("TestUserAgent");
+
+ // At this point, attempting to use the factory would trigger a crash. Both
+ // this test and the real world code should make sure this never happens.
+}
+
} // namespace syncer
diff --git a/sync/internal_api/js_mutation_event_observer.h b/sync/internal_api/js_mutation_event_observer.h
index 9d23351503..6c92646e39 100644
--- a/sync/internal_api/js_mutation_event_observer.h
+++ b/sync/internal_api/js_mutation_event_observer.h
@@ -55,13 +55,14 @@ class SYNC_EXPORT_PRIVATE JsMutationEventObserver
ModelTypeSet models_with_changes) OVERRIDE;
private:
- base::WeakPtrFactory<JsMutationEventObserver> weak_ptr_factory_;
WeakHandle<JsEventHandler> event_handler_;
void HandleJsEvent(
const tracked_objects::Location& from_here,
const std::string& name, const JsEventDetails& details);
+ base::WeakPtrFactory<JsMutationEventObserver> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(JsMutationEventObserver);
};
diff --git a/sync/internal_api/public/base/DEPS b/sync/internal_api/public/base/DEPS
index 6eae52a42c..047cfd43b3 100644
--- a/sync/internal_api/public/base/DEPS
+++ b/sync/internal_api/public/base/DEPS
@@ -1,4 +1,8 @@
include_rules = [
+ # Invalidations headers depend on this. We should move them to sync/notifier
+ # then remove this rule.
+ "+google/cacheinvalidation",
+
"-sync",
"+sync/base",
"+sync/internal_api/public/base",
diff --git a/sync/internal_api/public/base/ack_handle.cc b/sync/internal_api/public/base/ack_handle.cc
new file mode 100644
index 0000000000..f5ddf121e3
--- /dev/null
+++ b/sync/internal_api/public/base/ack_handle.cc
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/internal_api/public/base/ack_handle.h"
+
+#include <cstddef>
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/values.h"
+
+namespace syncer {
+
+namespace {
+// Hopefully enough bytes for uniqueness.
+const size_t kBytesInHandle = 16;
+} // namespace
+
+AckHandle AckHandle::CreateUnique() {
+ // This isn't a valid UUID, so we don't attempt to format it like one.
+ uint8 random_bytes[kBytesInHandle];
+ base::RandBytes(random_bytes, sizeof(random_bytes));
+ return AckHandle(base::HexEncode(random_bytes, sizeof(random_bytes)),
+ base::Time::Now());
+}
+
+AckHandle AckHandle::InvalidAckHandle() {
+ return AckHandle(std::string(), base::Time());
+}
+
+bool AckHandle::Equals(const AckHandle& other) const {
+ return state_ == other.state_ && timestamp_ == other.timestamp_;
+}
+
+scoped_ptr<base::DictionaryValue> AckHandle::ToValue() const {
+ scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
+ value->SetString("state", state_);
+ value->SetString("timestamp",
+ base::Int64ToString(timestamp_.ToInternalValue()));
+ return value.Pass();
+}
+
+bool AckHandle::ResetFromValue(const base::DictionaryValue& value) {
+ if (!value.GetString("state", &state_))
+ return false;
+ std::string timestamp_as_string;
+ if (!value.GetString("timestamp", &timestamp_as_string))
+ return false;
+ int64 timestamp_value;
+ if (!base::StringToInt64(timestamp_as_string, &timestamp_value))
+ return false;
+ timestamp_ = base::Time::FromInternalValue(timestamp_value);
+ return true;
+}
+
+bool AckHandle::IsValid() const {
+ return !state_.empty();
+}
+
+AckHandle::AckHandle(const std::string& state, base::Time timestamp)
+ : state_(state), timestamp_(timestamp) {
+}
+
+AckHandle::~AckHandle() {
+}
+
+} // namespace syncer
diff --git a/sync/internal_api/public/base/ack_handle.h b/sync/internal_api/public/base/ack_handle.h
new file mode 100644
index 0000000000..99d03af9eb
--- /dev/null
+++ b/sync/internal_api/public/base/ack_handle.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
+#define SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "sync/base/sync_export.h"
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace syncer {
+
+// Opaque class that represents a local ack handle. We don't reuse the
+// invalidation ack handles to avoid unnecessary dependencies.
+class SYNC_EXPORT AckHandle {
+ public:
+ static AckHandle CreateUnique();
+ static AckHandle InvalidAckHandle();
+
+ bool Equals(const AckHandle& other) const;
+
+ scoped_ptr<base::DictionaryValue> ToValue() const;
+ bool ResetFromValue(const base::DictionaryValue& value);
+
+ bool IsValid() const;
+
+ ~AckHandle();
+
+ private:
+ // Explicitly copyable and assignable for STL containers.
+ AckHandle(const std::string& state, base::Time timestamp);
+
+ std::string state_;
+ base::Time timestamp_;
+};
+
+} // namespace syncer
+
+#endif // SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
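
As a usage illustration only, the AckHandle surface declared above (CreateUnique(), ToValue(), ResetFromValue(), Equals(), IsValid()) could be exercised with a round-trip like the following sketch; the helper name and its return convention are assumptions, not code from this change.

// Sketch, assuming the header added above.
#include "base/memory/scoped_ptr.h"
#include "base/values.h"
#include "sync/internal_api/public/base/ack_handle.h"

namespace syncer {

bool RoundTripAckHandle() {
  AckHandle original = AckHandle::CreateUnique();

  // Serialize to a DictionaryValue, e.g. for persisting alongside an
  // invalidation.
  scoped_ptr<base::DictionaryValue> value(original.ToValue());

  // Restore into a fresh handle and verify the state survived.
  AckHandle restored = AckHandle::InvalidAckHandle();
  if (!restored.ResetFromValue(*value))
    return false;
  return restored.IsValid() && restored.Equals(original);
}

}  // namespace syncer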
diff --git a/sync/internal_api/public/base/invalidation.cc b/sync/internal_api/public/base/invalidation.cc
index b503d07c4a..55fdcc7350 100644
--- a/sync/internal_api/public/base/invalidation.cc
+++ b/sync/internal_api/public/base/invalidation.cc
@@ -5,100 +5,144 @@
#include "sync/internal_api/public/base/invalidation.h"
#include <cstddef>
+
+#include "base/json/json_string_value_serializer.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/values.h"
+#include "sync/notifier/invalidation_util.h"
namespace syncer {
namespace {
-// Hopefully enough bytes for uniqueness.
-const size_t kBytesInHandle = 16;
-} // namespace
-
-AckHandle AckHandle::CreateUnique() {
- // This isn't a valid UUID, so we don't attempt to format it like one.
- uint8 random_bytes[kBytesInHandle];
- base::RandBytes(random_bytes, sizeof(random_bytes));
- return AckHandle(base::HexEncode(random_bytes, sizeof(random_bytes)),
- base::Time::Now());
+const char kObjectIdKey[] = "objectId";
+const char kIsUnknownVersionKey[] = "isUnknownVersion";
+const char kVersionKey[] = "version";
+const char kPayloadKey[] = "payload";
}
-AckHandle AckHandle::InvalidAckHandle() {
- return AckHandle(std::string(), base::Time());
+Invalidation Invalidation::Init(
+ const invalidation::ObjectId& id,
+ int64 version,
+ const std::string& payload) {
+ return Invalidation(id, false, version, payload, AckHandle::CreateUnique());
}
-bool AckHandle::Equals(const AckHandle& other) const {
- return state_ == other.state_ && timestamp_ == other.timestamp_;
+Invalidation Invalidation::InitUnknownVersion(
+ const invalidation::ObjectId& id) {
+ return Invalidation(id, true, -1, std::string(), AckHandle::CreateUnique());
}
-scoped_ptr<base::DictionaryValue> AckHandle::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetString("state", state_);
- value->SetString("timestamp",
- base::Int64ToString(timestamp_.ToInternalValue()));
- return value.Pass();
-}
+scoped_ptr<Invalidation> Invalidation::InitFromValue(
+ const base::DictionaryValue& value) {
+ invalidation::ObjectId id;
-bool AckHandle::ResetFromValue(const base::DictionaryValue& value) {
- if (!value.GetString("state", &state_))
- return false;
- std::string timestamp_as_string;
- if (!value.GetString("timestamp", &timestamp_as_string))
- return false;
- int64 timestamp_value;
- if (!base::StringToInt64(timestamp_as_string, &timestamp_value))
- return false;
- timestamp_ = base::Time::FromInternalValue(timestamp_value);
- return true;
+ const base::DictionaryValue* object_id_dict;
+ if (!value.GetDictionary(kObjectIdKey, &object_id_dict)
+ || !ObjectIdFromValue(*object_id_dict, &id)) {
+ DLOG(WARNING) << "Failed to parse id";
+ return scoped_ptr<Invalidation>();
+ }
+ bool is_unknown_version;
+ if (!value.GetBoolean(kIsUnknownVersionKey, &is_unknown_version)) {
+ DLOG(WARNING) << "Failed to parse is_unknown_version flag";
+ return scoped_ptr<Invalidation>();
+ }
+ if (is_unknown_version) {
+ return scoped_ptr<Invalidation>(new Invalidation(
+ id,
+ true,
+ -1,
+ std::string(),
+ AckHandle::CreateUnique()));
+ } else {
+ int64 version;
+ std::string version_as_string;
+ if (!value.GetString(kVersionKey, &version_as_string)
+ || !base::StringToInt64(version_as_string, &version)) {
+ DLOG(WARNING) << "Failed to parse version";
+ return scoped_ptr<Invalidation>();
+ }
+ std::string payload;
+ if (!value.GetString(kPayloadKey, &payload)) {
+ DLOG(WARNING) << "Failed to parse payload";
+ return scoped_ptr<Invalidation>();
+ }
+ return scoped_ptr<Invalidation>(new Invalidation(
+ id,
+ false,
+ version,
+ payload,
+ AckHandle::CreateUnique()));
+ }
}
-bool AckHandle::IsValid() const {
- return !state_.empty();
+Invalidation::~Invalidation() {}
+
+invalidation::ObjectId Invalidation::object_id() const {
+ return id_;
}
-AckHandle::AckHandle(const std::string& state, base::Time timestamp)
- : state_(state), timestamp_(timestamp) {
+bool Invalidation::is_unknown_version() const {
+ return is_unknown_version_;
}
-AckHandle::~AckHandle() {
+int64 Invalidation::version() const {
+ DCHECK(!is_unknown_version_);
+ return version_;
}
-const int64 Invalidation::kUnknownVersion = -1;
+const std::string& Invalidation::payload() const {
+ DCHECK(!is_unknown_version_);
+ return payload_;
+}
-Invalidation::Invalidation()
- : version(kUnknownVersion), ack_handle(AckHandle::InvalidAckHandle()) {
+const AckHandle& Invalidation::ack_handle() const {
+ return ack_handle_;
}
-Invalidation::~Invalidation() {
+void Invalidation::set_ack_handle(const AckHandle& ack_handle) {
+ ack_handle_ = ack_handle;
}
bool Invalidation::Equals(const Invalidation& other) const {
- return (version == other.version) && (payload == other.payload) &&
- ack_handle.Equals(other.ack_handle);
+ return id_ == other.id_
+ && is_unknown_version_ == other.is_unknown_version_
+ && version_ == other.version_
+ && payload_ == other.payload_;
}
scoped_ptr<base::DictionaryValue> Invalidation::ToValue() const {
scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetString("version", base::Int64ToString(version));
- value->SetString("payload", payload);
- value->Set("ackHandle", ack_handle.ToValue().release());
+ value->Set(kObjectIdKey, ObjectIdToValue(id_).release());
+ if (is_unknown_version_) {
+ value->SetBoolean(kIsUnknownVersionKey, true);
+ } else {
+ value->SetBoolean(kIsUnknownVersionKey, false);
+ value->SetString(kVersionKey, base::Int64ToString(version_));
+ value->SetString(kPayloadKey, payload_);
+ }
return value.Pass();
}
-bool Invalidation::ResetFromValue(const base::DictionaryValue& value) {
- const base::DictionaryValue* ack_handle_value = NULL;
- std::string version_as_string;
- if (value.GetString("version", &version_as_string)) {
- if (!base::StringToInt64(version_as_string, &version))
- return false;
- } else {
- version = kUnknownVersion;
- }
- return
- value.GetString("payload", &payload) &&
- value.GetDictionary("ackHandle", &ack_handle_value) &&
- ack_handle.ResetFromValue(*ack_handle_value);
+std::string Invalidation::ToString() const {
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*ToValue().get());
+ return output;
}
+Invalidation::Invalidation(
+ const invalidation::ObjectId& id,
+ bool is_unknown_version,
+ int64 version,
+ const std::string& payload,
+ AckHandle ack_handle)
+ : id_(id),
+ is_unknown_version_(is_unknown_version),
+ version_(version),
+ payload_(payload),
+ ack_handle_(ack_handle) {}
+
} // namespace syncer
diff --git a/sync/internal_api/public/base/invalidation.h b/sync/internal_api/public/base/invalidation.h
index 851dbed747..2b83564b54 100644
--- a/sync/internal_api/public/base/invalidation.h
+++ b/sync/internal_api/public/base/invalidation.h
@@ -9,59 +9,73 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
+#include "base/values.h"
+#include "google/cacheinvalidation/include/types.h"
#include "sync/base/sync_export.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace
+#include "sync/internal_api/public/base/ack_handle.h"
namespace syncer {
-// Opaque class that represents a local ack handle. We don't reuse the
-// invalidation ack handles to avoid unnecessary dependencies.
-class SYNC_EXPORT AckHandle {
+class DroppedInvalidationTracker;
+class AckHandler;
+
+// Represents a local invalidation, and is roughly analogous to
+// invalidation::Invalidation. Unlike invalidation::Invalidation, this class
+// supports "local" ack-tracking and simple serialization to pref values.
+class SYNC_EXPORT Invalidation {
public:
- static AckHandle CreateUnique();
- static AckHandle InvalidAckHandle();
+ // Factory functions.
+ static Invalidation Init(
+ const invalidation::ObjectId& id,
+ int64 version,
+ const std::string& payload);
+ static Invalidation InitUnknownVersion(const invalidation::ObjectId& id);
+ static scoped_ptr<Invalidation> InitFromValue(
+ const base::DictionaryValue& value);
+
+ ~Invalidation();
+
+ // Compares two invalidations. The comparison ignores ack-tracking state.
+ bool Equals(const Invalidation& other) const;
- bool Equals(const AckHandle& other) const;
+ invalidation::ObjectId object_id() const;
+ bool is_unknown_version() const;
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
+ // Safe to call only if is_unknown_version() returns false.
+ int64 version() const;
- bool IsValid() const;
+ // Safe to call only if is_unknown_version() returns false.
+ const std::string& payload() const;
- ~AckHandle();
+ const AckHandle& ack_handle() const;
+ void set_ack_handle(const AckHandle& ack_handle);
+
+ scoped_ptr<base::DictionaryValue> ToValue() const;
+ std::string ToString() const;
private:
- // Explicitly copyable and assignable for STL containers.
- AckHandle(const std::string& state, base::Time timestamp);
+ Invalidation(const invalidation::ObjectId& id,
+ bool is_unknown_version,
+ int64 version,
+ const std::string& payload,
+ AckHandle ack_handle);
- std::string state_;
- base::Time timestamp_;
-};
+ // The ObjectId to which this invalidation belongs.
+ invalidation::ObjectId id_;
-// Represents a local invalidation, and is roughly analogous to
-// invalidation::Invalidation. It contains a version (which may be
-// kUnknownVersion), a payload (which may be empty) and an
-// associated ack handle that an InvalidationHandler implementation can use to
-// acknowledge receipt of the invalidation. It does not embed the object ID,
-// since it is typically associated with it through ObjectIdInvalidationMap.
-struct SYNC_EXPORT Invalidation {
- static const int64 kUnknownVersion;
-
- Invalidation();
- ~Invalidation();
+ // This flag is set to true if this is an unknown version invalidation.
+ bool is_unknown_version_;
- bool Equals(const Invalidation& other) const;
+ // The version number of this invalidation. Should not be accessed if this is
+ // an unknown version invalidation.
+ int64 version_;
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
+ // The payload associated with this invalidation. Should not be accessed if
+ // this is an unknown version invalidation.
+ std::string payload_;
- int64 version;
- std::string payload;
- AckHandle ack_handle;
+ // A locally generated unique ID used to manage local acknowledgements.
+ AckHandle ack_handle_;
};
} // namespace syncer
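
For orientation, a sketch of round-tripping the reworked Invalidation through its value form, using only the factory functions and accessors declared above. The object id, version, and payload are arbitrary example values, and the helper is not part of this change.

// Sketch, assuming the header above plus invalidation::ObjectId from
// google/cacheinvalidation.
#include "base/memory/scoped_ptr.h"
#include "base/values.h"
#include "google/cacheinvalidation/include/types.h"
#include "sync/internal_api/public/base/invalidation.h"

namespace syncer {

bool RoundTripInvalidation() {
  // Arbitrary example id; the source number and name are illustrative only.
  invalidation::ObjectId id(1004, "EXAMPLE_OBJECT");

  Invalidation original = Invalidation::Init(id, 42, "example payload");

  scoped_ptr<base::DictionaryValue> value(original.ToValue());
  scoped_ptr<Invalidation> restored(Invalidation::InitFromValue(*value));
  if (!restored)
    return false;

  // Equals() ignores ack-tracking state, so the fresh AckHandle created by
  // InitFromValue() does not affect the comparison.
  return restored->Equals(original) &&
         !restored->is_unknown_version() &&
         restored->version() == 42 &&
         restored->payload() == "example payload";
}

}  // namespace syncer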
diff --git a/sync/internal_api/public/base/invalidation_test_util.cc b/sync/internal_api/public/base/invalidation_test_util.cc
index 3f3910be72..3c610daded 100644
--- a/sync/internal_api/public/base/invalidation_test_util.cc
+++ b/sync/internal_api/public/base/invalidation_test_util.cc
@@ -75,7 +75,18 @@ InvalidationEqMatcher::InvalidationEqMatcher(
bool InvalidationEqMatcher::MatchAndExplain(
const Invalidation& actual, MatchResultListener* listener) const {
- return expected_.payload == actual.payload;
+ if (!(expected_.object_id() == actual.object_id())) {
+ return false;
+ }
+ if (expected_.is_unknown_version() && actual.is_unknown_version()) {
+ return true;
+ } else if (expected_.is_unknown_version() != actual.is_unknown_version()) {
+ return false;
+ } else {
+ // Neither is unknown version.
+ return expected_.payload() == actual.payload()
+ && expected_.version() == actual.version();
+ }
}
void InvalidationEqMatcher::DescribeTo(::std::ostream* os) const {
@@ -99,12 +110,8 @@ Matcher<const AckHandle&> Eq(const AckHandle& expected) {
return MakeMatcher(new AckHandleEqMatcher(expected));
}
-void PrintTo(const Invalidation& state, ::std::ostream* os) {
- std::string printable_payload;
- base::JsonDoubleQuote(state.payload,
- true /* put_in_quotes */,
- &printable_payload);
- *os << "{ payload: " << printable_payload << " }";
+void PrintTo(const Invalidation& inv, ::std::ostream* os) {
+ *os << "{ payload: " << inv.ToString() << " }";
}
Matcher<const Invalidation&> Eq(const Invalidation& expected) {
diff --git a/sync/internal_api/public/base/invalidation_test_util.h b/sync/internal_api/public/base/invalidation_test_util.h
index 9376a287b5..e7c08caae0 100644
--- a/sync/internal_api/public/base/invalidation_test_util.h
+++ b/sync/internal_api/public/base/invalidation_test_util.h
@@ -12,7 +12,7 @@
namespace syncer {
class AckHandle;
-struct Invalidation;
+class Invalidation;
void PrintTo(const AckHandle& ack_handle, ::std::ostream* os);
::testing::Matcher<const AckHandle&> Eq(const AckHandle& expected);
diff --git a/sync/internal_api/public/base/model_type.h b/sync/internal_api/public/base/model_type.h
index 247d351375..4f512b4062 100644
--- a/sync/internal_api/public/base/model_type.h
+++ b/sync/internal_api/public/base/model_type.h
@@ -99,6 +99,8 @@ enum ModelType {
// by this user and can have restrictions applied. MANAGED_USERS and
// MANAGED_USER_SETTINGS can not be encrypted.
MANAGED_USERS,
+ // Distilled articles.
+ ARTICLES,
// ---- Proxy types ----
// Proxy types are excluded from the sync protocol, but are still considered
diff --git a/sync/internal_api/public/base/model_type_test_util.cc b/sync/internal_api/public/base/model_type_test_util.cc
index 242b398bc0..efad29c170 100644
--- a/sync/internal_api/public/base/model_type_test_util.cc
+++ b/sync/internal_api/public/base/model_type_test_util.cc
@@ -12,16 +12,9 @@ ObjectIdInvalidationMap BuildInvalidationMap(
const std::string& payload) {
ObjectIdInvalidationMap map;
invalidation::ObjectId id;
- Invalidation invalidation;
-
bool result = RealModelTypeToObjectId(type, &id);
- DCHECK(result)
- << "Conversion of model type to object id failed: "
- << ModelTypeToString(type);
- invalidation.version = version;
- invalidation.payload = payload;
-
- map.insert(std::make_pair(id, invalidation));
+ DCHECK(result);
+ map.Insert(Invalidation::Init(id, version, payload));
return map;
}
diff --git a/sync/notifier/object_id_invalidation_map_test_util.cc b/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
index f2f82853e4..777fc69f74 100644
--- a/sync/notifier/object_id_invalidation_map_test_util.cc
+++ b/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
+#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
#include <algorithm>
@@ -24,7 +24,7 @@ class ObjectIdInvalidationMapEqMatcher
explicit ObjectIdInvalidationMapEqMatcher(
const ObjectIdInvalidationMap& expected);
- virtual bool MatchAndExplain(const ObjectIdInvalidationMap& actual,
+ virtual bool MatchAndExplain(const ObjectIdInvalidationMap& lhs,
MatchResultListener* listener) const;
virtual void DescribeTo(::std::ostream* os) const;
virtual void DescribeNegationTo(::std::ostream* os) const;
@@ -39,37 +39,57 @@ ObjectIdInvalidationMapEqMatcher::ObjectIdInvalidationMapEqMatcher(
const ObjectIdInvalidationMap& expected) : expected_(expected) {
}
+namespace {
+
+struct InvalidationEqPredicate {
+ InvalidationEqPredicate(const Invalidation& inv1)
+ : inv1_(inv1) { }
+
+ bool operator()(const Invalidation& inv2) {
+ return inv1_.Equals(inv2);
+ }
+
+ const Invalidation& inv1_;
+};
+
+} // namespace
+
bool ObjectIdInvalidationMapEqMatcher::MatchAndExplain(
const ObjectIdInvalidationMap& actual,
MatchResultListener* listener) const {
- ObjectIdInvalidationMap expected_only;
- ObjectIdInvalidationMap actual_only;
- typedef std::pair<invalidation::ObjectId,
- std::pair<Invalidation, Invalidation> >
- ValueDifference;
- std::vector<ValueDifference> value_differences;
-
- std::set_difference(expected_.begin(), expected_.end(),
- actual.begin(), actual.end(),
- std::inserter(expected_only, expected_only.begin()),
- expected_.value_comp());
- std::set_difference(actual.begin(), actual.end(),
- expected_.begin(), expected_.end(),
- std::inserter(actual_only, actual_only.begin()),
- actual.value_comp());
-
- for (ObjectIdInvalidationMap::const_iterator it = expected_.begin();
- it != expected_.end(); ++it) {
- ObjectIdInvalidationMap::const_iterator find_it =
- actual.find(it->first);
- if (find_it != actual.end() &&
- !Matches(Eq(it->second))(find_it->second)) {
- value_differences.push_back(std::make_pair(
- it->first, std::make_pair(it->second, find_it->second)));
+
+ std::vector<syncer::Invalidation> expected_invalidations;
+ std::vector<syncer::Invalidation> actual_invalidations;
+
+ expected_.GetAllInvalidations(&expected_invalidations);
+ actual.GetAllInvalidations(&actual_invalidations);
+
+ std::vector<syncer::Invalidation> expected_only;
+ std::vector<syncer::Invalidation> actual_only;
+
+ for (std::vector<syncer::Invalidation>::iterator it =
+ expected_invalidations.begin();
+ it != expected_invalidations.end(); ++it) {
+ if (std::find_if(actual_invalidations.begin(),
+ actual_invalidations.end(),
+ InvalidationEqPredicate(*it))
+ == actual_invalidations.end()) {
+ expected_only.push_back(*it);
}
}
- if (expected_only.empty() && actual_only.empty() && value_differences.empty())
+ for (std::vector<syncer::Invalidation>::iterator it =
+ actual_invalidations.begin();
+ it != actual_invalidations.end(); ++it) {
+ if (std::find_if(expected_invalidations.begin(),
+ expected_invalidations.end(),
+ InvalidationEqPredicate(*it))
+ == expected_invalidations.end()) {
+ actual_only.push_back(*it);
+ }
+ }
+
+ if (expected_only.empty() && actual_only.empty())
return true;
bool printed_header = false;
@@ -86,12 +106,6 @@ bool ObjectIdInvalidationMapEqMatcher::MatchAndExplain(
printed_header = true;
}
- if (!value_differences.empty()) {
- *listener << (printed_header ? ",\nand" : "which")
- << " differ in the following values: "
- << PrintToString(value_differences);
- }
-
return false;
}
@@ -99,8 +113,8 @@ void ObjectIdInvalidationMapEqMatcher::DescribeTo(::std::ostream* os) const {
*os << " is equal to " << PrintToString(expected_);
}
-void ObjectIdInvalidationMapEqMatcher::DescribeNegationTo
-(::std::ostream* os) const {
+void ObjectIdInvalidationMapEqMatcher::DescribeNegationTo(
+ ::std::ostream* os) const {
*os << " isn't equal to " << PrintToString(expected_);
}
diff --git a/sync/notifier/object_id_invalidation_map_test_util.h b/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
index 0217da8d65..5d71979d1f 100644
--- a/sync/notifier/object_id_invalidation_map_test_util.h
+++ b/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
@@ -1,13 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
-#define SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
+#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
+#define SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
// Convince googletest to use the correct overload for PrintTo().
#include "sync/internal_api/public/base/invalidation_test_util.h"
-#include "sync/internal_api/public/base/model_type.h"
#include "sync/notifier/object_id_invalidation_map.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,4 +17,4 @@ namespace syncer {
} // namespace syncer
-#endif // SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_TEST_UTILH_
+#endif // SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
diff --git a/sync/internal_api/sync_encryption_handler_impl.cc b/sync/internal_api/sync_encryption_handler_impl.cc
index b5870238f8..e468e6cda3 100644
--- a/sync/internal_api/sync_encryption_handler_impl.cc
+++ b/sync/internal_api/sync_encryption_handler_impl.cc
@@ -210,12 +210,12 @@ SyncEncryptionHandlerImpl::SyncEncryptionHandlerImpl(
Encryptor* encryptor,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping)
- : weak_ptr_factory_(this),
- user_share_(user_share),
+ : user_share_(user_share),
vault_unsafe_(encryptor, SensitiveTypes()),
encrypt_everything_(false),
passphrase_type_(IMPLICIT_PASSPHRASE),
- nigori_overwrite_count_(0) {
+ nigori_overwrite_count_(0),
+ weak_ptr_factory_(this) {
// Restore the cryptographer's previous keys. Note that we don't add the
// keystore keys into the cryptographer here, in case a migration was pending.
vault_unsafe_.cryptographer.Bootstrap(restored_key_for_bootstrapping);
diff --git a/sync/internal_api/sync_encryption_handler_impl.h b/sync/internal_api/sync_encryption_handler_impl.h
index 4b4e99b9ae..89621c7405 100644
--- a/sync/internal_api/sync_encryption_handler_impl.h
+++ b/sync/internal_api/sync_encryption_handler_impl.h
@@ -265,8 +265,6 @@ class SYNC_EXPORT_PRIVATE SyncEncryptionHandlerImpl
base::ThreadChecker thread_checker_;
- base::WeakPtrFactory<SyncEncryptionHandlerImpl> weak_ptr_factory_;
-
ObserverList<SyncEncryptionHandler::Observer> observers_;
// The current user share (for creating transactions).
@@ -307,6 +305,8 @@ class SYNC_EXPORT_PRIVATE SyncEncryptionHandlerImpl
// before support for this field was added.
base::Time custom_passphrase_time_;
+ base::WeakPtrFactory<SyncEncryptionHandlerImpl> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncEncryptionHandlerImpl);
};
diff --git a/sync/internal_api/sync_manager_impl.cc b/sync/internal_api/sync_manager_impl.cc
index f79213dfdc..6f7a1b98d9 100644
--- a/sync/internal_api/sync_manager_impl.cc
+++ b/sync/internal_api/sync_manager_impl.cc
@@ -168,14 +168,14 @@ class NudgeStrategy {
SyncManagerImpl::SyncManagerImpl(const std::string& name)
: name_(name),
- weak_ptr_factory_(this),
change_delegate_(NULL),
initialized_(false),
observing_network_connectivity_changes_(false),
invalidator_state_(DEFAULT_INVALIDATION_ERROR),
traffic_recorder_(kMaxMessagesToRecord, kMaxMessageSizeToRecord),
encryptor_(NULL),
- report_unrecoverable_error_function_(NULL) {
+ report_unrecoverable_error_function_(NULL),
+ weak_ptr_factory_(this) {
// Pre-fill |notification_info_map_|.
for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
notification_info_map_.insert(
@@ -1005,7 +1005,7 @@ base::DictionaryValue* SyncManagerImpl::NotificationInfoToValue(
for (NotificationInfoMap::const_iterator it = notification_info.begin();
it != notification_info.end(); ++it) {
- const std::string& model_type_str = ModelTypeToString(it->first);
+ const std::string model_type_str = ModelTypeToString(it->first);
value->Set(model_type_str, it->second.ToValue());
}
@@ -1148,13 +1148,22 @@ JsArgList SyncManagerImpl::GetChildNodeIds(const JsArgList& args) {
void SyncManagerImpl::UpdateNotificationInfo(
const ObjectIdInvalidationMap& invalidation_map) {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type = UNSPECIFIED;
- if (ObjectIdToRealModelType(it->first, &type)) {
+ if (!ObjectIdToRealModelType(*it, &type)) {
+ continue;
+ }
+ const SingleObjectInvalidationSet& type_invalidations =
+ invalidation_map.ForObject(*it);
+ for (SingleObjectInvalidationSet::const_iterator inv_it =
+ type_invalidations.begin(); inv_it != type_invalidations.end();
+ ++inv_it) {
NotificationInfo* info = &notification_info_map_[type];
info->total_count++;
- info->payload = it->second.payload;
+ std::string payload =
+ inv_it->is_unknown_version() ? "UNKNOWN" : inv_it->payload();
+ info->payload = payload;
}
}
}
@@ -1185,7 +1194,7 @@ void SyncManagerImpl::OnIncomingInvalidation(
DCHECK(thread_checker_.CalledOnValidThread());
// We should never receive IDs from non-sync objects.
- ObjectIdSet ids = ObjectIdInvalidationMapToSet(invalidation_map);
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type;
if (!ObjectIdToRealModelType(*it, &type)) {
@@ -1193,7 +1202,7 @@ void SyncManagerImpl::OnIncomingInvalidation(
}
}
- if (invalidation_map.empty()) {
+ if (invalidation_map.Empty()) {
LOG(WARNING) << "Sync received invalidation without any type information.";
} else {
allstatus_.IncrementNudgeCounter(NUDGE_SOURCE_NOTIFICATION);
@@ -1209,7 +1218,8 @@ void SyncManagerImpl::OnIncomingInvalidation(
base::DictionaryValue details;
base::ListValue* changed_types = new base::ListValue();
details.Set("changedTypes", changed_types);
- ObjectIdSet id_set = ObjectIdInvalidationMapToSet(invalidation_map);
+
+ ObjectIdSet id_set = invalidation_map.GetObjectIds();
ModelTypeSet nudged_types = ObjectIdSetToModelTypeSet(id_set);
DCHECK(!nudged_types.Empty());
for (ModelTypeSet::Iterator it = nudged_types.First();
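
The UpdateNotificationInfo() rewrite above uses the new per-object traversal of ObjectIdInvalidationMap: enumerate the object ids, then walk each id's SingleObjectInvalidationSet. Below is a generic sketch of that pattern, built only from accessors that appear in this diff (GetObjectIds(), ForObject(), is_unknown_version(), payload(), ObjectIdToString()); the logging helper and the exact include set are assumptions.

// Sketch of the traversal pattern; the include list is assumed, with
// object_id_invalidation_map.h expected to bring in
// SingleObjectInvalidationSet.
#include "base/logging.h"
#include "sync/notifier/invalidation_util.h"
#include "sync/notifier/object_id_invalidation_map.h"

namespace syncer {

void LogInvalidations(const ObjectIdInvalidationMap& invalidation_map) {
  ObjectIdSet ids = invalidation_map.GetObjectIds();
  for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
    const SingleObjectInvalidationSet& for_object =
        invalidation_map.ForObject(*it);
    for (SingleObjectInvalidationSet::const_iterator inv_it =
             for_object.begin();
         inv_it != for_object.end(); ++inv_it) {
      if (inv_it->is_unknown_version()) {
        DVLOG(1) << "Unknown-version invalidation for "
                 << ObjectIdToString(*it);
      } else {
        DVLOG(1) << "Invalidation for " << ObjectIdToString(*it)
                 << " with payload " << inv_it->payload();
      }
    }
  }
}

}  // namespace syncer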
diff --git a/sync/internal_api/sync_manager_impl.h b/sync/internal_api/sync_manager_impl.h
index 0283065f8a..d9dd542490 100644
--- a/sync/internal_api/sync_manager_impl.h
+++ b/sync/internal_api/sync_manager_impl.h
@@ -285,8 +285,6 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
base::ThreadChecker thread_checker_;
- base::WeakPtrFactory<SyncManagerImpl> weak_ptr_factory_;
-
// Thread-safe handle used by
// HandleCalculateChangesChangeEventFromSyncApi(), which can be
// called from any thread. Valid only between between calls to
@@ -367,6 +365,8 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
// with the cryptographer.
scoped_ptr<SyncEncryptionHandlerImpl> sync_encryption_handler_;
+ base::WeakPtrFactory<SyncManagerImpl> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncManagerImpl);
};
diff --git a/sync/internal_api/sync_manager_impl_unittest.cc b/sync/internal_api/sync_manager_impl_unittest.cc
index 2e0d9ae8f7..5549895903 100644
--- a/sync/internal_api/sync_manager_impl_unittest.cc
+++ b/sync/internal_api/sync_manager_impl_unittest.cc
@@ -985,9 +985,7 @@ class SyncManagerTest : public testing::Test,
DCHECK(sync_manager_.thread_checker_.CalledOnValidThread());
ObjectIdSet id_set = ModelTypeSetToObjectIdSet(model_types);
ObjectIdInvalidationMap invalidation_map =
- ObjectIdSetToInvalidationMap(id_set,
- Invalidation::kUnknownVersion,
- std::string());
+ ObjectIdInvalidationMap::InvalidateAll(id_set);
sync_manager_.OnIncomingInvalidation(invalidation_map);
}
diff --git a/sync/notifier/invalidation_notifier_unittest.cc b/sync/notifier/invalidation_notifier_unittest.cc
index 623bfa991a..1f14319625 100644
--- a/sync/notifier/invalidation_notifier_unittest.cc
+++ b/sync/notifier/invalidation_notifier_unittest.cc
@@ -16,7 +16,6 @@
#include "sync/notifier/fake_invalidation_state_tracker.h"
#include "sync/notifier/invalidation_state_tracker.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
diff --git a/sync/notifier/invalidator_registrar.cc b/sync/notifier/invalidator_registrar.cc
index c2a18f9e8f..1c9c50cbbf 100644
--- a/sync/notifier/invalidator_registrar.cc
+++ b/sync/notifier/invalidator_registrar.cc
@@ -5,9 +5,11 @@
#include "sync/notifier/invalidator_registrar.h"
#include <cstddef>
+#include <iterator>
#include <utility>
#include "base/logging.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
@@ -17,7 +19,7 @@ InvalidatorRegistrar::InvalidatorRegistrar()
InvalidatorRegistrar::~InvalidatorRegistrar() {
DCHECK(thread_checker_.CalledOnValidThread());
CHECK(!handlers_.might_have_observers());
- // |id_to_handler_map_| may be non-empty but that's okay.
+ CHECK(handler_to_ids_map_.empty());
}
void InvalidatorRegistrar::RegisterHandler(InvalidationHandler* handler) {
@@ -33,29 +35,30 @@ void InvalidatorRegistrar::UpdateRegisteredIds(
DCHECK(thread_checker_.CalledOnValidThread());
CHECK(handler);
CHECK(handlers_.HasObserver(handler));
- // Remove all existing entries for |handler|.
- for (IdHandlerMap::iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ) {
- if (it->second == handler) {
- IdHandlerMap::iterator erase_it = it;
- ++it;
- id_to_handler_map_.erase(erase_it);
- } else {
- ++it;
+
+ for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ if (it->first == handler) {
+ continue;
}
- }
- // Now add the entries for |handler|. We keep track of the last insertion
- // point so we only traverse the map once to insert all the new entries.
- IdHandlerMap::iterator insert_it = id_to_handler_map_.begin();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- insert_it =
- id_to_handler_map_.insert(insert_it, std::make_pair(*it, handler));
- CHECK_EQ(handler, insert_it->second)
+ std::vector<invalidation::ObjectId> intersection;
+ std::set_intersection(
+ it->second.begin(), it->second.end(),
+ ids.begin(), ids.end(),
+ std::inserter(intersection, intersection.end()),
+ ObjectIdLessThan());
+ CHECK(intersection.empty())
<< "Duplicate registration: trying to register "
- << ObjectIdToString(insert_it->first) << " for "
+ << ObjectIdToString(*intersection.begin()) << " for "
<< handler << " when it's already registered for "
- << insert_it->second;
+ << it->first;
+ }
+
+ if (ids.empty()) {
+ handler_to_ids_map_.erase(handler);
+ } else {
+ handler_to_ids_map_[handler] = ids;
}
}
@@ -64,27 +67,26 @@ void InvalidatorRegistrar::UnregisterHandler(InvalidationHandler* handler) {
CHECK(handler);
CHECK(handlers_.HasObserver(handler));
handlers_.RemoveObserver(handler);
+ handler_to_ids_map_.erase(handler);
}
ObjectIdSet InvalidatorRegistrar::GetRegisteredIds(
InvalidationHandler* handler) const {
DCHECK(thread_checker_.CalledOnValidThread());
- ObjectIdSet registered_ids;
- for (IdHandlerMap::const_iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ++it) {
- if (it->second == handler) {
- registered_ids.insert(it->first);
- }
+ HandlerIdsMap::const_iterator lookup = handler_to_ids_map_.find(handler);
+ if (lookup != handler_to_ids_map_.end()) {
+ return lookup->second;
+ } else {
+ return ObjectIdSet();
}
- return registered_ids;
}
ObjectIdSet InvalidatorRegistrar::GetAllRegisteredIds() const {
DCHECK(thread_checker_.CalledOnValidThread());
ObjectIdSet registered_ids;
- for (IdHandlerMap::const_iterator it = id_to_handler_map_.begin();
- it != id_to_handler_map_.end(); ++it) {
- registered_ids.insert(it->first);
+ for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ registered_ids.insert(it->second.begin(), it->second.end());
}
return registered_ids;
}
@@ -97,23 +99,13 @@ void InvalidatorRegistrar::DispatchInvalidationsToHandlers(
return;
}
- typedef std::map<InvalidationHandler*, ObjectIdInvalidationMap> DispatchMap;
- DispatchMap dispatch_map;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- InvalidationHandler* const handler = ObjectIdToHandler(it->first);
- // Filter out invalidations for IDs with no handler.
- if (handler)
- dispatch_map[handler].insert(*it);
- }
-
- // Emit invalidations only for handlers in |handlers_|.
- ObserverListBase<InvalidationHandler>::Iterator it(handlers_);
- InvalidationHandler* handler = NULL;
- while ((handler = it.GetNext()) != NULL) {
- DispatchMap::const_iterator dispatch_it = dispatch_map.find(handler);
- if (dispatch_it != dispatch_map.end())
- handler->OnIncomingInvalidation(dispatch_it->second);
+ for (HandlerIdsMap::iterator it = handler_to_ids_map_.begin();
+ it != handler_to_ids_map_.end(); ++it) {
+ ObjectIdInvalidationMap to_emit =
+ invalidation_map.GetSubsetWithObjectIds(it->second);
+ if (!to_emit.Empty()) {
+ it->first->OnIncomingInvalidation(to_emit);
+ }
}
}
@@ -142,11 +134,4 @@ void InvalidatorRegistrar::DetachFromThreadForTest() {
thread_checker_.DetachFromThread();
}
-InvalidationHandler* InvalidatorRegistrar::ObjectIdToHandler(
- const invalidation::ObjectId& id) {
- DCHECK(thread_checker_.CalledOnValidThread());
- IdHandlerMap::const_iterator it = id_to_handler_map_.find(id);
- return (it == id_to_handler_map_.end()) ? NULL : it->second;
-}
-
} // namespace syncer
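A minimal sketch of the new dispatch behaviour implemented above (illustrative only; |registrar|, |id1|, and |id3| are hypothetical and assumed to be set up as in the tests further below):

  ObjectIdInvalidationMap incoming;
  incoming.Insert(Invalidation::Init(id1, 1, "payload1"));
  incoming.Insert(Invalidation::Init(id3, 3, "payload3"));
  // Each registered handler now receives only the subset matching its own
  // registered IDs, computed via GetSubsetWithObjectIds().
  registrar.DispatchInvalidationsToHandlers(incoming);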
diff --git a/sync/notifier/invalidator_registrar.h b/sync/notifier/invalidator_registrar.h
index f2a3c638bd..fb6b3881c3 100644
--- a/sync/notifier/invalidator_registrar.h
+++ b/sync/notifier/invalidator_registrar.h
@@ -13,7 +13,6 @@
#include "sync/base/sync_export.h"
#include "sync/notifier/invalidation_handler.h"
#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
namespace invalidation {
class ObjectId;
@@ -21,6 +20,8 @@ class ObjectId;
namespace syncer {
+class ObjectIdInvalidationMap;
+
// A helper class for implementations of the Invalidator interface. It helps
// keep track of registered handlers and which object ID registrations are
// associated with which handlers, so implementors can just reuse the logic
@@ -76,15 +77,11 @@ class SYNC_EXPORT InvalidatorRegistrar {
void DetachFromThreadForTest();
private:
- typedef std::map<invalidation::ObjectId, InvalidationHandler*,
- ObjectIdLessThan>
- IdHandlerMap;
-
- InvalidationHandler* ObjectIdToHandler(const invalidation::ObjectId& id);
+ typedef std::map<InvalidationHandler*, ObjectIdSet> HandlerIdsMap;
base::ThreadChecker thread_checker_;
ObserverList<InvalidationHandler> handlers_;
- IdHandlerMap id_to_handler_map_;
+ HandlerIdsMap handler_to_ids_map_;
InvalidatorState state_;
DISALLOW_COPY_AND_ASSIGN(InvalidatorRegistrar);
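For reference, a sketch of the registration lifecycle against this interface (illustrative only; FakeInvalidationHandler is the test helper included by the unit tests below):

  InvalidatorRegistrar registrar;
  FakeInvalidationHandler handler;
  registrar.RegisterHandler(&handler);

  ObjectIdSet ids;
  ids.insert(invalidation::ObjectId(
      ipc::invalidation::ObjectSource::TEST, "a"));
  registrar.UpdateRegisteredIds(&handler, ids);            // register "a"
  registrar.UpdateRegisteredIds(&handler, ObjectIdSet());  // empty set clears
  registrar.UnregisterHandler(&handler);                   // drops map entry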
diff --git a/sync/notifier/invalidator_registrar_unittest.cc b/sync/notifier/invalidator_registrar_unittest.cc
index ad222477df..7a01401093 100644
--- a/sync/notifier/invalidator_registrar_unittest.cc
+++ b/sync/notifier/invalidator_registrar_unittest.cc
@@ -9,7 +9,6 @@
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidator_registrar.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
diff --git a/sync/notifier/invalidator_test_template.h b/sync/notifier/invalidator_test_template.h
index 0353000422..b86cf50341 100644
--- a/sync/notifier/invalidator_test_template.h
+++ b/sync/notifier/invalidator_test_template.h
@@ -81,11 +81,11 @@
#include "base/compiler_specific.h"
#include "google/cacheinvalidation/include/types.h"
#include "google/cacheinvalidation/types.pb.h"
+#include "sync/internal_api/public/base/invalidation_test_util.h"
+#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/fake_invalidation_state_tracker.h"
#include "sync/notifier/invalidator.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -135,13 +135,13 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
invalidator->RegisterHandler(&handler);
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
// Should be ignored since no IDs are registered to |handler|.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(0, handler.GetInvalidationCount());
ObjectIdSet ids;
@@ -152,25 +152,26 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
EXPECT_EQ(INVALIDATIONS_ENABLED, handler.GetInvalidatorState());
- ObjectIdInvalidationMap expected_states;
- expected_states[this->id1].payload = "1";
- expected_states[this->id2].payload = "2";
+ ObjectIdInvalidationMap expected_invalidations;
+ expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(1, handler.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
ids.erase(this->id1);
ids.insert(this->id3);
invalidator->UpdateRegisteredIds(&handler, ids);
- expected_states.erase(this->id1);
- expected_states[this->id3].payload = "3";
+ expected_invalidations = ObjectIdInvalidationMap();
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
+ expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
// Removed object IDs should not be notified, newly-added ones should.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(2, handler.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR,
@@ -184,7 +185,7 @@ TYPED_TEST_P(InvalidatorTest, Basic) {
invalidator->UnregisterHandler(&handler);
// Should be ignored since |handler| isn't registered anymore.
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(2, handler.GetInvalidationCount());
}
@@ -236,25 +237,26 @@ TYPED_TEST_P(InvalidatorTest, MultipleHandlers) {
EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler4.GetInvalidatorState());
{
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
- states[this->id4].payload = "4";
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
+ invalidation_map.Insert(Invalidation::Init(this->id4, 4, "4"));
- ObjectIdInvalidationMap expected_states;
- expected_states[this->id1].payload = "1";
- expected_states[this->id2].payload = "2";
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
+
+ ObjectIdInvalidationMap expected_invalidations;
+ expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
+ expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
EXPECT_EQ(1, handler1.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler1.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler1.GetLastInvalidationMap()));
- expected_states.clear();
- expected_states[this->id3].payload = "3";
+ expected_invalidations = ObjectIdInvalidationMap();
+ expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
EXPECT_EQ(1, handler2.GetInvalidationCount());
- EXPECT_THAT(expected_states, Eq(handler2.GetLastInvalidationMap()));
+ EXPECT_THAT(expected_invalidations, Eq(handler2.GetLastInvalidationMap()));
EXPECT_EQ(0, handler3.GetInvalidationCount());
EXPECT_EQ(0, handler4.GetInvalidationCount());
@@ -306,11 +308,11 @@ TYPED_TEST_P(InvalidatorTest, EmptySetUnregisters) {
EXPECT_EQ(INVALIDATIONS_ENABLED, handler2.GetInvalidatorState());
{
- ObjectIdInvalidationMap states;
- states[this->id1].payload = "1";
- states[this->id2].payload = "2";
- states[this->id3].payload = "3";
- this->delegate_.TriggerOnIncomingInvalidation(states);
+ ObjectIdInvalidationMap invalidation_map;
+ invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
+ invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
+ invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
+ this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
EXPECT_EQ(0, handler1.GetInvalidationCount());
EXPECT_EQ(1, handler2.GetInvalidationCount());
}
diff --git a/sync/notifier/non_blocking_invalidator.cc b/sync/notifier/non_blocking_invalidator.cc
index d4c602b4e4..ca89132a92 100644
--- a/sync/notifier/non_blocking_invalidator.cc
+++ b/sync/notifier/non_blocking_invalidator.cc
@@ -141,13 +141,12 @@ NonBlockingInvalidator::NonBlockingInvalidator(
const WeakHandle<InvalidationStateTracker>&
invalidation_state_tracker,
const std::string& client_info)
- : weak_ptr_factory_(this),
- core_(
- new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()))),
- parent_task_runner_(
- base::ThreadTaskRunnerHandle::Get()),
- network_task_runner_(notifier_options.request_context_getter->
- GetNetworkTaskRunner()) {
+ : parent_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ network_task_runner_(
+ notifier_options.request_context_getter->GetNetworkTaskRunner()),
+ weak_ptr_factory_(this) {
+ core_ = new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()));
+
if (!network_task_runner_->PostTask(
FROM_HERE,
base::Bind(
diff --git a/sync/notifier/non_blocking_invalidator.h b/sync/notifier/non_blocking_invalidator.h
index f2685c702d..2cb590380e 100644
--- a/sync/notifier/non_blocking_invalidator.h
+++ b/sync/notifier/non_blocking_invalidator.h
@@ -66,8 +66,6 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
private:
class Core;
- base::WeakPtrFactory<NonBlockingInvalidator> weak_ptr_factory_;
-
InvalidatorRegistrar registrar_;
// The real guts of NonBlockingInvalidator, which allows this class to live
@@ -76,6 +74,8 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
+ base::WeakPtrFactory<NonBlockingInvalidator> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(NonBlockingInvalidator);
};
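The member reordering in this file (and in the other classes touched below) follows the usual WeakPtrFactory rule; a hedged sketch of the pattern with a hypothetical class:

  class Example {
   public:
    Example() : some_state_(0), weak_ptr_factory_(this) {}
   private:
    int some_state_;
    // Declared last so it is destroyed first, invalidating any outstanding
    // WeakPtrs before the rest of the members go away.
    base::WeakPtrFactory<Example> weak_ptr_factory_;
  };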
diff --git a/sync/notifier/non_blocking_invalidator_unittest.cc b/sync/notifier/non_blocking_invalidator_unittest.cc
index f463077c08..e9cf31e121 100644
--- a/sync/notifier/non_blocking_invalidator_unittest.cc
+++ b/sync/notifier/non_blocking_invalidator_unittest.cc
@@ -17,7 +17,6 @@
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidation_state_tracker.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
diff --git a/sync/notifier/object_id_invalidation_map.cc b/sync/notifier/object_id_invalidation_map.cc
index bde2e4c245..e1f584d0e4 100644
--- a/sync/notifier/object_id_invalidation_map.cc
+++ b/sync/notifier/object_id_invalidation_map.cc
@@ -1,93 +1,112 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sync/notifier/object_id_invalidation_map.h"
-#include <algorithm>
-
-#include "base/compiler_specific.h"
-#include "base/values.h"
+#include "base/json/json_string_value_serializer.h"
namespace syncer {
-ObjectIdSet ObjectIdInvalidationMapToSet(
- const ObjectIdInvalidationMap& invalidation_map) {
- ObjectIdSet ids;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- ids.insert(it->first);
+// static
+ObjectIdInvalidationMap ObjectIdInvalidationMap::InvalidateAll(
+ const ObjectIdSet& ids) {
+ ObjectIdInvalidationMap invalidate_all;
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
+ invalidate_all.Insert(Invalidation::InitUnknownVersion(*it));
}
- return ids;
+ return invalidate_all;
+}
+
+ObjectIdInvalidationMap::ObjectIdInvalidationMap() {}
+
+ObjectIdInvalidationMap::~ObjectIdInvalidationMap() {}
+
+ObjectIdSet ObjectIdInvalidationMap::GetObjectIds() const {
+ ObjectIdSet ret;
+ for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
+ ret.insert(it->first);
+ }
+ return ret;
+}
+
+bool ObjectIdInvalidationMap::Empty() const {
+ return map_.empty();
+}
+
+void ObjectIdInvalidationMap::Insert(const Invalidation& invalidation) {
+ map_[invalidation.object_id()].Insert(invalidation);
}
-ObjectIdInvalidationMap ObjectIdSetToInvalidationMap(
- const ObjectIdSet& ids, int64 version, const std::string& payload) {
- ObjectIdInvalidationMap invalidation_map;
+ObjectIdInvalidationMap ObjectIdInvalidationMap::GetSubsetWithObjectIds(
+ const ObjectIdSet& ids) const {
+ IdToListMap new_map;
for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- // TODO(dcheng): Do we need to provide a way to set AckHandle?
- invalidation_map[*it].version = version;
- invalidation_map[*it].payload = payload;
+ IdToListMap::const_iterator lookup = map_.find(*it);
+ if (lookup != map_.end()) {
+ new_map[*it] = lookup->second;
+ }
}
- return invalidation_map;
+ return ObjectIdInvalidationMap(new_map);
}
-namespace {
+const SingleObjectInvalidationSet& ObjectIdInvalidationMap::ForObject(
+ invalidation::ObjectId id) const {
+ IdToListMap::const_iterator lookup = map_.find(id);
+ DCHECK(lookup != map_.end());
+ DCHECK(!lookup->second.IsEmpty());
+ return lookup->second;
+}
-struct ObjectIdInvalidationMapValueEquals {
- bool operator()(const ObjectIdInvalidationMap::value_type& value1,
- const ObjectIdInvalidationMap::value_type& value2) const {
- return
- (value1.first == value2.first) &&
- value1.second.Equals(value2.second);
+void ObjectIdInvalidationMap::GetAllInvalidations(
+ std::vector<syncer::Invalidation>* out) const {
+ for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
+ out->insert(out->begin(), it->second.begin(), it->second.end());
}
-};
-
-} // namespace
-
-bool ObjectIdInvalidationMapEquals(
- const ObjectIdInvalidationMap& invalidation_map1,
- const ObjectIdInvalidationMap& invalidation_map2) {
- return
- (invalidation_map1.size() == invalidation_map2.size()) &&
- std::equal(invalidation_map1.begin(), invalidation_map1.end(),
- invalidation_map2.begin(),
- ObjectIdInvalidationMapValueEquals());
}
-scoped_ptr<base::ListValue> ObjectIdInvalidationMapToValue(
- const ObjectIdInvalidationMap& invalidation_map) {
+bool ObjectIdInvalidationMap::operator==(
+ const ObjectIdInvalidationMap& other) const {
+ return map_ == other.map_;
+}
+
+scoped_ptr<base::ListValue> ObjectIdInvalidationMap::ToValue() const {
scoped_ptr<base::ListValue> value(new base::ListValue());
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- base::DictionaryValue* entry = new base::DictionaryValue();
- entry->Set("objectId", ObjectIdToValue(it->first).release());
- entry->Set("state", it->second.ToValue().release());
- value->Append(entry);
+ for (IdToListMap::const_iterator it1 = map_.begin();
+ it1 != map_.end(); ++it1) {
+ for (SingleObjectInvalidationSet::const_iterator it2 =
+ it1->second.begin(); it2 != it1->second.end(); ++it2) {
+ value->Append(it2->ToValue().release());
+ }
}
return value.Pass();
}
-bool ObjectIdInvalidationMapFromValue(const base::ListValue& value,
- ObjectIdInvalidationMap* out) {
- out->clear();
- for (base::ListValue::const_iterator it = value.begin();
- it != value.end(); ++it) {
- const base::DictionaryValue* entry = NULL;
- const base::DictionaryValue* id_value = NULL;
- const base::DictionaryValue* invalidation_value = NULL;
- invalidation::ObjectId id;
- Invalidation invalidation;
- if (!(*it)->GetAsDictionary(&entry) ||
- !entry->GetDictionary("objectId", &id_value) ||
- !entry->GetDictionary("state", &invalidation_value) ||
- !ObjectIdFromValue(*id_value, &id) ||
- !invalidation.ResetFromValue(*invalidation_value)) {
+bool ObjectIdInvalidationMap::ResetFromValue(const base::ListValue& value) {
+ map_.clear();
+ for (size_t i = 0; i < value.GetSize(); ++i) {
+    const base::DictionaryValue* dict;
+ if (!value.GetDictionary(i, &dict)) {
return false;
}
- ignore_result(out->insert(std::make_pair(id, invalidation)));
+ scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
+ if (!invalidation) {
+ return false;
+ }
+ Insert(*invalidation.get());
}
return true;
}
+std::string ObjectIdInvalidationMap::ToString() const {
+ std::string output;
+ JSONStringValueSerializer serializer(&output);
+ serializer.set_pretty_print(true);
+ serializer.Serialize(*ToValue().get());
+ return output;
+}
+
+ObjectIdInvalidationMap::ObjectIdInvalidationMap(const IdToListMap& map)
+ : map_(map) {}
+
} // namespace syncer
diff --git a/sync/notifier/object_id_invalidation_map.h b/sync/notifier/object_id_invalidation_map.h
index bb97fb3b13..c17e101e83 100644
--- a/sync/notifier/object_id_invalidation_map.h
+++ b/sync/notifier/object_id_invalidation_map.h
@@ -1,4 +1,4 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,40 +6,67 @@
#define SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_H_
#include <map>
-#include <string>
+#include <vector>
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "google/cacheinvalidation/include/types.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/invalidation.h"
#include "sync/notifier/invalidation_util.h"
-
-namespace base {
-class ListValue;
-} // namespace base
+#include "sync/notifier/single_object_invalidation_set.h"
namespace syncer {
-typedef std::map<invalidation::ObjectId,
- Invalidation,
- ObjectIdLessThan> ObjectIdInvalidationMap;
+// A set of invalidations, with helper methods to organize them by object ID
+// and version number.
+class SYNC_EXPORT ObjectIdInvalidationMap {
+ public:
+ // Creates an invalidation map that includes an 'unknown version'
+ // invalidation for each specified ID in |ids|.
+ static ObjectIdInvalidationMap InvalidateAll(const ObjectIdSet& ids);
+
+ ObjectIdInvalidationMap();
+ ~ObjectIdInvalidationMap();
+
+  // Returns the set of ObjectIds that have at least one invalidation.
+ ObjectIdSet GetObjectIds() const;
+
+ // Returns true if this map contains no invalidations.
+ bool Empty() const;
+
+ // Returns true if both maps contain the same set of invalidations.
+ bool operator==(const ObjectIdInvalidationMap& other) const;
+
+ // Inserts a new invalidation into this map.
+ void Insert(const Invalidation& invalidation);
+
+  // Returns a new map containing the subset of invalidations from this map
+  // whose IDs are in the specified |ids| set.
+ ObjectIdInvalidationMap GetSubsetWithObjectIds(const ObjectIdSet& ids) const;
+
+ // Returns the subset of invalidations with IDs matching |id|.
+ const SingleObjectInvalidationSet& ForObject(
+ invalidation::ObjectId id) const;
+
+ // Returns the contents of this map in a single vector.
+ void GetAllInvalidations(std::vector<syncer::Invalidation>* out) const;
+
+ // Serialize this map to a value.
+ scoped_ptr<base::ListValue> ToValue() const;
+
+ // Deserialize the value into a map and use it to re-initialize this object.
+ bool ResetFromValue(const base::ListValue& value);
-// Converts between ObjectIdInvalidationMaps and ObjectIdSets.
-ObjectIdSet ObjectIdInvalidationMapToSet(
- const ObjectIdInvalidationMap& invalidation_map);
-SYNC_EXPORT ObjectIdInvalidationMap ObjectIdSetToInvalidationMap(
- const ObjectIdSet& ids, int64 version, const std::string& payload);
+  // Prints the contents of this map as a human-readable string.
+ std::string ToString() const;
-SYNC_EXPORT bool ObjectIdInvalidationMapEquals(
- const ObjectIdInvalidationMap& invalidation_map1,
- const ObjectIdInvalidationMap& invalidation_map2);
+ private:
+ typedef std::map<invalidation::ObjectId,
+ SingleObjectInvalidationSet,
+ ObjectIdLessThan> IdToListMap;
-scoped_ptr<base::ListValue> ObjectIdInvalidationMapToValue(
- const ObjectIdInvalidationMap& invalidation_map);
+ ObjectIdInvalidationMap(const IdToListMap& map);
-bool ObjectIdInvalidationMapFromValue(const base::ListValue& value,
- ObjectIdInvalidationMap* out);
+ IdToListMap map_;
+};
} // namespace syncer
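A short usage sketch of the class defined above (illustrative only; the object ID source mirrors the one used in the new unit tests):

  invalidation::ObjectId id(ipc::invalidation::ObjectSource::TEST, "one");
  ObjectIdInvalidationMap map;
  map.Insert(Invalidation::InitUnknownVersion(id));
  map.Insert(Invalidation::Init(id, 10, "payload"));

  ObjectIdSet ids = map.GetObjectIds();  // contains |id|
  const SingleObjectInvalidationSet& list = map.ForObject(id);
  DCHECK(list.StartsWithUnknownVersion());
  DCHECK(map.GetSubsetWithObjectIds(ids) == map);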
diff --git a/sync/notifier/object_id_invalidation_map_unittest.cc b/sync/notifier/object_id_invalidation_map_unittest.cc
new file mode 100644
index 0000000000..1acd920b79
--- /dev/null
+++ b/sync/notifier/object_id_invalidation_map_unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/object_id_invalidation_map.h"
+
+#include "google/cacheinvalidation/types.pb.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+class ObjectIdInvalidationMapTest : public testing::Test {
+ public:
+ ObjectIdInvalidationMapTest()
+ : kIdOne(ipc::invalidation::ObjectSource::TEST, "one"),
+ kIdTwo(ipc::invalidation::ObjectSource::TEST, "two"),
+ kInv1(Invalidation::Init(kIdOne, 10, "ten")) {
+ set1.insert(kIdOne);
+ set2.insert(kIdTwo);
+ all_set.insert(kIdOne);
+ all_set.insert(kIdTwo);
+
+ one_invalidation.Insert(kInv1);
+ invalidate_all = ObjectIdInvalidationMap::InvalidateAll(all_set);
+ }
+
+ protected:
+ const invalidation::ObjectId kIdOne;
+ const invalidation::ObjectId kIdTwo;
+ const Invalidation kInv1;
+
+ ObjectIdSet set1;
+ ObjectIdSet set2;
+ ObjectIdSet all_set;
+ ObjectIdInvalidationMap empty;
+ ObjectIdInvalidationMap one_invalidation;
+ ObjectIdInvalidationMap invalidate_all;
+};
+
+TEST_F(ObjectIdInvalidationMapTest, Empty) {
+ EXPECT_TRUE(empty.Empty());
+ EXPECT_FALSE(one_invalidation.Empty());
+ EXPECT_FALSE(invalidate_all.Empty());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, Equality) {
+ ObjectIdInvalidationMap empty2;
+ EXPECT_TRUE(empty == empty2);
+
+ ObjectIdInvalidationMap one_invalidation2;
+ one_invalidation2.Insert(kInv1);
+ EXPECT_TRUE(one_invalidation == one_invalidation2);
+
+ EXPECT_FALSE(empty == invalidate_all);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, GetObjectIds) {
+ EXPECT_EQ(ObjectIdSet(), empty.GetObjectIds());
+ EXPECT_EQ(set1, one_invalidation.GetObjectIds());
+ EXPECT_EQ(all_set, invalidate_all.GetObjectIds());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, GetSubsetWithObjectIds) {
+ EXPECT_TRUE(empty.GetSubsetWithObjectIds(set1).Empty());
+
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set1) ==
+ one_invalidation);
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(all_set) ==
+ one_invalidation);
+ EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set2).Empty());
+
+ EXPECT_TRUE(invalidate_all.GetSubsetWithObjectIds(ObjectIdSet()).Empty());
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeEmpty) {
+ scoped_ptr<base::ListValue> value = empty.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(empty == deserialized);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeOneInvalidation) {
+ scoped_ptr<base::ListValue> value = one_invalidation.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(one_invalidation == deserialized);
+}
+
+TEST_F(ObjectIdInvalidationMapTest, SerializeInvalidateAll) {
+ scoped_ptr<base::ListValue> value = invalidate_all.ToValue();
+ ASSERT_TRUE(value.get());
+ ObjectIdInvalidationMap deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(invalidate_all == deserialized);
+}
+
+} // namespace
+
+} // namespace syncer
diff --git a/sync/notifier/p2p_invalidator.cc b/sync/notifier/p2p_invalidator.cc
index 2468a51a73..3d47f418bb 100644
--- a/sync/notifier/p2p_invalidator.cc
+++ b/sync/notifier/p2p_invalidator.cc
@@ -14,6 +14,7 @@
#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidation_handler.h"
#include "sync/notifier/invalidation_util.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace syncer {
@@ -27,7 +28,7 @@ const char kNotifyAll[] = "notifyAll";
const char kSenderIdKey[] = "senderId";
const char kNotificationTypeKey[] = "notificationType";
-const char kIdInvalidationMapKey[] = "idInvalidationMap";
+const char kInvalidationsKey[] = "invalidations";
} // namespace
@@ -96,8 +97,7 @@ bool P2PNotificationData::Equals(const P2PNotificationData& other) const {
return
(sender_id_ == other.sender_id_) &&
(target_ == other.target_) &&
- ObjectIdInvalidationMapEquals(invalidation_map_,
- other.invalidation_map_);
+ (invalidation_map_ == other.invalidation_map_);
}
std::string P2PNotificationData::ToString() const {
@@ -105,8 +105,7 @@ std::string P2PNotificationData::ToString() const {
dict->SetString(kSenderIdKey, sender_id_);
dict->SetString(kNotificationTypeKey,
P2PNotificationTargetToString(target_));
- dict->Set(kIdInvalidationMapKey,
- ObjectIdInvalidationMapToValue(invalidation_map_).release());
+ dict->Set(kInvalidationsKey, invalidation_map_.ToValue().release());
std::string json;
base::JSONWriter::Write(dict.get(), &json);
return json;
@@ -129,10 +128,9 @@ bool P2PNotificationData::ResetFromString(const std::string& str) {
}
target_ = P2PNotificationTargetFromString(target_str);
const base::ListValue* invalidation_map_list = NULL;
- if (!data_dict->GetList(kIdInvalidationMapKey, &invalidation_map_list) ||
- !ObjectIdInvalidationMapFromValue(*invalidation_map_list,
- &invalidation_map_)) {
- LOG(WARNING) << "Could not parse " << kIdInvalidationMapKey;
+ if (!data_dict->GetList(kInvalidationsKey, &invalidation_map_list) ||
+ !invalidation_map_.ResetFromValue(*invalidation_map_list)) {
+ LOG(WARNING) << "Could not parse " << kInvalidationsKey;
}
return true;
}
@@ -161,8 +159,7 @@ void P2PInvalidator::RegisterHandler(InvalidationHandler* handler) {
}
void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- // TODO(akalin): Handle arbitrary object IDs (http://crbug.com/140411).
+ const ObjectIdSet& ids) {
DCHECK(thread_checker_.CalledOnValidThread());
ObjectIdSet new_ids;
const ObjectIdSet& old_ids = registrar_.GetRegisteredIds(handler);
@@ -173,10 +170,8 @@ void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
registrar_.UpdateRegisteredIds(handler, ids);
const P2PNotificationData notification_data(
invalidator_client_id_,
- NOTIFY_SELF,
- ObjectIdSetToInvalidationMap(new_ids,
- Invalidation::kUnknownVersion,
- std::string()));
+ send_notification_target_,
+ ObjectIdInvalidationMap::InvalidateAll(ids));
SendNotificationData(notification_data);
}
@@ -213,9 +208,10 @@ void P2PInvalidator::UpdateCredentials(
logged_in_ = true;
}
-void P2PInvalidator::SendInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
+void P2PInvalidator::SendInvalidation(const ObjectIdSet& ids) {
DCHECK(thread_checker_.CalledOnValidThread());
+ ObjectIdInvalidationMap invalidation_map =
+ ObjectIdInvalidationMap::InvalidateAll(ids);
const P2PNotificationData notification_data(
invalidator_client_id_, send_notification_target_, invalidation_map);
SendNotificationData(notification_data);
@@ -230,9 +226,8 @@ void P2PInvalidator::OnNotificationsEnabled() {
const P2PNotificationData notification_data(
invalidator_client_id_,
NOTIFY_SELF,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
- Invalidation::kUnknownVersion,
- std::string()));
+ ObjectIdInvalidationMap::InvalidateAll(
+ registrar_.GetAllRegisteredIds()));
SendNotificationData(notification_data);
}
}
@@ -266,9 +261,8 @@ void P2PInvalidator::OnIncomingNotification(
notification_data = P2PNotificationData(
invalidator_client_id_,
NOTIFY_ALL,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
- Invalidation::kUnknownVersion,
- std::string()));
+ ObjectIdInvalidationMap::InvalidateAll(
+ registrar_.GetAllRegisteredIds()));
}
if (!notification_data.IsTargeted(invalidator_client_id_)) {
DVLOG(1) << "Not a target of the notification -- "
@@ -288,7 +282,7 @@ void P2PInvalidator::SendNotificationDataForTest(
void P2PInvalidator::SendNotificationData(
const P2PNotificationData& notification_data) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (notification_data.GetIdInvalidationMap().empty()) {
+ if (notification_data.GetIdInvalidationMap().Empty()) {
DVLOG(1) << "Not sending XMPP notification with empty state map: "
<< notification_data.ToString();
return;
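A sketch of the narrowed SendInvalidation() call path after this change (illustrative only; |invalidator| is hypothetical):

  ObjectIdSet ids = ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS));
  // The invalidator now expands the set itself via
  // ObjectIdInvalidationMap::InvalidateAll(ids).
  invalidator->SendInvalidation(ids);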
diff --git a/sync/notifier/p2p_invalidator.h b/sync/notifier/p2p_invalidator.h
index 515b27b5c4..bf622f0926 100644
--- a/sync/notifier/p2p_invalidator.h
+++ b/sync/notifier/p2p_invalidator.h
@@ -24,6 +24,7 @@
#include "sync/notifier/invalidator.h"
#include "sync/notifier/invalidator_registrar.h"
#include "sync/notifier/invalidator_state.h"
+#include "sync/notifier/object_id_invalidation_map.h"
namespace notifier {
class PushClient;
@@ -118,8 +119,7 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
virtual void OnIncomingNotification(
const notifier::Notification& notification) OVERRIDE;
- void SendInvalidation(
- const ObjectIdInvalidationMap& invalidation_map);
+ void SendInvalidation(const ObjectIdSet& ids);
void SendNotificationDataForTest(
const P2PNotificationData& notification_data);
diff --git a/sync/notifier/p2p_invalidator_unittest.cc b/sync/notifier/p2p_invalidator_unittest.cc
index 24cfe027f6..3cbabaf67f 100644
--- a/sync/notifier/p2p_invalidator_unittest.cc
+++ b/sync/notifier/p2p_invalidator_unittest.cc
@@ -10,7 +10,6 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/notifier/fake_invalidation_handler.h"
#include "sync/notifier/invalidator_test_template.h"
-#include "sync/notifier/object_id_invalidation_map_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -98,9 +97,7 @@ class P2PInvalidatorTest : public testing::Test {
ObjectIdInvalidationMap MakeInvalidationMap(ModelTypeSet types) {
ObjectIdInvalidationMap invalidations;
ObjectIdSet ids = ModelTypeSetToObjectIdSet(types);
- return ObjectIdSetToInvalidationMap(ids,
- Invalidation::kUnknownVersion,
- std::string());
+ return ObjectIdInvalidationMap::InvalidateAll(ids);
}
// Simulate receiving all the notifications we sent out since last
@@ -166,10 +163,10 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
EXPECT_TRUE(notification_data.IsTargeted(std::string()));
EXPECT_FALSE(notification_data.IsTargeted("other1"));
EXPECT_FALSE(notification_data.IsTargeted("other2"));
- EXPECT_TRUE(notification_data.GetIdInvalidationMap().empty());
+ EXPECT_TRUE(notification_data.GetIdInvalidationMap().Empty());
const std::string& notification_data_str = notification_data.ToString();
EXPECT_EQ(
- "{\"idInvalidationMap\":[],\"notificationType\":\"notifySelf\","
+ "{\"invalidations\":[],\"notificationType\":\"notifySelf\","
"\"senderId\":\"\"}", notification_data_str);
P2PNotificationData notification_data_parsed;
@@ -180,27 +177,22 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
// Make sure the P2PNotificationData <-> string conversions work for a
// non-default-constructed P2PNotificationData.
TEST_F(P2PInvalidatorTest, P2PNotificationDataNonDefault) {
- const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)),
- Invalidation::kUnknownVersion,
- std::string());
- const P2PNotificationData notification_data(
- "sender", NOTIFY_ALL, invalidation_map);
+ ObjectIdInvalidationMap invalidation_map =
+ MakeInvalidationMap(ModelTypeSet(BOOKMARKS, THEMES));
+ const P2PNotificationData notification_data("sender",
+ NOTIFY_ALL,
+ invalidation_map);
EXPECT_TRUE(notification_data.IsTargeted("sender"));
EXPECT_TRUE(notification_data.IsTargeted("other1"));
EXPECT_TRUE(notification_data.IsTargeted("other2"));
- EXPECT_THAT(invalidation_map,
- Eq(notification_data.GetIdInvalidationMap()));
+ EXPECT_EQ(invalidation_map, notification_data.GetIdInvalidationMap());
const std::string& notification_data_str = notification_data.ToString();
EXPECT_EQ(
- "{\"idInvalidationMap\":["
- "{\"objectId\":{\"name\":\"BOOKMARK\",\"source\":1004},"
- "\"state\":{\"ackHandle\":{\"state\":\"\",\"timestamp\":\"0\"},"
- "\"payload\":\"\",\"version\":\"-1\"}},"
- "{\"objectId\":{\"name\":\"THEME\",\"source\":1004},"
- "\"state\":{\"ackHandle\":{\"state\":\"\",\"timestamp\":\"0\"},"
- "\"payload\":\"\",\"version\":\"-1\"}}"
+ "{\"invalidations\":["
+ "{\"isUnknownVersion\":true,"
+ "\"objectId\":{\"name\":\"BOOKMARK\",\"source\":1004}},"
+ "{\"isUnknownVersion\":true,"
+ "\"objectId\":{\"name\":\"THEME\",\"source\":1004}}"
"],\"notificationType\":\"notifyAll\","
"\"senderId\":\"sender\"}", notification_data_str);
@@ -248,14 +240,8 @@ TEST_F(P2PInvalidatorTest, NotificationsBasic) {
// Sent with target NOTIFY_OTHERS so should not be propagated to
// |fake_handler_|.
- {
- const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)),
- Invalidation::kUnknownVersion,
- std::string());
- invalidator->SendInvalidation(invalidation_map);
- }
+ invalidator->SendInvalidation(
+ ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)));
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
@@ -270,9 +256,7 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
const ModelTypeSet expected_types(THEMES);
const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(ModelTypeSetToObjectIdSet(changed_types),
- Invalidation::kUnknownVersion,
- std::string());
+ MakeInvalidationMap(changed_types);
P2PInvalidator* const invalidator = delegate_.GetInvalidator();
notifier::FakePushClient* const push_client = delegate_.GetPushClient();
@@ -288,23 +272,23 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(MakeInvalidationMap(enabled_types),
- Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(ModelTypeSetToObjectIdSet(enabled_types),
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(P2PNotificationData());
ReflectSentNotifications();
EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- const ObjectIdInvalidationMap& expected_ids =
- MakeInvalidationMap(expected_types);
+ const ObjectIdSet& expected_ids = ModelTypeSetToObjectIdSet(expected_types);
// Should be propagated.
invalidator->SendNotificationDataForTest(
P2PNotificationData("sender", NOTIFY_SELF, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
@@ -329,7 +313,8 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
P2PNotificationData("sender2", NOTIFY_OTHERS, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(3, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
@@ -342,14 +327,16 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
P2PNotificationData("sender", NOTIFY_ALL, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(4, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be propagated.
invalidator->SendNotificationDataForTest(
P2PNotificationData("sender2", NOTIFY_ALL, invalidation_map));
ReflectSentNotifications();
EXPECT_EQ(5, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(expected_ids, Eq(fake_handler_.GetLastInvalidationMap()));
+ EXPECT_EQ(expected_ids,
+ fake_handler_.GetLastInvalidationMap().GetObjectIds());
// Should be dropped.
invalidator->SendNotificationDataForTest(
diff --git a/sync/notifier/single_object_invalidation_set.cc b/sync/notifier/single_object_invalidation_set.cc
new file mode 100644
index 0000000000..55202bb148
--- /dev/null
+++ b/sync/notifier/single_object_invalidation_set.cc
@@ -0,0 +1,130 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/single_object_invalidation_set.h"
+
+#include "base/values.h"
+#include "sync/notifier/invalidation_util.h"
+
+namespace syncer {
+
+bool InvalidationVersionLessThan::operator()(
+ const Invalidation& a,
+ const Invalidation& b) {
+ DCHECK(a.object_id() == b.object_id())
+ << "a: " << ObjectIdToString(a.object_id()) << ", "
+ << "b: " << ObjectIdToString(a.object_id());
+
+ if (a.is_unknown_version() && !b.is_unknown_version())
+ return true;
+
+ if (!a.is_unknown_version() && b.is_unknown_version())
+ return false;
+
+ if (a.is_unknown_version() && b.is_unknown_version())
+ return false;
+
+ return a.version() < b.version();
+}
+
+SingleObjectInvalidationSet::SingleObjectInvalidationSet() {}
+
+SingleObjectInvalidationSet::~SingleObjectInvalidationSet() {}
+
+void SingleObjectInvalidationSet::Insert(const Invalidation& invalidation) {
+ invalidations_.insert(invalidation);
+}
+
+void SingleObjectInvalidationSet::InsertAll(
+ const SingleObjectInvalidationSet& other) {
+ invalidations_.insert(other.begin(), other.end());
+}
+
+void SingleObjectInvalidationSet::Clear() {
+ invalidations_.clear();
+}
+
+bool SingleObjectInvalidationSet::StartsWithUnknownVersion() const {
+ return !invalidations_.empty() &&
+ invalidations_.begin()->is_unknown_version();
+}
+
+size_t SingleObjectInvalidationSet::GetSize() const {
+ return invalidations_.size();
+}
+
+bool SingleObjectInvalidationSet::IsEmpty() const {
+ return invalidations_.empty();
+}
+
+namespace {
+
+struct InvalidationComparator {
+ bool operator()(const Invalidation& inv1, const Invalidation& inv2) {
+ return inv1.Equals(inv2);
+ }
+};
+
+} // namespace
+
+bool SingleObjectInvalidationSet::operator==(
+ const SingleObjectInvalidationSet& other) const {
+  return invalidations_.size() == other.invalidations_.size() &&
+         std::equal(invalidations_.begin(),
+                    invalidations_.end(),
+                    other.invalidations_.begin(),
+                    InvalidationComparator());
+}
+
+SingleObjectInvalidationSet::const_iterator
+SingleObjectInvalidationSet::begin() const {
+ return invalidations_.begin();
+}
+
+SingleObjectInvalidationSet::const_iterator
+SingleObjectInvalidationSet::end() const {
+ return invalidations_.end();
+}
+
+SingleObjectInvalidationSet::const_reverse_iterator
+SingleObjectInvalidationSet::rbegin() const {
+ return invalidations_.rbegin();
+}
+
+SingleObjectInvalidationSet::const_reverse_iterator
+SingleObjectInvalidationSet::rend() const {
+ return invalidations_.rend();
+}
+
+const Invalidation& SingleObjectInvalidationSet::back() const {
+ return *invalidations_.rbegin();
+}
+
+scoped_ptr<base::ListValue> SingleObjectInvalidationSet::ToValue() const {
+  scoped_ptr<base::ListValue> value(new base::ListValue());
+ for (InvalidationsSet::const_iterator it = invalidations_.begin();
+ it != invalidations_.end(); ++it) {
+ value->Append(it->ToValue().release());
+ }
+ return value.Pass();
+}
+
+bool SingleObjectInvalidationSet::ResetFromValue(
+ const base::ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ const base::DictionaryValue* dict;
+ if (!list.GetDictionary(i, &dict)) {
+ DLOG(WARNING) << "Could not find invalidation at index " << i;
+ return false;
+ }
+ scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
+ if (!invalidation) {
+ DLOG(WARNING) << "Failed to parse invalidation at index " << i;
+ return false;
+ }
+ invalidations_.insert(*invalidation);
+ }
+ return true;
+}
+
+} // namespace syncer
diff --git a/sync/notifier/single_object_invalidation_set.h b/sync/notifier/single_object_invalidation_set.h
new file mode 100644
index 0000000000..c4dd051b65
--- /dev/null
+++ b/sync/notifier/single_object_invalidation_set.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
+#define SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
+
+#include <set>
+
+#include "base/memory/scoped_ptr.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/base/invalidation.h"
+
+namespace base {
+class ListValue;
+} // namespace base
+
+namespace syncer {
+
+struct InvalidationVersionLessThan {
+ bool operator()(const Invalidation& a, const Invalidation& b);
+};
+
+// Holds a list of invalidations that all share the same Object ID.
+//
+// The list is kept sorted by version to make it easier to perform common
+// operations, like checking for an unknown version invalidation or fetching
+// the invalidation with the highest version number.
+class SYNC_EXPORT SingleObjectInvalidationSet {
+ public:
+ typedef std::set<Invalidation, InvalidationVersionLessThan> InvalidationsSet;
+ typedef InvalidationsSet::const_iterator const_iterator;
+ typedef InvalidationsSet::const_reverse_iterator const_reverse_iterator;
+
+ SingleObjectInvalidationSet();
+ ~SingleObjectInvalidationSet();
+
+ void Insert(const Invalidation& invalidation);
+ void InsertAll(const SingleObjectInvalidationSet& other);
+ void Clear();
+
+ // Returns true if this list contains an unknown version.
+ //
+ // Unknown version invalidations always end up at the start of the list,
+ // because they have the lowest possible value in the sort ordering.
+ bool StartsWithUnknownVersion() const;
+ size_t GetSize() const;
+ bool IsEmpty() const;
+ bool operator==(const SingleObjectInvalidationSet& other) const;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+ const_reverse_iterator rbegin() const;
+ const_reverse_iterator rend() const;
+ const Invalidation& back() const;
+
+ scoped_ptr<base::ListValue> ToValue() const;
+ bool ResetFromValue(const base::ListValue& list);
+
+ private:
+ InvalidationsSet invalidations_;
+};
+
+}  // namespace syncer
+
+#endif // SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
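A small sketch of the ordering guarantee documented above (illustrative only; |kId| is a hypothetical object ID):

  SingleObjectInvalidationSet set;
  set.Insert(Invalidation::Init(kId, 5, "five"));
  set.Insert(Invalidation::InitUnknownVersion(kId));
  set.Insert(Invalidation::Init(kId, 1, "one"));
  // Iteration order: unknown version first, then version 1, then version 5.
  DCHECK(set.StartsWithUnknownVersion());
  DCHECK_EQ(5, set.back().version());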
diff --git a/sync/notifier/single_object_invalidation_set_unittest.cc b/sync/notifier/single_object_invalidation_set_unittest.cc
new file mode 100644
index 0000000000..3fe074e10c
--- /dev/null
+++ b/sync/notifier/single_object_invalidation_set_unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/single_object_invalidation_set.h"
+
+#include "google/cacheinvalidation/types.pb.h"
+#include "sync/internal_api/public/base/invalidation_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+class SingleObjectInvalidationSetTest : public testing::Test {
+ public:
+ SingleObjectInvalidationSetTest()
+ : kId(ipc::invalidation::ObjectSource::TEST, "one") {
+ }
+ protected:
+ const invalidation::ObjectId kId;
+};
+
+TEST_F(SingleObjectInvalidationSetTest, InsertionAndOrdering) {
+ SingleObjectInvalidationSet l1;
+ SingleObjectInvalidationSet l2;
+
+ Invalidation inv0 = Invalidation::InitUnknownVersion(kId);
+ Invalidation inv1 = Invalidation::Init(kId, 1, "one");
+ Invalidation inv2 = Invalidation::Init(kId, 5, "five");
+
+ l1.Insert(inv0);
+ l1.Insert(inv1);
+ l1.Insert(inv2);
+
+ l2.Insert(inv1);
+ l2.Insert(inv2);
+ l2.Insert(inv0);
+
+ ASSERT_EQ(3U, l1.GetSize());
+ ASSERT_EQ(3U, l2.GetSize());
+
+ SingleObjectInvalidationSet::const_iterator it1 = l1.begin();
+ SingleObjectInvalidationSet::const_iterator it2 = l2.begin();
+ EXPECT_THAT(inv0, Eq(*it1));
+ EXPECT_THAT(inv0, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_THAT(inv1, Eq(*it1));
+ EXPECT_THAT(inv1, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_THAT(inv2, Eq(*it1));
+ EXPECT_THAT(inv2, Eq(*it2));
+ it1++;
+ it2++;
+ EXPECT_TRUE(it1 == l1.end());
+ EXPECT_TRUE(it2 == l2.end());
+}
+
+TEST_F(SingleObjectInvalidationSetTest, StartWithUnknownVersion) {
+ SingleObjectInvalidationSet list;
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+
+ list.Insert(Invalidation::InitUnknownVersion(kId));
+ EXPECT_TRUE(list.StartsWithUnknownVersion());
+
+ list.Clear();
+ EXPECT_FALSE(list.StartsWithUnknownVersion());
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeEmpty) {
+ SingleObjectInvalidationSet list;
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeOne) {
+ SingleObjectInvalidationSet list;
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+TEST_F(SingleObjectInvalidationSetTest, SerializeMany) {
+ SingleObjectInvalidationSet list;
+ list.Insert(Invalidation::Init(kId, 1, "one"));
+ list.Insert(Invalidation::InitUnknownVersion(kId));
+
+ scoped_ptr<base::ListValue> value = list.ToValue();
+ ASSERT_TRUE(value.get());
+ SingleObjectInvalidationSet deserialized;
+ deserialized.ResetFromValue(*value.get());
+ EXPECT_TRUE(list == deserialized);
+}
+
+} // namespace
+
+} // namespace syncer
diff --git a/sync/notifier/sync_invalidation_listener.cc b/sync/notifier/sync_invalidation_listener.cc
index ed07f2070b..763cc876bf 100644
--- a/sync/notifier/sync_invalidation_listener.cc
+++ b/sync/notifier/sync_invalidation_listener.cc
@@ -22,6 +22,8 @@ namespace {
const char kApplicationName[] = "chrome-sync";
+static const int64 kUnknownVersion = -1;
+
} // namespace
namespace syncer {
@@ -31,13 +33,13 @@ SyncInvalidationListener::Delegate::~Delegate() {}
SyncInvalidationListener::SyncInvalidationListener(
base::TickClock* tick_clock,
scoped_ptr<notifier::PushClient> push_client)
- : weak_ptr_factory_(this),
- ack_tracker_(tick_clock, this),
+ : ack_tracker_(tick_clock, this),
push_client_(push_client.get()),
sync_system_resources_(push_client.Pass(), this),
delegate_(NULL),
ticl_state_(DEFAULT_INVALIDATION_ERROR),
- push_client_state_(DEFAULT_INVALIDATION_ERROR) {
+ push_client_state_(DEFAULT_INVALIDATION_ERROR),
+ weak_ptr_factory_(this) {
DCHECK(CalledOnValidThread());
push_client_->AddObserver(this);
}
@@ -220,7 +222,7 @@ void SyncInvalidationListener::InvalidateUnknownVersion(
ids.insert(object_id);
PrepareInvalidation(
ids,
- Invalidation::kUnknownVersion,
+ kUnknownVersion,
std::string(),
client,
ack_handle);
@@ -237,7 +239,7 @@ void SyncInvalidationListener::InvalidateAll(
PrepareInvalidation(
registered_ids_,
- Invalidation::kUnknownVersion,
+ kUnknownVersion,
std::string(),
client,
ack_handle);
@@ -275,13 +277,22 @@ void SyncInvalidationListener::EmitInvalidation(
const invalidation::AckHandle& ack_handle,
const AckHandleMap& local_ack_handles) {
DCHECK(CalledOnValidThread());
- ObjectIdInvalidationMap invalidation_map =
- ObjectIdSetToInvalidationMap(ids, version, payload);
+
+ ObjectIdInvalidationMap invalidation_map;
for (AckHandleMap::const_iterator it = local_ack_handles.begin();
it != local_ack_handles.end(); ++it) {
// Update in-memory copy of the invalidation state.
invalidation_state_map_[it->first].expected = it->second;
- invalidation_map[it->first].ack_handle = it->second;
+
+ if (version == kUnknownVersion) {
+ Invalidation inv = Invalidation::InitUnknownVersion(it->first);
+ inv.set_ack_handle(it->second);
+ invalidation_map.Insert(inv);
+ } else {
+ Invalidation inv = Invalidation::Init(it->first, version, payload);
+ inv.set_ack_handle(it->second);
+ invalidation_map.Insert(inv);
+ }
}
ack_tracker_.Track(ids);
delegate_->OnInvalidate(invalidation_map);
@@ -291,13 +302,19 @@ void SyncInvalidationListener::EmitInvalidation(
void SyncInvalidationListener::OnTimeout(const ObjectIdSet& ids) {
ObjectIdInvalidationMap invalidation_map;
for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- Invalidation invalidation;
- invalidation.ack_handle = invalidation_state_map_[*it].expected;
- invalidation.version = invalidation_state_map_[*it].version;
- invalidation.payload = invalidation_state_map_[*it].payload;
- invalidation_map.insert(std::make_pair(*it, invalidation));
+ if (invalidation_state_map_[*it].version == kUnknownVersion) {
+ Invalidation inv = Invalidation::InitUnknownVersion(*it);
+ inv.set_ack_handle(invalidation_state_map_[*it].expected);
+ invalidation_map.Insert(inv);
+ } else {
+ Invalidation inv = Invalidation::Init(
+ *it,
+ invalidation_state_map_[*it].version,
+ invalidation_state_map_[*it].payload);
+ inv.set_ack_handle(invalidation_state_map_[*it].expected);
+ invalidation_map.Insert(inv);
+ }
}
-
delegate_->OnInvalidate(invalidation_map);
}
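The EmitInvalidation() and OnTimeout() hunks above share the same construction pattern; a condensed sketch of that pattern (not new API, names refer to the locals in the code above):

  Invalidation inv = (version == kUnknownVersion)
      ? Invalidation::InitUnknownVersion(object_id)
      : Invalidation::Init(object_id, version, payload);
  inv.set_ack_handle(expected_ack_handle);
  invalidation_map.Insert(inv);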
diff --git a/sync/notifier/sync_invalidation_listener.h b/sync/notifier/sync_invalidation_listener.h
index d280e2e5c6..7b7b2fd640 100644
--- a/sync/notifier/sync_invalidation_listener.h
+++ b/sync/notifier/sync_invalidation_listener.h
@@ -165,7 +165,6 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
// AckTracker::Delegate implementation.
virtual void OnTimeout(const ObjectIdSet& ids) OVERRIDE;
- base::WeakPtrFactory<SyncInvalidationListener> weak_ptr_factory_;
AckTracker ack_tracker_;
// Owned by |sync_system_resources_|.
@@ -183,6 +182,8 @@ class SYNC_EXPORT_PRIVATE SyncInvalidationListener
InvalidatorState ticl_state_;
InvalidatorState push_client_state_;
+ base::WeakPtrFactory<SyncInvalidationListener> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SyncInvalidationListener);
};
diff --git a/sync/notifier/sync_invalidation_listener_unittest.cc b/sync/notifier/sync_invalidation_listener_unittest.cc
index d3aa712b7d..18dd12317c 100644
--- a/sync/notifier/sync_invalidation_listener_unittest.cc
+++ b/sync/notifier/sync_invalidation_listener_unittest.cc
@@ -141,37 +141,62 @@ class FakeDelegate : public SyncInvalidationListener::Delegate {
state_(TRANSIENT_INVALIDATION_ERROR) {}
virtual ~FakeDelegate() {}
- int GetInvalidationCount(const ObjectId& id) const {
- ObjectIdCountMap::const_iterator it = invalidation_counts_.find(id);
- return (it == invalidation_counts_.end()) ? 0 : it->second;
+ size_t GetInvalidationCount(const ObjectId& id) const {
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ return 0;
+ } else {
+ return it->second.size();
+ }
}
int64 GetVersion(const ObjectId& id) const {
- ObjectIdInvalidationMap::const_iterator it = invalidations_.find(id);
- return (it == invalidations_.end()) ? 0 : it->second.version;
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return 0;
+ } else {
+ return it->second.back().version();
+ }
}
std::string GetPayload(const ObjectId& id) const {
- ObjectIdInvalidationMap::const_iterator it = invalidations_.find(id);
- return (it == invalidations_.end()) ? std::string() : it->second.payload;
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return "";
+ } else {
+ return it->second.back().payload();
+ }
}
+ bool IsUnknownVersion(const ObjectId& id) const {
+ Map::const_iterator it = invalidations_.find(id);
+ if (it == invalidations_.end()) {
+ ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
+ return false;
+ } else {
+ return it->second.back().is_unknown_version();
+ }
+ }
InvalidatorState GetInvalidatorState() const {
return state_;
}
void Acknowledge(const ObjectId& id) {
- listener_->Acknowledge(id, invalidations_[id].ack_handle);
+ listener_->Acknowledge(id, invalidations_[id].back().ack_handle());
}
// SyncInvalidationListener::Delegate implementation.
virtual void OnInvalidate(
const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- ++invalidation_counts_[it->first];
- invalidations_[it->first] = it->second;
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::iterator it = ids.begin(); it != ids.end(); ++it) {
+ const SingleObjectInvalidationSet& incoming =
+ invalidation_map.ForObject(*it);
+ List& list = invalidations_[*it];
+ list.insert(list.end(), incoming.begin(), incoming.end());
}
}
@@ -181,8 +206,9 @@ class FakeDelegate : public SyncInvalidationListener::Delegate {
private:
typedef std::map<ObjectId, int, ObjectIdLessThan> ObjectIdCountMap;
- ObjectIdCountMap invalidation_counts_;
- ObjectIdInvalidationMap invalidations_;
+ typedef std::vector<Invalidation> List;
+ typedef std::map<ObjectId, List, ObjectIdLessThan> Map;
+ Map invalidations_;
SyncInvalidationListener* listener_;
InvalidatorState state_;
};
@@ -308,6 +334,10 @@ class SyncInvalidationListenerTest : public testing::Test {
return fake_delegate_.GetPayload(id);
}
+ bool IsUnknownVersion(const ObjectId& id) const {
+ return fake_delegate_.IsUnknownVersion(id);
+ }
+
InvalidatorState GetInvalidatorState() const {
return fake_delegate_.GetInvalidatorState();
}
@@ -489,7 +519,6 @@ TEST_F(SyncInvalidationListenerTest, InvalidateUnregisteredWithPayload) {
const ObjectId& id = kUnregisteredId;
EXPECT_EQ(0, GetInvalidationCount(id));
- EXPECT_EQ("", GetPayload(id));
EXPECT_EQ(kMinVersion, GetMaxVersion(id));
FireInvalidate(id, kVersion1, "unregistered payload");
@@ -524,25 +553,18 @@ TEST_F(SyncInvalidationListenerTest, InvalidateVersion) {
VerifyAcknowledged(id);
}
-// Fire an invalidation with an unknown version twice. It shouldn't
-// update the payload or version either time, but it should still be
-// processed.
+// Fire an invalidation with an unknown version twice. It shouldn't update the
+// version either time, but it should still be processed.
TEST_F(SyncInvalidationListenerTest, InvalidateUnknownVersion) {
const ObjectId& id = kBookmarksId_;
FireInvalidateUnknownVersion(id);
EXPECT_EQ(1, GetInvalidationCount(id));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
- EXPECT_EQ(kMinVersion, GetMaxVersion(id));
+ EXPECT_TRUE(IsUnknownVersion(id));
AcknowledgeAndVerify(id);
FireInvalidateUnknownVersion(id);
-
- EXPECT_EQ(2, GetInvalidationCount(id));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
EXPECT_EQ(kMinVersion, GetMaxVersion(id));
AcknowledgeAndVerify(id);
}
@@ -555,8 +577,7 @@ TEST_F(SyncInvalidationListenerTest, InvalidateAll) {
for (ObjectIdSet::const_iterator it = registered_ids_.begin();
it != registered_ids_.end(); ++it) {
EXPECT_EQ(1, GetInvalidationCount(*it));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(*it));
- EXPECT_EQ("", GetPayload(*it));
+ EXPECT_TRUE(IsUnknownVersion(*it));
EXPECT_EQ(kMinVersion, GetMaxVersion(*it));
AcknowledgeAndVerify(*it);
}
@@ -601,14 +622,12 @@ TEST_F(SyncInvalidationListenerTest, InvalidateMultipleIds) {
FireInvalidateAll();
EXPECT_EQ(2, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
+ EXPECT_TRUE(IsUnknownVersion(kBookmarksId_));
EXPECT_EQ(3, GetMaxVersion(kBookmarksId_));
AcknowledgeAndVerify(kBookmarksId_);
EXPECT_EQ(1, GetInvalidationCount(kPreferencesId_));
- EXPECT_EQ(Invalidation::kUnknownVersion, GetVersion(kPreferencesId_));
- EXPECT_EQ("", GetPayload(kPreferencesId_));
+  EXPECT_TRUE(IsUnknownVersion(kPreferencesId_));
EXPECT_EQ(kMinVersion, GetMaxVersion(kPreferencesId_));
AcknowledgeAndVerify(kPreferencesId_);
diff --git a/sync/notifier/sync_system_resources.cc b/sync/notifier/sync_system_resources.cc
index be50a82212..74dacaa638 100644
--- a/sync/notifier/sync_system_resources.cc
+++ b/sync/notifier/sync_system_resources.cc
@@ -61,10 +61,10 @@ void SyncLogger::SetSystemResources(invalidation::SystemResources* resources) {
}
SyncInvalidationScheduler::SyncInvalidationScheduler()
- : weak_factory_(this),
- created_on_loop_(base::MessageLoop::current()),
+ : created_on_loop_(base::MessageLoop::current()),
is_started_(false),
- is_stopped_(false) {
+ is_stopped_(false),
+ weak_factory_(this) {
CHECK(created_on_loop_);
}
diff --git a/sync/notifier/sync_system_resources.h b/sync/notifier/sync_system_resources.h
index e333bb24f9..b14d352bd1 100644
--- a/sync/notifier/sync_system_resources.h
+++ b/sync/notifier/sync_system_resources.h
@@ -65,7 +65,9 @@ class SyncInvalidationScheduler : public invalidation::Scheduler {
invalidation::SystemResources* resources) OVERRIDE;
private:
- base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_;
+ // Runs the task, deletes it, and removes it from |posted_tasks_|.
+ void RunPostedTask(invalidation::Closure* task);
+
// Holds all posted tasks that have not yet been run.
std::set<invalidation::Closure*> posted_tasks_;
@@ -73,8 +75,7 @@ class SyncInvalidationScheduler : public invalidation::Scheduler {
bool is_started_;
bool is_stopped_;
- // Runs the task, deletes it, and removes it from |posted_tasks_|.
- void RunPostedTask(invalidation::Closure* task);
+ base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_;
};
class SyncStorage : public invalidation::Storage {
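The reordering above follows the usual base::WeakPtrFactory convention: members are destroyed in reverse declaration order, so a factory declared last invalidates outstanding WeakPtrs before any other member is torn down, and keeping the initializer list in declaration order avoids -Wreorder warnings. A minimal sketch of the pattern; the Example class and member_ field are hypothetical and not part of this patch:

class Example {
 public:
  Example() : member_(0), weak_factory_(this) {}
  base::WeakPtr<Example> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }

 private:
  int member_;  // Destroyed after weak_factory_, i.e. after WeakPtrs die.
  // Declared last so it is destroyed first, invalidating WeakPtrs before
  // the rest of the object goes away.
  base::WeakPtrFactory<Example> weak_factory_;
};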
diff --git a/sync/protocol/article_specifics.proto b/sync/protocol/article_specifics.proto
new file mode 100644
index 0000000000..00631fd16e
--- /dev/null
+++ b/sync/protocol/article_specifics.proto
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Sync protocol datatype extension for the article.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+option retain_unknown_fields = true;
+
+package sync_pb;
+
+// Properties of Article objects.
+message ArticleSpecifics {
+ // Next ID to use: 4
+
+ optional string entry_id = 1;
+
+ optional string title = 2;
+
+ repeated ArticlePage pages = 3;
+}
+
+message ArticlePage {
+ // Next ID to use: 2
+
+ optional string url = 1;
+}
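For reference, a minimal sketch of how the generated proto2 C++ API for the ArticleSpecifics message above could be populated. The accessor names follow from standard protobuf codegen for the declared fields; the values are hypothetical:

sync_pb::ArticleSpecifics article;
article.set_entry_id("article-entry-1");
article.set_title("Example title");
article.add_pages()->set_url("http://example.com/page-1");  // repeated ArticlePage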
diff --git a/sync/protocol/nigori_specifics.proto b/sync/protocol/nigori_specifics.proto
index 6e72ae1493..662b94af95 100644
--- a/sync/protocol/nigori_specifics.proto
+++ b/sync/protocol/nigori_specifics.proto
@@ -120,5 +120,8 @@ message NigoriSpecifics {
// Boolean corresponding to Whether to encrypt favicons data or not.
optional bool encrypt_favicon_images = 35;
optional bool encrypt_favicon_tracking = 36;
+
+ // Boolean corresponding to whether articles should be encrypted.
+ optional bool encrypt_articles = 37;
}
diff --git a/sync/protocol/proto_value_conversions.cc b/sync/protocol/proto_value_conversions.cc
index a7da289b81..1f5a72648a 100644
--- a/sync/protocol/proto_value_conversions.cc
+++ b/sync/protocol/proto_value_conversions.cc
@@ -245,6 +245,9 @@ base::DictionaryValue* SyncedNotificationImageToValue(
const sync_pb::SyncedNotificationImage& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(url);
+ SET_STR(alt_text);
+ SET_INT32(preferred_width);
+ SET_INT32(preferred_height);
return value;
}
@@ -252,6 +255,8 @@ base::DictionaryValue* SyncedNotificationProfileImageToValue(
const sync_pb::SyncedNotificationProfileImage& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(image_url);
+ SET_STR(oid);
+ SET_STR(display_name);
return value;
}
@@ -262,15 +267,45 @@ base::DictionaryValue* MediaToValue(
return value;
}
+base::DictionaryValue* SyncedNotificationActionToValue(
+ const sync_pb::SyncedNotificationAction& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(text);
+ SET(icon, SyncedNotificationImageToValue);
+ SET_STR(url);
+ SET_STR(request_data);
+ SET_STR(accessibility_label);
+ return value;
+}
+
+base::DictionaryValue* SyncedNotificationDestinationToValue(
+ const sync_pb::SyncedNotificationDestination& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(text);
+ SET(icon, SyncedNotificationImageToValue);
+ SET_STR(url);
+ SET_STR(accessibility_label);
+ return value;
+}
+
+base::DictionaryValue* TargetToValue(
+ const sync_pb::Target& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+  SET(destination, SyncedNotificationDestinationToValue);
+ SET(action, SyncedNotificationActionToValue);
+ SET_STR(target_key);
+ return value;
+}
+
base::DictionaryValue* SimpleCollapsedLayoutToValue(
const sync_pb::SimpleCollapsedLayout& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
+ SET(app_icon, SyncedNotificationImageToValue);
+ SET_REP(profile_image, SyncedNotificationProfileImageToValue);
SET_STR(heading);
SET_STR(description);
SET_STR(annotation);
SET_REP(media, MediaToValue);
- SET_REP(profile_image, SyncedNotificationProfileImageToValue);
- SET(app_icon, SyncedNotificationImageToValue);
return value;
}
@@ -278,13 +313,25 @@ base::DictionaryValue* CollapsedInfoToValue(
const sync_pb::CollapsedInfo& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET(simple_collapsed_layout, SimpleCollapsedLayoutToValue);
+ SET_INT64(creation_timestamp_usec);
+  SET(default_destination, SyncedNotificationDestinationToValue);
+ SET_REP(target, TargetToValue);
+ return value;
+}
+
+base::DictionaryValue* SyncedNotificationToValue(
+ const sync_pb::SyncedNotification& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(type);
+ SET_STR(external_id);
+  // TODO(petewil): Add SyncedNotificationCreator here if we ever need it.
return value;
}
base::DictionaryValue* RenderInfoToValue(
const sync_pb::SyncedNotificationRenderInfo& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
- // TODO(petewil): Add the expanded info values too.
+ // TODO(petewil): Add the expanded info values once we start using them.
SET(collapsed_info, CollapsedInfoToValue);
return value;
}
@@ -293,10 +340,12 @@ base::DictionaryValue* CoalescedNotificationToValue(
const sync_pb::CoalescedSyncedNotification& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_STR(key);
+ SET_STR(app_id);
+ SET_REP(notification, SyncedNotificationToValue);
+ SET(render_info, RenderInfoToValue);
SET_INT32(read_state);
SET_INT64(creation_time_msec);
SET_INT32(priority);
- SET(render_info, RenderInfoToValue);
return value;
}
@@ -515,6 +564,7 @@ base::DictionaryValue* NigoriSpecificsToValue(
SET_BOOL(encrypt_apps);
SET_BOOL(encrypt_search_engines);
SET_BOOL(encrypt_dictionary);
+ SET_BOOL(encrypt_articles);
SET_BOOL(encrypt_everything);
SET_BOOL(sync_tab_favicons);
SET_ENUM(passphrase_type, PassphraseTypeString);
@@ -524,6 +574,22 @@ base::DictionaryValue* NigoriSpecificsToValue(
return value;
}
+base::DictionaryValue* ArticlePageToValue(
+ const sync_pb::ArticlePage& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(url);
+ return value;
+}
+
+base::DictionaryValue* ArticleSpecificsToValue(
+ const sync_pb::ArticleSpecifics& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(entry_id);
+ SET_STR(title);
+ SET_REP(pages, ArticlePageToValue);
+ return value;
+}
+
base::DictionaryValue* PasswordSpecificsToValue(
const sync_pb::PasswordSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -623,6 +689,7 @@ base::DictionaryValue* EntitySpecificsToValue(
SET_FIELD(app, AppSpecificsToValue);
SET_FIELD(app_notification, AppNotificationToValue);
SET_FIELD(app_setting, AppSettingSpecificsToValue);
+ SET_FIELD(article, ArticleSpecificsToValue);
SET_FIELD(autofill, AutofillSpecificsToValue);
SET_FIELD(autofill_profile, AutofillProfileSpecificsToValue);
SET_FIELD(bookmark, BookmarkSpecificsToValue);
diff --git a/sync/protocol/proto_value_conversions.h b/sync/protocol/proto_value_conversions.h
index f5306d791a..ee40e2bab1 100644
--- a/sync/protocol/proto_value_conversions.h
+++ b/sync/protocol/proto_value_conversions.h
@@ -18,6 +18,7 @@ class AppNotification;
class AppNotificationSettings;
class AppSettingSpecifics;
class AppSpecifics;
+class ArticleSpecifics;
class AutofillProfileSpecifics;
class AutofillSpecifics;
class BookmarkSpecifics;
@@ -58,11 +59,15 @@ class SessionTab;
class SessionWindow;
class SimpleCollapsedLayout;
class SyncCycleCompletedEventInfo;
+class SyncedNotification;
+class SyncedNotificationAction;
+class SyncedNotificationDestination;
class SyncedNotificationImage;
class SyncedNotificationProfileImage;
class SyncedNotificationRenderInfo;
class SyncedNotificationSpecifics;
class TabNavigation;
+class Target;
class ThemeSpecifics;
class TimeRangeDirective;
class TypedUrlSpecifics;
@@ -141,6 +146,28 @@ base::DictionaryValue* RenderInfoToValue(
base::DictionaryValue* CoalescedNotificationToValue(
const sync_pb::CoalescedSyncedNotification& proto);
+base::DictionaryValue* SyncedNotificationActionToValue(
+ const sync_pb::SyncedNotificationAction& action);
+
+base::DictionaryValue* SyncedNotificationDestinationToValue(
+ const sync_pb::SyncedNotificationDestination& destination);
+
+base::DictionaryValue* SyncedNotificationToValue(
+ const sync_pb::SyncedNotification& notification);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionSpecificsToValue(
+ const sync_pb::SessionSpecifics& session_specifics);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationImageToValue(
+ const sync_pb::SyncedNotificationImage& image);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue*
+ SyncedNotificationProfileImageToValue(
+ const sync_pb::SyncedNotificationProfileImage& image);
+
+SYNC_EXPORT_PRIVATE base::DictionaryValue* TargetToValue(
+ const sync_pb::Target& target);
+
// Main *SpecificsToValue functions.
SYNC_EXPORT_PRIVATE base::DictionaryValue* AppNotificationToValue(
@@ -152,6 +179,9 @@ base::DictionaryValue* AppSettingSpecificsToValue(
SYNC_EXPORT_PRIVATE base::DictionaryValue* AppSpecificsToValue(
const sync_pb::AppSpecifics& app_specifics);
+SYNC_EXPORT_PRIVATE base::DictionaryValue* ArticleSpecificsToValue(
+ const sync_pb::ArticleSpecifics& article_specifics);
+
SYNC_EXPORT_PRIVATE base::DictionaryValue* AutofillSpecificsToValue(
const sync_pb::AutofillSpecifics& autofill_specifics);
@@ -214,16 +244,6 @@ SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationSpecificsToValue(
SYNC_EXPORT_PRIVATE base::DictionaryValue* SearchEngineSpecificsToValue(
const sync_pb::SearchEngineSpecifics& search_engine_specifics);
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionSpecificsToValue(
- const sync_pb::SessionSpecifics& session_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationImageToValue(
- const sync_pb::SyncedNotificationImage& image);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue*
- SyncedNotificationProfileImageToValue(
- const sync_pb::SyncedNotificationProfileImage& image);
-
SYNC_EXPORT_PRIVATE base::DictionaryValue* ThemeSpecificsToValue(
const sync_pb::ThemeSpecifics& theme_specifics);
diff --git a/sync/protocol/proto_value_conversions_unittest.cc b/sync/protocol/proto_value_conversions_unittest.cc
index 718a78a60b..3275eb0cd7 100644
--- a/sync/protocol/proto_value_conversions_unittest.cc
+++ b/sync/protocol/proto_value_conversions_unittest.cc
@@ -53,7 +53,7 @@ TEST_F(ProtoValueConversionsTest, ProtoChangeCheck) {
// If this number changes, that means we added or removed a data
// type. Don't forget to add a unit test for {New
// type}SpecificsToValue below.
- EXPECT_EQ(28, MODEL_TYPE_COUNT);
+ EXPECT_EQ(29, MODEL_TYPE_COUNT);
// We'd also like to check if we changed any field in our messages.
// However, that's hard to do: sizeof could work, but it's
@@ -219,6 +219,10 @@ TEST_F(ProtoValueConversionsTest, DictionarySpecificsToValue) {
TestSpecificsToValue(DictionarySpecificsToValue);
}
+TEST_F(ProtoValueConversionsTest, ArticleSpecificsToValue) {
+ TestSpecificsToValue(ArticleSpecificsToValue);
+}
+
// TODO(akalin): Figure out how to better test EntitySpecificsToValue.
TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
@@ -230,6 +234,7 @@ TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
SET_FIELD(app);
SET_FIELD(app_notification);
SET_FIELD(app_setting);
+ SET_FIELD(article);
SET_FIELD(autofill);
SET_FIELD(autofill_profile);
SET_FIELD(bookmark);
diff --git a/sync/protocol/sync.proto b/sync/protocol/sync.proto
index 445fa41384..5b0c175d99 100644
--- a/sync/protocol/sync.proto
+++ b/sync/protocol/sync.proto
@@ -17,6 +17,7 @@ package sync_pb;
import "app_notification_specifics.proto";
import "app_setting_specifics.proto";
import "app_specifics.proto";
+import "article_specifics.proto";
import "autofill_specifics.proto";
import "bookmark_specifics.proto";
import "client_commands.proto";
@@ -118,6 +119,7 @@ message EntitySpecifics {
optional FaviconImageSpecifics favicon_image = 182019;
optional ManagedUserSettingSpecifics managed_user_setting = 186662;
optional ManagedUserSpecifics managed_user = 194582;
+ optional ArticleSpecifics article = 223759;
}
message SyncEntity {
diff --git a/sync/sessions/data_type_tracker.cc b/sync/sessions/data_type_tracker.cc
index a061679839..b0b464923c 100644
--- a/sync/sessions/data_type_tracker.cc
+++ b/sync/sessions/data_type_tracker.cc
@@ -5,6 +5,8 @@
#include "sync/sessions/data_type_tracker.h"
#include "base/logging.h"
+#include "sync/internal_api/public/base/invalidation.h"
+#include "sync/notifier/single_object_invalidation_set.h"
#include "sync/sessions/nudge_tracker.h"
namespace syncer {
@@ -27,13 +29,20 @@ void DataTypeTracker::RecordLocalRefreshRequest() {
local_refresh_request_count_++;
}
-void DataTypeTracker::RecordRemoteInvalidation(
- const std::string& payload) {
- pending_payloads_.push_back(payload);
- if (pending_payloads_.size() > payload_buffer_size_) {
- // Drop the oldest payload if we've overflowed.
- pending_payloads_.pop_front();
- local_payload_overflow_ = true;
+void DataTypeTracker::RecordRemoteInvalidations(
+ const SingleObjectInvalidationSet& invalidations) {
+ for (SingleObjectInvalidationSet::const_iterator it =
+ invalidations.begin(); it != invalidations.end(); ++it) {
+ if (it->is_unknown_version()) {
+ server_payload_overflow_ = true;
+ } else {
+ pending_payloads_.push_back(it->payload());
+ if (pending_payloads_.size() > payload_buffer_size_) {
+ // Drop the oldest payload if we've overflowed.
+ pending_payloads_.pop_front();
+ local_payload_overflow_ = true;
+ }
+ }
}
}
diff --git a/sync/sessions/data_type_tracker.h b/sync/sessions/data_type_tracker.h
index 30bc3b6dbb..6ecaa0eb7c 100644
--- a/sync/sessions/data_type_tracker.h
+++ b/sync/sessions/data_type_tracker.h
@@ -14,6 +14,10 @@
#include "sync/protocol/sync.pb.h"
namespace syncer {
+
+class Invalidation;
+class SingleObjectInvalidationSet;
+
namespace sessions {
typedef std::deque<std::string> PayloadList;
@@ -32,8 +36,9 @@ class DataTypeTracker {
// Tracks that a local refresh request has been made for this type.
void RecordLocalRefreshRequest();
- // Tracks that we received an invalidation notification for this type.
- void RecordRemoteInvalidation(const std::string& payload);
+ // Tracks that we received invalidation notifications for this type.
+ void RecordRemoteInvalidations(
+ const SingleObjectInvalidationSet& invalidations);
// Records that a sync cycle has been performed successfully.
// Generally, this means that all local changes have been committed and all
diff --git a/sync/sessions/nudge_tracker.cc b/sync/sessions/nudge_tracker.cc
index 8ec8970ef3..27947476f4 100644
--- a/sync/sessions/nudge_tracker.cc
+++ b/sync/sessions/nudge_tracker.cc
@@ -96,16 +96,17 @@ void NudgeTracker::RecordRemoteInvalidation(
const ObjectIdInvalidationMap& invalidation_map) {
updates_source_ = sync_pb::GetUpdatesCallerInfo::NOTIFICATION;
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
ModelType type;
- if (!ObjectIdToRealModelType(it->first, &type)) {
+ if (!ObjectIdToRealModelType(*it, &type)) {
NOTREACHED()
- << "Object ID " << ObjectIdToString(it->first)
+ << "Object ID " << ObjectIdToString(*it)
<< " does not map to valid model type";
}
DCHECK(type_trackers_.find(type) != type_trackers_.end());
- type_trackers_[type].RecordRemoteInvalidation(it->second.payload);
+ type_trackers_[type].RecordRemoteInvalidations(
+ invalidation_map.ForObject(*it));
}
}
diff --git a/sync/sessions/ordered_commit_set.cc b/sync/sessions/ordered_commit_set.cc
deleted file mode 100644
index 3bbddb9c28..0000000000
--- a/sync/sessions/ordered_commit_set.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/ordered_commit_set.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-
-namespace syncer {
-namespace sessions {
-
-OrderedCommitSet::OrderedCommitSet(const ModelSafeRoutingInfo& routes)
- : routes_(routes) {
-}
-
-OrderedCommitSet::~OrderedCommitSet() {}
-
-void OrderedCommitSet::AddCommitItem(const int64 metahandle,
- ModelType type) {
- if (!HaveCommitItem(metahandle)) {
- inserted_metahandles_.insert(metahandle);
- metahandle_order_.push_back(metahandle);
- projections_[GetGroupForModelType(type, routes_)].push_back(
- inserted_metahandles_.size() - 1);
- types_.push_back(type);
- types_in_list_.Put(type);
- }
-}
-
-void OrderedCommitSet::AddCommitItems(
- const std::vector<int64> metahandles,
- ModelType type) {
- for (std::vector<int64>::const_iterator it = metahandles.begin();
- it != metahandles.end(); ++it) {
- AddCommitItem(*it, type);
- }
-}
-
-const OrderedCommitSet::Projection& OrderedCommitSet::GetCommitIdProjection(
- ModelSafeGroup group) const {
- Projections::const_iterator i = projections_.find(group);
- DCHECK(i != projections_.end());
- return i->second;
-}
-
-void OrderedCommitSet::Append(const OrderedCommitSet& other) {
- for (size_t i = 0; i < other.Size(); ++i) {
- CommitItem item = other.GetCommitItemAt(i);
- AddCommitItem(item.meta, item.group);
- }
-}
-
-void OrderedCommitSet::AppendReverse(const OrderedCommitSet& other) {
- for (int i = other.Size() - 1; i >= 0; i--) {
- CommitItem item = other.GetCommitItemAt(i);
- AddCommitItem(item.meta, item.group);
- }
-}
-
-void OrderedCommitSet::Truncate(size_t max_size) {
- if (max_size < metahandle_order_.size()) {
- for (size_t i = max_size; i < metahandle_order_.size(); ++i) {
- inserted_metahandles_.erase(metahandle_order_[i]);
- }
-
- // Some projections may refer to indices that are getting chopped.
- // Since projections are in increasing order, it's easy to fix. Except
- // that you can't erase(..) using a reverse_iterator, so we use binary
- // search to find the chop point.
- Projections::iterator it = projections_.begin();
- for (; it != projections_.end(); ++it) {
- // For each projection, chop off any indices larger than or equal to
- // max_size by looking for max_size using binary search.
- Projection& p = it->second;
- Projection::iterator element = std::lower_bound(p.begin(), p.end(),
- max_size);
- if (element != p.end())
- p.erase(element, p.end());
- }
- metahandle_order_.resize(max_size);
- types_.resize(max_size);
- }
-}
-
-void OrderedCommitSet::Clear() {
- inserted_metahandles_.clear();
- metahandle_order_.clear();
- for (Projections::iterator it = projections_.begin();
- it != projections_.end(); ++it) {
- it->second.clear();
- }
- types_.clear();
- types_in_list_.Clear();
-}
-
-OrderedCommitSet::CommitItem OrderedCommitSet::GetCommitItemAt(
- const size_t position) const {
- DCHECK(position < Size());
- CommitItem return_item = {metahandle_order_[position],
- types_[position]};
- return return_item;
-}
-
-bool OrderedCommitSet::HasBookmarkCommitId() const {
- ModelSafeRoutingInfo::const_iterator group = routes_.find(BOOKMARKS);
- if (group == routes_.end())
- return false;
- Projections::const_iterator proj = projections_.find(group->second);
- if (proj == projections_.end())
- return false;
- DCHECK_LE(proj->second.size(), types_.size());
- for (size_t i = 0; i < proj->second.size(); i++) {
- if (types_[proj->second[i]] == BOOKMARKS)
- return true;
- }
- return false;
-}
-
-void OrderedCommitSet::operator=(const OrderedCommitSet& other) {
- inserted_metahandles_ = other.inserted_metahandles_;
- metahandle_order_ = other.metahandle_order_;
- projections_ = other.projections_;
- types_ = other.types_;
- routes_ = other.routes_;
-}
-
-} // namespace sessions
-} // namespace syncer
-
diff --git a/sync/sessions/ordered_commit_set.h b/sync/sessions/ordered_commit_set.h
deleted file mode 100644
index a30724e641..0000000000
--- a/sync/sessions/ordered_commit_set.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-#define SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-
-#include <map>
-#include <set>
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-namespace syncer {
-namespace sessions {
-
-// TODO(ncarter): This code is more generic than just Commit and can
-// be reused elsewhere (e.g. ChangeReorderBuffer do similar things). Merge
-// all these implementations.
-class SYNC_EXPORT_PRIVATE OrderedCommitSet {
- public:
- // A list of indices into the full list of commit ids such that:
- // 1 - each element is an index belonging to a particular ModelSafeGroup.
- // 2 - the vector is in sorted (smallest to largest) order.
- // 3 - each element is a valid index for GetCommitItemAt.
- // See GetCommitIdProjection for usage.
- typedef std::vector<size_t> Projection;
-
- // TODO(chron): Reserve space according to batch size?
- explicit OrderedCommitSet(const ModelSafeRoutingInfo& routes);
- ~OrderedCommitSet();
-
- bool HaveCommitItem(const int64 metahandle) const {
- return inserted_metahandles_.count(metahandle) > 0;
- }
-
- void AddCommitItem(const int64 metahandle, ModelType type);
- void AddCommitItems(const std::vector<int64> metahandles, ModelType type);
-
- const std::vector<int64>& GetAllCommitHandles() const {
- return metahandle_order_;
- }
-
- // Return the handle at index |position| in this OrderedCommitSet. Note that
- // the index uniquely identifies the same logical item in each of:
- // 1) this OrderedCommitSet
- // 2) the CommitRequest sent to the server
- // 3) the list of EntryResponse objects in the CommitResponse.
- // These together allow re-association of the pre-commit Id with the
- // actual committed entry.
- int64 GetCommitHandleAt(const size_t position) const {
- return metahandle_order_[position];
- }
-
- // Same as above, but for ModelType of the item.
- ModelType GetModelTypeAt(const size_t position) const {
- return types_[position];
- }
-
- // Get the projection of commit ids onto the space of commit ids
- // belonging to |group|. This is useful when you need to process a commit
- // response one ModelSafeGroup at a time. See GetCommitIdAt for how the
- // indices contained in the returned Projection can be used.
- const Projection& GetCommitIdProjection(
- ModelSafeGroup group) const;
-
- size_t Size() const {
- return metahandle_order_.size();
- }
-
- bool Empty() const {
- return Size() == 0;
- }
-
- // Returns all the types that are included in this list.
- ModelTypeSet Types() const {
- return types_in_list_;
- }
-
- // Returns true iff any of the commit ids added to this set have model type
- // BOOKMARKS.
- bool HasBookmarkCommitId() const;
-
- void Append(const OrderedCommitSet& other);
- void AppendReverse(const OrderedCommitSet& other);
- void Truncate(size_t max_size);
-
- // Removes all entries from this set.
- void Clear();
-
- void operator=(const OrderedCommitSet& other);
- private:
- // A set of CommitIdProjections associated with particular ModelSafeGroups.
- typedef std::map<ModelSafeGroup, Projection> Projections;
-
- // Helper container for return value of GetCommitItemAt.
- struct CommitItem {
- int64 meta;
- ModelType group;
- };
-
- CommitItem GetCommitItemAt(const size_t position) const;
-
- // These lists are different views of the same items; e.g they are
- // isomorphic.
- std::set<int64> inserted_metahandles_;
- std::vector<int64> metahandle_order_;
- Projections projections_;
-
- // We need this because of operations like AppendReverse that take ids from
- // one OrderedCommitSet and insert into another -- we need to know the
- // group for each ID so that the insertion can update the appropriate
- // projection.
- std::vector<ModelType> types_;
-
- // The set of types which are included in this particular list.
- ModelTypeSet types_in_list_;
-
- ModelSafeRoutingInfo routes_;
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
-
diff --git a/sync/sessions/ordered_commit_set_unittest.cc b/sync/sessions/ordered_commit_set_unittest.cc
deleted file mode 100644
index 4aca4f406c..0000000000
--- a/sync/sessions/ordered_commit_set_unittest.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/ordered_commit_set.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using std::vector;
-
-namespace syncer {
-namespace sessions {
-namespace {
-
-class OrderedCommitSetTest : public testing::Test {
- public:
- OrderedCommitSetTest() {
- routes_[BOOKMARKS] = GROUP_UI;
- routes_[PREFERENCES] = GROUP_UI;
- routes_[AUTOFILL] = GROUP_DB;
- routes_[SESSIONS] = GROUP_PASSIVE;
- }
- protected:
- TestIdFactory ids_;
- ModelSafeRoutingInfo routes_;
-};
-
-TEST_F(OrderedCommitSetTest, Projections) {
- vector<int64> expected;
- for (int64 i = 0; i < 8; i++)
- expected.push_back(i);
-
- OrderedCommitSet commit_set1(routes_), commit_set2(routes_);
- commit_set1.AddCommitItem(expected[0], BOOKMARKS);
- commit_set1.AddCommitItem(expected[1], BOOKMARKS);
- commit_set1.AddCommitItem(expected[2], PREFERENCES);
- // Duplicates should be dropped.
- commit_set1.AddCommitItem(expected[2], PREFERENCES);
- commit_set1.AddCommitItem(expected[3], SESSIONS);
- commit_set1.AddCommitItem(expected[4], SESSIONS);
- commit_set2.AddCommitItem(expected[7], AUTOFILL);
- commit_set2.AddCommitItem(expected[6], AUTOFILL);
- commit_set2.AddCommitItem(expected[5], AUTOFILL);
- // Add something in set1 to set2, which should get dropped by AppendReverse.
- commit_set2.AddCommitItem(expected[0], BOOKMARKS);
- commit_set1.AppendReverse(commit_set2);
-
- EXPECT_EQ(8U, commit_set1.Size());
-
- // First, we should verify the projections are correct. Second, we want to
- // do the same verification after truncating by 1. Next, try truncating
- // the set to a size of 4, so that the DB projection is wiped out and
- // PASSIVE has one element removed. Finally, truncate to 1 so only UI is
- // remaining.
- std::vector<size_t> sizes;
- sizes.push_back(8);
- sizes.push_back(7);
- sizes.push_back(4);
- sizes.push_back(1);
- for (std::vector<size_t>::iterator it = sizes.begin();
- it != sizes.end(); ++it) {
- commit_set1.Truncate(*it);
- size_t expected_size = *it;
-
- SCOPED_TRACE(::testing::Message("Iteration size = ") << *it);
- std::vector<int64> all_ids = commit_set1.GetAllCommitHandles();
- EXPECT_EQ(expected_size, all_ids.size());
- for (size_t i = 0; i < expected_size; i++) {
- EXPECT_TRUE(expected[i] == all_ids[i]);
- EXPECT_TRUE(expected[i] == commit_set1.GetCommitHandleAt(i));
- }
-
- OrderedCommitSet::Projection p1, p2, p3;
- p1 = commit_set1.GetCommitIdProjection(GROUP_UI);
- p2 = commit_set1.GetCommitIdProjection(GROUP_PASSIVE);
- p3 = commit_set1.GetCommitIdProjection(GROUP_DB);
- EXPECT_TRUE(p1.size() + p2.size() + p3.size() == expected_size) << "Sum"
- << "of sizes of projections should equal full expected size!";
-
- for (size_t i = 0; i < p1.size(); i++) {
- SCOPED_TRACE(::testing::Message("UI projection mismatch at i = ") << i);
- EXPECT_TRUE(expected[p1[i]] == commit_set1.GetCommitHandleAt(p1[i]))
- << "expected[p1[i]] = " << expected[p1[i]]
- << ", commit_set1[p1[i]] = " << commit_set1.GetCommitHandleAt(p1[i]);
- }
- for (size_t i = 0; i < p2.size(); i++) {
- SCOPED_TRACE(::testing::Message("PASSIVE projection mismatch at i = ")
- << i);
- EXPECT_TRUE(expected[p2[i]] == commit_set1.GetCommitHandleAt(p2[i]))
- << "expected[p2[i]] = " << expected[p2[i]]
- << ", commit_set1[p2[i]] = " << commit_set1.GetCommitHandleAt(p2[i]);
- }
- for (size_t i = 0; i < p3.size(); i++) {
- SCOPED_TRACE(::testing::Message("DB projection mismatch at i = ") << i);
- EXPECT_TRUE(expected[p3[i]] == commit_set1.GetCommitHandleAt(p3[i]))
- << "expected[p3[i]] = " << expected[p3[i]]
- << ", commit_set1[p3[i]] = " << commit_set1.GetCommitHandleAt(p3[i]);
- }
- }
-}
-
-TEST_F(OrderedCommitSetTest, HasBookmarkCommitId) {
- OrderedCommitSet commit_set(routes_);
-
- commit_set.AddCommitItem(0, AUTOFILL);
- commit_set.AddCommitItem(1, SESSIONS);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-
- commit_set.AddCommitItem(2, PREFERENCES);
- commit_set.AddCommitItem(3, PREFERENCES);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-
- commit_set.AddCommitItem(4, BOOKMARKS);
- EXPECT_TRUE(commit_set.HasBookmarkCommitId());
-
- commit_set.Truncate(4);
- EXPECT_FALSE(commit_set.HasBookmarkCommitId());
-}
-
-TEST_F(OrderedCommitSetTest, AddAndRemoveEntries) {
- OrderedCommitSet commit_set(routes_);
-
- ASSERT_TRUE(commit_set.Empty());
-
- commit_set.AddCommitItem(0, AUTOFILL);
- ASSERT_EQ(static_cast<size_t>(1), commit_set.Size());
-
- commit_set.Clear();
- ASSERT_TRUE(commit_set.Empty());
-}
-
-} // namespace
-} // namespace sessions
-} // namespace syncer
diff --git a/sync/sessions/status_controller.h b/sync/sessions/status_controller.h
index a547c1b67d..de51814ce2 100644
--- a/sync/sessions/status_controller.h
+++ b/sync/sessions/status_controller.h
@@ -35,8 +35,8 @@
#include "base/stl_util.h"
#include "base/time/time.h"
#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/sessions/model_neutral_state.h"
-#include "sync/sessions/ordered_commit_set.h"
namespace syncer {
namespace sessions {
@@ -71,13 +71,6 @@ class SYNC_EXPORT_PRIVATE StatusController {
return model_neutral_.num_server_changes_remaining;
}
- const OrderedCommitSet::Projection& commit_id_projection(
- const sessions::OrderedCommitSet &commit_set) {
- DCHECK(group_restriction_in_effect_)
- << "No group restriction for projection.";
- return commit_set.GetCommitIdProjection(group_restriction_);
- }
-
// Various conflict counters.
int num_encryption_conflicts() const;
int num_hierarchy_conflicts() const;
diff --git a/sync/sessions/sync_session.h b/sync/sessions/sync_session.h
index cd4a22ccc1..31a64ca0a6 100644
--- a/sync/sessions/sync_session.h
+++ b/sync/sessions/sync_session.h
@@ -27,7 +27,6 @@
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/sessions/ordered_commit_set.h"
#include "sync/sessions/status_controller.h"
#include "sync/sessions/sync_session_context.h"
diff --git a/sync/sessions/sync_session_context.cc b/sync/sessions/sync_session_context.cc
index bf1da3a174..78b34427c9 100644
--- a/sync/sessions/sync_session_context.cc
+++ b/sync/sessions/sync_session_context.cc
@@ -23,6 +23,7 @@ SyncSessionContext::SyncSessionContext(
const std::string& invalidator_client_id)
: connection_manager_(connection_manager),
directory_(directory),
+ commit_contributor_deleter_(&commit_contributor_map_),
extensions_activity_(extensions_activity),
notifications_enabled_(false),
max_commit_batch_size_(kDefaultMaxCommitBatchSize),
@@ -44,5 +45,20 @@ SyncSessionContext::SyncSessionContext(
SyncSessionContext::~SyncSessionContext() {
}
+void SyncSessionContext::set_routing_info(
+ const ModelSafeRoutingInfo& routing_info) {
+ routing_info_ = routing_info;
+
+ // TODO(rlarocque): This is not a good long-term solution. We must find a
+ // better way to initialize the set of CommitContributors.
+ STLDeleteValues<CommitContributorMap>(&commit_contributor_map_);
+ ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
+ for (ModelTypeSet::Iterator it = enabled_types.First(); it.Good(); it.Inc()) {
+ SyncDirectoryCommitContributor* contributor =
+ new SyncDirectoryCommitContributor(directory(), it.Get());
+ commit_contributor_map_.insert(std::make_pair(it.Get(), contributor));
+ }
+}
+
} // namespace sessions
} // namespace syncer
diff --git a/sync/sessions/sync_session_context.h b/sync/sessions/sync_session_context.h
index 718cc6caad..3133c32a2e 100644
--- a/sync/sessions/sync_session_context.h
+++ b/sync/sessions/sync_session_context.h
@@ -22,7 +22,9 @@
#include <string>
#include <vector>
+#include "base/stl_util.h"
#include "sync/base/sync_export.h"
+#include "sync/engine/sync_directory_commit_contributor.h"
#include "sync/engine/sync_engine_event.h"
#include "sync/engine/syncer_types.h"
#include "sync/engine/traffic_recorder.h"
@@ -71,8 +73,10 @@ class SYNC_EXPORT_PRIVATE SyncSessionContext {
return routing_info_;
}
- void set_routing_info(const ModelSafeRoutingInfo& routing_info) {
- routing_info_ = routing_info;
+ void set_routing_info(const ModelSafeRoutingInfo& routing_info);
+
+ CommitContributorMap* commit_contributor_map() {
+ return &commit_contributor_map_;
}
const std::vector<scoped_refptr<ModelSafeWorker> >& workers() const {
@@ -154,6 +158,14 @@ class SYNC_EXPORT_PRIVATE SyncSessionContext {
// Must be updated manually when SBR's state is modified.
ModelSafeRoutingInfo routing_info_;
+ // A map of 'commit contributors', one for each enabled type.
+ // This must be kept in sync with the routing info. Our temporary solution to
+ // that problem is to initialize this map in set_routing_info().
+ CommitContributorMap commit_contributor_map_;
+
+ // Deleter for the |commit_contributor_map_|.
+ STLValueDeleter<CommitContributorMap> commit_contributor_deleter_;
+
// The set of ModelSafeWorkers. Used to execute tasks of various threads.
std::vector<scoped_refptr<ModelSafeWorker> > workers_;
diff --git a/sync/sync_core.gypi b/sync/sync_core.gypi
index 9633f5e764..0d5cca740c 100644
--- a/sync/sync_core.gypi
+++ b/sync/sync_core.gypi
@@ -36,10 +36,14 @@
'engine/apply_updates_and_resolve_conflicts_command.h',
'engine/backoff_delay_provider.cc',
'engine/backoff_delay_provider.h',
- 'engine/build_commit_command.cc',
- 'engine/build_commit_command.h',
+ 'engine/commit_util.cc',
+ 'engine/commit_util.h',
'engine/commit.cc',
'engine/commit.h',
+ 'engine/sync_directory_commit_contribution.cc',
+ 'engine/sync_directory_commit_contribution.h',
+ 'engine/sync_directory_commit_contributor.cc',
+ 'engine/sync_directory_commit_contributor.h',
'engine/conflict_resolver.cc',
'engine/conflict_resolver.h',
'engine/conflict_util.cc',
@@ -56,8 +60,6 @@
'engine/net/url_translator.h',
'engine/nudge_source.cc',
'engine/nudge_source.h',
- 'engine/process_commit_response_command.cc',
- 'engine/process_commit_response_command.h',
'engine/process_updates_command.cc',
'engine/process_updates_command.h',
'engine/store_timestamps_command.cc',
@@ -104,8 +106,6 @@
'sessions/debug_info_getter.h',
'sessions/nudge_tracker.cc',
'sessions/nudge_tracker.h',
- 'sessions/ordered_commit_set.cc',
- 'sessions/ordered_commit_set.h',
'sessions/status_controller.cc',
'sessions/status_controller.h',
'sessions/sync_session.cc',
@@ -148,6 +148,8 @@
'syncable/syncable-inl.h',
'syncable/syncable_base_transaction.cc',
'syncable/syncable_base_transaction.h',
+ 'syncable/syncable_base_write_transaction.cc',
+ 'syncable/syncable_base_write_transaction.h',
'syncable/syncable_changes_version.h',
'syncable/syncable_columns.h',
'syncable/syncable_delete_journal.cc',
@@ -156,6 +158,8 @@
'syncable/syncable_enum_conversions.h',
'syncable/syncable_id.cc',
'syncable/syncable_id.h',
+ 'syncable/syncable_model_neutral_write_transaction.cc',
+ 'syncable/syncable_model_neutral_write_transaction.h',
'syncable/syncable_proto_util.cc',
'syncable/syncable_proto_util.h',
'syncable/syncable_read_transaction.cc',
diff --git a/sync/sync_internal_api.gypi b/sync/sync_internal_api.gypi
index be3ce713b0..f9aeeca921 100644
--- a/sync/sync_internal_api.gypi
+++ b/sync/sync_internal_api.gypi
@@ -32,6 +32,8 @@
'internal_api/js_sync_encryption_handler_observer.h',
'internal_api/js_sync_manager_observer.cc',
'internal_api/js_sync_manager_observer.h',
+ 'internal_api/public/base/ack_handle.cc',
+ 'internal_api/public/base/ack_handle.h',
'internal_api/public/base/cancelation_observer.cc',
'internal_api/public/base/cancelation_observer.h',
'internal_api/public/base/cancelation_signal.cc',
@@ -67,8 +69,8 @@
'internal_api/public/http_bridge.h',
'internal_api/public/http_post_provider_factory.h',
'internal_api/public/http_post_provider_interface.h',
- 'internal_api/public/internal_components_factory_impl.h',
'internal_api/public/internal_components_factory.h',
+ 'internal_api/public/internal_components_factory_impl.h',
'internal_api/public/read_node.h',
'internal_api/public/read_transaction.h',
'internal_api/public/sessions/model_neutral_state.cc',
diff --git a/sync/sync_notifier.gypi b/sync/sync_notifier.gypi
index ba40772b60..365ed33d7a 100644
--- a/sync/sync_notifier.gypi
+++ b/sync/sync_notifier.gypi
@@ -35,6 +35,8 @@
'notifier/invalidator_state.h',
'notifier/object_id_invalidation_map.cc',
'notifier/object_id_invalidation_map.h',
+ 'notifier/single_object_invalidation_set.cc',
+ 'notifier/single_object_invalidation_set.h',
],
'conditions': [
['OS != "android"', {
diff --git a/sync/sync_proto.gypi b/sync/sync_proto.gypi
index bb79b4b4cc..4b90525a45 100644
--- a/sync/sync_proto.gypi
+++ b/sync/sync_proto.gypi
@@ -13,6 +13,7 @@
'protocol/app_notification_specifics.proto',
'protocol/app_setting_specifics.proto',
'protocol/app_specifics.proto',
+ 'protocol/article_specifics.proto',
'protocol/autofill_specifics.proto',
'protocol/bookmark_specifics.proto',
'protocol/client_commands.proto',
diff --git a/sync/sync_tests.gypi b/sync/sync_tests.gypi
index 93d6ed8f75..535042fcec 100644
--- a/sync/sync_tests.gypi
+++ b/sync/sync_tests.gypi
@@ -111,16 +111,14 @@
'sync',
],
'sources': [
+ 'notifier/fake_invalidation_handler.cc',
+ 'notifier/fake_invalidation_handler.h',
'notifier/fake_invalidation_state_tracker.cc',
'notifier/fake_invalidation_state_tracker.h',
'notifier/fake_invalidator.cc',
'notifier/fake_invalidator.h',
- 'notifier/fake_invalidation_handler.cc',
- 'notifier/fake_invalidation_handler.h',
'notifier/invalidator_test_template.cc',
'notifier/invalidator_test_template.h',
- 'notifier/object_id_invalidation_map_test_util.cc',
- 'notifier/object_id_invalidation_map_test_util.h',
],
},
@@ -149,6 +147,8 @@
'sources': [
'internal_api/public/base/invalidation_test_util.cc',
'internal_api/public/base/invalidation_test_util.h',
+ 'internal_api/public/base/object_id_invalidation_map_test_util.cc',
+ 'internal_api/public/base/object_id_invalidation_map_test_util.h',
'internal_api/public/test/fake_sync_manager.h',
'internal_api/public/test/test_entry_factory.h',
'internal_api/public/test/test_internal_components_factory.h',
@@ -239,12 +239,12 @@
'engine/backoff_delay_provider_unittest.cc',
'engine/download_unittest.cc',
'engine/model_changing_syncer_command_unittest.cc',
- 'engine/process_commit_response_command_unittest.cc',
'engine/process_updates_command_unittest.cc',
'engine/store_timestamps_command_unittest.cc',
'engine/sync_scheduler_unittest.cc',
'engine/syncer_proto_util_unittest.cc',
'engine/syncer_unittest.cc',
+ 'engine/sync_directory_commit_contribution_unittest.cc',
'engine/traffic_recorder_unittest.cc',
'js/js_arg_list_unittest.cc',
'js/js_event_details_unittest.cc',
@@ -252,7 +252,6 @@
'protocol/proto_enum_conversions_unittest.cc',
'protocol/proto_value_conversions_unittest.cc',
'sessions/nudge_tracker_unittest.cc',
- 'sessions/ordered_commit_set_unittest.cc',
'sessions/status_controller_unittest.cc',
'sessions/sync_session_unittest.cc',
'syncable/directory_backing_store_unittest.cc',
@@ -331,6 +330,8 @@
'notifier/invalidation_notifier_unittest.cc',
'notifier/invalidator_registrar_unittest.cc',
'notifier/non_blocking_invalidator_unittest.cc',
+ 'notifier/object_id_invalidation_map_unittest.cc',
+ 'notifier/single_object_invalidation_set_unittest.cc',
'notifier/p2p_invalidator_unittest.cc',
'notifier/push_client_channel_unittest.cc',
'notifier/registration_manager_unittest.cc',
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index 9754b88533..85e5107a19 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -343,12 +343,12 @@ EntryKernel* Directory::GetRootEntry() {
return GetEntryById(Id());
}
-bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
+bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
ScopedKernelLock lock(this);
return InsertEntry(trans, entry, &lock);
}
-bool Directory::InsertEntry(WriteTransaction* trans,
+bool Directory::InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry,
ScopedKernelLock* lock) {
DCHECK(NULL != lock);
@@ -394,9 +394,9 @@ bool Directory::InsertEntry(WriteTransaction* trans,
return true;
}
-bool Directory::ReindexId(WriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_id) {
+bool Directory::ReindexId(BaseWriteTransaction* trans,
+ EntryKernel* const entry,
+ const Id& new_id) {
ScopedKernelLock lock(this);
if (NULL != GetEntryById(new_id, &lock))
return false;
@@ -413,7 +413,7 @@ bool Directory::ReindexId(WriteTransaction* trans,
return true;
}
-bool Directory::ReindexParentId(WriteTransaction* trans,
+bool Directory::ReindexParentId(BaseWriteTransaction* trans,
EntryKernel* const entry,
const Id& new_parent_id) {
ScopedKernelLock lock(this);
@@ -957,26 +957,24 @@ void Directory::CollectMetaHandleCounts(
bool Directory::CheckInvariantsOnTransactionClose(
syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations) {
+ const MetahandleSet& modified_handles) {
// NOTE: The trans may be in the process of being destructed. Be careful if
// you wish to call any of its virtual methods.
- MetahandleSet handles;
-
switch (invariant_check_level_) {
- case FULL_DB_VERIFICATION:
- GetAllMetaHandles(trans, &handles);
- break;
- case VERIFY_CHANGES:
- for (EntryKernelMutationMap::const_iterator i = mutations.begin();
- i != mutations.end(); ++i) {
- handles.insert(i->first);
+ case FULL_DB_VERIFICATION: {
+ MetahandleSet all_handles;
+ GetAllMetaHandles(trans, &all_handles);
+ return CheckTreeInvariants(trans, all_handles);
+ }
+ case VERIFY_CHANGES: {
+ return CheckTreeInvariants(trans, modified_handles);
+ }
+ case OFF: {
+ return true;
}
- break;
- case OFF:
- break;
}
-
- return CheckTreeInvariants(trans, handles);
+ NOTREACHED();
+ return false;
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index 6de3a470ec..7a58dfc21c 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -32,6 +32,7 @@ class UnrecoverableErrorHandler;
namespace syncable {
class BaseTransaction;
+class BaseWriteTransaction;
class DirectoryChangeDelegate;
class DirectoryBackingStore;
class NigoriHandler;
@@ -341,7 +342,7 @@ class SYNC_EXPORT Directory {
// and may be used in release code.
bool CheckInvariantsOnTransactionClose(
syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations);
+ const MetahandleSet& modified_handles);
// Forces a full check of the directory. This operation may be slow and
// should not be invoked outside of tests.
@@ -377,9 +378,9 @@ class SYNC_EXPORT Directory {
EntryKernel* GetEntryByServerTag(const std::string& tag);
virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
EntryKernel* GetRootEntry();
- bool ReindexId(WriteTransaction* trans, EntryKernel* const entry,
+ bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_id);
- bool ReindexParentId(WriteTransaction* trans, EntryKernel* const entry,
+ bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_parent_id);
void ClearDirtyMetahandles();
@@ -506,9 +507,9 @@ class SYNC_EXPORT Directory {
void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
// For new entry creation only
- bool InsertEntry(WriteTransaction* trans,
+ bool InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry, ScopedKernelLock* lock);
- bool InsertEntry(WriteTransaction* trans, EntryKernel* entry);
+ bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
// Used by CheckTreeInvariants
void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
diff --git a/sync/syncable/metahandle_set.h b/sync/syncable/metahandle_set.h
index 0522825408..5b4e425179 100644
--- a/sync/syncable/metahandle_set.h
+++ b/sync/syncable/metahandle_set.h
@@ -5,6 +5,8 @@
#ifndef SYNC_SYNCABLE_METAHANDLE_SET_
#define SYNC_SYNCABLE_METAHANDLE_SET_
+#include <set>
+
#include "base/basictypes.h"
namespace syncer {
diff --git a/sync/syncable/model_neutral_mutable_entry.cc b/sync/syncable/model_neutral_mutable_entry.cc
index 3d019e62a3..d778abacef 100644
--- a/sync/syncable/model_neutral_mutable_entry.cc
+++ b/sync/syncable/model_neutral_mutable_entry.cc
@@ -9,6 +9,7 @@
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/scoped_kernel_lock.h"
+#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
@@ -18,29 +19,54 @@ namespace syncer {
namespace syncable {
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans,
+ CreateNewUpdateItem,
+ const Id& id)
+ : Entry(trans), base_write_transaction_(trans) {
+ Entry same_id(trans, GET_BY_ID, id);
+ kernel_ = NULL;
+ if (same_id.good()) {
+ return; // already have an item with this ID.
+ }
+ scoped_ptr<EntryKernel> kernel(new EntryKernel());
+
+ kernel->put(ID, id);
+ kernel->put(META_HANDLE, trans->directory()->NextMetahandle());
+ kernel->mark_dirty(&trans->directory()->kernel_->dirty_metahandles);
+ kernel->put(IS_DEL, true);
+ // We match the database defaults here
+ kernel->put(BASE_VERSION, CHANGES_VERSION);
+ if (!trans->directory()->InsertEntry(trans, kernel.get())) {
+ return; // Failed inserting.
+ }
+ trans->TrackChangesTo(kernel.get());
+
+ kernel_ = kernel.release();
+}
+
ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- WriteTransaction* trans, GetById, const Id& id)
- : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
+ BaseWriteTransaction* trans, GetById, const Id& id)
+ : Entry(trans, GET_BY_ID, id), base_write_transaction_(trans) {
}
ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- WriteTransaction* trans, GetByHandle, int64 metahandle)
- : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
+ BaseWriteTransaction* trans, GetByHandle, int64 metahandle)
+ : Entry(trans, GET_BY_HANDLE, metahandle), base_write_transaction_(trans) {
}
ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- WriteTransaction* trans, GetByClientTag, const std::string& tag)
- : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
+ BaseWriteTransaction* trans, GetByClientTag, const std::string& tag)
+ : Entry(trans, GET_BY_CLIENT_TAG, tag), base_write_transaction_(trans) {
}
ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- WriteTransaction* trans, GetByServerTag, const string& tag)
- : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
+ BaseWriteTransaction* trans, GetByServerTag, const string& tag)
+ : Entry(trans, GET_BY_SERVER_TAG, tag), base_write_transaction_(trans) {
}
void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(BASE_VERSION) != value) {
kernel_->put(BASE_VERSION, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
@@ -49,7 +75,7 @@ void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_VERSION) != value) {
ScopedKernelLock lock(dir());
kernel_->put(SERVER_VERSION, value);
@@ -59,7 +85,7 @@ void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_MTIME) != value) {
kernel_->put(SERVER_MTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
@@ -68,7 +94,7 @@ void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_CTIME) != value) {
kernel_->put(SERVER_CTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
@@ -77,9 +103,9 @@ void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
bool ModelNeutralMutableEntry::PutId(const Id& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(ID) != value) {
- if (!dir()->ReindexId(write_transaction(), kernel_, value))
+ if (!dir()->ReindexId(base_write_transaction(), kernel_, value))
return false;
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
}
@@ -88,7 +114,7 @@ bool ModelNeutralMutableEntry::PutId(const Id& value) {
void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_PARENT_ID) != value) {
kernel_->put(SERVER_PARENT_ID, value);
@@ -98,7 +124,7 @@ void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(IS_UNSYNCED) != value) {
MetahandleSet* index = &dir()->kernel_->unsynced_metahandles;
@@ -107,14 +133,14 @@ bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
FROM_HERE,
"Could not insert",
- write_transaction())) {
+ base_write_transaction())) {
return false;
}
} else {
if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
FROM_HERE,
"Entry Not succesfully erased",
- write_transaction())) {
+ base_write_transaction())) {
return false;
}
}
@@ -126,7 +152,7 @@ bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) {
// Use kernel_->GetServerModelType() instead of
// GetServerModelType() as we may trigger some DCHECKs in the
@@ -139,14 +165,14 @@ bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
FROM_HERE,
"Could not insert",
- write_transaction())) {
+ base_write_transaction())) {
return false;
}
} else {
if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
FROM_HERE,
"Entry Not succesfully erased",
- write_transaction())) {
+ base_write_transaction())) {
return false;
}
}
@@ -158,7 +184,7 @@ bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
void ModelNeutralMutableEntry::PutServerIsDir(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
bool old_value = kernel_->ref(SERVER_IS_DIR);
if (old_value != value) {
kernel_->put(SERVER_IS_DIR, value);
@@ -168,7 +194,7 @@ void ModelNeutralMutableEntry::PutServerIsDir(bool value) {
void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
bool old_value = kernel_->ref(SERVER_IS_DEL);
if (old_value != value) {
kernel_->put(SERVER_IS_DEL, value);
@@ -181,13 +207,13 @@ void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
// UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
// to be called on sync thread.
dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
- write_transaction(), old_value, *kernel_);
+ base_write_transaction(), old_value, *kernel_);
}
void ModelNeutralMutableEntry::PutServerNonUniqueName(
const std::string& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
kernel_->put(SERVER_NON_UNIQUE_NAME, value);
@@ -200,7 +226,7 @@ bool ModelNeutralMutableEntry::PutUniqueServerTag(const string& new_tag) {
return true;
}
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
ScopedKernelLock lock(dir());
// Make sure your new value is not in there already.
if (dir()->kernel_->server_tags_map.find(new_tag) !=
@@ -224,7 +250,7 @@ bool ModelNeutralMutableEntry::PutUniqueClientTag(const string& new_tag) {
return true;
}
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
ScopedKernelLock lock(dir());
// Make sure your new value is not in there already.
if (dir()->kernel_->client_tags_map.find(new_tag) !=
@@ -270,7 +296,7 @@ void ModelNeutralMutableEntry::PutServerSpecifics(
const sync_pb::EntitySpecifics& value) {
DCHECK(kernel_);
CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
// TODO(ncarter): This is unfortunately heavyweight. Can we do
// better?
if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() !=
@@ -304,7 +330,7 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
const sync_pb::EntitySpecifics& value) {
DCHECK(kernel_);
CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
// TODO(ncarter): This is unfortunately heavyweight. Can we do
// better?
if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
@@ -317,7 +343,7 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
void ModelNeutralMutableEntry::PutServerUniquePosition(
const UniquePosition& value) {
DCHECK(kernel_);
- write_transaction_->SaveOriginal(kernel_);
+ base_write_transaction_->TrackChangesTo(kernel_);
if(!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) {
// We should never overwrite a valid position with an invalid one.
DCHECK(value.IsValid());
@@ -332,8 +358,8 @@ void ModelNeutralMutableEntry::PutSyncing(bool value) {
}
void ModelNeutralMutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
- write_transaction_->SaveOriginal(kernel_);
- dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
+ base_write_transaction_->TrackChangesTo(kernel_);
+ dir()->ReindexParentId(base_write_transaction(), kernel_, parent_id);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
}
@@ -343,8 +369,8 @@ void ModelNeutralMutableEntry::UpdateTransactionVersion(int64 value) {
kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles));
}
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(WriteTransaction* trans)
- : Entry(trans), write_transaction_(trans) {}
+ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans)
+ : Entry(trans), base_write_transaction_(trans) {}
MetahandleSet* ModelNeutralMutableEntry::GetDirtyIndexHelper() {
return &dir()->kernel_->dirty_metahandles;
diff --git a/sync/syncable/model_neutral_mutable_entry.h b/sync/syncable/model_neutral_mutable_entry.h
index 0d15fc9d0d..e2292e7045 100644
--- a/sync/syncable/model_neutral_mutable_entry.h
+++ b/sync/syncable/model_neutral_mutable_entry.h
@@ -14,7 +14,11 @@ class WriteNode;
namespace syncable {
-class WriteTransaction;
+class BaseWriteTransaction;
+
+enum CreateNewUpdateItem {
+ CREATE_NEW_UPDATE_ITEM
+};
// This Entry includes all the operations one can safely perform on the sync
// thread. In particular, it does not expose setters to make changes that need
@@ -23,19 +27,22 @@ class WriteTransaction;
// entry.
class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
public:
- ModelNeutralMutableEntry(WriteTransaction* trans, GetByHandle, int64);
- ModelNeutralMutableEntry(WriteTransaction* trans, GetById, const Id&);
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans,
+ CreateNewUpdateItem,
+ const Id& id);
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetByHandle, int64);
+ ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetById, const Id&);
ModelNeutralMutableEntry(
- WriteTransaction* trans,
+ BaseWriteTransaction* trans,
GetByClientTag,
const std::string& tag);
ModelNeutralMutableEntry(
- WriteTransaction* trans,
+ BaseWriteTransaction* trans,
GetByServerTag,
const std::string& tag);
- inline WriteTransaction* write_transaction() const {
- return write_transaction_;
+ inline BaseWriteTransaction* base_write_transaction() const {
+ return base_write_transaction_;
}
// Non-model-changing setters. These setters will change properties internal
@@ -84,7 +91,7 @@ class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
void UpdateTransactionVersion(int64 version);
protected:
- explicit ModelNeutralMutableEntry(WriteTransaction* trans);
+ explicit ModelNeutralMutableEntry(BaseWriteTransaction* trans);
syncable::MetahandleSet* GetDirtyIndexHelper();
@@ -98,7 +105,7 @@ class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
// Kind of redundant. We should reduce the number of pointers
// floating around if at all possible. Could we store this in Directory?
// Scope: Set on construction, never changed after that.
- WriteTransaction* const write_transaction_;
+ BaseWriteTransaction* const base_write_transaction_;
DISALLOW_COPY_AND_ASSIGN(ModelNeutralMutableEntry);
};
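
The setters above all follow one pattern: notify the transaction through the pure virtual TrackChangesTo() hook before mutating any kernel field, so whichever write transaction is in scope can record what it needs. A minimal standalone sketch of that pattern, using illustrative stand-ins rather than the real sync classes:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <set>
#include <string>

// Illustrative stand-ins for EntryKernel and the transaction hierarchy.
struct Kernel {
  int64_t meta_handle = 0;
  std::string name;
};

class BaseWriteTxn {
 public:
  virtual ~BaseWriteTxn() {}
  // Called by every setter before it mutates a kernel field.
  virtual void TrackChangesTo(const Kernel* kernel) = 0;
};

class ModelNeutralEntry {
 public:
  ModelNeutralEntry(BaseWriteTxn* txn, Kernel* kernel)
      : txn_(txn), kernel_(kernel) {}

  void PutName(const std::string& value) {
    txn_->TrackChangesTo(kernel_);  // record before mutating
    if (kernel_->name != value)
      kernel_->name = value;
  }

 private:
  BaseWriteTxn* const txn_;
  Kernel* const kernel_;
};

// One possible transaction: only remembers which handles were touched.
class HandleTrackingTxn : public BaseWriteTxn {
 public:
  void TrackChangesTo(const Kernel* kernel) override {
    modified_.insert(kernel->meta_handle);
  }
  std::size_t modified_count() const { return modified_.size(); }

 private:
  std::set<int64_t> modified_;
};

int main() {
  Kernel k;
  k.meta_handle = 42;
  HandleTrackingTxn txn;
  ModelNeutralEntry entry(&txn, &k);
  entry.PutName("renamed");
  std::cout << "modified handles: " << txn.modified_count() << "\n";  // 1
}
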
diff --git a/sync/syncable/model_type.cc b/sync/syncable/model_type.cc
index b9dacbecf1..35764131be 100644
--- a/sync/syncable/model_type.cc
+++ b/sync/syncable/model_type.cc
@@ -107,6 +107,9 @@ void AddDefaultFieldValue(ModelType datatype,
case MANAGED_USERS:
specifics->mutable_managed_user();
break;
+ case ARTICLES:
+ specifics->mutable_article();
+ break;
default:
NOTREACHED() << "No known extension for model type.";
}
@@ -197,6 +200,8 @@ int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
return sync_pb::EntitySpecifics::kManagedUserSettingFieldNumber;
case MANAGED_USERS:
return sync_pb::EntitySpecifics::kManagedUserFieldNumber;
+ case ARTICLES:
+ return sync_pb::EntitySpecifics::kArticleFieldNumber;
default:
NOTREACHED() << "No known extension for model type.";
return 0;
@@ -319,6 +324,9 @@ ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
if (specifics.has_managed_user())
return MANAGED_USERS;
+ if (specifics.has_article())
+ return ARTICLES;
+
return UNSPECIFIED;
}
@@ -484,6 +492,8 @@ const char* ModelTypeToString(ModelType model_type) {
return "Managed User Settings";
case MANAGED_USERS:
return "Managed Users";
+ case ARTICLES:
+ return "Articles";
case PROXY_TABS:
return "Tabs";
default:
@@ -555,6 +565,8 @@ int ModelTypeToHistogramInt(ModelType model_type) {
return 26;
case MANAGED_USERS:
return 27;
+ case ARTICLES:
+ return 28;
// Silence a compiler warning.
case MODEL_TYPE_COUNT:
return 0;
@@ -640,6 +652,8 @@ ModelType ModelTypeFromString(const std::string& model_type_string) {
return MANAGED_USER_SETTINGS;
else if (model_type_string == "Managed Users")
return MANAGED_USERS;
+ else if (model_type_string == "Articles")
+ return ARTICLES;
else if (model_type_string == "Tabs")
return PROXY_TABS;
else
@@ -732,6 +746,8 @@ std::string ModelTypeToRootTag(ModelType type) {
return "google_chrome_managed_user_settings";
case MANAGED_USERS:
return "google_chrome_managed_users";
+ case ARTICLES:
+ return "google_chrome_articles";
case PROXY_TABS:
return std::string();
default:
@@ -771,6 +787,7 @@ const char kFaviconImageNotificationType[] = "FAVICON_IMAGE";
const char kFaviconTrackingNotificationType[] = "FAVICON_TRACKING";
const char kManagedUserSettingNotificationType[] = "MANAGED_USER_SETTING";
const char kManagedUserNotificationType[] = "MANAGED_USER";
+const char kArticleNotificationType[] = "ARTICLE";
} // namespace
bool RealModelTypeToNotificationType(ModelType model_type,
@@ -851,6 +868,9 @@ bool RealModelTypeToNotificationType(ModelType model_type,
case MANAGED_USERS:
*notification_type = kManagedUserNotificationType;
return true;
+ case ARTICLES:
+ *notification_type = kArticleNotificationType;
+ return true;
default:
break;
}
@@ -935,6 +955,9 @@ bool NotificationTypeToRealModelType(const std::string& notification_type,
} else if (notification_type == kManagedUserNotificationType) {
*model_type = MANAGED_USERS;
return true;
+ } else if (notification_type == kArticleNotificationType) {
+ *model_type = ARTICLES;
+ return true;
}
*model_type = UNSPECIFIED;
return false;
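
Registering the new ARTICLES type means updating every switch that maps model types to strings, specifics field numbers, histogram buckets, root tags and notification types; a missed case falls through to the NOTREACHED() default at runtime. A throwaway round-trip check along these lines (a standalone sketch, not the real ModelType code) is one way to catch a forgotten string mapping:

#include <cassert>
#include <initializer_list>
#include <string>

// Toy enum standing in for syncer::ModelType.
enum ToyModelType { BOOKMARKS_T, MANAGED_USERS_T, ARTICLES_T, UNSPECIFIED_T };

const char* ToyModelTypeToString(ToyModelType type) {
  switch (type) {
    case BOOKMARKS_T:     return "Bookmarks";
    case MANAGED_USERS_T: return "Managed Users";
    case ARTICLES_T:      return "Articles";  // new type must be added here...
    default:              return "Unspecified";
  }
}

ToyModelType ToyModelTypeFromString(const std::string& s) {
  if (s == "Bookmarks")     return BOOKMARKS_T;
  if (s == "Managed Users") return MANAGED_USERS_T;
  if (s == "Articles")      return ARTICLES_T;  // ...and here, or round-trips break.
  return UNSPECIFIED_T;
}

int main() {
  // Round-trip every real type through its string form.
  for (ToyModelType t : {BOOKMARKS_T, MANAGED_USERS_T, ARTICLES_T})
    assert(ToyModelTypeFromString(ToyModelTypeToString(t)) == t);
  return 0;
}
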
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 6dcf4841e6..863e65b8b3 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -47,7 +47,7 @@ void MutableEntry::Init(WriteTransaction* trans,
// Because this entry is new, it was originally deleted.
kernel->put(IS_DEL, true);
- trans->SaveOriginal(kernel.get());
+ trans->TrackChangesTo(kernel.get());
kernel->put(IS_DEL, false);
// Now swap the pointers.
@@ -59,7 +59,7 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
ModelType model_type,
const Id& parent_id,
const string& name)
- : ModelNeutralMutableEntry(trans) {
+ : ModelNeutralMutableEntry(trans), write_transaction_(trans) {
Init(trans, model_type, parent_id, name);
// We need to have a valid position ready before we can index the item.
if (model_type == BOOKMARKS) {
@@ -78,50 +78,35 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
const Id& id)
- : ModelNeutralMutableEntry(trans) {
- Entry same_id(trans, GET_BY_ID, id);
- kernel_ = NULL;
- if (same_id.good()) {
- return; // already have an item with this ID.
- }
- scoped_ptr<EntryKernel> kernel(new EntryKernel());
-
- kernel->put(ID, id);
- kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(&trans->directory_->kernel_->dirty_metahandles);
- kernel->put(IS_DEL, true);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
- if (!trans->directory()->InsertEntry(trans, kernel.get())) {
- return; // Failed inserting.
- }
- trans->SaveOriginal(kernel.get());
-
- kernel_ = kernel.release();
-}
+ : ModelNeutralMutableEntry(trans, CREATE_NEW_UPDATE_ITEM, id),
+ write_transaction_(trans) {}
MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
- : ModelNeutralMutableEntry(trans, GET_BY_ID, id) {
+ : ModelNeutralMutableEntry(trans, GET_BY_ID, id),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
int64 metahandle)
- : ModelNeutralMutableEntry(trans, GET_BY_HANDLE, metahandle) {
+ : ModelNeutralMutableEntry(trans, GET_BY_HANDLE, metahandle),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
const std::string& tag)
- : ModelNeutralMutableEntry(trans, GET_BY_CLIENT_TAG, tag) {
+ : ModelNeutralMutableEntry(trans, GET_BY_CLIENT_TAG, tag),
+ write_transaction_(trans) {
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
const string& tag)
- : ModelNeutralMutableEntry(trans, GET_BY_SERVER_TAG, tag) {
+ : ModelNeutralMutableEntry(trans, GET_BY_SERVER_TAG, tag),
+ write_transaction_(trans) {
}
void MutableEntry::PutLocalExternalId(int64 value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) {
ScopedKernelLock lock(dir());
kernel_->put(LOCAL_EXTERNAL_ID, value);
@@ -131,7 +116,7 @@ void MutableEntry::PutLocalExternalId(int64 value) {
void MutableEntry::PutMtime(base::Time value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(MTIME) != value) {
kernel_->put(MTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
@@ -140,7 +125,7 @@ void MutableEntry::PutMtime(base::Time value) {
void MutableEntry::PutCtime(base::Time value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(CTIME) != value) {
kernel_->put(CTIME, value);
kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
@@ -149,7 +134,7 @@ void MutableEntry::PutCtime(base::Time value) {
void MutableEntry::PutParentId(const Id& value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(PARENT_ID) != value) {
PutParentIdPropertyOnly(value);
if (!GetIsDel()) {
@@ -163,7 +148,7 @@ void MutableEntry::PutParentId(const Id& value) {
void MutableEntry::PutIsDir(bool value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
bool old_value = kernel_->ref(IS_DIR);
if (old_value != value) {
kernel_->put(IS_DIR, value);
@@ -173,7 +158,7 @@ void MutableEntry::PutIsDir(bool value) {
void MutableEntry::PutIsDel(bool value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (value == kernel_->ref(IS_DEL)) {
return;
}
@@ -205,7 +190,7 @@ void MutableEntry::PutIsDel(bool value) {
void MutableEntry::PutNonUniqueName(const std::string& value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(NON_UNIQUE_NAME) != value) {
kernel_->put(NON_UNIQUE_NAME, value);
@@ -216,7 +201,7 @@ void MutableEntry::PutNonUniqueName(const std::string& value) {
void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
DCHECK(kernel_);
CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
// TODO(ncarter): This is unfortunately heavyweight. Can we do
// better?
if (kernel_->ref(SPECIFICS).SerializeAsString() !=
@@ -228,7 +213,7 @@ void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
void MutableEntry::PutUniquePosition(const UniquePosition& value) {
DCHECK(kernel_);
- write_transaction()->SaveOriginal(kernel_);
+ write_transaction()->TrackChangesTo(kernel_);
if(!kernel_->ref(UNIQUE_POSITION).Equals(value)) {
// We should never overwrite a valid position with an invalid one.
DCHECK(value.IsValid());
diff --git a/sync/syncable/mutable_entry.h b/sync/syncable/mutable_entry.h
index f575c22f45..8c2f2ab549 100644
--- a/sync/syncable/mutable_entry.h
+++ b/sync/syncable/mutable_entry.h
@@ -20,10 +20,6 @@ enum Create {
CREATE
};
-enum CreateNewUpdateItem {
- CREATE_NEW_UPDATE_ITEM
-};
-
class WriteTransaction;
// A mutable meta entry. Changes get committed to the database when the
@@ -41,6 +37,10 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public ModelNeutralMutableEntry {
MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
MutableEntry(WriteTransaction* trans, GetByServerTag, const std::string& tag);
+ inline WriteTransaction* write_transaction() const {
+ return write_transaction_;
+ }
+
// Model-changing setters. These setters make user-visible changes that will
// need to be communicated either to the local model or the sync server.
void PutLocalExternalId(int64 value);
@@ -58,6 +58,14 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public ModelNeutralMutableEntry {
// and fails if |predecessor_id| does not identify a sibling. Pass the root
// ID to put the node in first position.
bool PutPredecessor(const Id& predecessor_id);
+
+ private:
+ // Kind of redundant. We should reduce the number of pointers
+ // floating around if at all possible. Could we store this in Directory?
+ // Scope: Set on construction, never changed after that.
+ WriteTransaction* const write_transaction_;
+
+ DISALLOW_COPY_AND_ASSIGN(MutableEntry);
};
// This function sets only the flags needed to get this entry to sync.
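
MutableEntry now keeps its own WriteTransaction* alongside the BaseWriteTransaction* stored by its base class; the comment above admits the duplication, which exists so write_transaction() can return the fully typed pointer without a downcast. A minimal sketch of that arrangement, using illustrative stand-in classes:

#include <iostream>

class BaseWriteTxn {
 public:
  virtual ~BaseWriteTxn() {}
};

// Full-featured transaction with capabilities the base class lacks.
class WriteTxn : public BaseWriteTxn {
 public:
  void UpdateTransactionVersion() { ++version_; }
  int version() const { return version_; }

 private:
  int version_ = 0;
};

class NeutralEntry {
 public:
  explicit NeutralEntry(BaseWriteTxn* txn) : base_txn_(txn) {}
  BaseWriteTxn* base_write_transaction() const { return base_txn_; }

 private:
  BaseWriteTxn* const base_txn_;
};

class FullEntry : public NeutralEntry {
 public:
  // The same object is stored twice: once as the base type (in NeutralEntry),
  // once at its full type here, so callers never need a cast.
  explicit FullEntry(WriteTxn* txn) : NeutralEntry(txn), write_txn_(txn) {}
  WriteTxn* write_transaction() const { return write_txn_; }

 private:
  WriteTxn* const write_txn_;
};

int main() {
  WriteTxn txn;
  FullEntry entry(&txn);
  entry.write_transaction()->UpdateTransactionVersion();  // no cast needed
  std::cout << txn.version() << "\n";  // 1
}
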
diff --git a/sync/syncable/nigori_util.cc b/sync/syncable/nigori_util.cc
index 9100e9d4fa..fbdd9a5568 100644
--- a/sync/syncable/nigori_util.cc
+++ b/sync/syncable/nigori_util.cc
@@ -242,7 +242,7 @@ void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
bool encrypt_everything,
sync_pb::NigoriSpecifics* nigori) {
nigori->set_encrypt_everything(encrypt_everything);
- COMPILE_ASSERT(28 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(29 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
nigori->set_encrypt_bookmarks(
encrypted_types.Has(BOOKMARKS));
nigori->set_encrypt_preferences(
@@ -268,6 +268,7 @@ void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
nigori->set_encrypt_dictionary(encrypted_types.Has(DICTIONARY));
nigori->set_encrypt_favicon_images(encrypted_types.Has(FAVICON_IMAGES));
nigori->set_encrypt_favicon_tracking(encrypted_types.Has(FAVICON_TRACKING));
+ nigori->set_encrypt_articles(encrypted_types.Has(ARTICLES));
}
ModelTypeSet GetEncryptedTypesFromNigori(
@@ -276,7 +277,7 @@ ModelTypeSet GetEncryptedTypesFromNigori(
return ModelTypeSet::All();
ModelTypeSet encrypted_types;
- COMPILE_ASSERT(28 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(29 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
if (nigori.encrypt_bookmarks())
encrypted_types.Put(BOOKMARKS);
if (nigori.encrypt_preferences())
@@ -309,6 +310,8 @@ ModelTypeSet GetEncryptedTypesFromNigori(
encrypted_types.Put(FAVICON_IMAGES);
if (nigori.encrypt_favicon_tracking())
encrypted_types.Put(FAVICON_TRACKING);
+ if (nigori.encrypt_articles())
+ encrypted_types.Put(ARTICLES);
return encrypted_types;
}
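
The COMPILE_ASSERT on MODEL_TYPE_COUNT is what drags this file into every model-type addition: bumping the count without auditing the nigori mapping breaks the build instead of silently leaving the new type out of the encrypted set. The same guard expressed with portable static_assert, as a standalone sketch rather than the real nigori code:

#include <bitset>

enum ToyModelType {
  TOY_BOOKMARKS,
  TOY_PREFERENCES,
  TOY_ARTICLES,  // newly added type
  TOY_MODEL_TYPE_COUNT
};

std::bitset<TOY_MODEL_TYPE_COUNT> GetEncryptedTypes(bool encrypt_articles) {
  // If someone adds a type and bumps TOY_MODEL_TYPE_COUNT, this assert fires
  // until the mapping below is audited and the expected count is updated.
  static_assert(3 == TOY_MODEL_TYPE_COUNT,
                "new model type added; update the encrypted-types mapping");
  std::bitset<TOY_MODEL_TYPE_COUNT> encrypted;
  encrypted.set(TOY_BOOKMARKS);
  encrypted.set(TOY_PREFERENCES);
  if (encrypt_articles)
    encrypted.set(TOY_ARTICLES);
  return encrypted;
}

int main() {
  return GetEncryptedTypes(true).test(TOY_ARTICLES) ? 0 : 1;
}
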
diff --git a/sync/syncable/syncable_base_write_transaction.cc b/sync/syncable/syncable_base_write_transaction.cc
new file mode 100644
index 0000000000..a575c699fb
--- /dev/null
+++ b/sync/syncable/syncable_base_write_transaction.cc
@@ -0,0 +1,22 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_base_write_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+BaseWriteTransaction::BaseWriteTransaction(
+ const tracked_objects::Location location,
+ const char* name,
+ WriterTag writer,
+ Directory* directory)
+ : BaseTransaction(location, name, writer, directory) {
+}
+
+BaseWriteTransaction::~BaseWriteTransaction() {}
+
+} // namespace syncable
+} // namespace syncer
+
diff --git a/sync/syncable/syncable_base_write_transaction.h b/sync/syncable/syncable_base_write_transaction.h
new file mode 100644
index 0000000000..8ea91a1b10
--- /dev/null
+++ b/sync/syncable/syncable_base_write_transaction.h
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
+#define SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/syncable/syncable_base_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+// A base class shared by both ModelNeutralWriteTransaction and
+// WriteTransaction.
+class SYNC_EXPORT BaseWriteTransaction : public BaseTransaction {
+ public:
+ virtual void TrackChangesTo(const EntryKernel* entry) = 0;
+
+ protected:
+ BaseWriteTransaction(
+ const tracked_objects::Location location,
+ const char* name,
+ WriterTag writer,
+ Directory* directory);
+ virtual ~BaseWriteTransaction();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BaseWriteTransaction);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
diff --git a/sync/syncable/syncable_model_neutral_write_transaction.cc b/sync/syncable/syncable_model_neutral_write_transaction.cc
new file mode 100644
index 0000000000..9aaf740072
--- /dev/null
+++ b/sync/syncable/syncable_model_neutral_write_transaction.cc
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_model_neutral_write_transaction.h"
+
+#include "sync/syncable/directory.h"
+
+namespace syncer {
+namespace syncable {
+
+ModelNeutralWriteTransaction::ModelNeutralWriteTransaction(
+ const tracked_objects::Location& location,
+ WriterTag writer, Directory* directory)
+ : BaseWriteTransaction(location,
+ "ModelNeutralWriteTransaction",
+ writer,
+ directory) {
+ Lock();
+}
+
+ModelNeutralWriteTransaction::~ModelNeutralWriteTransaction() {
+ directory()->CheckInvariantsOnTransactionClose(this, modified_handles_);
+ HandleUnrecoverableErrorIfSet();
+ Unlock();
+}
+
+void ModelNeutralWriteTransaction::TrackChangesTo(const EntryKernel* entry) {
+ modified_handles_.insert(entry->ref(META_HANDLE));
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/sync/syncable/syncable_model_neutral_write_transaction.h b/sync/syncable/syncable_model_neutral_write_transaction.h
new file mode 100644
index 0000000000..f96725ed69
--- /dev/null
+++ b/sync/syncable/syncable_model_neutral_write_transaction.h
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
+#define SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
+
+#include "sync/base/sync_export.h"
+#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
+
+namespace syncer {
+namespace syncable {
+
+// A transaction used to instantiate Entries or ModelNeutralMutableEntries.
+//
+// This allows it to be used when making changes to sync entity properties that
+// do not need to be kept in sync with the associated native model.
+//
+// This class differs internally from WriteTransaction in that it does less
+// thorough tracking and reporting of changes to the entries modified within
+// its scope. This is because its changes do not need to be reported to the
+// DirectoryChangeDelegate.
+class SYNC_EXPORT_PRIVATE ModelNeutralWriteTransaction
+ : public BaseWriteTransaction {
+ public:
+ ModelNeutralWriteTransaction(
+ const tracked_objects::Location& location,
+ WriterTag writer,
+ Directory* directory);
+ virtual ~ModelNeutralWriteTransaction();
+
+ virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
+
+ private:
+ MetahandleSet modified_handles_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelNeutralWriteTransaction);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
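
The new transaction mirrors WriteTransaction's RAII shape: lock the directory in the constructor, accumulate the metahandles passed to TrackChangesTo(), then check invariants and unlock in the destructor. A self-contained sketch of that lifecycle, with a std::mutex standing in for the directory lock and toy names throughout:

#include <cstdint>
#include <iostream>
#include <mutex>
#include <set>

class ToyDirectory {
 public:
  std::mutex& lock() { return lock_; }
  // Stand-in for CheckInvariantsOnTransactionClose().
  void CheckInvariants(const std::set<int64_t>& handles) {
    std::cout << "checking " << handles.size() << " modified handle(s)\n";
  }

 private:
  std::mutex lock_;
};

class ToyModelNeutralWriteTransaction {
 public:
  explicit ToyModelNeutralWriteTransaction(ToyDirectory* dir) : dir_(dir) {
    dir_->lock().lock();  // Lock() in the constructor
  }
  ~ToyModelNeutralWriteTransaction() {
    dir_->CheckInvariants(modified_handles_);  // verify before releasing
    dir_->lock().unlock();  // Unlock() in the destructor
  }

  // Only the metahandle is recorded; no before/after snapshots are kept,
  // since nothing here is reported to a change delegate.
  void TrackChangesTo(int64_t meta_handle) {
    modified_handles_.insert(meta_handle);
  }

 private:
  ToyDirectory* const dir_;
  std::set<int64_t> modified_handles_;
};

int main() {
  ToyDirectory dir;
  {
    ToyModelNeutralWriteTransaction trans(&dir);
    trans.TrackChangesTo(7);
    trans.TrackChangesTo(7);  // duplicates collapse in the set
  }  // invariants checked and lock released here
}
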
diff --git a/sync/syncable/syncable_util.cc b/sync/syncable/syncable_util.cc
index 05cc2a9638..74ae06a001 100644
--- a/sync/syncable/syncable_util.cc
+++ b/sync/syncable/syncable_util.cc
@@ -47,8 +47,8 @@ bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
}
void ChangeEntryIDAndUpdateChildren(
- WriteTransaction* trans,
- MutableEntry* entry,
+ BaseWriteTransaction* trans,
+ ModelNeutralMutableEntry* entry,
const Id& new_id) {
Id old_id = entry->GetId();
if (!entry->PutId(new_id)) {
@@ -64,7 +64,7 @@ void ChangeEntryIDAndUpdateChildren(
trans->directory()->GetChildHandlesById(trans, old_id, &children);
Directory::Metahandles::iterator i = children.begin();
while (i != children.end()) {
- MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+ ModelNeutralMutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
CHECK(child_entry.good());
// Use the unchecked setter here to avoid touching the child's
// UNIQUE_POSITION field. In this case, UNIQUE_POSITION among the
diff --git a/sync/syncable/syncable_util.h b/sync/syncable/syncable_util.h
index f7d351e0e2..be903fd595 100644
--- a/sync/syncable/syncable_util.h
+++ b/sync/syncable/syncable_util.h
@@ -20,13 +20,14 @@ namespace syncer {
namespace syncable {
class BaseTransaction;
-class WriteTransaction;
-class MutableEntry;
+class BaseWriteTransaction;
+class ModelNeutralMutableEntry;
class Id;
-SYNC_EXPORT_PRIVATE void ChangeEntryIDAndUpdateChildren(WriteTransaction* trans,
- MutableEntry* entry,
- const Id& new_id);
+SYNC_EXPORT_PRIVATE void ChangeEntryIDAndUpdateChildren(
+ BaseWriteTransaction* trans,
+ ModelNeutralMutableEntry* entry,
+ const Id& new_id);
SYNC_EXPORT_PRIVATE bool IsLegalNewParent(BaseTransaction* trans,
const Id& id,
diff --git a/sync/syncable/syncable_write_transaction.cc b/sync/syncable/syncable_write_transaction.cc
index 057b258718..d97ff6728a 100644
--- a/sync/syncable/syncable_write_transaction.cc
+++ b/sync/syncable/syncable_write_transaction.cc
@@ -17,7 +17,7 @@ const int64 kInvalidTransactionVersion = -1;
WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
WriterTag writer, Directory* directory)
- : BaseTransaction(location, "WriteTransaction", writer, directory),
+ : BaseWriteTransaction(location, "WriteTransaction", writer, directory),
transaction_version_(NULL) {
Lock();
}
@@ -25,14 +25,14 @@ WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
Directory* directory,
int64* transaction_version)
- : BaseTransaction(location, "WriteTransaction", SYNCAPI, directory),
+ : BaseWriteTransaction(location, "WriteTransaction", SYNCAPI, directory),
transaction_version_(transaction_version) {
Lock();
if (transaction_version_)
*transaction_version_ = kInvalidTransactionVersion;
}
-void WriteTransaction::SaveOriginal(const EntryKernel* entry) {
+void WriteTransaction::TrackChangesTo(const EntryKernel* entry) {
if (!entry) {
return;
}
@@ -147,7 +147,13 @@ void WriteTransaction::UpdateTransactionVersion(
WriteTransaction::~WriteTransaction() {
const ImmutableEntryKernelMutationMap& mutations = RecordMutations();
- directory()->CheckInvariantsOnTransactionClose(this, mutations.Get());
+
+ MetahandleSet modified_handles;
+ for (EntryKernelMutationMap::const_iterator i = mutations.Get().begin();
+ i != mutations.Get().end(); ++i) {
+ modified_handles.insert(i->first);
+ }
+ directory()->CheckInvariantsOnTransactionClose(this, modified_handles);
// |CheckTreeInvariants| could have thrown an unrecoverable error.
if (unrecoverable_error_set_) {
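
Since CheckInvariantsOnTransactionClose() now takes a set of metahandles for both transaction flavors, WriteTransaction reduces its richer mutation map to just its keys before calling it. The key-extraction step in isolation, sketched over standard containers:

#include <cstdint>
#include <map>
#include <set>
#include <string>

int main() {
  // Stand-in for the EntryKernelMutationMap: metahandle -> recorded mutation.
  std::map<int64_t, std::string> mutations = {
      {11, "renamed"}, {12, "reparented"}};

  // Collapse the map to just the touched metahandles, which is all the
  // shared invariant check needs.
  std::set<int64_t> modified_handles;
  for (std::map<int64_t, std::string>::const_iterator it = mutations.begin();
       it != mutations.end(); ++it) {
    modified_handles.insert(it->first);
  }
  return modified_handles.size() == 2 ? 0 : 1;
}
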
diff --git a/sync/syncable/syncable_write_transaction.h b/sync/syncable/syncable_write_transaction.h
index 0debaa5a36..4d16aca33c 100644
--- a/sync/syncable/syncable_write_transaction.h
+++ b/sync/syncable/syncable_write_transaction.h
@@ -7,7 +7,7 @@
#include "sync/base/sync_export.h"
#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_base_transaction.h"
+#include "sync/syncable/syncable_base_write_transaction.h"
namespace syncer {
namespace syncable {
@@ -15,7 +15,7 @@ namespace syncable {
SYNC_EXPORT extern const int64 kInvalidTransactionVersion;
// Locks db in constructor, unlocks in destructor.
-class SYNC_EXPORT WriteTransaction : public BaseTransaction {
+class SYNC_EXPORT WriteTransaction : public BaseWriteTransaction {
public:
WriteTransaction(const tracked_objects::Location& from_here,
WriterTag writer, Directory* directory);
@@ -30,7 +30,7 @@ class SYNC_EXPORT WriteTransaction : public BaseTransaction {
virtual ~WriteTransaction();
- void SaveOriginal(const EntryKernel* entry);
+ virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
protected:
// Overridden by tests.
diff --git a/sync/tools/sync_listen_notifications.cc b/sync/tools/sync_listen_notifications.cc
index b70ee6d57c..f9d1989de8 100644
--- a/sync/tools/sync_listen_notifications.cc
+++ b/sync/tools/sync_listen_notifications.cc
@@ -59,12 +59,10 @@ class NotificationPrinter : public InvalidationHandler {
virtual void OnIncomingInvalidation(
const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- for (ObjectIdInvalidationMap::const_iterator it = invalidation_map.begin();
- it != invalidation_map.end(); ++it) {
- LOG(INFO) << "Remote invalidation: id = "
- << ObjectIdToString(it->first)
- << ", version = " << it->second.version
- << ", payload = " << it->second.payload;
+ ObjectIdSet ids = invalidation_map.GetObjectIds();
+ for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
+ LOG(INFO) << "Remote invalidation: "
+ << invalidation_map.ToString();
}
}
diff --git a/sync/tools/testserver/chromiumsync.py b/sync/tools/testserver/chromiumsync.py
index b95c6be6e4..d8074868c8 100644
--- a/sync/tools/testserver/chromiumsync.py
+++ b/sync/tools/testserver/chromiumsync.py
@@ -22,6 +22,7 @@ import urlparse
import app_notification_specifics_pb2
import app_setting_specifics_pb2
import app_specifics_pb2
+import article_specifics_pb2
import autofill_specifics_pb2
import bookmark_specifics_pb2
import dictionary_specifics_pb2
@@ -54,6 +55,7 @@ ALL_TYPES = (
APPS,
APP_NOTIFICATION,
APP_SETTINGS,
+ ARTICLE,
AUTOFILL,
AUTOFILL_PROFILE,
BOOKMARK,
@@ -75,7 +77,7 @@ ALL_TYPES = (
TYPED_URL,
EXTENSION_SETTINGS,
FAVICON_IMAGES,
- FAVICON_TRACKING) = range(26)
+ FAVICON_TRACKING) = range(27)
# An enumeration on the frequency at which the server should send errors
# to the client. This would be specified by the url that triggers the error.
@@ -95,6 +97,7 @@ SYNC_TYPE_TO_DESCRIPTOR = {
APP_NOTIFICATION: SYNC_TYPE_FIELDS['app_notification'],
APP_SETTINGS: SYNC_TYPE_FIELDS['app_setting'],
APPS: SYNC_TYPE_FIELDS['app'],
+ ARTICLE: SYNC_TYPE_FIELDS['article'],
AUTOFILL: SYNC_TYPE_FIELDS['autofill'],
AUTOFILL_PROFILE: SYNC_TYPE_FIELDS['autofill_profile'],
BOOKMARK: SYNC_TYPE_FIELDS['bookmark'],
@@ -530,6 +533,8 @@ class SyncDataModel(object):
parent_tag=ROOT_ID, sync_type=TYPED_URL),
PermanentItem('google_chrome_dictionary', name='Dictionary',
parent_tag=ROOT_ID, sync_type=DICTIONARY),
+ PermanentItem('google_chrome_articles', name='Articles',
+ parent_tag=ROOT_ID, sync_type=ARTICLE),
]
def __init__(self):
diff --git a/sync/util/data_type_histogram.h b/sync/util/data_type_histogram.h
index 5c8e840a74..01a2ba0155 100644
--- a/sync/util/data_type_histogram.h
+++ b/sync/util/data_type_histogram.h
@@ -111,6 +111,9 @@
case ::syncer::MANAGED_USERS: \
PER_DATA_TYPE_MACRO("ManagedUser"); \
break; \
+ case ::syncer::ARTICLES: \
+ PER_DATA_TYPE_MACRO("Article"); \
+ break; \
case ::syncer::PROXY_TABS: \
PER_DATA_TYPE_MACRO("Tabs"); \
break; \
diff --git a/sync/util/get_session_name.cc b/sync/util/get_session_name.cc
index 73041aebdd..d51c93e54e 100644
--- a/sync/util/get_session_name.cc
+++ b/sync/util/get_session_name.cc
@@ -35,7 +35,7 @@ std::string GetSessionNameSynchronously() {
#if defined(OS_CHROMEOS)
// The approach below is similar to that used by the CrOs implementation of
// StatisticsProvider::GetMachineStatistic(CHROMEOS_RELEASE_BOARD).
- // See chrome/browser/chromeos/system/statistics_provider.{h|cc}.
+ // See chromeos/system/statistics_provider.{h|cc}.
//
// We cannot use StatisticsProvider here because of the mutual dependency
// it creates between sync.gyp:sync and chrome.gyp:browser.