summaryrefslogtreecommitdiff
path: root/sync
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2013-05-09 18:35:53 +0100
committerTorne (Richard Coles) <torne@google.com>2013-05-13 13:57:14 +0100
commitc2e0dbddbe15c98d52c4786dac06cb8952a8ae6d (patch)
tree1dbdbb0624cc869ab25ee7f46971984c6fee3e7a /sync
parent2d519ce2457219605d4f472da8d2ffd469796035 (diff)
downloadchromium_org-c2e0dbddbe15c98d52c4786dac06cb8952a8ae6d.tar.gz
Merge from Chromium at DEPS revision r198571
This commit was generated by merge_to_master.py. Change-Id: I951118a03836157090561764dd2627f0add8118f
Diffstat (limited to 'sync')
-rw-r--r--sync/android/java/src/org/chromium/sync/internal_api/pub/base/ModelType.java59
-rw-r--r--sync/android/java/src/org/chromium/sync/notifier/InvalidationController.java26
-rw-r--r--sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolver.java17
-rw-r--r--sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolverImpl.java37
-rw-r--r--sync/android/java/src/org/chromium/sync/signin/AccountManagerDelegate.java3
-rw-r--r--sync/android/java/src/org/chromium/sync/signin/AccountManagerHelper.java22
-rw-r--r--sync/android/java/src/org/chromium/sync/signin/SystemAccountManagerDelegate.java6
-rw-r--r--sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java23
-rw-r--r--sync/android/javatests/src/org/chromium/sync/notifier/ModelTypeResolverTest.java72
-rw-r--r--sync/api/sync_data.cc2
-rw-r--r--sync/api/syncable_service.h10
-rw-r--r--sync/engine/apply_control_data_updates_unittest.cc8
-rw-r--r--sync/engine/build_commit_command.cc91
-rw-r--r--sync/engine/build_commit_command.h16
-rw-r--r--sync/engine/build_commit_command_unittest.cc105
-rw-r--r--sync/engine/conflict_resolver.cc66
-rw-r--r--sync/engine/net/server_connection_manager.cc43
-rw-r--r--sync/engine/net/server_connection_manager.h19
-rw-r--r--sync/engine/process_commit_response_command.cc31
-rw-r--r--sync/engine/process_commit_response_command_unittest.cc41
-rw-r--r--sync/engine/process_updates_command.cc24
-rw-r--r--sync/engine/process_updates_command_unittest.cc109
-rw-r--r--sync/engine/sync_scheduler.h2
-rw-r--r--sync/engine/sync_scheduler_impl.cc826
-rw-r--r--sync/engine/sync_scheduler_impl.h175
-rw-r--r--sync/engine/sync_scheduler_unittest.cc40
-rw-r--r--sync/engine/sync_scheduler_whitebox_unittest.cc276
-rw-r--r--sync/engine/sync_session_job.cc157
-rw-r--r--sync/engine/sync_session_job.h110
-rw-r--r--sync/engine/sync_session_job_unittest.cc207
-rw-r--r--sync/engine/syncer.cc23
-rw-r--r--sync/engine/syncer.h1
-rw-r--r--sync/engine/syncer_proto_util.cc10
-rw-r--r--sync/engine/syncer_proto_util.h2
-rw-r--r--sync/engine/syncer_unittest.cc325
-rw-r--r--sync/engine/syncer_util.cc143
-rw-r--r--sync/engine/syncer_util.h45
-rw-r--r--sync/internal_api/DEPS1
-rw-r--r--sync/internal_api/base_node.cc52
-rw-r--r--sync/internal_api/change_reorder_buffer.cc10
-rw-r--r--sync/internal_api/debug_info_event_listener.cc22
-rw-r--r--sync/internal_api/http_bridge.cc40
-rw-r--r--sync/internal_api/http_bridge_unittest.cc14
-rw-r--r--sync/internal_api/js_mutation_event_observer.cc2
-rw-r--r--sync/internal_api/js_sync_manager_observer_unittest.cc1
-rw-r--r--sync/internal_api/public/base/model_type.h23
-rw-r--r--sync/internal_api/public/base/model_type_invalidation_map_unittest.cc2
-rw-r--r--sync/internal_api/public/base/ordinal_unittest.cc2
-rw-r--r--sync/internal_api/public/base/unique_position.cc79
-rw-r--r--sync/internal_api/public/base/unique_position.h5
-rw-r--r--sync/internal_api/public/base/unique_position_unittest.cc64
-rw-r--r--sync/internal_api/public/base_node.h5
-rw-r--r--sync/internal_api/public/engine/passive_model_worker.h6
-rw-r--r--sync/internal_api/public/http_bridge.h39
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot.cc14
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot.h3
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc9
-rw-r--r--sync/internal_api/public/sync_manager.h7
-rw-r--r--sync/internal_api/public/test/fake_sync_manager.h8
-rw-r--r--sync/internal_api/public/util/experiments.h16
-rw-r--r--sync/internal_api/public/util/weak_handle_unittest.cc2
-rw-r--r--sync/internal_api/public/write_node.h5
-rw-r--r--sync/internal_api/sync_encryption_handler_impl.cc10
-rw-r--r--sync/internal_api/sync_encryption_handler_impl_unittest.cc13
-rw-r--r--sync/internal_api/sync_manager_impl.cc63
-rw-r--r--sync/internal_api/sync_manager_impl.h10
-rw-r--r--sync/internal_api/sync_manager_impl_unittest.cc164
-rw-r--r--sync/internal_api/syncapi_server_connection_manager_unittest.cc2
-rw-r--r--sync/internal_api/test/fake_sync_manager.cc11
-rw-r--r--sync/internal_api/test/test_entry_factory.cc19
-rw-r--r--sync/internal_api/test/test_user_share.cc2
-rw-r--r--sync/internal_api/write_node.cc9
-rw-r--r--sync/notifier/fake_invalidation_state_tracker.cc7
-rw-r--r--sync/notifier/fake_invalidation_state_tracker.h1
-rw-r--r--sync/notifier/fake_invalidator.cc8
-rw-r--r--sync/notifier/fake_invalidator.h2
-rw-r--r--sync/notifier/fake_invalidator_unittest.cc1
-rw-r--r--sync/notifier/invalidation_notifier.cc11
-rw-r--r--sync/notifier/invalidation_notifier.h4
-rw-r--r--sync/notifier/invalidation_notifier_unittest.cc2
-rw-r--r--sync/notifier/invalidation_state_tracker.h3
-rw-r--r--sync/notifier/invalidator.h5
-rw-r--r--sync/notifier/invalidator_factory.cc53
-rw-r--r--sync/notifier/invalidator_factory.h13
-rw-r--r--sync/notifier/invalidator_registrar_unittest.cc5
-rw-r--r--sync/notifier/invalidator_test_template.h4
-rw-r--r--sync/notifier/non_blocking_invalidator.cc23
-rw-r--r--sync/notifier/non_blocking_invalidator.h2
-rw-r--r--sync/notifier/non_blocking_invalidator_unittest.cc2
-rw-r--r--sync/notifier/p2p_invalidator.cc32
-rw-r--r--sync/notifier/p2p_invalidator.h4
-rw-r--r--sync/notifier/p2p_invalidator_unittest.cc22
-rw-r--r--sync/notifier/push_client_channel.cc2
-rw-r--r--sync/notifier/push_client_channel_unittest.cc2
-rw-r--r--sync/notifier/sync_invalidation_listener.cc21
-rw-r--r--sync/notifier/sync_invalidation_listener_unittest.cc15
-rw-r--r--sync/notifier/sync_system_resources.cc7
-rw-r--r--sync/notifier/sync_system_resources_unittest.cc5
-rw-r--r--sync/protocol/experiments_specifics.proto4
-rw-r--r--sync/protocol/get_updates_caller_info.proto3
-rw-r--r--sync/protocol/managed_user_setting_specifics.proto21
-rw-r--r--sync/protocol/priority_preference_specifics.proto5
-rw-r--r--sync/protocol/proto_enum_conversions.cc17
-rw-r--r--sync/protocol/proto_enum_conversions.h3
-rw-r--r--sync/protocol/proto_value_conversions.cc61
-rw-r--r--sync/protocol/proto_value_conversions.h4
-rw-r--r--sync/protocol/proto_value_conversions_unittest.cc8
-rw-r--r--sync/protocol/sync.proto85
-rw-r--r--sync/protocol/sync_enums.proto30
-rw-r--r--sync/protocol/synced_notification_data.proto30
-rw-r--r--sync/protocol/synced_notification_render.proto143
-rw-r--r--sync/protocol/unique_position.proto10
-rw-r--r--sync/sessions/nudge_tracker.cc50
-rw-r--r--sync/sessions/nudge_tracker.h54
-rw-r--r--sync/sessions/nudge_tracker_unittest.cc112
-rw-r--r--sync/sessions/sync_session.cc8
-rw-r--r--sync/sessions/sync_session.h8
-rw-r--r--sync/sessions/sync_session_unittest.cc20
-rw-r--r--sync/sync_core.gypi9
-rw-r--r--sync/sync_internal_api.gypi1
-rw-r--r--sync/sync_proto.gypi1
-rw-r--r--sync/sync_tests.gypi5
-rw-r--r--sync/syncable/directory.cc395
-rw-r--r--sync/syncable/directory.h109
-rw-r--r--sync/syncable/directory_backing_store.cc188
-rw-r--r--sync/syncable/directory_backing_store.h1
-rw-r--r--sync/syncable/directory_backing_store_unittest.cc255
-rw-r--r--sync/syncable/entry.cc21
-rw-r--r--sync/syncable/entry.h13
-rw-r--r--sync/syncable/entry_kernel.cc35
-rw-r--r--sync/syncable/entry_kernel.h40
-rw-r--r--sync/syncable/in_memory_directory_backing_store.cc11
-rw-r--r--sync/syncable/in_memory_directory_backing_store.h6
-rw-r--r--sync/syncable/model_type.cc41
-rw-r--r--sync/syncable/mutable_entry.cc138
-rw-r--r--sync/syncable/mutable_entry.h5
-rw-r--r--sync/syncable/nigori_util.cc18
-rw-r--r--sync/syncable/parent_child_index.cc115
-rw-r--r--sync/syncable/parent_child_index.h66
-rw-r--r--sync/syncable/parent_child_index_unittest.cc344
-rw-r--r--sync/syncable/scoped_parent_child_index_updater.cc28
-rw-r--r--sync/syncable/scoped_parent_child_index_updater.h37
-rw-r--r--sync/syncable/syncable_columns.h8
-rw-r--r--sync/syncable/syncable_enum_conversions.cc21
-rw-r--r--sync/syncable/syncable_enum_conversions.h4
-rw-r--r--sync/syncable/syncable_enum_conversions_unittest.cc5
-rw-r--r--sync/syncable/syncable_unittest.cc84
-rw-r--r--sync/syncable/syncable_util.cc29
-rw-r--r--sync/syncable/syncable_util.h7
-rw-r--r--sync/syncable/syncable_write_transaction.cc2
-rw-r--r--sync/test/android/javatests/src/org/chromium/sync/test/util/MockAccountManager.java9
-rw-r--r--sync/test/engine/mock_connection_manager.cc36
-rw-r--r--sync/test/engine/mock_connection_manager.h1
-rw-r--r--sync/test/engine/syncer_command_test.h2
-rw-r--r--sync/test/local_sync_test_server.cc16
-rw-r--r--sync/test/test_directory_backing_store.h3
-rw-r--r--sync/tools/null_invalidation_state_tracker.cc10
-rw-r--r--sync/tools/null_invalidation_state_tracker.h2
-rw-r--r--sync/tools/sync_client.cc7
-rw-r--r--sync/tools/sync_listen_notifications.cc4
-rw-r--r--sync/tools/testserver/chromiumsync.py36
-rwxr-xr-xsync/tools/testserver/chromiumsync_test.py2
-rw-r--r--sync/util/cryptographer.cc16
-rw-r--r--sync/util/data_type_histogram.h3
-rw-r--r--sync/util/get_session_name_ios.mm2
-rw-r--r--sync/util/get_session_name_mac.mm2
166 files changed, 3507 insertions, 3524 deletions
diff --git a/sync/android/java/src/org/chromium/sync/internal_api/pub/base/ModelType.java b/sync/android/java/src/org/chromium/sync/internal_api/pub/base/ModelType.java
index 4ada2fe2a6..2b0ad855e6 100644
--- a/sync/android/java/src/org/chromium/sync/internal_api/pub/base/ModelType.java
+++ b/sync/android/java/src/org/chromium/sync/internal_api/pub/base/ModelType.java
@@ -35,11 +35,11 @@ public enum ModelType {
/**
* Flags to enable experimental features.
*/
- EXPERIMENTS("EXPERIMENTS", true),
+ EXPERIMENTS("EXPERIMENTS"),
/**
* An object representing a set of Nigori keys.
*/
- NIGORI("NIGORI", true),
+ NIGORI("NIGORI"),
/**
* A password entry.
*/
@@ -51,7 +51,27 @@ public enum ModelType {
/**
* A typed_url folder or a typed_url object.
*/
- TYPED_URL("TYPED_URL");
+ TYPED_URL("TYPED_URL"),
+ /**
+ * A history delete directive object.
+ */
+ HISTORY_DELETE_DIRECTIVE("HISTORY_DELETE_DIRECTIVE"),
+ /**
+ * A device info object.
+ */
+ DEVICE_INFO("DEVICE_INFO"),
+ /**
+ * A proxy tabs object (placeholder for sessions).
+ */
+ PROXY_TABS("NULL"),
+ /**
+ * A favicon image object.
+ */
+ FAVICON_IMAGE("FAVICON_IMAGE"),
+ /**
+ * A favicon tracking object.
+ */
+ FAVICON_TRACKING("FAVICON_TRACKING");
/** Special type representing all possible types. */
public static final String ALL_TYPES_TYPE = "ALL_TYPES";
@@ -60,18 +80,8 @@ public enum ModelType {
private final String mModelType;
- /**
- * True if this is a control type.
- */
- private final boolean mControl;
-
ModelType(String modelType) {
- this(modelType, false);
- }
-
- ModelType(String modelType, boolean control) {
mModelType = modelType;
- mControl = control;
}
public ObjectId toObjectId() {
@@ -131,27 +141,4 @@ public enum ModelType {
}
return objectIds;
}
-
- /**
- * Returns a set of all the control {@link ModelType}s.
- */
- public static Set<ModelType> controlTypes() {
- Set<ModelType> controlTypes = new HashSet<ModelType>();
- for (ModelType modelType : values()) {
- if (modelType.mControl) {
- controlTypes.add(modelType);
- }
- }
- return controlTypes;
- }
-
- /**
- * Returns a Multimap of all the {@link ModelType} groups. The key is the main
- * {@link ModelType}, and the value is a collection of {@link ModelType}s in the same group.
- */
- public static Multimap<ModelType, ModelType> modelTypeGroups() {
- Multimap<ModelType, ModelType> modelTypeGroups = HashMultimap.create();
- modelTypeGroups.put(AUTOFILL, AUTOFILL_PROFILE);
- return modelTypeGroups;
- }
}
diff --git a/sync/android/java/src/org/chromium/sync/notifier/InvalidationController.java b/sync/android/java/src/org/chromium/sync/notifier/InvalidationController.java
index cd98293b56..aa6b3d3838 100644
--- a/sync/android/java/src/org/chromium/sync/notifier/InvalidationController.java
+++ b/sync/android/java/src/org/chromium/sync/notifier/InvalidationController.java
@@ -119,7 +119,9 @@ public class InvalidationController implements ActivityStatus.StateListener {
* @param types Set of types for which to register. Ignored if {@code allTypes == true}.
*/
public void setRegisteredTypes(Account account, boolean allTypes, Set<ModelType> types) {
- Set<ModelType> typesToRegister = getModelTypeResolver().resolveModelTypes(types);
+ Set<ModelType> typesToRegister = types;
+ // Proxy types should never receive notifications.
+ typesToRegister.remove(ModelType.PROXY_TABS);
Intent registerIntent = IntentProtocol.createRegisterIntent(account, allTypes,
typesToRegister);
setDestinationClassName(registerIntent);
@@ -132,6 +134,7 @@ public class InvalidationController implements ActivityStatus.StateListener {
* values. It can be used on startup of Chrome to ensure we always have a consistent set of
* registrations.
*/
+ @Deprecated
public void refreshRegisteredTypes() {
InvalidationPreferences invalidationPreferences = new InvalidationPreferences(mContext);
Set<String> savedSyncedTypes = invalidationPreferences.getSavedSyncedTypes();
@@ -144,6 +147,22 @@ public class InvalidationController implements ActivityStatus.StateListener {
}
/**
+ * Reads all stored preferences and calls
+ * {@link #setRegisteredTypes(android.accounts.Account, boolean, java.util.Set)} with the stored
+ * values, refreshing the set of types with {@code types}. It can be used on startup of Chrome
+ * to ensure we always have a set of registrations consistent with the native code.
+ * @param types Set of types for which to register.
+ */
+ public void refreshRegisteredTypes(Set<ModelType> types) {
+ InvalidationPreferences invalidationPreferences = new InvalidationPreferences(mContext);
+ Set<String> savedSyncedTypes = invalidationPreferences.getSavedSyncedTypes();
+ Account account = invalidationPreferences.getSavedSyncedAccount();
+ boolean allTypes = savedSyncedTypes != null &&
+ savedSyncedTypes.contains(ModelType.ALL_TYPES_TYPE);
+ setRegisteredTypes(account, allTypes, types);
+ }
+
+ /**
* Starts the invalidation client.
*/
public void start() {
@@ -240,11 +259,6 @@ public class InvalidationController implements ActivityStatus.StateListener {
return null;
}
- @VisibleForTesting
- ModelTypeResolver getModelTypeResolver() {
- return new ModelTypeResolverImpl();
- }
-
@Override
public void onActivityStateChange(int newState) {
if (SyncStatusHelper.get(mContext).isSyncEnabled()) {
diff --git a/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolver.java b/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolver.java
deleted file mode 100644
index 7a30e2f793..0000000000
--- a/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolver.java
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.sync.notifier;
-
-import org.chromium.sync.internal_api.pub.base.ModelType;
-
-import java.util.Set;
-
-/**
- * A utility class that supports groups of {@link ModelType}s and also supports adding the default
- * set of {@link ModelType}s.
- */
-interface ModelTypeResolver {
- Set<ModelType> resolveModelTypes(Set<ModelType> modelTypes);
-}
diff --git a/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolverImpl.java b/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolverImpl.java
deleted file mode 100644
index 27a8e6dc54..0000000000
--- a/sync/android/java/src/org/chromium/sync/notifier/ModelTypeResolverImpl.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.sync.notifier;
-
-import com.google.common.collect.Multimap;
-
-import org.chromium.sync.internal_api.pub.base.ModelType;
-
-import java.util.HashSet;
-import java.util.Set;
-
-class ModelTypeResolverImpl implements ModelTypeResolver {
- @Override
- public Set<ModelType> resolveModelTypes(Set<ModelType> modelTypes) {
- // Create a new set that we will return as a result, and add all original ModelTypes.
- Set<ModelType> typesWithGroups = new HashSet<ModelType>();
- Set<ModelType> modelTypesNonNull =
- modelTypes == null ? new HashSet<ModelType>() : modelTypes;
- typesWithGroups.addAll(modelTypesNonNull);
-
- Multimap<ModelType, ModelType> modelTypeGroups = ModelType.modelTypeGroups();
- // Remove ModelTypes that are specified, that does not have their group ModelType specified.
- for (ModelType modelType : modelTypeGroups.keySet()) {
- if (modelTypesNonNull.contains(modelType)) {
- typesWithGroups.addAll(modelTypeGroups.get(modelType));
- } else {
- typesWithGroups.removeAll(modelTypeGroups.get(modelType));
- }
- }
-
- // Add all control types.
- typesWithGroups.addAll(ModelType.controlTypes());
- return typesWithGroups;
- }
-}
diff --git a/sync/android/java/src/org/chromium/sync/signin/AccountManagerDelegate.java b/sync/android/java/src/org/chromium/sync/signin/AccountManagerDelegate.java
index 88ec2105b5..e855e192a9 100644
--- a/sync/android/java/src/org/chromium/sync/signin/AccountManagerDelegate.java
+++ b/sync/android/java/src/org/chromium/sync/signin/AccountManagerDelegate.java
@@ -7,6 +7,7 @@ package org.chromium.sync.signin;
import android.accounts.Account;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
+import android.accounts.AuthenticatorDescription;
import android.accounts.AuthenticatorException;
import android.accounts.OperationCanceledException;
import android.app.Activity;
@@ -49,4 +50,6 @@ public interface AccountManagerDelegate {
Activity activity, AccountManagerCallback<Bundle> callback, Handler handler);
String peekAuthToken(Account account, String authTokenType);
+
+ AuthenticatorDescription[] getAuthenticatorTypes();
}
diff --git a/sync/android/java/src/org/chromium/sync/signin/AccountManagerHelper.java b/sync/android/java/src/org/chromium/sync/signin/AccountManagerHelper.java
index 882f90d4bf..ce3e171e3d 100644
--- a/sync/android/java/src/org/chromium/sync/signin/AccountManagerHelper.java
+++ b/sync/android/java/src/org/chromium/sync/signin/AccountManagerHelper.java
@@ -10,6 +10,7 @@ import com.google.common.annotations.VisibleForTesting;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerFuture;
+import android.accounts.AuthenticatorDescription;
import android.accounts.AuthenticatorException;
import android.accounts.OperationCanceledException;
import android.app.Activity;
@@ -134,6 +135,17 @@ public class AccountManagerHelper {
}
/**
+ * @return Whether or not there is an account authenticator for Google accounts.
+ */
+ public boolean hasGoogleAccountAuthenticator() {
+ AuthenticatorDescription[] descs = mAccountManager.getAuthenticatorTypes();
+ for (AuthenticatorDescription desc : descs) {
+ if (GOOGLE_ACCOUNT_TYPE.equals(desc.type)) return true;
+ }
+ return false;
+ }
+
+ /**
* Gets the auth token synchronously.
*
* - Assumes that the account is a valid account.
@@ -246,7 +258,8 @@ public class AccountManagerHelper {
@Override
public void onPostExecute(String authToken) {
if (authToken != null || !errorEncountered.get() ||
- numTries.incrementAndGet() == MAX_TRIES) {
+ numTries.incrementAndGet() == MAX_TRIES ||
+ !NetworkChangeNotifier.isInitialized()) {
callback.tokenAvailable(authToken);
return;
}
@@ -306,4 +319,11 @@ public class AccountManagerHelper {
getAuthTokenAsynchronously(
null, account, authTokenType, callback, numTries, errorEncountered, null);
}
+
+ /**
+ * Removes an auth token from the AccountManager's cache.
+ */
+ public void invalidateAuthToken(String accountType, String authToken) {
+ mAccountManager.invalidateAuthToken(accountType, authToken);
+ }
}
diff --git a/sync/android/java/src/org/chromium/sync/signin/SystemAccountManagerDelegate.java b/sync/android/java/src/org/chromium/sync/signin/SystemAccountManagerDelegate.java
index 13e3962c44..96b94bb102 100644
--- a/sync/android/java/src/org/chromium/sync/signin/SystemAccountManagerDelegate.java
+++ b/sync/android/java/src/org/chromium/sync/signin/SystemAccountManagerDelegate.java
@@ -8,6 +8,7 @@ import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
+import android.accounts.AuthenticatorDescription;
import android.accounts.AuthenticatorException;
import android.accounts.OperationCanceledException;
import android.app.Activity;
@@ -102,4 +103,9 @@ public class SystemAccountManagerDelegate implements AccountManagerDelegate {
public String peekAuthToken(Account account, String authTokenType) {
return mAccountManager.peekAuthToken(account, authTokenType);
}
+
+ @Override
+ public AuthenticatorDescription[] getAuthenticatorTypes() {
+ return mAccountManager.getAuthenticatorTypes();
+ }
}
diff --git a/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java b/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
index bd5398a4d1..90d4db9560 100644
--- a/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
+++ b/sync/android/javatests/src/org/chromium/sync/notifier/InvalidationControllerTest.java
@@ -152,20 +152,7 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
@SmallTest
@Feature({"Sync"})
public void testRegisterForSpecificTypes() {
- final String controllerFlag = "resolveModelTypes";
- final ModelTypeResolver resolver = new ModelTypeResolver() {
- @Override
- public Set<ModelType> resolveModelTypes(Set<ModelType> modelTypes) {
- mContext.setFlag(controllerFlag);
- return modelTypes;
- }
- };
- InvalidationController controller = new InvalidationController(mContext) {
- @Override
- ModelTypeResolver getModelTypeResolver() {
- return resolver;
- }
- };
+ InvalidationController controller = new InvalidationController(mContext);
Account account = new Account("test@example.com", "bogus");
controller.setRegisteredTypes(account, false,
Sets.newHashSet(ModelType.BOOKMARK, ModelType.SESSION));
@@ -186,7 +173,6 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
Set<String> actualTypes = Sets.newHashSet();
actualTypes.addAll(intent.getStringArrayListExtra(IntentProtocol.EXTRA_REGISTERED_TYPES));
assertEquals(expectedTypes, actualTypes);
- assertTrue(mContext.isFlagSet(controllerFlag));
}
@SmallTest
@@ -223,6 +209,9 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
Set<String> storedModelTypes = new HashSet<String>();
storedModelTypes.add(ModelType.BOOKMARK.name());
storedModelTypes.add(ModelType.TYPED_URL.name());
+ Set<ModelType> refreshedTypes = new HashSet<ModelType>();
+ refreshedTypes.add(ModelType.BOOKMARK);
+ refreshedTypes.add(ModelType.TYPED_URL);
invalidationPreferences.setSyncTypes(edit, storedModelTypes);
Account storedAccount = AccountManagerHelper.createAccountFromName("test@gmail.com");
invalidationPreferences.setAccount(edit, storedAccount);
@@ -244,7 +233,7 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
};
// Execute the test.
- controller.refreshRegisteredTypes();
+ controller.refreshRegisteredTypes(refreshedTypes);
// Validate the values.
assertEquals(storedAccount, resultAccount.get());
@@ -283,7 +272,7 @@ public class InvalidationControllerTest extends InstrumentationTestCase {
};
// Execute the test.
- controller.refreshRegisteredTypes();
+ controller.refreshRegisteredTypes(new HashSet<ModelType>());
// Validate the values.
assertEquals(storedAccount, resultAccount.get());
diff --git a/sync/android/javatests/src/org/chromium/sync/notifier/ModelTypeResolverTest.java b/sync/android/javatests/src/org/chromium/sync/notifier/ModelTypeResolverTest.java
deleted file mode 100644
index aa53eb84af..0000000000
--- a/sync/android/javatests/src/org/chromium/sync/notifier/ModelTypeResolverTest.java
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.sync.notifier;
-
-import android.test.InstrumentationTestCase;
-import android.test.suitebuilder.annotation.SmallTest;
-
-import org.chromium.base.test.util.Feature;
-import org.chromium.sync.internal_api.pub.base.ModelType;
-
-import java.util.HashSet;
-import java.util.Set;
-
-public class ModelTypeResolverTest extends InstrumentationTestCase {
- @SmallTest
- @Feature({"Sync"})
- public void testControlTypesShouldAlwaysBeAddedEvenForNullModelTypes() throws Exception {
- ModelTypeResolverImpl resolver = new ModelTypeResolverImpl();
- Set<ModelType> result = resolver.resolveModelTypes(null);
- assertNotNull(result);
- assertEquals("Size should be the same as number of control types",
- ModelType.controlTypes().size(), result.size());
- assertTrue("Should contain all control ModelTypes",
- result.containsAll(ModelType.controlTypes()));
- }
-
- @SmallTest
- @Feature({"Sync"})
- public void testControlTypesShouldAlwaysBeAdded() throws Exception {
- ModelTypeResolverImpl resolver = new ModelTypeResolverImpl();
- Set<ModelType> result = resolver.resolveModelTypes(new HashSet<ModelType>());
- assertNotNull(result);
- assertEquals("Size should be the same as number of control types",
- ModelType.controlTypes().size(), result.size());
- assertTrue("Should contain all control ModelTypes",
- result.containsAll(ModelType.controlTypes()));
- }
-
- @SmallTest
- @Feature({"Sync"})
- public void testAddingAutofillShouldAddAutofillProfile() throws Exception {
- Set<ModelType> modelTypes = new HashSet<ModelType>();
- modelTypes.add(ModelType.AUTOFILL);
- ModelTypeResolverImpl resolver = new ModelTypeResolverImpl();
- Set<ModelType> result = resolver.resolveModelTypes(modelTypes);
- assertNotNull(result);
- assertEquals("Size should be 2 plus the number of control types",
- 2 + ModelType.controlTypes().size(), result.size());
- assertTrue("Should have AUTOFILL ModelType", result.contains(ModelType.AUTOFILL));
- assertTrue("Should have AUTOFILL_PROFILE ModelType",
- result.contains(ModelType.AUTOFILL_PROFILE));
- }
-
- @SmallTest
- @Feature({"Sync"})
- public void testModelTypesThatArePartOfGroupsShouldStillWork() throws Exception {
- Set<ModelType> modelTypes = new HashSet<ModelType>();
- modelTypes.add(ModelType.BOOKMARK);
- modelTypes.add(ModelType.SESSION);
- modelTypes.add(ModelType.TYPED_URL);
- ModelTypeResolverImpl resolver = new ModelTypeResolverImpl();
- Set<ModelType> result = resolver.resolveModelTypes(modelTypes);
- assertNotNull(result);
- assertEquals("Size should be " + modelTypes.size() + " plus the number of control types",
- modelTypes.size() + ModelType.controlTypes().size(), result.size());
- assertTrue("Should have BOOKMARK ModelType", result.contains(ModelType.BOOKMARK));
- assertTrue("Should have SESSION ModelType", result.contains(ModelType.SESSION));
- assertTrue("Should have TYPED_URL ModelType", result.contains(ModelType.TYPED_URL));
- }
-}
diff --git a/sync/api/sync_data.cc b/sync/api/sync_data.cc
index b9e2ca999f..e0b6f1ae58 100644
--- a/sync/api/sync_data.cc
+++ b/sync/api/sync_data.cc
@@ -59,7 +59,7 @@ SyncData SyncData::CreateLocalDelete(
ModelType datatype) {
sync_pb::EntitySpecifics specifics;
AddDefaultFieldValue(datatype, &specifics);
- return CreateLocalData(sync_tag, "", specifics);
+ return CreateLocalData(sync_tag, std::string(), specifics);
}
// Static.
diff --git a/sync/api/syncable_service.h b/sync/api/syncable_service.h
index 13c282ef5a..13225ca984 100644
--- a/sync/api/syncable_service.h
+++ b/sync/api/syncable_service.h
@@ -7,6 +7,7 @@
#include <vector>
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
@@ -28,6 +29,15 @@ class SYNC_EXPORT SyncableService
: public SyncChangeProcessor,
public base::SupportsWeakPtr<SyncableService> {
public:
+ // A StartSyncFlare is useful when your SyncableService has a need for sync
+ // to start ASAP, typically because a local change event has occurred but
+ // MergeDataAndStartSyncing hasn't been called yet, meaning you don't have a
+ // SyncChangeProcessor. The sync subsystem will respond soon after invoking
+ // Run() on your flare by calling MergeDataAndStartSyncing. The ModelType
+ // parameter is included so that the recieving end can track usage and timing
+ // statistics, make optimizations or tradeoffs by type, etc.
+ typedef base::Callback<void(ModelType)> StartSyncFlare;
+
// Informs the service to begin syncing the specified synced datatype |type|.
// The service should then merge |initial_sync_data| into it's local data,
// calling |sync_processor|'s ProcessSyncChanges as necessary to reconcile the
diff --git a/sync/engine/apply_control_data_updates_unittest.cc b/sync/engine/apply_control_data_updates_unittest.cc
index 9bd1914145..d51ee250db 100644
--- a/sync/engine/apply_control_data_updates_unittest.cc
+++ b/sync/engine/apply_control_data_updates_unittest.cc
@@ -133,14 +133,14 @@ TEST_F(ApplyControlDataUpdatesTest, EncryptUnsyncedChanges) {
size_t batch_s = 5;
for (i = 0; i < batch_s; ++i) {
entry_factory_->CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
- base::StringPrintf("Item %"PRIuS"", i),
+ base::StringPrintf("Item %" PRIuS "", i),
false, BOOKMARKS, NULL);
}
// Next five items are children of the root.
for (; i < 2*batch_s; ++i) {
entry_factory_->CreateUnsyncedItem(
id_factory_.NewLocalId(), id_factory_.root(),
- base::StringPrintf("Item %"PRIuS"", i), false,
+ base::StringPrintf("Item %" PRIuS "", i), false,
BOOKMARKS, NULL);
}
@@ -247,14 +247,14 @@ TEST_F(ApplyControlDataUpdatesTest, CannotEncryptUnsyncedChanges) {
size_t batch_s = 5;
for (i = 0; i < batch_s; ++i) {
entry_factory_->CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
- base::StringPrintf("Item %"PRIuS"", i),
+ base::StringPrintf("Item %" PRIuS "", i),
false, BOOKMARKS, NULL);
}
// Next five items are children of the root.
for (; i < 2*batch_s; ++i) {
entry_factory_->CreateUnsyncedItem(
id_factory_.NewLocalId(), id_factory_.root(),
- base::StringPrintf("Item %"PRIuS"", i), false,
+ base::StringPrintf("Item %" PRIuS "", i), false,
BOOKMARKS, NULL);
}
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
index 531eaf6179..71a67f23fb 100644
--- a/sync/engine/build_commit_command.cc
+++ b/sync/engine/build_commit_command.cc
@@ -11,6 +11,7 @@
#include "base/string_util.h"
#include "sync/engine/syncer_proto_util.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/sessions/ordered_commit_set.h"
@@ -22,11 +23,6 @@
#include "sync/syncable/syncable_proto_util.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::set;
using std::string;
using std::vector;
@@ -36,26 +32,11 @@ namespace syncer {
using sessions::SyncSession;
using syncable::Entry;
using syncable::IS_DEL;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::SPECIFICS;
-
-// static
-int64 BuildCommitCommand::GetFirstPosition() {
- return std::numeric_limits<int64>::min();
-}
-
-// static
-int64 BuildCommitCommand::GetLastPosition() {
- return std::numeric_limits<int64>::max();
-}
-
-// static
-int64 BuildCommitCommand::GetGap() {
- return 1LL << 20;
-}
+using syncable::UNIQUE_POSITION;
BuildCommitCommand::BuildCommitCommand(
syncable::BaseTransaction* trans,
@@ -158,6 +139,9 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
string name = meta_entry.Get(syncable::NON_UNIQUE_NAME);
CHECK(!name.empty()); // Make sure this isn't an update.
+ // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
+ // call is still necessary to handle any title changes that might originate
+ // elsewhere, or already be persisted in the directory.
TruncateUTF8ToByteSize(name, 255, &name);
sync_entry->set_name(name);
@@ -216,30 +200,16 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
sync_entry->set_deleted(true);
} else {
if (meta_entry.Get(SPECIFICS).has_bookmark()) {
- // Common data in both new and old protocol.
+ // Both insert_after_item_id and position_in_parent fields are set only
+ // for legacy reasons. See comments in sync.proto for more information.
const Id& prev_id = meta_entry.GetPredecessorId();
string prev_id_string =
prev_id.IsRoot() ? string() : prev_id.GetServerId();
sync_entry->set_insert_after_item_id(prev_id_string);
-
- // Compute a numeric position based on what we know locally.
- std::pair<int64, int64> position_block(
- GetFirstPosition(), GetLastPosition());
- std::map<Id, std::pair<int64, int64> >::iterator prev_pos =
- position_map.find(prev_id);
- if (prev_pos != position_map.end()) {
- position_block = prev_pos->second;
- position_map.erase(prev_pos);
- } else {
- position_block = std::make_pair(
- FindAnchorPosition(syncable::PREV_ID, meta_entry),
- FindAnchorPosition(syncable::NEXT_ID, meta_entry));
- }
- position_block.first = BuildCommitCommand::InterpolatePosition(
- position_block.first, position_block.second);
-
- position_map[id] = position_block;
- sync_entry->set_position_in_parent(position_block.first);
+ sync_entry->set_position_in_parent(
+ meta_entry.Get(UNIQUE_POSITION).ToInt64());
+ meta_entry.Get(UNIQUE_POSITION).ToProto(
+ sync_entry->mutable_unique_position());
}
SetEntrySpecifics(&meta_entry, sync_entry);
}
@@ -248,43 +218,4 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
return SYNCER_OK;
}
-int64 BuildCommitCommand::FindAnchorPosition(syncable::IdField direction,
- const syncable::Entry& entry) {
- Id next_id = entry.Get(direction);
- while (!next_id.IsRoot()) {
- Entry next_entry(entry.trans(),
- syncable::GET_BY_ID,
- next_id);
- if (!next_entry.Get(IS_UNSYNCED) && !next_entry.Get(IS_UNAPPLIED_UPDATE)) {
- return NodeOrdinalToInt64(next_entry.Get(SERVER_ORDINAL_IN_PARENT));
- }
- next_id = next_entry.Get(direction);
- }
- return
- direction == syncable::PREV_ID ?
- GetFirstPosition() : GetLastPosition();
-}
-
-// static
-int64 BuildCommitCommand::InterpolatePosition(const int64 lo,
- const int64 hi) {
- DCHECK_LE(lo, hi);
-
- // The first item to be added under a parent gets a position of zero.
- if (lo == GetFirstPosition() && hi == GetLastPosition())
- return 0;
-
- // For small gaps, we do linear interpolation. For larger gaps,
- // we use an additive offset of |GetGap()|. We are careful to avoid
- // signed integer overflow.
- uint64 delta = static_cast<uint64>(hi) - static_cast<uint64>(lo);
- if (delta <= static_cast<uint64>(GetGap()*2))
- return lo + (static_cast<int64>(delta) + 7) / 8; // Interpolate.
- else if (lo == GetFirstPosition())
- return hi - GetGap(); // Extend range just before successor.
- else
- return lo + GetGap(); // Use or extend range just after predecessor.
-}
-
-
} // namespace syncer
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
index a1b101d317..a55a0b48e2 100644
--- a/sync/engine/build_commit_command.h
+++ b/sync/engine/build_commit_command.h
@@ -51,11 +51,6 @@ class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
private:
FRIEND_TEST_ALL_PREFIXES(BuildCommitCommandTest, InterpolatePosition);
- // Functions returning constants controlling range of values.
- static int64 GetFirstPosition();
- static int64 GetLastPosition();
- static int64 GetGap();
-
void AddExtensionsActivityToMessage(sessions::SyncSession* session,
sync_pb::CommitMessage* message);
@@ -63,17 +58,6 @@ class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
void AddClientConfigParamsToMessage(sessions::SyncSession* session,
sync_pb::CommitMessage* message);
- // Helper for computing position. Find the numeric position value
- // of the closest already-synced entry. |direction| must be one of
- // NEXT_ID or PREV_ID; this parameter controls the search direction.
- // For an open range (no predecessor or successor), the return
- // value will be kFirstPosition or kLastPosition.
- int64 FindAnchorPosition(syncable::IdField direction,
- const syncable::Entry& entry);
- // Given two values of the type returned by FindAnchorPosition,
- // compute a third value in between the two ranges.
- static int64 InterpolatePosition(int64 lo, int64 hi);
-
DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
// A pointer to a valid transaction not owned by this class.
diff --git a/sync/engine/build_commit_command_unittest.cc b/sync/engine/build_commit_command_unittest.cc
deleted file mode 100644
index 1ad86679e6..0000000000
--- a/sync/engine/build_commit_command_unittest.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/build_commit_command.h"
-#include "sync/test/engine/syncer_command_test.h"
-
-namespace syncer {
-
-// A test fixture for tests exercising ClearDataCommandTest.
-class BuildCommitCommandTest : public SyncerCommandTest { };
-
-TEST_F(BuildCommitCommandTest, InterpolatePosition) {
- EXPECT_LT(BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition());
-
- // Dense ranges.
- EXPECT_EQ(10, BuildCommitCommand::InterpolatePosition(10, 10));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 11));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 12));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 13));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 14));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 15));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 16));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 17));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 18));
- EXPECT_EQ(12, BuildCommitCommand::InterpolatePosition(10, 19));
- EXPECT_EQ(12, BuildCommitCommand::InterpolatePosition(10, 20));
-
- // Sparse ranges.
- EXPECT_EQ(0x32535ffe3dc97LL + BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0x32535ffe3dc97LL, 0x61abcd323122cLL));
- EXPECT_EQ(~0x61abcd323122cLL + BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- ~0x61abcd323122cLL, ~0x32535ffe3dc97LL));
-
- // Lower limits
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 0x20,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetFirstPosition() + 0x100));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 2,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetFirstPosition() + 2));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() +
- BuildCommitCommand::GetGap()/8 + 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap()));
-
- // Extremal cases.
- EXPECT_EQ(0,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetLastPosition() - 1));
- EXPECT_EQ(BuildCommitCommand::GetLastPosition() - 1 -
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition() - 1));
-
- // Edge cases around zero.
- EXPECT_EQ(BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetGap() + 1,
- BuildCommitCommand::InterpolatePosition(
- 1, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetGap() - 1,
- BuildCommitCommand::InterpolatePosition(
- -1, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(-BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), 0));
- EXPECT_EQ(-BuildCommitCommand::GetGap() + 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), 1));
- EXPECT_EQ(-BuildCommitCommand::GetGap() - 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), -1));
- EXPECT_EQ(BuildCommitCommand::GetGap() / 8,
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()));
- EXPECT_EQ(BuildCommitCommand::GetGap() / 4,
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()*2));
- EXPECT_EQ(BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()*2 + 1));
-}
-
-} // namespace syncer
diff --git a/sync/engine/conflict_resolver.cc b/sync/engine/conflict_resolver.cc
index 5b485a1148..074eb37460 100644
--- a/sync/engine/conflict_resolver.cc
+++ b/sync/engine/conflict_resolver.cc
@@ -95,55 +95,18 @@ void ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
entry.Get(syncable::SERVER_PARENT_ID);
bool entry_deleted = entry.Get(syncable::IS_DEL);
- // This positional check is meant to be necessary but not sufficient. As a
- // result, it may be false even when the position hasn't changed, possibly
- // resulting in unnecessary commits, but if it's true the position has
- // definitely not changed. The check works by verifying that the prev id
- // as calculated from the server position (which will ignore any
- // unsynced/unapplied predecessors and be root for non-bookmark datatypes)
- // matches the client prev id. Because we traverse chains of conflicting
- // items in predecessor -> successor order, we don't need to also verify the
- // successor matches (If it's in conflict, we'll verify it next. If it's
- // not, then it should be taken into account already in the
- // ComputePrevIdFromServerPosition calculation). This works even when there
- // are chains of conflicting items.
+ // The position check might fail spuriously if one of the positions was
+ // based on a legacy random suffix, rather than a deterministic one based on
+ // originator_cache_guid and originator_item_id. If an item is being
+ // modified regularly, it shouldn't take long for the suffix and position to
+ // be updated, so such false failures shouldn't be a problem for long.
//
- // Example: Original sequence was abcde. Server changes to aCDbe, while
- // client changes to aDCbe (C and D are in conflict). Locally, D's prev id
- // is a, while C's prev id is D. On the other hand, the server prev id will
- // ignore unsynced/unapplied items, so D's server prev id will also be a,
- // just like C's. Because we traverse in client predecessor->successor
- // order, we evaluate D first. Since prev id and server id match, we
- // consider the position to have remained the same for D, and will unset
- // it's UNSYNCED/UNAPPLIED bits. When we evaluate C though, we'll see that
- // the prev id is D locally while the server's prev id is a. C will
- // therefore count as a positional conflict (and the local data will be
- // overwritten by the server data typically). The final result will be
- // aCDbe (the same as the server's view). Even though both C and D were
- // modified, only one counted as being in actual conflict and was resolved
- // with local/server wins.
- //
- // In general, when there are chains of positional conflicts, only the first
- // item in chain (based on the clients point of view) will have both its
- // server prev id and local prev id match. For all the rest the server prev
- // id will be the predecessor of the first item in the chain, and therefore
- // not match the local prev id.
- //
- // Similarly, chains of conflicts where the server and client info are the
- // same are supported due to the predecessor->successor ordering. In this
- // case, from the first item onward, we unset the UNSYNCED/UNAPPLIED bits as
- // we decide that nothing changed. The subsequent item's server prev id will
- // accurately match the local prev id because the predecessor is no longer
- // UNSYNCED/UNAPPLIED.
- // TODO(zea): simplify all this once we can directly compare server position
- // to client position.
- syncable::Id server_prev_id = entry.ComputePrevIdFromServerPosition(
- entry.Get(syncable::SERVER_PARENT_ID));
- bool needs_reinsertion = !parent_matches ||
- server_prev_id != entry.GetPredecessorId();
- DVLOG_IF(1, needs_reinsertion) << "Insertion needed, server prev id "
- << " is " << server_prev_id << ", local prev id is "
- << entry.GetPredecessorId();
+ // Lucky for us, it's OK to be wrong here. The position_matches check is
+ // allowed to return false negatives, as long as it returns no false
+ // positives.
+ bool position_matches = parent_matches &&
+ entry.Get(syncable::SERVER_UNIQUE_POSITION).Equals(
+ entry.Get(syncable::UNIQUE_POSITION));
const sync_pb::EntitySpecifics& specifics =
entry.Get(syncable::SPECIFICS);
const sync_pb::EntitySpecifics& server_specifics =
@@ -189,7 +152,7 @@ void ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
if (!entry_deleted && name_matches && parent_matches && specifics_match &&
- !needs_reinsertion) {
+ position_matches) {
DVLOG(1) << "Resolving simple conflict, everything matches, ignoring "
<< "changes for: " << entry;
conflict_util::IgnoreConflict(&entry);
@@ -288,6 +251,11 @@ void ConflictResolver::ResolveConflicts(
Entry entry(trans, syncable::GET_BY_ID, prev_id);
// Any entry in conflict must be valid.
CHECK(entry.good());
+
+ // We can't traverse over a delete item.
+ if (entry.Get(syncable::IS_DEL))
+ break;
+
Id new_prev_id = entry.GetPredecessorId();
if (new_prev_id == prev_id)
break;
diff --git a/sync/engine/net/server_connection_manager.cc b/sync/engine/net/server_connection_manager.cc
index 75c5e5394a..2d1d6c8bf0 100644
--- a/sync/engine/net/server_connection_manager.cc
+++ b/sync/engine/net/server_connection_manager.cc
@@ -11,6 +11,7 @@
#include <vector>
#include "base/command_line.h"
+#include "base/metrics/histogram.h"
#include "build/build_config.h"
#include "googleurl/src/gurl.h"
#include "net/base/net_errors.h"
@@ -118,7 +119,7 @@ ServerConnectionManager::ScopedConnectionHelper::ScopedConnectionHelper(
: manager_(manager), connection_(connection) {}
ServerConnectionManager::ScopedConnectionHelper::~ScopedConnectionHelper() {
- if (connection_.get())
+ if (connection_)
manager_->OnConnectionDestroyed(connection_.get());
connection_.reset();
}
@@ -212,7 +213,31 @@ void ServerConnectionManager::OnConnectionDestroyed(Connection* connection) {
active_connection_ = NULL;
}
+bool ServerConnectionManager::SetAuthToken(const std::string& auth_token,
+ const base::Time& auth_token_time) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (previously_invalidated_token != auth_token) {
+ auth_token_.assign(auth_token);
+ auth_token_time_ = auth_token_time;
+ previously_invalidated_token = std::string();
+ return true;
+ }
+ return false;
+}
+
void ServerConnectionManager::OnInvalidationCredentialsRejected() {
+ if (!auth_token_time_.is_null()) {
+ base::TimeDelta age = base::Time::Now() - auth_token_time_;
+ if (age < base::TimeDelta::FromHours(1)) {
+ UMA_HISTOGRAM_CUSTOM_TIMES("Sync.AuthInvalidationRejectedTokenAgeShort",
+ age,
+ base::TimeDelta::FromSeconds(1),
+ base::TimeDelta::FromHours(1),
+ 50);
+ }
+ UMA_HISTOGRAM_COUNTS("Sync.AuthInvalidationRejectedTokenAgeLong",
+ age.InDays());
+ }
InvalidateAndClearAuthToken();
SetServerStatus(HttpResponse::SYNC_AUTH_ERROR);
}
@@ -223,6 +248,7 @@ void ServerConnectionManager::InvalidateAndClearAuthToken() {
if (!auth_token_.empty()) {
previously_invalidated_token.assign(auth_token_);
auth_token_ = std::string();
+ auth_token_time_ = base::Time();
}
}
@@ -273,8 +299,21 @@ bool ServerConnectionManager::PostBufferToPath(PostBufferParams* params,
bool ok = post.get()->Init(
path.c_str(), auth_token, params->buffer_in, &params->response);
- if (params->response.server_status == HttpResponse::SYNC_AUTH_ERROR)
+ if (params->response.server_status == HttpResponse::SYNC_AUTH_ERROR) {
+ if (!auth_token_time_.is_null()) {
+ base::TimeDelta age = base::Time::Now() - auth_token_time_;
+ if (age < base::TimeDelta::FromHours(1)) {
+ UMA_HISTOGRAM_CUSTOM_TIMES("Sync.AuthServerRejectedTokenAgeShort",
+ age,
+ base::TimeDelta::FromSeconds(1),
+ base::TimeDelta::FromHours(1),
+ 50);
+ }
+ UMA_HISTOGRAM_COUNTS("Sync.AuthServerRejectedTokenAgeLong",
+ age.InDays());
+ }
InvalidateAndClearAuthToken();
+ }
if (!ok || net::HTTP_OK != params->response.response_code)
return false;
diff --git a/sync/engine/net/server_connection_manager.h b/sync/engine/net/server_connection_manager.h
index 917d5b427c..df81e26560 100644
--- a/sync/engine/net/server_connection_manager.h
+++ b/sync/engine/net/server_connection_manager.h
@@ -224,16 +224,11 @@ class SYNC_EXPORT_PRIVATE ServerConnectionManager {
client_id_.assign(client_id);
}
- // Returns true if the auth token is succesfully set and false otherwise.
- bool set_auth_token(const std::string& auth_token) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (previously_invalidated_token != auth_token) {
- auth_token_.assign(auth_token);
- previously_invalidated_token = std::string();
- return true;
- }
- return false;
- }
+ // Sets a new auth token and time. |auth_token_time| is an optional parameter
+ // that contains the date the auth token was fetched/refreshed, and is used
+ // for histogramms/logging only.
+ bool SetAuthToken(const std::string& auth_token,
+ const base::Time& auth_token_time);
// Our out-of-band invalidations channel can encounter auth errors,
// and when it does so it tells us via this method to prevent making more
@@ -299,6 +294,10 @@ class SYNC_EXPORT_PRIVATE ServerConnectionManager {
// The auth token to use in authenticated requests.
std::string auth_token_;
+ // The time at which this auth token was last created/refreshed.
+ // Used for histogramming.
+ base::Time auth_token_time_;
+
// The previous auth token that is invalid now.
std::string previously_invalidated_token;
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/process_commit_response_command.cc
index 96a82bdd48..4a921cf095 100644
--- a/sync/engine/process_commit_response_command.cc
+++ b/sync/engine/process_commit_response_command.cc
@@ -13,6 +13,7 @@
#include "base/location.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_util.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/sessions/sync_session.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/mutable_entry.h"
@@ -22,11 +23,6 @@
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::set;
using std::string;
using std::vector;
@@ -50,7 +46,6 @@ using syncable::IS_UNSYNCED;
using syncable::PARENT_ID;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_VERSION;
using syncable::SYNCER;
using syncable::SYNCING;
@@ -342,8 +337,11 @@ void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
ProtoTimeToTime(committed_entry.mtime()));
local_entry->Put(syncable::SERVER_CTIME,
ProtoTimeToTime(committed_entry.ctime()));
- local_entry->Put(syncable::SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(entry_response.position_in_parent()));
+ if (committed_entry.has_unique_position()) {
+ local_entry->Put(syncable::SERVER_UNIQUE_POSITION,
+ UniquePosition::FromProto(
+ committed_entry.unique_position()));
+ }
// TODO(nick): The server doesn't set entry_response.server_parent_id in
// practice; to update SERVER_PARENT_ID appropriately here we'd need to
@@ -386,23 +384,6 @@ void ProcessCommitResponseCommand::OverrideClientFieldsAfterCommit(
<< " to new name: " << server_name;
local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
}
-
- // The server has the final say on positioning, so apply the absolute
- // position that it returns.
- if (entry_response.has_position_in_parent()) {
- // The SERVER_ field should already have been written.
- DCHECK_EQ(entry_response.position_in_parent(),
- NodeOrdinalToInt64(local_entry->Get(SERVER_ORDINAL_IN_PARENT)));
-
- // We just committed successfully, so we assume that the position
- // value we got applies to the PARENT_ID we submitted.
- syncable::Id new_prev = local_entry->ComputePrevIdFromServerPosition(
- local_entry->Get(PARENT_ID));
- if (!local_entry->PutPredecessor(new_prev)) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
- }
}
void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
index 5c96424eef..0d25c0137c 100644
--- a/sync/engine/process_commit_response_command_unittest.cc
+++ b/sync/engine/process_commit_response_command_unittest.cc
@@ -32,11 +32,13 @@ namespace syncer {
using sessions::SyncSession;
using syncable::BASE_VERSION;
using syncable::Entry;
+using syncable::ID;
using syncable::IS_DIR;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
+using syncable::UNIQUE_POSITION;
using syncable::UNITTEST;
using syncable::WriteTransaction;
@@ -132,7 +134,6 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
else
entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
entry_response->set_version(next_new_revision_++);
- entry_response->set_position_in_parent(next_server_position_++);
// If the ID of our parent item committed earlier in the batch was
// rewritten, rewrite it in the entry response. This matches
@@ -218,7 +219,6 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Entry b2(&trans, syncable::GET_BY_HANDLE, bookmark2_handle);
CheckEntry(&b1, "bookmark 1", BOOKMARKS, new_fid);
CheckEntry(&b2, "bookmark 2", BOOKMARKS, new_fid);
- ASSERT_TRUE(b2.GetSuccessorId().IsRoot());
// Look at the prefs and autofill items.
Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
@@ -230,7 +230,6 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Entry a2(&trans, syncable::GET_BY_HANDLE, autofill2_handle);
CheckEntry(&a1, "Autofill 1", AUTOFILL, id_factory_.root());
CheckEntry(&a2, "Autofill 2", AUTOFILL, id_factory_.root());
- ASSERT_TRUE(a2.GetSuccessorId().IsRoot());
}
// In this test, we test processing a commit response for a commit batch that
@@ -256,25 +255,31 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// Verify that the item is reachable.
{
- Id child_id;
syncable::ReadTransaction trans(FROM_HERE, directory());
syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
ASSERT_TRUE(root.good());
- ASSERT_TRUE(directory()->GetFirstChildId(
- &trans, id_factory_.root(), &child_id));
+ Id child_id = root.GetFirstChildId();
ASSERT_EQ(folder_id, child_id);
}
// The first 25 children of the parent folder will be part of the commit
- // batch.
+ // batch. They will be placed left to right in order of creation.
int batch_size = 25;
int i = 0;
+ Id prev_id = TestIdFactory::root();
for (; i < batch_size; ++i) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- CreateUnprocessedCommitResult(
+ int64 handle = CreateUnprocessedCommitResult(
id, folder_id, base::StringPrintf("Item %d", i), false,
BOOKMARKS, &commit_set, &request, &response);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ e.PutPredecessor(prev_id);
+ }
+ prev_id = id;
}
// The second 25 children will be unsynced items but NOT part of the commit
// batch. When the ID of the parent folder changes during the commit,
@@ -283,9 +288,17 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
for (; i < 2*batch_size; ++i) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
+ int64 handle = -1;
test_entry_factory_->CreateUnsyncedItem(
id, folder_id, base::StringPrintf("Item %d", i),
- false, BOOKMARKS, NULL);
+ false, BOOKMARKS, &handle);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ e.PutPredecessor(prev_id);
+ }
+ prev_id = id;
}
// Process the commit response for the parent folder and the first
@@ -299,9 +312,9 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
syncable::ReadTransaction trans(FROM_HERE, directory());
// Lookup the parent folder by finding a child of the root. We can't use
// folder_id here, because it changed during the commit.
- Id new_fid;
- ASSERT_TRUE(directory()->GetFirstChildId(
- &trans, id_factory_.root(), &new_fid));
+ syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
+ ASSERT_TRUE(root.good());
+ Id new_fid = root.GetFirstChildId();
ASSERT_FALSE(new_fid.IsRoot());
EXPECT_TRUE(new_fid.ServerKnows());
EXPECT_FALSE(folder_id.ServerKnows());
@@ -313,8 +326,8 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
ASSERT_LT(0, parent.Get(BASE_VERSION))
<< "Parent should have a valid (positive) server base revision";
- Id cid;
- ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
+ Id cid = parent.GetFirstChildId();
+
int child_count = 0;
// Now loop over all the children of the parent folder, verifying
// that they are in their original order by checking to see that their
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
index 07899581d6..12498f6d93 100644
--- a/sync/engine/process_updates_command.cc
+++ b/sync/engine/process_updates_command.cc
@@ -19,11 +19,6 @@
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::vector;
namespace syncer {
@@ -313,12 +308,25 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// (on which any current or future local changes are based) before we
// overwrite SERVER_SPECIFICS.
// MTIME, CTIME, and NON_UNIQUE_NAME are not enforced.
+
+ bool position_matches = false;
+ if (target_entry.ShouldMaintainPosition() && !update.deleted()) {
+ std::string update_tag = GetUniqueBookmarkTagFromUpdate(update);
+ if (UniquePosition::IsValidSuffix(update_tag)) {
+ position_matches = GetUpdatePosition(update, update_tag).Equals(
+ target_entry.Get(syncable::SERVER_UNIQUE_POSITION));
+ } else {
+ NOTREACHED();
+ }
+ } else {
+ // If this item doesn't care about positions, then set this flag to true.
+ position_matches = true;
+ }
+
if (!update.deleted() && !target_entry.Get(syncable::SERVER_IS_DEL) &&
(SyncableIdFromProto(update.parent_id_string()) ==
target_entry.Get(syncable::SERVER_PARENT_ID)) &&
- (update.position_in_parent() ==
- NodeOrdinalToInt64(
- target_entry.Get(syncable::SERVER_ORDINAL_IN_PARENT))) &&
+ position_matches &&
update.has_specifics() && update.specifics().has_encrypted() &&
!cryptographer->CanDecrypt(update.specifics().encrypted())) {
sync_pb::EntitySpecifics prev_specifics =
diff --git a/sync/engine/process_updates_command_unittest.cc b/sync/engine/process_updates_command_unittest.cc
index f4cc583dc2..885fbe1206 100644
--- a/sync/engine/process_updates_command_unittest.cc
+++ b/sync/engine/process_updates_command_unittest.cc
@@ -4,16 +4,22 @@
#include "base/basictypes.h"
#include "sync/engine/process_updates_command.h"
+#include "sync/engine/syncer_proto_util.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/test/test_entry_factory.h"
#include "sync/sessions/sync_session.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_id.h"
+#include "sync/syncable/syncable_proto_util.h"
+#include "sync/syncable/syncable_read_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
#include "sync/test/engine/fake_model_worker.h"
#include "sync/test/engine/syncer_command_test.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
+using sync_pb::SyncEntity;
using syncable::Id;
using syncable::MutableEntry;
using syncable::UNITTEST;
@@ -35,6 +41,7 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
(*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
(*mutable_routing_info())[AUTOFILL] = GROUP_DB;
SyncerCommandTest::SetUp();
+ test_entry_factory_.reset(new TestEntryFactory(directory()));
}
void CreateLocalItem(const std::string& item_id,
@@ -54,18 +61,21 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
}
- void AddUpdate(sync_pb::GetUpdatesResponse* updates,
+ SyncEntity* AddUpdate(sync_pb::GetUpdatesResponse* updates,
const std::string& id, const std::string& parent,
const ModelType& type) {
sync_pb::SyncEntity* e = updates->add_entries();
- e->set_id_string("b1");
+ e->set_id_string(id);
e->set_parent_id_string(parent);
- e->set_non_unique_name("b1");
- e->set_name("b1");
+ e->set_non_unique_name(id);
+ e->set_name(id);
+ e->set_version(1000);
AddDefaultFieldValue(type, e->mutable_specifics());
+ return e;
}
ProcessUpdatesCommand command_;
+ scoped_ptr<TestEntryFactory> test_entry_factory_;
private:
DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommandTest);
@@ -74,8 +84,6 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
std::string root = syncable::GetNullId().GetServerId();
- CreateLocalItem("b1", root, BOOKMARKS);
- CreateLocalItem("b2", root, BOOKMARKS);
CreateLocalItem("p1", root, PREFERENCES);
CreateLocalItem("a1", root, AUTOFILL);
@@ -84,8 +92,6 @@ TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
sync_pb::GetUpdatesResponse* updates =
session()->mutable_status_controller()->
mutable_updates_response()->mutable_get_updates();
- AddUpdate(updates, "b1", root, BOOKMARKS);
- AddUpdate(updates, "b2", root, BOOKMARKS);
AddUpdate(updates, "p1", root, PREFERENCES);
AddUpdate(updates, "a1", root, AUTOFILL);
@@ -94,6 +100,93 @@ TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
command_.ExecuteImpl(session());
}
+static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
+
+// Test that the bookmark tag is set on newly downloaded items.
+TEST_F(ProcessUpdatesCommandTest, NewBookmarkTag) {
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+ Id server_id = Id::CreateFromServerId("b1");
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
+
+ e->set_originator_cache_guid(
+ std::string(kCacheGuid, arraysize(kCacheGuid)-1));
+ Id client_id = Id::CreateFromClientString("-2");
+ e->set_originator_client_item_id(client_id.GetServerId());
+ e->set_position_in_parent(0);
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(
+ UniquePosition::IsValidSuffix(entry.Get(syncable::UNIQUE_BOOKMARK_TAG)));
+ EXPECT_TRUE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+
+ // If this assertion fails, that might indicate that the algorithm used to
+ // generate bookmark tags has been modified. This could have implications for
+ // bookmark ordering. Please make sure you know what you're doing if you
+ // intend to make such a change.
+ EXPECT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=",
+ entry.Get(syncable::UNIQUE_BOOKMARK_TAG));
+}
+
+TEST_F(ProcessUpdatesCommandTest, ReceiveServerCreatedBookmarkFolders) {
+ Id server_id = Id::CreateFromServerId("xyz");
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+
+ // Create an update that mimics the bookmark root.
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
+ e->set_server_defined_unique_tag("google_chrome_bookmarks");
+ e->set_folder(true);
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.Get(syncable::UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(entry.Get(syncable::UNIQUE_BOOKMARK_TAG).empty());
+}
+
+TEST_F(ProcessUpdatesCommandTest, ReceiveNonBookmarkItem) {
+ Id server_id = Id::CreateFromServerId("xyz");
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, AUTOFILL);
+ e->set_server_defined_unique_tag("9PGRuKdX5sHyGMB17CvYTXuC43I=");
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.Get(syncable::UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(entry.Get(syncable::UNIQUE_BOOKMARK_TAG).empty());
+}
+
} // namespace
} // namespace syncer
diff --git a/sync/engine/sync_scheduler.h b/sync/engine/sync_scheduler.h
index db21ab00bf..b2ebbe3f04 100644
--- a/sync/engine/sync_scheduler.h
+++ b/sync/engine/sync_scheduler.h
@@ -16,8 +16,6 @@
#include "sync/internal_api/public/base/model_type_invalidation_map.h"
#include "sync/sessions/sync_session.h"
-class MessageLoop;
-
namespace tracked_objects {
class Location;
} // namespace tracked_objects
diff --git a/sync/engine/sync_scheduler_impl.cc b/sync/engine/sync_scheduler_impl.cc
index 5bfcbcc18d..2b2a127b7f 100644
--- a/sync/engine/sync_scheduler_impl.cc
+++ b/sync/engine/sync_scheduler_impl.cc
@@ -83,13 +83,10 @@ ConfigurationParams::ConfigurationParams(
ConfigurationParams::~ConfigurationParams() {}
SyncSchedulerImpl::WaitInterval::WaitInterval()
- : mode(UNKNOWN),
- had_nudge(false),
- pending_configure_job(NULL) {}
+ : mode(UNKNOWN) {}
SyncSchedulerImpl::WaitInterval::WaitInterval(Mode mode, TimeDelta length)
- : mode(mode), had_nudge(false), length(length),
- pending_configure_job(NULL) {}
+ : mode(mode), length(length) {}
SyncSchedulerImpl::WaitInterval::~WaitInterval() {}
@@ -155,12 +152,11 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
BackoffDelayProvider* delay_provider,
sessions::SyncSessionContext* context,
Syncer* syncer)
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
- weak_ptr_factory_for_weak_handle_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ : weak_ptr_factory_(this),
+ weak_ptr_factory_for_weak_handle_(this),
weak_handle_this_(MakeWeakHandle(
weak_ptr_factory_for_weak_handle_.GetWeakPtr())),
name_(name),
- sync_loop_(MessageLoop::current()),
started_(false),
syncer_short_poll_interval_seconds_(
TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds)),
@@ -169,23 +165,19 @@ SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
sessions_commit_delay_(
TimeDelta::FromSeconds(kDefaultSessionsCommitDelaySeconds)),
mode_(NORMAL_MODE),
- // Start with assuming everything is fine with the connection.
- // At the end of the sync cycle we would have the correct status.
- pending_nudge_(NULL),
delay_provider_(delay_provider),
syncer_(syncer),
session_context_(context),
no_scheduling_allowed_(false) {
- DCHECK(sync_loop_);
}
SyncSchedulerImpl::~SyncSchedulerImpl() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
StopImpl(base::Closure());
}
void SyncSchedulerImpl::OnCredentialsUpdated() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
if (HttpResponse::SYNC_AUTH_ERROR ==
session_context_->connection_manager()->server_status()) {
@@ -214,18 +206,11 @@ void SyncSchedulerImpl::OnServerConnectionErrorFixed() {
// call DoCanaryJob to achieve this, and note that nothing -- not even a
// canary job -- can bypass a THROTTLED WaitInterval. The only thing that
// has the authority to do that is the Unthrottle timer.
- scoped_ptr<SyncSessionJob> pending(TakePendingJobForCurrentMode());
- if (!pending.get())
- return;
-
- PostTask(FROM_HERE, "DoCanaryJob",
- base::Bind(&SyncSchedulerImpl::DoCanaryJob,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(&pending)));
+ TryCanaryJob();
}
void SyncSchedulerImpl::Start(Mode mode) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
std::string thread_name = MessageLoop::current()->thread_name();
if (thread_name.empty())
thread_name = "<Main thread>";
@@ -240,27 +225,17 @@ void SyncSchedulerImpl::Start(Mode mode) {
DCHECK(syncer_.get());
Mode old_mode = mode_;
mode_ = mode;
- AdjustPolling(NULL); // Will kick start poll timer if needed.
-
- if (old_mode != mode_) {
- // We just changed our mode. See if there are any pending jobs that we could
- // execute in the new mode.
- if (mode_ == NORMAL_MODE) {
- // It is illegal to switch to NORMAL_MODE if a previous CONFIGURATION job
- // has not yet completed.
- DCHECK(!wait_interval_.get() || !wait_interval_->pending_configure_job);
- }
+ AdjustPolling(UPDATE_INTERVAL); // Will kick start poll timer if needed.
- scoped_ptr<SyncSessionJob> pending(TakePendingJobForCurrentMode());
- if (pending.get()) {
- SDVLOG(2) << "Executing pending job. Good luck!";
- DoSyncSessionJob(pending.Pass(), NORMAL_PRIORITY);
- }
+ if (old_mode != mode_ && mode_ == NORMAL_MODE && !nudge_tracker_.IsEmpty()) {
+ // We just got back to normal mode. Let's try to run the work that was
+ // queued up while we were configuring.
+ DoNudgeSyncSessionJob(NORMAL_PRIORITY);
}
}
void SyncSchedulerImpl::SendInitialSnapshot() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
scoped_ptr<SyncSession> dummy(new SyncSession(
session_context_, this, SyncSourceInfo()));
SyncEngineEvent event(SyncEngineEvent::STATUS_CHANGED);
@@ -290,7 +265,7 @@ void BuildModelSafeParams(
bool SyncSchedulerImpl::ScheduleConfiguration(
const ConfigurationParams& params) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
DCHECK(IsConfigRelatedUpdateSourceValue(params.source));
DCHECK_EQ(CONFIGURATION_MODE, mode_);
DCHECK(!params.ready_task.is_null());
@@ -299,7 +274,7 @@ bool SyncSchedulerImpl::ScheduleConfiguration(
// Only one configuration is allowed at a time. Verify we're not waiting
// for a pending configure job.
- DCHECK(!wait_interval_.get() || !wait_interval_->pending_configure_job);
+ DCHECK(!pending_configure_params_);
ModelSafeRoutingInfo restricted_routes;
BuildModelSafeParams(params.types_to_download,
@@ -309,27 +284,17 @@ bool SyncSchedulerImpl::ScheduleConfiguration(
// Only reconfigure if we have types to download.
if (!params.types_to_download.Empty()) {
- DCHECK(!restricted_routes.empty());
- scoped_ptr<SyncSession> session(new SyncSession(
- session_context_,
- this,
- SyncSourceInfo(params.source,
- ModelSafeRoutingInfoToInvalidationMap(
- restricted_routes,
- std::string()))));
- scoped_ptr<SyncSessionJob> job(new SyncSessionJob(
- SyncSessionJob::CONFIGURATION,
- TimeTicks::Now(),
- session.Pass(),
- params));
- bool succeeded = DoSyncSessionJob(job.Pass(), NORMAL_PRIORITY);
+ pending_configure_params_.reset(new ConfigurationParams(params));
+ bool succeeded = DoConfigurationSyncSessionJob(NORMAL_PRIORITY);
// If we failed, the job would have been saved as the pending configure
// job and a wait interval would have been set.
if (!succeeded) {
- DCHECK(wait_interval_.get() && wait_interval_->pending_configure_job);
- return false;
+ DCHECK(pending_configure_params_);
+ } else {
+ DCHECK(!pending_configure_params_);
}
+ return succeeded;
} else {
SDVLOG(2) << "No change in routing info, calling ready task directly.";
params.ready_task.Run();
@@ -338,190 +303,62 @@ bool SyncSchedulerImpl::ScheduleConfiguration(
return true;
}
-SyncSchedulerImpl::JobProcessDecision
-SyncSchedulerImpl::DecideWhileInWaitInterval(const SyncSessionJob& job,
- JobPriority priority) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- DCHECK(wait_interval_.get());
- DCHECK_NE(job.purpose(), SyncSessionJob::POLL);
-
- SDVLOG(2) << "DecideWhileInWaitInterval with WaitInterval mode "
- << WaitInterval::GetModeString(wait_interval_->mode)
- << (wait_interval_->had_nudge ? " (had nudge)" : "")
- << ((priority == CANARY_PRIORITY) ? " (canary)" : "");
-
- // If we save a job while in a WaitInterval, there is a well-defined moment
- // in time in the future when it makes sense for that SAVE-worthy job to try
- // running again -- the end of the WaitInterval.
- DCHECK(job.purpose() == SyncSessionJob::NUDGE ||
- job.purpose() == SyncSessionJob::CONFIGURATION);
-
- // If throttled, there's a clock ticking to unthrottle. We want to get
- // on the same train.
- if (wait_interval_->mode == WaitInterval::THROTTLED)
- return SAVE;
-
- DCHECK_EQ(wait_interval_->mode, WaitInterval::EXPONENTIAL_BACKOFF);
- if (job.purpose() == SyncSessionJob::NUDGE) {
- if (mode_ == CONFIGURATION_MODE)
- return SAVE;
-
- // If we already had one nudge then just drop this nudge. We will retry
- // later when the timer runs out.
- if (priority == NORMAL_PRIORITY)
- return wait_interval_->had_nudge ? DROP : CONTINUE;
- else // We are here because timer ran out. So retry.
- return CONTINUE;
+bool SyncSchedulerImpl::CanRunJobNow(JobPriority priority) {
+ DCHECK(CalledOnValidThread());
+ if (wait_interval_ && wait_interval_->mode == WaitInterval::THROTTLED) {
+ SDVLOG(1) << "Unable to run a job because we're throttled.";
+ return false;
}
- return (priority == CANARY_PRIORITY) ? CONTINUE : SAVE;
-}
-
-SyncSchedulerImpl::JobProcessDecision SyncSchedulerImpl::DecideOnJob(
- const SyncSessionJob& job,
- JobPriority priority) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- // POLL jobs do not call this function.
- DCHECK(job.purpose() == SyncSessionJob::NUDGE ||
- job.purpose() == SyncSessionJob::CONFIGURATION);
-
- // See if our type is throttled.
- ModelTypeSet throttled_types =
- session_context_->throttled_data_type_tracker()->GetThrottledTypes();
- if (job.purpose() == SyncSessionJob::NUDGE &&
- job.session()->source().updates_source == GetUpdatesCallerInfo::LOCAL) {
- ModelTypeSet requested_types;
- for (ModelTypeInvalidationMap::const_iterator i =
- job.session()->source().types.begin();
- i != job.session()->source().types.end();
- ++i) {
- requested_types.Put(i->first);
- }
-
- // If all types are throttled, do not CONTINUE. Today, we don't treat
- // a per-datatype "unthrottle" event as something that should force a
- // canary job. For this reason, there's no good time to reschedule this job
- // to run -- we'll lazily wait for an independent event to trigger a sync.
- // Note that there may already be such an event if we're in a WaitInterval,
- // so we can retry it then.
- if (!requested_types.Empty() && throttled_types.HasAll(requested_types))
- return DROP; // TODO(tim): Don't drop. http://crbug.com/177659
+ if (wait_interval_
+ && wait_interval_->mode == WaitInterval::EXPONENTIAL_BACKOFF
+ && priority != CANARY_PRIORITY) {
+ SDVLOG(1) << "Unable to run a job because we're backing off.";
+ return false;
}
- if (wait_interval_.get())
- return DecideWhileInWaitInterval(job, priority);
-
- if (mode_ == CONFIGURATION_MODE) {
- if (job.purpose() == SyncSessionJob::NUDGE)
- return SAVE; // Running requires a mode switch.
- else // Implies job.purpose() == SyncSessionJob::CONFIGURATION.
- return CONTINUE;
+ if (session_context_->connection_manager()->HasInvalidAuthToken()) {
+ SDVLOG(1) << "Unable to run a job because we have no valid auth token.";
+ return false;
}
- // We are in normal mode.
- DCHECK_EQ(mode_, NORMAL_MODE);
- DCHECK_NE(job.purpose(), SyncSessionJob::CONFIGURATION);
-
- // Note about some subtle scheduling semantics.
- //
- // It's possible at this point that |job| is known to be unnecessary, and
- // dropping it would be perfectly safe and correct. Consider
- //
- // 1) |job| is a NUDGE (for any combination of types) with a
- // |scheduled_start| time that is less than the time that the last
- // successful all-datatype NUDGE completed, and it has a NOTIFICATION
- // GetUpdatesCallerInfo value yet offers no new notification hint.
- //
- // 2) |job| is a NUDGE with a |scheduled_start| time that is less than
- // the time that the last successful matching-datatype NUDGE completed,
- // and payloads (hints) are identical to that last successful NUDGE.
- //
- // We avoid cases 1 and 2 by externally synchronizing NUDGE requests --
- // scheduling a NUDGE requires command of the sync thread, which is
- // impossible* from outside of SyncScheduler if a NUDGE is taking place.
- // And if you have command of the sync thread when scheduling a NUDGE and a
- // previous NUDGE exists, they will be coalesced and the stale job will be
- // cancelled via the session-equality check in DoSyncSessionJob.
- //
- // * It's not strictly "impossible", but it would be reentrant and hence
- // illegal. e.g. scheduling a job and re-entering the SyncScheduler is NOT a
- // legal side effect of any of the work being done as part of a sync cycle.
- // See |no_scheduling_allowed_| for details.
-
- // Decision now rests on state of auth tokens.
- if (!session_context_->connection_manager()->HasInvalidAuthToken())
- return CONTINUE;
-
- SDVLOG(2) << "No valid auth token. Using that to decide on job.";
- // Running the job would require updated auth, so we can't honour
- // job.scheduled_start().
- return job.purpose() == SyncSessionJob::NUDGE ? SAVE : DROP;
+ return true;
}
-void SyncSchedulerImpl::HandleSaveJobDecision(scoped_ptr<SyncSessionJob> job) {
- const bool is_nudge = job->purpose() == SyncSessionJob::NUDGE;
- if (is_nudge && pending_nudge_) {
- SDVLOG(2) << "Coalescing a pending nudge";
- // TODO(tim): This basically means we never use the more-careful coalescing
- // logic in ScheduleNudgeImpl that takes the min of the two nudge start
- // times, because we're calling this function first. Pull this out
- // into a function to coalesce + set start times and reuse.
- pending_nudge_->mutable_session()->CoalesceSources(
- job->session()->source());
- return;
- }
+bool SyncSchedulerImpl::CanRunNudgeJobNow(JobPriority priority) {
+ DCHECK(CalledOnValidThread());
- scoped_ptr<SyncSessionJob> job_to_save = job->CloneAndAbandon();
- if (wait_interval_.get() && !wait_interval_->pending_configure_job) {
- // This job should be made the new canary.
- if (is_nudge) {
- pending_nudge_ = job_to_save.get();
- } else {
- SDVLOG(2) << "Saving a configuration job";
- DCHECK_EQ(job->purpose(), SyncSessionJob::CONFIGURATION);
- DCHECK(!wait_interval_->pending_configure_job);
- DCHECK_EQ(mode_, CONFIGURATION_MODE);
- DCHECK(!job->config_params().ready_task.is_null());
- // The only nudge that could exist is a scheduled canary nudge.
- DCHECK(!unscheduled_nudge_storage_.get());
- if (pending_nudge_) {
- // Pre-empt the nudge canary and abandon the old nudge (owned by task).
- unscheduled_nudge_storage_ = pending_nudge_->CloneAndAbandon();
- pending_nudge_ = unscheduled_nudge_storage_.get();
- }
- wait_interval_->pending_configure_job = job_to_save.get();
- }
- TimeDelta length =
- wait_interval_->timer.desired_run_time() - TimeTicks::Now();
- wait_interval_->length = length < TimeDelta::FromSeconds(0) ?
- TimeDelta::FromSeconds(0) : length;
- RestartWaiting(job_to_save.Pass());
- return;
+ if (!CanRunJobNow(priority)) {
+ SDVLOG(1) << "Unable to run a nudge job right now";
+ return false;
}
- // Note that today there are no cases where we SAVE a CONFIGURATION job
- // when we're not in a WaitInterval. See bug 147736.
- DCHECK(is_nudge);
- // There may or may not be a pending_configure_job. Either way this nudge
- // is unschedulable.
- pending_nudge_ = job_to_save.get();
- unscheduled_nudge_storage_ = job_to_save.Pass();
-}
+ // If all types are throttled, do not continue. Today, we don't treat a
+ // per-datatype "unthrottle" event as something that should force a canary
+ // job. For this reason, there's no good time to reschedule this job to run
+ // -- we'll lazily wait for an independent event to trigger a sync.
+ ModelTypeSet throttled_types =
+ session_context_->throttled_data_type_tracker()->GetThrottledTypes();
+ if (!nudge_tracker_.GetLocallyModifiedTypes().Empty() &&
+ throttled_types.HasAll(nudge_tracker_.GetLocallyModifiedTypes())) {
+ // TODO(sync): Throttled types should be pruned from the sources list.
+ SDVLOG(1) << "Not running a nudge because we're fully datatype throttled.";
+ return false;
+ }
-// Functor for std::find_if to search by ModelSafeGroup.
-struct ModelSafeWorkerGroupIs {
- explicit ModelSafeWorkerGroupIs(ModelSafeGroup group) : group(group) {}
- bool operator()(ModelSafeWorker* w) {
- return group == w->GetModelSafeGroup();
+ if (mode_ == CONFIGURATION_MODE) {
+ SDVLOG(1) << "Not running nudge because we're in configuration mode.";
+ return false;
}
- ModelSafeGroup group;
-};
+
+ return true;
+}
void SyncSchedulerImpl::ScheduleNudgeAsync(
const TimeDelta& desired_delay,
NudgeSource source, ModelTypeSet types,
const tracked_objects::Location& nudge_location) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
SDVLOG_LOC(nudge_location, 2)
<< "Nudge scheduled with delay "
<< desired_delay.InMilliseconds() << " ms, "
@@ -540,7 +377,7 @@ void SyncSchedulerImpl::ScheduleNudgeWithStatesAsync(
const TimeDelta& desired_delay,
NudgeSource source, const ModelTypeInvalidationMap& invalidation_map,
const tracked_objects::Location& nudge_location) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
SDVLOG_LOC(nudge_location, 2)
<< "Nudge scheduled with delay "
<< desired_delay.InMilliseconds() << " ms, "
@@ -562,7 +399,7 @@ void SyncSchedulerImpl::ScheduleNudgeImpl(
GetUpdatesCallerInfo::GetUpdatesSource source,
const ModelTypeInvalidationMap& invalidation_map,
const tracked_objects::Location& nudge_location) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
DCHECK(!invalidation_map.empty()) << "Nudge scheduled for no types!";
if (no_scheduling_allowed_) {
@@ -586,63 +423,38 @@ void SyncSchedulerImpl::ScheduleNudgeImpl(
SyncSourceInfo info(source, invalidation_map);
UpdateNudgeTimeRecords(info);
- scoped_ptr<SyncSessionJob> job(new SyncSessionJob(
- SyncSessionJob::NUDGE,
- TimeTicks::Now() + delay,
- CreateSyncSession(info).Pass(),
- ConfigurationParams()));
- JobProcessDecision decision = DecideOnJob(*job, NORMAL_PRIORITY);
- SDVLOG(2) << "Should run "
- << SyncSessionJob::GetPurposeString(job->purpose())
- << " job " << job->session()
- << " in mode " << GetModeString(mode_)
- << ": " << GetDecisionString(decision);
- if (decision != CONTINUE) {
- // End of the line, though we may save the job for later.
- if (decision == SAVE) {
- HandleSaveJobDecision(job.Pass());
- } else {
- DCHECK_EQ(decision, DROP);
- }
+ // Coalesce the new nudge information with any existing information.
+ nudge_tracker_.CoalesceSources(info);
+
+ if (!CanRunNudgeJobNow(NORMAL_PRIORITY))
+ return;
+
+ if (!started_) {
+ SDVLOG_LOC(nudge_location, 2)
+ << "Schedule not started; not running a nudge.";
return;
}
- if (pending_nudge_) {
- SDVLOG(2) << "Rescheduling pending nudge";
- pending_nudge_->mutable_session()->CoalesceSources(
- job->session()->source());
- // Choose the start time as the earliest of the 2. Note that this means
- // if a nudge arrives with delay (e.g. kDefaultSessionsCommitDelaySeconds)
- // but a nudge is already scheduled to go out, we'll send the (tab) commit
- // without waiting.
- pending_nudge_->set_scheduled_start(
- std::min(job->scheduled_start(), pending_nudge_->scheduled_start()));
- // Abandon the old task by cloning and replacing the session.
- // It's possible that by "rescheduling" we're actually taking a job that
- // was previously unscheduled and giving it wings, so take care to reset
- // unscheduled nudge storage.
- job = pending_nudge_->CloneAndAbandon();
- pending_nudge_ = NULL;
- unscheduled_nudge_storage_.reset();
- // It's also possible we took a canary job, since we allow one nudge
- // per backoff interval.
- DCHECK(!wait_interval_ || !wait_interval_->had_nudge);
+ TimeTicks incoming_run_time = TimeTicks::Now() + delay;
+ if (!scheduled_nudge_time_.is_null() &&
+ (scheduled_nudge_time_ < incoming_run_time)) {
+ // Old job arrives sooner than this one. Don't reschedule it.
+ return;
}
- TimeDelta run_delay = job->scheduled_start() - TimeTicks::Now();
- if (run_delay < TimeDelta::FromMilliseconds(0))
- run_delay = TimeDelta::FromMilliseconds(0);
+ // Either there is no existing nudge in flight or the incoming nudge should be
+ // made to arrive first (preempt) the existing nudge. We reschedule in either
+ // case.
SDVLOG_LOC(nudge_location, 2)
<< "Scheduling a nudge with "
- << run_delay.InMilliseconds() << " ms delay";
-
- pending_nudge_ = job.get();
- PostDelayedTask(nudge_location, "DoSyncSessionJob",
- base::Bind(base::IgnoreResult(&SyncSchedulerImpl::DoSyncSessionJob),
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(&job),
- NORMAL_PRIORITY),
- run_delay);
+ << delay.InMilliseconds() << " ms delay";
+ scheduled_nudge_time_ = incoming_run_time;
+ pending_wakeup_timer_.Start(
+ nudge_location,
+ delay,
+ base::Bind(&SyncSchedulerImpl::DoNudgeSyncSessionJob,
+ weak_ptr_factory_.GetWeakPtr(),
+ NORMAL_PRIORITY));
}
const char* SyncSchedulerImpl::GetModeString(SyncScheduler::Mode mode) {
@@ -653,160 +465,124 @@ const char* SyncSchedulerImpl::GetModeString(SyncScheduler::Mode mode) {
return "";
}
-const char* SyncSchedulerImpl::GetDecisionString(
- SyncSchedulerImpl::JobProcessDecision mode) {
- switch (mode) {
- ENUM_CASE(CONTINUE);
- ENUM_CASE(SAVE);
- ENUM_CASE(DROP);
- }
- return "";
-}
-
-void SyncSchedulerImpl::PostTask(
- const tracked_objects::Location& from_here,
- const char* name, const base::Closure& task) {
- SDVLOG_LOC(from_here, 3) << "Posting " << name << " task";
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- if (!started_) {
- SDVLOG(1) << "Not posting task as scheduler is stopped.";
- return;
- }
- sync_loop_->PostTask(from_here, task);
-}
+void SyncSchedulerImpl::DoNudgeSyncSessionJob(JobPriority priority) {
+ DCHECK(CalledOnValidThread());
-void SyncSchedulerImpl::PostDelayedTask(
- const tracked_objects::Location& from_here,
- const char* name, const base::Closure& task, base::TimeDelta delay) {
- SDVLOG_LOC(from_here, 3) << "Posting " << name << " task with "
- << delay.InMilliseconds() << " ms delay";
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- if (!started_) {
- SDVLOG(1) << "Not posting task as scheduler is stopped.";
+ if (!CanRunNudgeJobNow(priority))
return;
- }
- sync_loop_->PostDelayedTask(from_here, task, delay);
-}
-
-bool SyncSchedulerImpl::DoSyncSessionJob(scoped_ptr<SyncSessionJob> job,
- JobPriority priority) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- if (job->purpose() == SyncSessionJob::NUDGE) {
- if (pending_nudge_ == NULL ||
- pending_nudge_->session() != job->session()) {
- // |job| is abandoned.
- SDVLOG(2) << "Dropping a nudge in "
- << "DoSyncSessionJob because another nudge was scheduled";
- return false;
- }
- pending_nudge_ = NULL;
- }
-
- if (!job->session()) {
- SDVLOG(2) << "Dropping abandoned job";
- return false; // Fix for crbug.com/190085.
- }
-
- base::AutoReset<bool> protector(&no_scheduling_allowed_, true);
- JobProcessDecision decision = DecideOnJob(*job, priority);
- SDVLOG(2) << "Should run "
- << SyncSessionJob::GetPurposeString(job->purpose())
- << " job " << job->session()
- << " in mode " << GetModeString(mode_)
- << " with source " << job->session()->source().updates_source
- << ": " << GetDecisionString(decision);
- if (decision != CONTINUE) {
- if (decision == SAVE) {
- HandleSaveJobDecision(job.Pass());
- } else {
- DCHECK_EQ(decision, DROP);
- }
- return false;
- }
- SDVLOG(2) << "Calling SyncShare with "
- << SyncSessionJob::GetPurposeString(job->purpose()) << " job";
- bool premature_exit = !syncer_->SyncShare(job->mutable_session(),
- job->start_step(),
- job->end_step());
- SDVLOG(2) << "Done SyncShare, returned: " << premature_exit;
+ DVLOG(2) << "Will run normal mode sync cycle with routing info "
+ << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ SyncSession session(session_context_, this, nudge_tracker_.source_info());
+ bool premature_exit = !syncer_->SyncShare(&session, SYNCER_BEGIN, SYNCER_END);
+ AdjustPolling(FORCE_RESET);
- bool success = FinishSyncSessionJob(job.get(), premature_exit);
+ bool success = !premature_exit
+ && !sessions::HasSyncerError(
+ session.status_controller().model_neutral_state());
- if (IsSyncingCurrentlySilenced()) {
- SDVLOG(2) << "We are currently throttled; scheduling Unthrottle.";
- // If we're here, it's because |job| was silenced until a server specified
- // time. (Note, it had to be |job|, because DecideOnJob would not permit
- // any job through while in WaitInterval::THROTTLED).
- scoped_ptr<SyncSessionJob> clone = job->Clone();
- if (clone->purpose() == SyncSessionJob::NUDGE)
- pending_nudge_ = clone.get();
- else if (clone->purpose() == SyncSessionJob::CONFIGURATION)
- wait_interval_->pending_configure_job = clone.get();
- else
- NOTREACHED();
+ if (success) {
+ // That cycle took care of any outstanding work we had.
+ SDVLOG(2) << "Nudge succeeded.";
+ nudge_tracker_.Reset();
+ scheduled_nudge_time_ = base::TimeTicks();
- RestartWaiting(clone.Pass());
- return success;
+ // If we're here, then we successfully reached the server. End all backoff.
+ wait_interval_.reset();
+ NotifyRetryTime(base::Time());
+ return;
+ } else {
+ HandleFailure(session.status_controller().model_neutral_state());
}
-
- if (!success)
- ScheduleNextSync(job.Pass());
-
- return success;
}
-bool SyncSchedulerImpl::ShouldPoll() {
- if (wait_interval_.get()) {
- SDVLOG(2) << "Not running poll in wait interval.";
- return false;
- }
+bool SyncSchedulerImpl::DoConfigurationSyncSessionJob(JobPriority priority) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(mode_, CONFIGURATION_MODE);
- if (mode_ == CONFIGURATION_MODE) {
- SDVLOG(2) << "Not running poll in configuration mode.";
+ if (!CanRunJobNow(priority)) {
+ SDVLOG(2) << "Unable to run configure job right now.";
return false;
}
- // TODO(rlarocque): Refactor decision-making logic common to all types
- // of jobs into a shared function.
-
- if (session_context_->connection_manager()->HasInvalidAuthToken()) {
- SDVLOG(2) << "Not running poll because auth token is invalid.";
+ SDVLOG(2) << "Will run configure SyncShare with routes "
+ << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ SyncSourceInfo source_info(pending_configure_params_->source,
+ ModelSafeRoutingInfoToInvalidationMap(
+ session_context_->routing_info(),
+ std::string()));
+ SyncSession session(session_context_, this, source_info);
+ bool premature_exit = !syncer_->SyncShare(&session,
+ DOWNLOAD_UPDATES,
+ APPLY_UPDATES);
+ AdjustPolling(FORCE_RESET);
+
+ bool success = !premature_exit
+ && !sessions::HasSyncerError(
+ session.status_controller().model_neutral_state());
+
+ if (success) {
+ SDVLOG(2) << "Configure succeeded.";
+ pending_configure_params_->ready_task.Run();
+ pending_configure_params_.reset();
+
+ // If we're here, then we successfully reached the server. End all backoff.
+ wait_interval_.reset();
+ NotifyRetryTime(base::Time());
+ return true;
+ } else {
+ HandleFailure(session.status_controller().model_neutral_state());
return false;
}
-
- return true;
}
-void SyncSchedulerImpl::DoPollSyncSessionJob(scoped_ptr<SyncSessionJob> job) {
- DCHECK_EQ(job->purpose(), SyncSessionJob::POLL);
+void SyncSchedulerImpl::HandleFailure(
+ const sessions::ModelNeutralState& model_neutral_state) {
+ if (IsSyncingCurrentlySilenced()) {
+ SDVLOG(2) << "Was throttled during previous sync cycle.";
+ RestartWaiting();
+ } else {
+ UpdateExponentialBackoff(model_neutral_state);
+ SDVLOG(2) << "Sync cycle failed. Will back off for "
+ << wait_interval_->length.InMilliseconds() << "ms.";
+ RestartWaiting();
+ }
+}
+void SyncSchedulerImpl::DoPollSyncSessionJob() {
+ ModelSafeRoutingInfo r;
+ ModelTypeInvalidationMap invalidation_map =
+ ModelSafeRoutingInfoToInvalidationMap(r, std::string());
+ SyncSourceInfo info(GetUpdatesCallerInfo::PERIODIC, invalidation_map);
base::AutoReset<bool> protector(&no_scheduling_allowed_, true);
- if (!ShouldPoll())
+ if (!CanRunJobNow(NORMAL_PRIORITY)) {
+ SDVLOG(2) << "Unable to run a poll job right now.";
return;
+ }
- SDVLOG(2) << "Calling SyncShare with "
- << SyncSessionJob::GetPurposeString(job->purpose()) << " job";
- bool premature_exit = !syncer_->SyncShare(job->mutable_session(),
- job->start_step(),
- job->end_step());
- SDVLOG(2) << "Done SyncShare, returned: " << premature_exit;
+ if (mode_ != NORMAL_MODE) {
+ SDVLOG(2) << "Not running poll job in configure mode.";
+ return;
+ }
+
+ SDVLOG(2) << "Polling with routes "
+ << ModelSafeRoutingInfoToString(session_context_->routing_info());
+ SyncSession session(session_context_, this, info);
+ syncer_->SyncShare(&session, SYNCER_BEGIN, SYNCER_END);
- FinishSyncSessionJob(job.get(), premature_exit);
+ AdjustPolling(UPDATE_INTERVAL);
if (IsSyncingCurrentlySilenced()) {
- // This will start the countdown to unthrottle. Other kinds of jobs would
- // schedule themselves as the post-unthrottle canary. A poll job is not
- // that urgent, so it does not get to be the canary. We still need to start
- // the timer regardless. Otherwise there could be no one to clear the
- // WaitInterval when the throttling expires.
- RestartWaiting(scoped_ptr<SyncSessionJob>());
+ SDVLOG(2) << "Poll request got us throttled.";
+ // The OnSilencedUntil() call set up the WaitInterval for us. All we need
+ // to do is start the timer.
+ RestartWaiting();
}
}
void SyncSchedulerImpl::UpdateNudgeTimeRecords(const SyncSourceInfo& info) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
// We are interested in recording time between local nudges for datatypes.
// TODO(tim): Consider tracking LOCAL_NOTIFICATION as well.
@@ -830,73 +606,8 @@ void SyncSchedulerImpl::UpdateNudgeTimeRecords(const SyncSourceInfo& info) {
}
}
-bool SyncSchedulerImpl::FinishSyncSessionJob(SyncSessionJob* job,
- bool exited_prematurely) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
-
- // Let job know that we're through syncing (calling SyncShare) at this point.
- bool succeeded = false;
- {
- base::AutoReset<bool> protector(&no_scheduling_allowed_, true);
- succeeded = job->Finish(exited_prematurely);
- }
-
- SDVLOG(2) << "Updating the next polling time after SyncMain";
-
- AdjustPolling(job);
-
- if (succeeded) {
- // No job currently supported by the scheduler could succeed without
- // successfully reaching the server. Therefore, if we make it here, it is
- // appropriate to reset the backoff interval.
- wait_interval_.reset();
- NotifyRetryTime(base::Time());
- SDVLOG(2) << "Job succeeded so not scheduling more jobs";
- }
-
- return succeeded;
-}
-
-void SyncSchedulerImpl::ScheduleNextSync(
- scoped_ptr<SyncSessionJob> finished_job) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- DCHECK(finished_job->purpose() == SyncSessionJob::CONFIGURATION
- || finished_job->purpose() == SyncSessionJob::NUDGE);
-
- // TODO(rlarocque): There's no reason why we should blindly backoff and retry
- // if we don't succeed. Some types of errors are not likely to disappear on
- // their own. With the return values now available in the old_job.session,
- // we should be able to detect such errors and only retry when we detect
- // transient errors.
-
- if (IsBackingOff() && wait_interval_->timer.IsRunning() &&
- mode_ == NORMAL_MODE) {
- // When in normal mode, we allow up to one nudge per backoff interval. It
- // appears that this was our nudge for this interval, and it failed.
- //
- // Note: This does not prevent us from running canary jobs. For example,
- // an IP address change might still result in another nudge being executed
- // during this backoff interval.
- SDVLOG(2) << "A nudge during backoff failed, creating new pending nudge.";
- DCHECK_EQ(SyncSessionJob::NUDGE, finished_job->purpose());
- DCHECK(!wait_interval_->had_nudge);
-
- wait_interval_->had_nudge = true;
- DCHECK(!pending_nudge_);
-
- scoped_ptr<SyncSessionJob> new_job = finished_job->Clone();
- pending_nudge_ = new_job.get();
- RestartWaiting(new_job.Pass());
- } else {
- // Either this is the first failure or a consecutive failure after our
- // backoff timer expired. We handle it the same way in either case.
- SDVLOG(2) << "Non-'backoff nudge' SyncShare job failed";
- HandleContinuationError(finished_job.Pass());
- }
-}
-
-void SyncSchedulerImpl::AdjustPolling(const SyncSessionJob* old_job) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+void SyncSchedulerImpl::AdjustPolling(PollAdjustType type) {
+ DCHECK(CalledOnValidThread());
TimeDelta poll = (!session_context_->notifications_enabled()) ?
syncer_short_poll_interval_seconds_ :
@@ -904,7 +615,7 @@ void SyncSchedulerImpl::AdjustPolling(const SyncSessionJob* old_job) {
bool rate_changed = !poll_timer_.IsRunning() ||
poll != poll_timer_.GetCurrentDelay();
- if (old_job && old_job->purpose() != SyncSessionJob::POLL && !rate_changed)
+ if (type == FORCE_RESET && !rate_changed)
poll_timer_.Reset();
if (!rate_changed)
@@ -916,57 +627,36 @@ void SyncSchedulerImpl::AdjustPolling(const SyncSessionJob* old_job) {
&SyncSchedulerImpl::PollTimerCallback);
}
-void SyncSchedulerImpl::RestartWaiting(scoped_ptr<SyncSessionJob> job) {
+void SyncSchedulerImpl::RestartWaiting() {
CHECK(wait_interval_.get());
- wait_interval_->timer.Stop();
DCHECK(wait_interval_->length >= TimeDelta::FromSeconds(0));
+ NotifyRetryTime(base::Time::Now() + wait_interval_->length);
+ SDVLOG(2) << "Starting WaitInterval timer of length "
+ << wait_interval_->length.InMilliseconds() << "ms.";
if (wait_interval_->mode == WaitInterval::THROTTLED) {
- wait_interval_->timer.Start(FROM_HERE, wait_interval_->length,
- base::Bind(&SyncSchedulerImpl::Unthrottle,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(&job)));
+ pending_wakeup_timer_.Start(
+ FROM_HERE,
+ wait_interval_->length,
+ base::Bind(&SyncSchedulerImpl::Unthrottle,
+ weak_ptr_factory_.GetWeakPtr()));
} else {
- wait_interval_->timer.Start(FROM_HERE, wait_interval_->length,
- base::Bind(&SyncSchedulerImpl::DoCanaryJob,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(&job)));
+ pending_wakeup_timer_.Start(
+ FROM_HERE,
+ wait_interval_->length,
+ base::Bind(&SyncSchedulerImpl::TryCanaryJob,
+ weak_ptr_factory_.GetWeakPtr()));
}
}
-void SyncSchedulerImpl::HandleContinuationError(
- scoped_ptr<SyncSessionJob> old_job) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+void SyncSchedulerImpl::UpdateExponentialBackoff(
+ const sessions::ModelNeutralState& model_neutral_state) {
+ DCHECK(CalledOnValidThread());
TimeDelta length = delay_provider_->GetDelay(
IsBackingOff() ? wait_interval_->length :
- delay_provider_->GetInitialDelay(
- old_job->session()->status_controller().model_neutral_state()));
-
- SDVLOG(2) << "In handle continuation error with "
- << SyncSessionJob::GetPurposeString(old_job->purpose())
- << " job. The time delta(ms) is "
- << length.InMilliseconds();
-
- // This will reset the had_nudge variable as well.
+ delay_provider_->GetInitialDelay(model_neutral_state));
wait_interval_.reset(new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF,
length));
- NotifyRetryTime(base::Time::Now() + length);
- scoped_ptr<SyncSessionJob> new_job(old_job->Clone());
- new_job->set_scheduled_start(TimeTicks::Now() + length);
- if (old_job->purpose() == SyncSessionJob::CONFIGURATION) {
- SDVLOG(2) << "Configuration did not succeed, scheduling retry.";
- // Config params should always get set.
- DCHECK(!old_job->config_params().ready_task.is_null());
- wait_interval_->pending_configure_job = new_job.get();
- } else {
- // We are not in configuration mode. So wait_interval's pending job
- // should be null.
- DCHECK(wait_interval_->pending_configure_job == NULL);
- DCHECK(!pending_nudge_);
- pending_nudge_ = new_job.get();
- }
-
- RestartWaiting(new_job.Pass());
}
void SyncSchedulerImpl::RequestStop(const base::Closure& callback) {
@@ -979,7 +669,7 @@ void SyncSchedulerImpl::RequestStop(const base::Closure& callback) {
}
void SyncSchedulerImpl::StopImpl(const base::Closure& callback) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
SDVLOG(2) << "StopImpl called";
// Kill any in-flight method calls.
@@ -987,80 +677,32 @@ void SyncSchedulerImpl::StopImpl(const base::Closure& callback) {
wait_interval_.reset();
NotifyRetryTime(base::Time());
poll_timer_.Stop();
- pending_nudge_ = NULL;
- unscheduled_nudge_storage_.reset();
- if (started_) {
+ pending_wakeup_timer_.Stop();
+ pending_configure_params_.reset();
+ if (started_)
started_ = false;
- }
if (!callback.is_null())
callback.Run();
}
-void SyncSchedulerImpl::DoCanaryJob(scoped_ptr<SyncSessionJob> to_be_canary) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- SDVLOG(2) << "Do canary job";
-
- if (to_be_canary->purpose() == SyncSessionJob::NUDGE) {
- // TODO(tim): Bug 158313. Remove this check.
- if (pending_nudge_ == NULL ||
- pending_nudge_->session() != to_be_canary->session()) {
- // |job| is abandoned.
- SDVLOG(2) << "Dropping a nudge in "
- << "DoCanaryJob because another nudge was scheduled";
- return;
- }
- DCHECK_EQ(pending_nudge_->session(), to_be_canary->session());
- }
+// This is the only place where we invoke DoSyncSessionJob with canary
+// privileges. Everyone else should use NORMAL_PRIORITY.
+void SyncSchedulerImpl::TryCanaryJob() {
+ DCHECK(CalledOnValidThread());
- // This is the only place where we invoke DoSyncSessionJob with canary
- // privileges. Everyone else should use NORMAL_PRIORITY.
- DoSyncSessionJob(to_be_canary.Pass(), CANARY_PRIORITY);
-}
-
-scoped_ptr<SyncSessionJob> SyncSchedulerImpl::TakePendingJobForCurrentMode() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- // If we find a scheduled pending_ job, abandon the old one and return a
- // a clone. If unscheduled, just hand over ownership.
- scoped_ptr<SyncSessionJob> candidate;
- if (mode_ == CONFIGURATION_MODE && wait_interval_.get()
- && wait_interval_->pending_configure_job) {
- SDVLOG(2) << "Found pending configure job";
- candidate =
- wait_interval_->pending_configure_job->CloneAndAbandon().Pass();
- wait_interval_->pending_configure_job = candidate.get();
- } else if (mode_ == NORMAL_MODE && pending_nudge_) {
- SDVLOG(2) << "Found pending nudge job";
- candidate = pending_nudge_->CloneAndAbandon();
- pending_nudge_ = candidate.get();
- unscheduled_nudge_storage_.reset();
+ if (mode_ == CONFIGURATION_MODE && pending_configure_params_) {
+ SDVLOG(2) << "Found pending configure job; will run as canary";
+ DoConfigurationSyncSessionJob(CANARY_PRIORITY);
+ } else if (mode_ == NORMAL_MODE && !nudge_tracker_.IsEmpty()) {
+ SDVLOG(2) << "Found pending nudge job; will run as canary";
+ DoNudgeSyncSessionJob(CANARY_PRIORITY);
+ } else {
+ SDVLOG(2) << "Found no work to do; will not run a canary";
}
- // If we took a job and there's a wait interval, we took the pending canary.
- if (candidate && wait_interval_)
- wait_interval_->timer.Stop();
- return candidate.Pass();
-}
-
-scoped_ptr<SyncSession> SyncSchedulerImpl::CreateSyncSession(
- const SyncSourceInfo& source) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- DVLOG(2) << "Creating sync session with routes "
- << ModelSafeRoutingInfoToString(session_context_->routing_info());
-
- SyncSourceInfo info(source);
- return scoped_ptr<SyncSession>(new SyncSession(session_context_, this, info));
}
void SyncSchedulerImpl::PollTimerCallback() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
- ModelSafeRoutingInfo r;
- ModelTypeInvalidationMap invalidation_map =
- ModelSafeRoutingInfoToInvalidationMap(r, std::string());
- SyncSourceInfo info(GetUpdatesCallerInfo::PERIODIC, invalidation_map);
- scoped_ptr<SyncSession> s(CreateSyncSession(info));
- scoped_ptr<SyncSessionJob> job(new SyncSessionJob(SyncSessionJob::POLL,
- TimeTicks::Now(),
- s.Pass(),
- ConfigurationParams()));
+ DCHECK(CalledOnValidThread());
if (no_scheduling_allowed_) {
// The no_scheduling_allowed_ flag is set by a function-scoped AutoReset in
// functions that are called only on the sync thread. This function is also
@@ -1072,16 +714,12 @@ void SyncSchedulerImpl::PollTimerCallback() {
return;
}
- DoPollSyncSessionJob(job.Pass());
+ DoPollSyncSessionJob();
}
-void SyncSchedulerImpl::Unthrottle(scoped_ptr<SyncSessionJob> to_be_canary) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+void SyncSchedulerImpl::Unthrottle() {
+ DCHECK(CalledOnValidThread());
DCHECK_EQ(WaitInterval::THROTTLED, wait_interval_->mode);
- DCHECK(!to_be_canary.get() || pending_nudge_ == to_be_canary.get() ||
- wait_interval_->pending_configure_job == to_be_canary.get());
- SDVLOG(2) << "Unthrottled " << (to_be_canary.get() ? "with " : "without ")
- << "canary.";
// We're no longer throttled, so clear the wait interval.
wait_interval_.reset();
@@ -1092,15 +730,11 @@ void SyncSchedulerImpl::Unthrottle(scoped_ptr<SyncSessionJob> to_be_canary) {
// was just created (e.g via ScheduleNudgeImpl). The main implication is
// that we're careful to update routing info (etc) with such potentially
// stale canary jobs.
- if (to_be_canary.get()) {
- DoCanaryJob(to_be_canary.Pass());
- } else {
- DCHECK(!unscheduled_nudge_storage_.get());
- }
+ TryCanaryJob();
}
void SyncSchedulerImpl::Notify(SyncEngineEvent::EventCause cause) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
session_context_->NotifyListeners(SyncEngineEvent(cause));
}
@@ -1111,45 +745,45 @@ void SyncSchedulerImpl::NotifyRetryTime(base::Time retry_time) {
}
bool SyncSchedulerImpl::IsBackingOff() const {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
return wait_interval_.get() && wait_interval_->mode ==
WaitInterval::EXPONENTIAL_BACKOFF;
}
void SyncSchedulerImpl::OnSilencedUntil(
const base::TimeTicks& silenced_until) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
wait_interval_.reset(new WaitInterval(WaitInterval::THROTTLED,
silenced_until - TimeTicks::Now()));
NotifyRetryTime(base::Time::Now() + wait_interval_->length);
}
bool SyncSchedulerImpl::IsSyncingCurrentlySilenced() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
return wait_interval_.get() && wait_interval_->mode ==
WaitInterval::THROTTLED;
}
void SyncSchedulerImpl::OnReceivedShortPollIntervalUpdate(
const base::TimeDelta& new_interval) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
syncer_short_poll_interval_seconds_ = new_interval;
}
void SyncSchedulerImpl::OnReceivedLongPollIntervalUpdate(
const base::TimeDelta& new_interval) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
syncer_long_poll_interval_seconds_ = new_interval;
}
void SyncSchedulerImpl::OnReceivedSessionsCommitDelay(
const base::TimeDelta& new_delay) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
sessions_commit_delay_ = new_delay;
}
void SyncSchedulerImpl::OnShouldStopSyncingPermanently() {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
SDVLOG(2) << "OnShouldStopSyncingPermanently";
syncer_->RequestEarlyExit(); // Thread-safe.
Notify(SyncEngineEvent::STOP_SYNCING_PERMANENTLY);
@@ -1157,7 +791,7 @@ void SyncSchedulerImpl::OnShouldStopSyncingPermanently() {
void SyncSchedulerImpl::OnActionableError(
const sessions::SyncSessionSnapshot& snap) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
SDVLOG(2) << "OnActionableError";
SyncEngineEvent event(SyncEngineEvent::ACTIONABLE_ERROR);
event.snapshot = snap;
@@ -1166,7 +800,7 @@ void SyncSchedulerImpl::OnActionableError(
void SyncSchedulerImpl::OnSyncProtocolError(
const sessions::SyncSessionSnapshot& snapshot) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
if (ShouldRequestEarlyExit(
snapshot.model_neutral_state().sync_protocol_error)) {
SDVLOG(2) << "Sync Scheduler requesting early exit.";
@@ -1177,12 +811,12 @@ void SyncSchedulerImpl::OnSyncProtocolError(
}
void SyncSchedulerImpl::SetNotificationsEnabled(bool notifications_enabled) {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
session_context_->set_notifications_enabled(notifications_enabled);
}
base::TimeDelta SyncSchedulerImpl::GetSessionsCommitDelay() const {
- DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(CalledOnValidThread());
return sessions_commit_delay_;
}
diff --git a/sync/engine/sync_scheduler_impl.h b/sync/engine/sync_scheduler_impl.h
index 61351b782e..efcccd39b0 100644
--- a/sync/engine/sync_scheduler_impl.h
+++ b/sync/engine/sync_scheduler_impl.h
@@ -9,23 +9,24 @@
#include <string>
#include "base/callback.h"
+#include "base/cancelable_callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
+#include "base/threading/non_thread_safe.h"
#include "base/time.h"
#include "base/timer.h"
#include "sync/base/sync_export.h"
#include "sync/engine/net/server_connection_manager.h"
#include "sync/engine/nudge_source.h"
#include "sync/engine/sync_scheduler.h"
-#include "sync/engine/sync_session_job.h"
#include "sync/engine/syncer.h"
#include "sync/internal_api/public/base/model_type_invalidation_map.h"
#include "sync/internal_api/public/engine/polling_constants.h"
#include "sync/internal_api/public/util/weak_handle.h"
+#include "sync/sessions/nudge_tracker.h"
#include "sync/sessions/sync_session.h"
#include "sync/sessions/sync_session_context.h"
@@ -33,7 +34,13 @@ namespace syncer {
class BackoffDelayProvider;
-class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
+namespace sessions {
+struct ModelNeutralState;
+}
+
+class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
+ : public SyncScheduler,
+ public base::NonThreadSafe {
public:
// |name| is a display string to identify the syncer thread. Takes
// |ownership of |syncer| and |delay_provider|.
@@ -80,15 +87,6 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
private:
- enum JobProcessDecision {
- // Indicates we should continue with the current job.
- CONTINUE,
- // Indicates that we should save it to be processed later.
- SAVE,
- // Indicates we should drop this job.
- DROP,
- };
-
enum JobPriority {
// Non-canary jobs respect exponential backoff.
NORMAL_PRIORITY,
@@ -96,18 +94,24 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
CANARY_PRIORITY
};
+ enum PollAdjustType {
+ // Restart the poll interval.
+ FORCE_RESET,
+ // Restart the poll interval only if its length has changed.
+ UPDATE_INTERVAL,
+ };
+
friend class SyncSchedulerTest;
friend class SyncSchedulerWhiteboxTest;
friend class SyncerTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, NoNudgesInConfigureMode);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
DropNudgeWhileExponentialBackOff);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, SaveNudge);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
SaveNudgeWhileTypeThrottled);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueNudge);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, DropPoll);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinuePoll);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueConfiguration);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
SaveConfigurationWhileThrottled);
@@ -115,10 +119,7 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
SaveNudgeWhileThrottled);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
ContinueCanaryJobConfig);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- ContinueNudgeWhileExponentialBackOff);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest, TransientPollFailure);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest, GetInitialBackoffDelay);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest,
ServerConnectionChangeDuringBackoff);
FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest,
@@ -128,9 +129,8 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
enum Mode {
// Uninitialized state, should not be set in practice.
UNKNOWN = -1,
- // A wait interval whose duration has been affected by exponential
- // backoff.
- // EXPONENTIAL_BACKOFF intervals are nudge-rate limited to 1 per interval.
+ // We enter a series of increasingly longer WaitIntervals if we experience
+ // repeated transient failures. We retry at the end of each interval.
EXPONENTIAL_BACKOFF,
// A server-initiated throttled interval. We do not allow any syncing
// during such an interval.
@@ -143,76 +143,42 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
static const char* GetModeString(Mode mode);
Mode mode;
-
- // This bool is set to true if we have observed a nudge during this
- // interval and mode == EXPONENTIAL_BACKOFF.
- bool had_nudge;
base::TimeDelta length;
- base::OneShotTimer<SyncSchedulerImpl> timer;
-
- // Configure jobs are saved only when backing off or throttling. So we
- // expose the pointer here (does not own, similar to pending_nudge).
- SyncSessionJob* pending_configure_job;
};
static const char* GetModeString(Mode mode);
- static const char* GetDecisionString(JobProcessDecision decision);
-
- // Helpers that log before posting to |sync_loop_|. These will only post
- // the task in between calls to Start/Stop.
- void PostTask(const tracked_objects::Location& from_here,
- const char* name,
- const base::Closure& task);
- void PostDelayedTask(const tracked_objects::Location& from_here,
- const char* name,
- const base::Closure& task,
- base::TimeDelta delay);
+ // Invoke the syncer to perform a nudge job.
+ void DoNudgeSyncSessionJob(JobPriority priority);
- // Invoke the Syncer to perform a non-poll job.
- bool DoSyncSessionJob(scoped_ptr<SyncSessionJob> job,
- JobPriority priority);
+ // Invoke the syncer to perform a configuration job.
+ bool DoConfigurationSyncSessionJob(JobPriority priority);
- // Returns whether or not it's safe to run a poll job at this time.
- bool ShouldPoll();
+ // Helper function for Do{Nudge,Configuration}SyncSessionJob.
+ void HandleFailure(
+ const sessions::ModelNeutralState& model_neutral_state);
// Invoke the Syncer to perform a poll job.
- void DoPollSyncSessionJob(scoped_ptr<SyncSessionJob> job);
-
- // Called after the Syncer has performed the sync represented by |job|, to
- // reset our state. |exited_prematurely| is true if the Syncer did not
- // cycle from job.start_step() to job.end_step(), likely because the
- // scheduler was forced to quit the job mid-way through.
- bool FinishSyncSessionJob(SyncSessionJob* job,
- bool exited_prematurely);
-
- // Helper to schedule retries of a failed configure or nudge job.
- void ScheduleNextSync(scoped_ptr<SyncSessionJob> finished_job);
+ void DoPollSyncSessionJob();
- // Helper to configure polling intervals. Used by Start and ScheduleNextSync.
- void AdjustPolling(const SyncSessionJob* old_job);
+ // Adjusts the poll timer to account for new poll interval, and possibly
+  // resets the poll interval, depending on the flag's value.
+ void AdjustPolling(PollAdjustType type);
// Helper to restart waiting with |wait_interval_|'s timer.
- void RestartWaiting(scoped_ptr<SyncSessionJob> job);
+ void RestartWaiting();
- // Helper to ScheduleNextSync in case of consecutive sync errors.
- void HandleContinuationError(scoped_ptr<SyncSessionJob> old_job);
+  // Helper to adjust our wait interval when we experience a transient failure.
+ void UpdateExponentialBackoff(
+ const sessions::ModelNeutralState& model_neutral_state);
- // Decide whether we should CONTINUE, SAVE or DROP the job.
- JobProcessDecision DecideOnJob(const SyncSessionJob& job,
- JobPriority priority);
+ // Determines if we're allowed to contact the server right now.
+ bool CanRunJobNow(JobPriority priority);
- // If DecideOnJob decides that |job| should be SAVEd, this function will
- // carry out the task of actually "saving" (or coalescing) the job.
- void HandleSaveJobDecision(scoped_ptr<SyncSessionJob> job);
+ // Determines if we're allowed to contact the server right now.
+ bool CanRunNudgeJobNow(JobPriority priority);
- // Decide on whether to CONTINUE, SAVE or DROP the job when we are in
- // backoff mode.
- JobProcessDecision DecideWhileInWaitInterval(const SyncSessionJob& job,
- JobPriority priority);
-
- // 'Impl' here refers to real implementation of public functions, running on
- // |thread_|.
+ // 'Impl' here refers to real implementation of public functions.
void StopImpl(const base::Closure& callback);
// If the scheduler's current state supports it, this will create a job based
@@ -234,35 +200,22 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
// Helper to signal listeners about changed retry time
void NotifyRetryTime(base::Time retry_time);
- // Callback to change backoff state. |to_be_canary| in both cases is the job
- // that should be granted canary privileges. Note: it is possible that the
- // job that gets scheduled when this callback is scheduled is different from
- // the job that will actually get executed, because other jobs may have been
- // scheduled while we were waiting for the callback.
- void DoCanaryJob(scoped_ptr<SyncSessionJob> to_be_canary);
- void Unthrottle(scoped_ptr<SyncSessionJob> to_be_canary);
-
- // Returns a pending job that has potential to run given the state of the
- // scheduler, if it exists. Useful whenever an event occurs that may
- // change conditions that permit a job to run, such as re-establishing
- // network connection, auth refresh, mode changes etc. Note that the returned
- // job may have been scheduled to run at a later time, or may have been
- // unscheduled. In the former case, this will result in abandoning the old
- // job and effectively cancelling it.
- scoped_ptr<SyncSessionJob> TakePendingJobForCurrentMode();
+  // Looks for pending work and, if it finds any, runs it at "canary"
+  // priority.
+  void TryCanaryJob();
+
+ // Transitions out of the THROTTLED WaitInterval then calls TryCanaryJob().
+ void Unthrottle();
// Called when the root cause of the current connection error is fixed.
void OnServerConnectionErrorFixed();
- scoped_ptr<sessions::SyncSession> CreateSyncSession(
- const sessions::SyncSourceInfo& info);
-
// Creates a session for a poll and performs the sync.
void PollTimerCallback();
- // Called once the first time thread_ is started to broadcast an initial
- // session snapshot containing data like initial_sync_ended. Important when
- // the client starts up and does not need to perform an initial sync.
+  // Called when we are started to broadcast an initial session snapshot
+ // containing data like initial_sync_ended. Important when the client starts
+ // up and does not need to perform an initial sync.
void SendInitialSnapshot();
// This is used for histogramming and analysis of ScheduleNudge* APIs.
@@ -285,10 +238,6 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
// Used for logging.
const std::string name_;
- // The message loop this object is on. Almost all methods have to
- // be called on this thread.
- MessageLoop* const sync_loop_;
-
// Set in Start(), unset in Stop().
bool started_;
@@ -306,24 +255,24 @@ class SYNC_EXPORT_PRIVATE SyncSchedulerImpl : public SyncScheduler {
// The mode of operation.
Mode mode_;
- // Tracks (does not own) in-flight nudges (scheduled or unscheduled),
- // so we can coalesce. NULL if there is no pending nudge.
- SyncSessionJob* pending_nudge_;
-
- // There are certain situations where we want to remember a nudge, but
- // there is no well defined moment in time in the future when that nudge
- // should run, e.g. if it requires a mode switch or updated auth credentials.
- // This member will own NUDGE jobs in those cases, until an external event
- // (mode switch or fixed auth) occurs to trigger a retry. Should be treated
- // as opaque / not interacted with (i.e. we could build a wrapper to
- // hide the type, but that's probably overkill).
- scoped_ptr<SyncSessionJob> unscheduled_nudge_storage_;
-
// Current wait state. Null if we're not in backoff and not throttled.
scoped_ptr<WaitInterval> wait_interval_;
scoped_ptr<BackoffDelayProvider> delay_provider_;
+ // The event that will wake us up.
+ base::OneShotTimer<SyncSchedulerImpl> pending_wakeup_timer_;
+
+ // Storage for variables related to an in-progress configure request. Note
+ // that (mode_ != CONFIGURATION_MODE) \implies !pending_configure_params_.
+ scoped_ptr<ConfigurationParams> pending_configure_params_;
+
+ // If we have a nudge pending to run soon, it will be listed here.
+ base::TimeTicks scheduled_nudge_time_;
+
+ // Keeps track of work that the syncer needs to handle.
+ sessions::NudgeTracker nudge_tracker_;
+
// Invoked to run through the sync cycle.
scoped_ptr<Syncer> syncer_;
diff --git a/sync/engine/sync_scheduler_unittest.cc b/sync/engine/sync_scheduler_unittest.cc
index 9b3d7da41c..ffb2e57953 100644
--- a/sync/engine/sync_scheduler_unittest.cc
+++ b/sync/engine/sync_scheduler_unittest.cc
@@ -72,6 +72,14 @@ void PumpLoop() {
RunLoop();
}
+void PumpLoopFor(base::TimeDelta time) {
+ // Allow the loop to run for the specified amount of time.
+ MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ base::Bind(&QuitLoopNow),
+ time);
+ RunLoop();
+}
+
ModelSafeRoutingInfo TypesToRoutingInfo(ModelTypeSet types) {
ModelSafeRoutingInfo routes;
for (ModelTypeSet::Iterator iter = types.First(); iter.Good(); iter.Inc()) {
@@ -85,7 +93,7 @@ static const size_t kMinNumSamples = 5;
class SyncSchedulerTest : public testing::Test {
public:
SyncSchedulerTest()
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ : weak_ptr_factory_(this),
context_(NULL),
syncer_(NULL),
delay_(NULL) {}
@@ -716,7 +724,7 @@ TEST_F(SyncSchedulerTest, ThrottlingDoesThrottle) {
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
scheduler()->ScheduleNudgeAsync(
- zero(), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ TimeDelta::FromMicroseconds(1), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
PumpLoop();
StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
@@ -975,21 +983,20 @@ TEST_F(SyncSchedulerTest, BackoffDropsJobs) {
EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
r.snapshots[0].source().updates_source);
- EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(1)
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
- RecordSyncShare(&r)));
+ // Wait a while (10x poll interval) so a few poll jobs will be attempted.
+ PumpLoopFor(poll * 10);
- // We schedule a nudge with enough delay (10X poll interval) that at least
- // one or two polls would have taken place. The nudge should succeed.
+ // Try (and fail) to schedule a nudge.
scheduler()->ScheduleNudgeAsync(
- poll * 10, NUDGE_SOURCE_LOCAL, types, FROM_HERE);
- RunLoop();
+ base::TimeDelta::FromMilliseconds(1),
+ NUDGE_SOURCE_LOCAL,
+ types,
+ FROM_HERE);
Mock::VerifyAndClearExpectations(syncer());
Mock::VerifyAndClearExpectations(delay());
- ASSERT_EQ(2U, r.snapshots.size());
- EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
- r.snapshots[1].source().updates_source);
+
+ ASSERT_EQ(1U, r.snapshots.size());
EXPECT_CALL(*delay(), GetDelay(_)).Times(0);
@@ -1184,7 +1191,7 @@ TEST_F(SyncSchedulerTest, StartWhenNotConnected) {
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
Return(true)))
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
- QuitLoopNowAction()));
+ Return(true)));
StartSyncScheduler(SyncScheduler::NORMAL_MODE);
scheduler()->ScheduleNudgeAsync(
@@ -1211,7 +1218,7 @@ TEST_F(SyncSchedulerTest, ServerConnectionChangeDuringBackoff) {
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
Return(true)))
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
- QuitLoopNowAction()));
+ Return(true)));
scheduler()->ScheduleNudgeAsync(
zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(BOOKMARKS), FROM_HERE);
@@ -1226,6 +1233,9 @@ TEST_F(SyncSchedulerTest, ServerConnectionChangeDuringBackoff) {
MessageLoop::current()->RunUntilIdle();
}
+// This was supposed to test the scenario where we receive a nudge while a
+// connection change canary is scheduled, but has not run yet. Since we've made
+// the connection change canary synchronous, this is no longer possible.
TEST_F(SyncSchedulerTest, ConnectionChangeCanaryPreemptedByNudge) {
UseMockDelayProvider();
EXPECT_CALL(*delay(), GetDelay(_))
@@ -1239,6 +1249,8 @@ TEST_F(SyncSchedulerTest, ConnectionChangeCanaryPreemptedByNudge) {
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
Return(true)))
.WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ Return(true)))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
QuitLoopNowAction()));
scheduler()->ScheduleNudgeAsync(
diff --git a/sync/engine/sync_scheduler_whitebox_unittest.cc b/sync/engine/sync_scheduler_whitebox_unittest.cc
deleted file mode 100644
index 77bb11dd40..0000000000
--- a/sync/engine/sync_scheduler_whitebox_unittest.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop.h"
-#include "base/time.h"
-#include "sync/engine/backoff_delay_provider.h"
-#include "sync/engine/sync_scheduler_impl.h"
-#include "sync/engine/throttled_data_type_tracker.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/sessions/sync_session_context.h"
-#include "sync/sessions/test_util.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/mock_connection_manager.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/fake_extensions_activity_monitor.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-using base::TimeTicks;
-
-namespace syncer {
-using sessions::SyncSession;
-using sessions::SyncSessionContext;
-using sessions::SyncSourceInfo;
-using sync_pb::GetUpdatesCallerInfo;
-
-class SyncSchedulerWhiteboxTest : public testing::Test {
- public:
- virtual void SetUp() {
- dir_maker_.SetUp();
- Syncer* syncer = new Syncer();
-
- ModelSafeRoutingInfo routes;
- routes[BOOKMARKS] = GROUP_UI;
- routes[NIGORI] = GROUP_PASSIVE;
-
- workers_.push_back(make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers_.push_back(make_scoped_refptr(new FakeModelWorker(GROUP_PASSIVE)));
-
- std::vector<ModelSafeWorker*> workers;
- for (std::vector<scoped_refptr<FakeModelWorker> >::iterator it =
- workers_.begin(); it != workers_.end(); ++it) {
- workers.push_back(it->get());
- }
-
- connection_.reset(new MockConnectionManager(NULL));
- throttled_data_type_tracker_.reset(new ThrottledDataTypeTracker(NULL));
- context_.reset(
- new SyncSessionContext(
- connection_.get(), dir_maker_.directory(),
- workers, &extensions_activity_monitor_,
- throttled_data_type_tracker_.get(),
- std::vector<SyncEngineEventListener*>(), NULL, NULL,
- true, // enable keystore encryption
- "fake_invalidator_client_id"));
- context_->set_notifications_enabled(true);
- context_->set_account_name("Test");
- scheduler_.reset(
- new SyncSchedulerImpl("TestSyncSchedulerWhitebox",
- BackoffDelayProvider::FromDefaults(),
- context(),
- syncer));
- }
-
- virtual void TearDown() {
- scheduler_.reset();
- }
-
- void SetMode(SyncScheduler::Mode mode) {
- scheduler_->mode_ = mode;
- }
-
- void ResetWaitInterval() {
- scheduler_->wait_interval_.reset();
- }
-
- void SetWaitIntervalToThrottled() {
- scheduler_->wait_interval_.reset(new SyncSchedulerImpl::WaitInterval(
- SyncSchedulerImpl::WaitInterval::THROTTLED, TimeDelta::FromSeconds(1)));
- }
-
- void SetWaitIntervalToExponentialBackoff() {
- scheduler_->wait_interval_.reset(
- new SyncSchedulerImpl::WaitInterval(
- SyncSchedulerImpl::WaitInterval::EXPONENTIAL_BACKOFF,
- TimeDelta::FromSeconds(1)));
- }
-
- void SetWaitIntervalHadNudge(bool had_nudge) {
- scheduler_->wait_interval_->had_nudge = had_nudge;
- }
-
- SyncSchedulerImpl::JobProcessDecision DecideOnJob(
- const SyncSessionJob& job,
- SyncSchedulerImpl::JobPriority priority) {
- return scheduler_->DecideOnJob(job, priority);
- }
-
- void InitializeSyncerOnNormalMode() {
- SetMode(SyncScheduler::NORMAL_MODE);
- ResetWaitInterval();
- }
-
- SyncSchedulerImpl::JobProcessDecision CreateAndDecideJob(
- SyncSessionJob::Purpose purpose) {
- scoped_ptr<SyncSession> s(scheduler_->CreateSyncSession(SyncSourceInfo()));
- SyncSessionJob job(purpose, TimeTicks::Now(), s.Pass(),
- ConfigurationParams());
- return DecideOnJob(job, SyncSchedulerImpl::NORMAL_PRIORITY);
- }
-
- bool ShouldPoll() {
- return scheduler_->ShouldPoll();
- }
-
- SyncSessionContext* context() { return context_.get(); }
-
- private:
- MessageLoop message_loop_;
- scoped_ptr<MockConnectionManager> connection_;
- scoped_ptr<SyncSessionContext> context_;
- std::vector<scoped_refptr<FakeModelWorker> > workers_;
- FakeExtensionsActivityMonitor extensions_activity_monitor_;
- scoped_ptr<ThrottledDataTypeTracker> throttled_data_type_tracker_;
- TestDirectorySetterUpper dir_maker_;
-
- protected:
- // Declared here to ensure it is destructed before the objects it references.
- scoped_ptr<SyncSchedulerImpl> scheduler_;
-};
-
-TEST_F(SyncSchedulerWhiteboxTest, SaveNudge) {
- InitializeSyncerOnNormalMode();
-
- // Now set the mode to configure.
- SetMode(SyncScheduler::CONFIGURATION_MODE);
-
- SyncSchedulerImpl::JobProcessDecision decision =
- CreateAndDecideJob(SyncSessionJob::NUDGE);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::SAVE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, SaveNudgeWhileTypeThrottled) {
- InitializeSyncerOnNormalMode();
-
- const ModelTypeSet types(BOOKMARKS);
-
- // Mark bookmarks as throttled.
- context()->throttled_data_type_tracker()->SetUnthrottleTime(
- types, base::TimeTicks::Now() + base::TimeDelta::FromHours(2));
-
- const ModelTypeInvalidationMap& invalidation_map =
- ModelTypeSetToInvalidationMap(types, std::string());
-
- SyncSourceInfo info(GetUpdatesCallerInfo::LOCAL, invalidation_map);
- scoped_ptr<SyncSession> s(scheduler_->CreateSyncSession(info));
-
- // Now schedule a nudge with just bookmarks and the change is local.
- SyncSessionJob job(SyncSessionJob::NUDGE,
- TimeTicks::Now(),
- s.Pass(),
- ConfigurationParams());
- SyncSchedulerImpl::JobProcessDecision decision =
- DecideOnJob(job, SyncSchedulerImpl::NORMAL_PRIORITY);
- // TODO(tim): This shouldn't drop. Bug 177659.
- EXPECT_EQ(decision, SyncSchedulerImpl::DROP);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, ContinueNudge) {
- InitializeSyncerOnNormalMode();
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::NUDGE);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::CONTINUE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, ContinuePoll) {
- InitializeSyncerOnNormalMode();
- EXPECT_TRUE(ShouldPoll());
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, DropPollInConfigureMode) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::CONFIGURATION_MODE);
- EXPECT_FALSE(ShouldPoll());
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, DropPollWhenThrottled) {
- InitializeSyncerOnNormalMode();
- SetWaitIntervalToThrottled();
- EXPECT_FALSE(ShouldPoll());
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, DropPollInBackoff) {
- InitializeSyncerOnNormalMode();
- SetWaitIntervalToExponentialBackoff();
- EXPECT_FALSE(ShouldPoll());
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, ContinueConfiguration) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::CONFIGURATION_MODE);
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::CONFIGURATION);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::CONTINUE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, SaveConfigurationWhileThrottled) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::CONFIGURATION_MODE);
-
- SetWaitIntervalToThrottled();
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::CONFIGURATION);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::SAVE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, SaveNudgeWhileThrottled) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::CONFIGURATION_MODE);
-
- SetWaitIntervalToThrottled();
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::NUDGE);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::SAVE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, ContinueNudgeWhileExponentialBackOff) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::NORMAL_MODE);
- SetWaitIntervalToExponentialBackoff();
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::NUDGE);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::CONTINUE);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, DropNudgeWhileExponentialBackOff) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::NORMAL_MODE);
- SetWaitIntervalToExponentialBackoff();
- SetWaitIntervalHadNudge(true);
-
- SyncSchedulerImpl::JobProcessDecision decision = CreateAndDecideJob(
- SyncSessionJob::NUDGE);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::DROP);
-}
-
-TEST_F(SyncSchedulerWhiteboxTest, ContinueCanaryJobConfig) {
- InitializeSyncerOnNormalMode();
- SetMode(SyncScheduler::CONFIGURATION_MODE);
- SetWaitIntervalToExponentialBackoff();
-
- SyncSessionJob job(SyncSessionJob::CONFIGURATION,
- TimeTicks::Now(), scoped_ptr<SyncSession>(),
- ConfigurationParams());
-
- SyncSchedulerImpl::JobProcessDecision decision =
- DecideOnJob(job, SyncSchedulerImpl::CANARY_PRIORITY);
-
- EXPECT_EQ(decision, SyncSchedulerImpl::CONTINUE);
-}
-
-} // namespace syncer
diff --git a/sync/engine/sync_session_job.cc b/sync/engine/sync_session_job.cc
deleted file mode 100644
index f0d0d33fab..0000000000
--- a/sync/engine/sync_session_job.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_session_job.h"
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-
-namespace syncer {
-
-SyncSessionJob::~SyncSessionJob() {
-}
-
-SyncSessionJob::SyncSessionJob(
- Purpose purpose,
- base::TimeTicks start,
- scoped_ptr<sessions::SyncSession> session,
- const ConfigurationParams& config_params)
- : purpose_(purpose),
- scheduled_start_(start),
- session_(session.Pass()),
- config_params_(config_params),
- finished_(NOT_FINISHED) {
-}
-
-#define ENUM_CASE(x) case x: return #x; break;
-const char* SyncSessionJob::GetPurposeString(SyncSessionJob::Purpose purpose) {
- switch (purpose) {
- ENUM_CASE(UNKNOWN);
- ENUM_CASE(POLL);
- ENUM_CASE(NUDGE);
- ENUM_CASE(CONFIGURATION);
- }
- NOTREACHED();
- return "";
-}
-#undef ENUM_CASE
-
-bool SyncSessionJob::Finish(bool early_exit) {
- DCHECK_EQ(finished_, NOT_FINISHED);
- // Did we run through all SyncerSteps from start_step() to end_step()
- // until the SyncSession returned !HasMoreToSync()?
- // Note: if not, it's possible the scheduler hasn't started with
- // SyncShare yet, it's possible there is still more to sync in the session,
- // and it's also possible the job quit part way through due to a premature
- // exit condition (such as shutdown).
- finished_ = early_exit ? EARLY_EXIT : FINISHED;
-
- if (early_exit)
- return false;
-
- // Did we hit any errors along the way?
- if (sessions::HasSyncerError(
- session_->status_controller().model_neutral_state())) {
- return false;
- }
-
- const sessions::ModelNeutralState& state(
- session_->status_controller().model_neutral_state());
- switch (purpose_) {
- case POLL:
- case NUDGE:
- DCHECK_NE(state.last_download_updates_result, UNSET);
- DCHECK_NE(state.commit_result, UNSET);
- break;
- case CONFIGURATION:
- DCHECK_NE(state.last_download_updates_result, UNSET);
- break;
- case UNKNOWN:
- default:
- NOTREACHED();
- }
-
- if (!config_params_.ready_task.is_null())
- config_params_.ready_task.Run();
- return true;
-}
-
-scoped_ptr<SyncSessionJob> SyncSessionJob::CloneAndAbandon() {
- DCHECK_EQ(finished_, NOT_FINISHED);
- // Clone |this|, and abandon it by NULL-ing session_.
- return scoped_ptr<SyncSessionJob> (new SyncSessionJob(
- purpose_, scheduled_start_, session_.Pass(),
- config_params_));
-}
-
-scoped_ptr<SyncSessionJob> SyncSessionJob::Clone() const {
- DCHECK_GT(finished_, NOT_FINISHED);
- return scoped_ptr<SyncSessionJob>(new SyncSessionJob(
- purpose_, scheduled_start_, CloneSession().Pass(),
- config_params_));
-}
-
-scoped_ptr<sessions::SyncSession> SyncSessionJob::CloneSession() const {
- return scoped_ptr<sessions::SyncSession>(
- new sessions::SyncSession(session_->context(),
- session_->delegate(), session_->source()));
-}
-
-SyncSessionJob::Purpose SyncSessionJob::purpose() const {
- return purpose_;
-}
-
-base::TimeTicks SyncSessionJob::scheduled_start() const {
- return scheduled_start_;
-}
-
-void SyncSessionJob::set_scheduled_start(base::TimeTicks start) {
- scheduled_start_ = start;
-};
-
-const sessions::SyncSession* SyncSessionJob::session() const {
- return session_.get();
-}
-
-sessions::SyncSession* SyncSessionJob::mutable_session() {
- return session_.get();
-}
-
-ConfigurationParams SyncSessionJob::config_params() const {
- return config_params_;
-}
-
-SyncerStep SyncSessionJob::start_step() const {
- SyncerStep start, end;
- GetSyncerStepsForPurpose(purpose_, &start, &end);
- return start;
-}
-
-SyncerStep SyncSessionJob::end_step() const {
- SyncerStep start, end;
- GetSyncerStepsForPurpose(purpose_, &start, &end);
- return end;
-}
-
-// static
-void SyncSessionJob::GetSyncerStepsForPurpose(Purpose purpose,
- SyncerStep* start,
- SyncerStep* end) {
- switch (purpose) {
- case SyncSessionJob::CONFIGURATION:
- *start = DOWNLOAD_UPDATES;
- *end = APPLY_UPDATES;
- return;
- case SyncSessionJob::NUDGE:
- case SyncSessionJob::POLL:
- *start = SYNCER_BEGIN;
- *end = SYNCER_END;
- return;
- default:
- NOTREACHED();
- *start = SYNCER_END;
- *end = SYNCER_END;
- return;
- }
-}
-
-} // namespace syncer
diff --git a/sync/engine/sync_session_job.h b/sync/engine/sync_session_job.h
deleted file mode 100644
index 33d21713e4..0000000000
--- a/sync/engine/sync_session_job.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_SESSION_JOB_H_
-#define SYNC_ENGINE_SYNC_SESSION_JOB_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/sync_scheduler.h"
-#include "sync/engine/syncer.h"
-#include "sync/sessions/sync_session.h"
-
-namespace syncer {
-
-class SYNC_EXPORT_PRIVATE SyncSessionJob {
- public:
- enum Purpose {
- // Uninitialized state, should never be hit in practice.
- UNKNOWN = -1,
- // Our poll timer schedules POLL jobs periodically based on a server
- // assigned poll interval.
- POLL,
- // A nudge task can come from a variety of components needing to force
- // a sync. The source is inferable from |session.source()|.
- NUDGE,
- // Typically used for fetching updates for a subset of the enabled types
- // during initial sync or reconfiguration.
- CONFIGURATION,
- };
-
- SyncSessionJob(Purpose purpose,
- base::TimeTicks start,
- scoped_ptr<sessions::SyncSession> session,
- const ConfigurationParams& config_params);
- ~SyncSessionJob();
-
- // Returns a new clone of the job, with a cloned SyncSession ready to be
- // retried / rescheduled. A job can only be cloned once it has finished, to
- // prevent bugs where multiple jobs are scheduled with the same session. Use
- // CloneAndAbandon if you want to clone before finishing.
- scoped_ptr<SyncSessionJob> Clone() const;
-
- // Same as Clone() above, but also ejects the SyncSession from this job,
- // preventing it from ever being used for a sync cycle.
- scoped_ptr<SyncSessionJob> CloneAndAbandon();
-
- // Record that the scheduler has deemed the job as finished and give it a
- // chance to perform any remaining cleanup and/or notification completion
- // callback invocations.
- // |early_exit| specifies whether the job 1) cycled through all the
- // SyncerSteps it needed, or 2) was pre-empted by the scheduler.
- // Returns true if we completely ran the session without errors.
- // There are many errors that could prevent a sync cycle from succeeding,
- // such as invalid local state, inability to contact the server, inability
- // to authenticate with the server, and server errors. What they have in
- // common is that the we either need to take some action and then retry the
- // sync cycle or, in the case of transient errors, retry after some backoff
- // timer has expired. Most importantly, the SyncScheduler should not assume
- // that the original action that triggered the sync cycle (ie. a nudge or a
- // notification) has been properly serviced.
- bool Finish(bool early_exit);
-
- static const char* GetPurposeString(Purpose purpose);
- static void GetSyncerStepsForPurpose(Purpose purpose,
- SyncerStep* start,
- SyncerStep* end);
-
- Purpose purpose() const;
- base::TimeTicks scheduled_start() const;
- void set_scheduled_start(base::TimeTicks start);
- const sessions::SyncSession* session() const;
- sessions::SyncSession* mutable_session();
- SyncerStep start_step() const;
- SyncerStep end_step() const;
- ConfigurationParams config_params() const;
-
- private:
- // A SyncSessionJob can be in one of these three states, controlled by the
- // Finish() function, see method comments.
- enum FinishedState {
- NOT_FINISHED, // Finish has not been called.
- EARLY_EXIT, // Finish was called but the job was "preempted",
- FINISHED // Indicates a "clean" finish operation.
- };
-
- scoped_ptr<sessions::SyncSession> CloneSession() const;
-
- const Purpose purpose_;
-
- base::TimeTicks scheduled_start_;
- scoped_ptr<sessions::SyncSession> session_;
-
- // Only used for purpose_ == CONFIGURATION. This, and different Finish() and
- // Succeeded() behavior may be arguments to subclass in the future.
- const ConfigurationParams config_params_;
-
- // Set to true if Finish() was called, false otherwise. True implies that
- // a SyncShare operation took place with |session_| and it cycled through
- // all requisite steps given |purpose_| without being preempted.
- FinishedState finished_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncSessionJob);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_SESSION_JOB_H_
diff --git a/sync/engine/sync_session_job_unittest.cc b/sync/engine/sync_session_job_unittest.cc
deleted file mode 100644
index 66026ce3a5..0000000000
--- a/sync/engine/sync_session_job_unittest.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_session_job.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/time.h"
-#include "sync/internal_api/public/base/model_type_invalidation_map.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/sessions/sync_session_context.h"
-#include "sync/sessions/test_util.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeTicks;
-
-namespace syncer {
-
-using sessions::SyncSession;
-
-class MockDelegate : public SyncSession::Delegate {
- public:
- MockDelegate() {}
- ~MockDelegate() {}
-
- MOCK_METHOD0(IsSyncingCurrentlySilenced, bool());
- MOCK_METHOD1(OnReceivedShortPollIntervalUpdate, void(const base::TimeDelta&));
- MOCK_METHOD1(OnReceivedLongPollIntervalUpdate ,void(const base::TimeDelta&));
- MOCK_METHOD1(OnReceivedSessionsCommitDelay, void(const base::TimeDelta&));
- MOCK_METHOD1(OnSyncProtocolError, void(const sessions::SyncSessionSnapshot&));
- MOCK_METHOD0(OnShouldStopSyncingPermanently, void());
- MOCK_METHOD1(OnSilencedUntil, void(const base::TimeTicks&));
-};
-
-class SyncSessionJobTest : public testing::Test {
- public:
- SyncSessionJobTest() : config_params_callback_invoked_(false) {}
- virtual void SetUp() {
- routes_.clear();
- workers_.clear();
- config_params_callback_invoked_ = false;
- routes_[BOOKMARKS] = GROUP_PASSIVE;
- scoped_refptr<ModelSafeWorker> passive_worker(
- new FakeModelWorker(GROUP_PASSIVE));
- workers_.push_back(passive_worker);
- std::vector<ModelSafeWorker*> workers;
- GetWorkers(&workers);
- context_.reset(new sessions::SyncSessionContext(
- NULL, // |connection_manager|
- NULL, // |directory|
- workers,
- NULL, // |extensions_activity_monitor|
- NULL, // |throttled_data_type_tracker|
- std::vector<SyncEngineEventListener*>(),
- NULL, // |debug_info_getter|
- NULL, // |traffic_recorder|
- true, // |enable keystore encryption|
- "fake_invalidator_client_id"));
- context_->set_routing_info(routes_);
- }
-
- scoped_ptr<SyncSession> NewLocalSession() {
- sessions::SyncSourceInfo info(
- sync_pb::GetUpdatesCallerInfo::LOCAL, ModelTypeInvalidationMap());
- return scoped_ptr<SyncSession>(
- new SyncSession(context_.get(), &delegate_, info));
- }
-
- void GetWorkers(std::vector<ModelSafeWorker*>* out) const {
- out->clear();
- for (std::vector<scoped_refptr<ModelSafeWorker> >::const_iterator it =
- workers_.begin(); it != workers_.end(); ++it) {
- out->push_back(it->get());
- }
- }
-
- void ConfigurationParamsCallback() {
- config_params_callback_invoked_ = true;
- }
-
- bool config_params_callback_invoked() const {
- return config_params_callback_invoked_;
- }
-
- sessions::SyncSessionContext* context() { return context_.get(); }
- SyncSession::Delegate* delegate() { return &delegate_; }
- const ModelSafeRoutingInfo& routes() { return routes_; }
-
- // Checks that the two jobs are "clones" as defined by SyncSessionJob,
- // minus location and SyncSession checking, for reuse in different
- // scenarios.
- void ExpectClonesBase(SyncSessionJob* job, SyncSessionJob* clone) {
- EXPECT_EQ(job->purpose(), clone->purpose());
- EXPECT_EQ(job->scheduled_start(), clone->scheduled_start());
- EXPECT_EQ(job->start_step(), clone->start_step());
- EXPECT_EQ(job->end_step(), clone->end_step());
- }
-
- private:
- scoped_ptr<sessions::SyncSessionContext> context_;
- std::vector<scoped_refptr<ModelSafeWorker> > workers_;
- MockDelegate delegate_;
- ModelSafeRoutingInfo routes_;
- bool config_params_callback_invoked_;
-};
-
-TEST_F(SyncSessionJobTest, Clone) {
- SyncSessionJob job1(SyncSessionJob::NUDGE, TimeTicks::Now(),
- NewLocalSession().Pass(), ConfigurationParams());
-
- sessions::test_util::SimulateSuccess(job1.mutable_session(),
- job1.start_step(),
- job1.end_step());
- job1.Finish(false);
- ModelSafeRoutingInfo new_routes;
- new_routes[AUTOFILL] = GROUP_PASSIVE;
- context()->set_routing_info(new_routes);
- scoped_ptr<SyncSessionJob> clone1 = job1.Clone();
-
- ExpectClonesBase(&job1, clone1.get());
- EXPECT_NE(job1.session(), clone1->session());
-
- context()->set_routing_info(routes());
- sessions::test_util::SimulateSuccess(clone1->mutable_session(),
- clone1->start_step(),
- clone1->end_step());
- clone1->Finish(false);
- scoped_ptr<SyncSessionJob> clone2 = clone1->Clone();
-
- ExpectClonesBase(clone1.get(), clone2.get());
- EXPECT_NE(clone1->session(), clone2->session());
- EXPECT_NE(clone1->session(), clone2->session());
-
- clone1.reset();
- ExpectClonesBase(&job1, clone2.get());
- EXPECT_NE(job1.session(), clone2->session());
-}
-
-TEST_F(SyncSessionJobTest, CloneAfterEarlyExit) {
- SyncSessionJob job1(SyncSessionJob::NUDGE, TimeTicks::Now(),
- NewLocalSession().Pass(), ConfigurationParams());
- job1.Finish(true);
- scoped_ptr<SyncSessionJob> job2 = job1.Clone();
- ExpectClonesBase(&job1, job2.get());
-}
-
-TEST_F(SyncSessionJobTest, CloneAndAbandon) {
- scoped_ptr<SyncSession> session = NewLocalSession();
- SyncSession* session_ptr = session.get();
-
- SyncSessionJob job1(SyncSessionJob::NUDGE, TimeTicks::Now(),
- session.Pass(), ConfigurationParams());
- ModelSafeRoutingInfo new_routes;
- new_routes[AUTOFILL] = GROUP_PASSIVE;
- context()->set_routing_info(new_routes);
-
- scoped_ptr<SyncSessionJob> clone1 = job1.CloneAndAbandon();
- ExpectClonesBase(&job1, clone1.get());
- EXPECT_FALSE(job1.session());
- EXPECT_EQ(session_ptr, clone1->session());
-}
-
-// Tests interaction between Finish and sync cycle success / failure.
-TEST_F(SyncSessionJobTest, Finish) {
- SyncSessionJob job1(SyncSessionJob::NUDGE, TimeTicks::Now(),
- NewLocalSession().Pass(), ConfigurationParams());
-
- sessions::test_util::SimulateSuccess(job1.mutable_session(),
- job1.start_step(),
- job1.end_step());
- EXPECT_TRUE(job1.Finish(false /* early_exit */));
-
- scoped_ptr<SyncSessionJob> job2 = job1.Clone();
- sessions::test_util::SimulateConnectionFailure(job2->mutable_session(),
- job2->start_step(),
- job2->end_step());
- EXPECT_FALSE(job2->Finish(false));
-
- scoped_ptr<SyncSessionJob> job3 = job2->Clone();
- EXPECT_FALSE(job3->Finish(true));
-}
-
-TEST_F(SyncSessionJobTest, FinishCallsReadyTask) {
- ConfigurationParams params;
- params.ready_task = base::Bind(
- &SyncSessionJobTest::ConfigurationParamsCallback,
- base::Unretained(this));
-
- sessions::SyncSourceInfo info(
- sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- ModelTypeInvalidationMap());
- scoped_ptr<SyncSession> session(
- new SyncSession(context(), delegate(), info));
-
- SyncSessionJob job1(SyncSessionJob::CONFIGURATION, TimeTicks::Now(),
- session.Pass(), params);
- sessions::test_util::SimulateSuccess(job1.mutable_session(),
- job1.start_step(),
- job1.end_step());
- job1.Finish(false);
- EXPECT_TRUE(config_params_callback_invoked());
-}
-
-} // namespace syncer
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
index 90d193affe..ad0d53464f 100644
--- a/sync/engine/syncer.cc
+++ b/sync/engine/syncer.cc
@@ -22,14 +22,10 @@
#include "sync/engine/store_timestamps_command.h"
#include "sync/engine/syncer_types.h"
#include "sync/engine/throttled_data_type_tracker.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable-inl.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using base::Time;
using base::TimeDelta;
using sync_pb::ClientCommand;
@@ -45,8 +41,8 @@ using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
#define ENUM_CASE(x) case x: return #x
@@ -198,20 +194,7 @@ void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
dest->Put(SERVER_IS_DEL, src->Get(SERVER_IS_DEL));
dest->Put(IS_UNAPPLIED_UPDATE, src->Get(IS_UNAPPLIED_UPDATE));
dest->Put(SERVER_SPECIFICS, src->Get(SERVER_SPECIFICS));
- dest->Put(SERVER_ORDINAL_IN_PARENT, src->Get(SERVER_ORDINAL_IN_PARENT));
-}
-
-void ClearServerData(syncable::MutableEntry* entry) {
- entry->Put(SERVER_NON_UNIQUE_NAME, "");
- entry->Put(SERVER_PARENT_ID, syncable::GetNullId());
- entry->Put(SERVER_MTIME, Time());
- entry->Put(SERVER_CTIME, Time());
- entry->Put(SERVER_VERSION, 0);
- entry->Put(SERVER_IS_DIR, false);
- entry->Put(SERVER_IS_DEL, false);
- entry->Put(IS_UNAPPLIED_UPDATE, false);
- entry->Put(SERVER_SPECIFICS, sync_pb::EntitySpecifics::default_instance());
- entry->Put(SERVER_ORDINAL_IN_PARENT, Int64ToNodeOrdinal(0));
+ dest->Put(SERVER_UNIQUE_POSITION, src->Get(SERVER_UNIQUE_POSITION));
}
} // namespace syncer
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
index ac7892e1e5..8656e7c2ca 100644
--- a/sync/engine/syncer.h
+++ b/sync/engine/syncer.h
@@ -95,7 +95,6 @@ class SYNC_EXPORT_PRIVATE Syncer {
// Utility function declarations.
void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest);
-void ClearServerData(syncable::MutableEntry* entry);
const char* SyncerStepToString(const SyncerStep);
} // namespace syncer
diff --git a/sync/engine/syncer_proto_util.cc b/sync/engine/syncer_proto_util.cc
index 9138417a3a..3b72c17347 100644
--- a/sync/engine/syncer_proto_util.cc
+++ b/sync/engine/syncer_proto_util.cc
@@ -519,6 +519,16 @@ bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
}
// static
+bool SyncerProtoUtil::ShouldMaintainPosition(
+ const sync_pb::SyncEntity& sync_entity) {
+ // Maintain positions for bookmarks that are not server-defined top-level
+ // folders.
+ return GetModelType(sync_entity) == BOOKMARKS
+ && !(sync_entity.folder() &&
+ !sync_entity.server_defined_unique_tag().empty());
+}
+
+// static
void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
syncable::Blob* blob) {
syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
diff --git a/sync/engine/syncer_proto_util.h b/sync/engine/syncer_proto_util.h
index 1f271d8f0f..fedd2113fd 100644
--- a/sync/engine/syncer_proto_util.h
+++ b/sync/engine/syncer_proto_util.h
@@ -69,6 +69,8 @@ class SYNC_EXPORT_PRIVATE SyncerProtoUtil {
static bool Compare(const syncable::Entry& local_entry,
const sync_pb::SyncEntity& server_entry);
+ static bool ShouldMaintainPosition(const sync_pb::SyncEntity& sync_entity);
+
// Utility methods for converting between syncable::Blobs and protobuf byte
// fields.
static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index 1bdc13a162..196f77ac19 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -33,7 +33,6 @@
#include "sync/engine/traffic_recorder.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/nigori_specifics.pb.h"
#include "sync/protocol/preference_specifics.pb.h"
@@ -59,10 +58,12 @@
using base::TimeDelta;
+using std::count;
using std::map;
using std::multimap;
using std::set;
using std::string;
+using std::vector;
namespace syncer {
@@ -96,7 +97,6 @@ using syncable::PARENT_ID;
using syncable::BASE_SERVER_SPECIFICS;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
using syncable::UNIQUE_CLIENT_TAG;
@@ -176,29 +176,20 @@ class SyncerTest : public testing::Test,
GetModelSafeRoutingInfo(&info);
ModelTypeInvalidationMap invalidation_map =
ModelSafeRoutingInfoToInvalidationMap(info, std::string());
- return new SyncSession(context_.get(), this,
- sessions::SyncSourceInfo(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
- invalidation_map));
- }
-
-
- void SyncShareAsDelegate(SyncSessionJob::Purpose purpose) {
- SyncerStep start;
- SyncerStep end;
- SyncSessionJob::GetSyncerStepsForPurpose(purpose, &start, &end);
-
- session_.reset(MakeSession());
- EXPECT_TRUE(syncer_->SyncShare(session_.get(), start, end));
+ sessions::SyncSourceInfo source_info(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
+ invalidation_map);
+ return new SyncSession(context_.get(), this, source_info);
}
void SyncShareNudge() {
session_.reset(MakeSession());
- SyncShareAsDelegate(SyncSessionJob::NUDGE);
+ EXPECT_TRUE(syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END));
}
void SyncShareConfigure() {
session_.reset(MakeSession());
- SyncShareAsDelegate(SyncSessionJob::CONFIGURATION);
+ EXPECT_TRUE(
+ syncer_->SyncShare(session_.get(), DOWNLOAD_UPDATES, APPLY_UPDATES));
}
virtual void SetUp() {
@@ -479,7 +470,7 @@ class SyncerTest : public testing::Test,
ModelSafeRoutingInfo routing_info;
GetModelSafeRoutingInfo(&routing_info);
- if (context_.get()) {
+ if (context_) {
context_->set_routing_info(routing_info);
}
@@ -492,7 +483,7 @@ class SyncerTest : public testing::Test,
ModelSafeRoutingInfo routing_info;
GetModelSafeRoutingInfo(&routing_info);
- if (context_.get()) {
+ if (context_) {
context_->set_routing_info(routing_info);
}
@@ -860,6 +851,12 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
EXPECT_TRUE(GetCryptographer(&wtrans)->has_pending_keys());
}
+ // We need to remember the exact position of our local items, so we can
+ // make updates that do not modify those positions.
+ UniquePosition pos1;
+ UniquePosition pos2;
+ UniquePosition pos3;
+
mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
foreign_cache_guid(), "-1");
mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
@@ -875,6 +872,15 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
+
+ Entry entry1(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry1.Get(syncable::UNIQUE_POSITION).Equals(
+ entry1.Get(syncable::SERVER_UNIQUE_POSITION)));
+ pos1 = entry1.Get(syncable::UNIQUE_POSITION);
+ Entry entry2(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ pos2 = entry2.Get(syncable::UNIQUE_POSITION);
+ Entry entry3(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ pos3 = entry3.Get(syncable::UNIQUE_POSITION);
}
// Server side encryption will not be applied due to undecryptable data.
@@ -1418,13 +1424,29 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
SyncShareNudge();
ASSERT_EQ(6u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent1_id == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(parent2_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(ids_.FromNumber(102) == mock_server_->committed_ids()[2]);
- EXPECT_TRUE(ids_.FromNumber(-103) == mock_server_->committed_ids()[3]);
- EXPECT_TRUE(ids_.FromNumber(-104) == mock_server_->committed_ids()[4]);
- EXPECT_TRUE(ids_.FromNumber(105) == mock_server_->committed_ids()[5]);
+
+ // This strange iteration and std::count() usage is to allow the order to
+ // vary. All we really care about is that parent1_id and parent2_id are the
+ // first two IDs, and that the children make up the next four. Other than
+ // that, ordering doesn't matter.
+
+ vector<syncable::Id>::const_iterator i =
+ mock_server_->committed_ids().begin();
+ vector<syncable::Id>::const_iterator parents_begin = i;
+ i++;
+ i++;
+ vector<syncable::Id>::const_iterator parents_end = i;
+ vector<syncable::Id>::const_iterator children_begin = i;
+ vector<syncable::Id>::const_iterator children_end =
+ mock_server_->committed_ids().end();
+
+ EXPECT_EQ(1, count(parents_begin, parents_end, parent1_id));
+ EXPECT_EQ(1, count(parents_begin, parents_end, parent2_id));
+
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-103)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(102)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(105)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-104)));
}
TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
@@ -1456,10 +1478,15 @@ TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
SyncShareNudge();
ASSERT_EQ(3u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+ // There are two possible valid orderings.
+ if (child2_id == mock_server_->committed_ids()[1]) {
+ EXPECT_TRUE(child2_id == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[2]);
+ } else {
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+ }
}
TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
@@ -1605,15 +1632,15 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
TEST_F(SyncerTest, UpdateWithZeroLengthName) {
// One illegal update
- mock_server_->AddUpdateDirectory(1, 0, "", 1, 10,
- foreign_cache_guid(), "-1");
+ mock_server_->AddUpdateDirectory(
+ 1, 0, std::string(), 1, 10, foreign_cache_guid(), "-1");
// And one legal one that we're going to delete.
mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10,
foreign_cache_guid(), "-2");
SyncShareNudge();
// Delete the legal one. The new update has a null name.
- mock_server_->AddUpdateDirectory(2, 0, "", 2, 20,
- foreign_cache_guid(), "-2");
+ mock_server_->AddUpdateDirectory(
+ 2, 0, std::string(), 2, 20, foreign_cache_guid(), "-2");
mock_server_->SetLastUpdateDeleted();
SyncShareNudge();
}
@@ -2340,7 +2367,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
SyncShareNudge();
syncable::Id id;
int64 version;
- NodeOrdinal server_ordinal_in_parent;
{
syncable::ReadTransaction trans(FROM_HERE, directory());
Entry entry(&trans, syncable::GET_BY_HANDLE, entry_metahandle);
@@ -2348,7 +2374,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
id = entry.Get(ID);
EXPECT_TRUE(id.ServerKnows());
version = entry.Get(BASE_VERSION);
- server_ordinal_in_parent = entry.Get(SERVER_ORDINAL_IN_PARENT);
}
sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
update->set_originator_cache_guid(local_cache_guid());
@@ -2357,9 +2382,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
EXPECT_EQ(id.GetServerId(), update->id_string());
EXPECT_EQ(root_id_.GetServerId(), update->parent_id_string());
EXPECT_EQ(version, update->version());
- EXPECT_EQ(
- NodeOrdinalToInt64(server_ordinal_in_parent),
- update->position_in_parent());
SyncShareNudge();
{
syncable::ReadTransaction trans(FROM_HERE, directory());
@@ -4013,9 +4035,11 @@ TEST_F(SyncerTest, UniqueServerTagUpdates) {
}
// Now download some tagged items as updates.
- mock_server_->AddUpdateDirectory(1, 0, "update1", 1, 10, "", "");
+ mock_server_->AddUpdateDirectory(
+ 1, 0, "update1", 1, 10, std::string(), std::string());
mock_server_->SetLastUpdateServerTag("alpha");
- mock_server_->AddUpdateDirectory(2, 0, "update2", 2, 20, "", "");
+ mock_server_->AddUpdateDirectory(
+ 2, 0, "update2", 2, 20, std::string(), std::string());
mock_server_->SetLastUpdateServerTag("bob");
SyncShareNudge();
@@ -4221,7 +4245,7 @@ TEST_F(SyncerTest, GetKeyEmpty) {
EXPECT_TRUE(directory()->GetNigoriHandler()->NeedKeystoreKey(&rtrans));
}
- mock_server_->SetKeystoreKey("");
+ mock_server_->SetKeystoreKey(std::string());
SyncShareConfigure();
EXPECT_NE(session_->status_controller().last_get_key_result(), SYNCER_OK);
@@ -4702,225 +4726,6 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
}
-// A group of tests exercising the syncer's handling of sibling ordering, as
-// represented in the sync protocol.
-class SyncerPositionUpdateTest : public SyncerTest {
- public:
- SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
-
- protected:
- void ExpectLocalItemsInServerOrder() {
- if (position_map_.empty())
- return;
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Id prev_id;
- DCHECK(prev_id.IsRoot());
- PosMap::iterator next = position_map_.begin();
- for (PosMap::iterator i = next++; i != position_map_.end(); ++i) {
- Id id = i->second;
- Entry entry_with_id(&trans, GET_BY_ID, id);
- EXPECT_TRUE(entry_with_id.good());
- EXPECT_EQ(prev_id, entry_with_id.GetPredecessorId());
- EXPECT_EQ(
- i->first,
- NodeOrdinalToInt64(entry_with_id.Get(SERVER_ORDINAL_IN_PARENT)));
- if (next == position_map_.end()) {
- EXPECT_EQ(Id(), entry_with_id.GetSuccessorId());
- } else {
- EXPECT_EQ(next->second, entry_with_id.GetSuccessorId());
- next++;
- }
- prev_id = id;
- }
- }
-
- void AddRootItemWithPosition(int64 position) {
- string id = string("ServerId") + base::Int64ToString(next_update_id_++);
- string name = "my name is my id -- " + id;
- int revision = next_revision_++;
- mock_server_->AddUpdateDirectory(id, kRootId, name, revision, revision,
- foreign_cache_guid(),
- ids_.NewLocalId().GetServerId());
- mock_server_->SetLastUpdatePosition(position);
- position_map_.insert(
- PosMap::value_type(position, Id::CreateFromServerId(id)));
- }
- private:
- typedef multimap<int64, Id> PosMap;
- PosMap position_map_;
- int next_update_id_;
- int next_revision_;
- DISALLOW_COPY_AND_ASSIGN(SyncerPositionUpdateTest);
-};
-
-TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
- // Add a bunch of items in increasing order, starting with just positive
- // position values.
- AddRootItemWithPosition(100);
- AddRootItemWithPosition(199);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(400);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, InOrderNegative) {
- // Test negative position values, but in increasing order.
- AddRootItemWithPosition(-400);
- AddRootItemWithPosition(-201);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(100);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
- // Test when items are sent in the reverse order.
- AddRootItemWithPosition(400);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(100);
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(-201);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(-400);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
- // Mix it all up, interleaving position values, and try multiple batches of
- // updates.
- AddRootItemWithPosition(400);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(-400);
- AddRootItemWithPosition(100);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(-201);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-
- AddRootItemWithPosition(-144);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-class SyncerPositionTiebreakingTest : public SyncerTest {
- public:
- SyncerPositionTiebreakingTest()
- : low_id_(Id::CreateFromServerId("A")),
- mid_id_(Id::CreateFromServerId("M")),
- high_id_(Id::CreateFromServerId("Z")),
- next_revision_(1) {
- DCHECK(low_id_ < mid_id_);
- DCHECK(mid_id_ < high_id_);
- DCHECK(low_id_ < high_id_);
- }
-
- // Adds the item by its Id, using a constant value for the position
- // so that the syncer has to resolve the order some other way.
- void Add(const Id& id) {
- int revision = next_revision_++;
- mock_server_->AddUpdateDirectory(id.GetServerId(), kRootId,
- id.GetServerId(), revision, revision,
- foreign_cache_guid(), ids_.NewLocalId().GetServerId());
- // The update position doesn't vary.
- mock_server_->SetLastUpdatePosition(90210);
- }
-
- void ExpectLocalOrderIsByServerId() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Id null_id;
- Entry low(&trans, GET_BY_ID, low_id_);
- Entry mid(&trans, GET_BY_ID, mid_id_);
- Entry high(&trans, GET_BY_ID, high_id_);
- EXPECT_TRUE(low.good());
- EXPECT_TRUE(mid.good());
- EXPECT_TRUE(high.good());
- EXPECT_TRUE(low.GetPredecessorId() == null_id);
- EXPECT_TRUE(mid.GetPredecessorId() == low_id_);
- EXPECT_TRUE(high.GetPredecessorId() == mid_id_);
- EXPECT_TRUE(high.GetSuccessorId() == null_id);
- EXPECT_TRUE(mid.GetSuccessorId() == high_id_);
- EXPECT_TRUE(low.GetSuccessorId() == mid_id_);
- }
-
- protected:
- // When there's a tiebreak on the numeric position, it's supposed to be
- // broken by string comparison of the ids. These ids are in increasing
- // order.
- const Id low_id_;
- const Id mid_id_;
- const Id high_id_;
-
- private:
- int next_revision_;
- DISALLOW_COPY_AND_ASSIGN(SyncerPositionTiebreakingTest);
-};
-
-TEST_F(SyncerPositionTiebreakingTest, LowMidHigh) {
- Add(low_id_);
- Add(mid_id_);
- Add(high_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, LowHighMid) {
- Add(low_id_);
- Add(high_id_);
- Add(mid_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, HighMidLow) {
- Add(high_id_);
- Add(mid_id_);
- Add(low_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, HighLowMid) {
- Add(high_id_);
- Add(low_id_);
- Add(mid_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, MidHighLow) {
- Add(mid_id_);
- Add(high_id_);
- Add(low_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
- Add(mid_id_);
- Add(low_id_);
- Add(high_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
enum {
TEST_PARAM_BOOKMARK_ENABLE_BIT,
TEST_PARAM_AUTOFILL_ENABLE_BIT,
diff --git a/sync/engine/syncer_util.cc b/sync/engine/syncer_util.cc
index 3548ba1502..e0e566f7de 100644
--- a/sync/engine/syncer_util.cc
+++ b/sync/engine/syncer_util.cc
@@ -9,12 +9,15 @@
#include <string>
#include <vector>
+#include "base/base64.h"
#include "base/location.h"
#include "base/metrics/histogram.h"
+#include "base/string_number_conversions.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
@@ -29,13 +32,9 @@
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely uing Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
namespace syncer {
+using syncable::BASE_SERVER_SPECIFICS;
using syncable::BASE_VERSION;
using syncable::CHANGES_VERSION;
using syncable::CREATE_NEW_UPDATE_ITEM;
@@ -54,7 +53,6 @@ using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
-using syncable::BASE_SERVER_SPECIFICS;
using syncable::PARENT_ID;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
@@ -62,13 +60,15 @@ using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
-using syncable::UNIQUE_CLIENT_TAG;
-using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::SYNCER;
+using syncable::UNIQUE_BOOKMARK_TAG;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_POSITION;
+using syncable::UNIQUE_SERVER_TAG;
using syncable::WriteTransaction;
syncable::Id FindLocalIdToUpdate(
@@ -273,7 +273,32 @@ UpdateAttemptResponse AttemptToUpdateEntry(
return SUCCESS;
}
+std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
+ if (!update.has_originator_cache_guid() ||
+ !update.has_originator_client_item_id()) {
+ return std::string();
+ }
+
+ return syncable::GenerateSyncableBookmarkHash(
+ update.originator_cache_guid(), update.originator_client_item_id());
+}
+
+UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
+ const std::string& suffix) {
+ DCHECK(UniquePosition::IsValidSuffix(suffix));
+ if (!(SyncerProtoUtil::ShouldMaintainPosition(update))) {
+ return UniquePosition::CreateInvalid();
+ } else if (update.has_unique_position()) {
+ return UniquePosition::FromProto(update.unique_position());
+ } else if (update.has_position_in_parent()) {
+ return UniquePosition::FromInt64(update.position_in_parent(), suffix);
+ } else {
+ return UniquePosition::CreateInvalid();
+ }
+}
+
namespace {
+
// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
@@ -294,9 +319,34 @@ void UpdateBookmarkSpecifics(const std::string& singleton_tag,
local_entry->Put(SERVER_SPECIFICS, pb);
}
+void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
+ MutableEntry* local_entry) {
+ // Update our unique bookmark tag. In many cases this will be identical to
+ // the tag we already have. However, clients that have recently upgraded to
+ // versions that support unique positions will have incorrect tags. See the
+ // v86 migration logic in directory_backing_store.cc for more information.
+ //
+ // Both the old and new values are unique to this element. Applying this
+ // update will not risk the creation of conflicting unique tags.
+ std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
+ if (UniquePosition::IsValidSuffix(bookmark_tag)) {
+ local_entry->PutUniqueBookmarkTag(bookmark_tag);
+ }
+
+ // Update our position.
+ UniquePosition update_pos =
+ GetUpdatePosition(update, local_entry->Get(UNIQUE_BOOKMARK_TAG));
+ if (update_pos.IsValid()) {
+ local_entry->Put(syncable::SERVER_UNIQUE_POSITION, update_pos);
+ } else {
+ // TODO(sync): This and other cases of unexpected input should be handled
+ // better.
+ NOTREACHED();
+ }
+}
+
} // namespace
-// Pass in name and checksum because of UTF8 conversion.
void UpdateServerFieldsFromUpdate(
MutableEntry* target,
const sync_pb::SyncEntity& update,
@@ -355,9 +405,9 @@ void UpdateServerFieldsFromUpdate(
bookmark.bookmark_favicon(),
target);
}
- if (update.has_position_in_parent())
- target->Put(SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(update.position_in_parent()));
+ if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
+ UpdateBookmarkPositioning(update, target);
+ }
target->Put(SERVER_IS_DEL, update.deleted());
// We only mark the entry as unapplied if its version is greater than the
@@ -378,21 +428,6 @@ void CreateNewEntry(syncable::WriteTransaction *trans,
}
}
-void SplitServerInformationIntoNewEntry(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry) {
- syncable::Id id = entry->Get(ID);
- ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
- entry->Put(BASE_VERSION, 0);
-
- MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
- CopyServerFields(entry, &new_entry);
- ClearServerData(entry);
-
- DVLOG(1) << "Splitting server information, local entry: " << *entry
- << " server entry: " << new_entry;
-}
-
// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
@@ -415,11 +450,8 @@ void UpdateLocalDataFromServerData(
} else {
entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
+ entry->Put(UNIQUE_POSITION, entry->Get(SERVER_UNIQUE_POSITION));
CHECK(entry->Put(IS_DEL, false));
- Id new_predecessor =
- entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
- CHECK(entry->PutPredecessor(new_predecessor))
- << " Illegal predecessor after converting from server position.";
}
entry->Put(CTIME, entry->Get(SERVER_CTIME));
@@ -448,47 +480,6 @@ VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
return VERIFY_OK;
}
-bool AddItemThenPredecessors(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids) {
-
- if (!inserted_items->insert(item->Get(META_HANDLE)).second)
- return false;
- commit_ids->push_back(item->Get(ID));
- if (item->Get(IS_DEL))
- return true; // Deleted items have no predecessors.
-
- Id prev_id = item->GetPredecessorId();
- while (!prev_id.IsRoot()) {
- Entry prev(trans, GET_BY_ID, prev_id);
- CHECK(prev.good()) << "Bad id when walking predecessors.";
- if (!prev.Get(inclusion_filter))
- break;
- if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
- break;
- commit_ids->push_back(prev_id);
- prev_id = prev.GetPredecessorId();
- }
- return true;
-}
-
-void AddPredecessorsThenItem(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids) {
- size_t initial_size = commit_ids->size();
- if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
- commit_ids))
- return;
- // Reverse what we added to get the correct order.
- std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
-}
-
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
std::set<syncable::Id>* deleted_folders) {
@@ -639,10 +630,10 @@ VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
// Move the old one aside and start over. It's too tricky to get the old one
// back into a state that would pass CheckTreeInvariants().
if (target->Get(IS_DEL)) {
- DCHECK(target->Get(UNIQUE_CLIENT_TAG).empty())
- << "Doing move-aside undeletion on client-tagged item.";
+ if (target->Get(UNIQUE_CLIENT_TAG).empty())
+ LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
target->Put(ID, trans->directory()->NextId());
- target->Put(UNIQUE_CLIENT_TAG, "");
+ target->Put(UNIQUE_CLIENT_TAG, std::string());
target->Put(BASE_VERSION, CHANGES_VERSION);
target->Put(SERVER_VERSION, 0);
return VERIFY_SUCCESS;
diff --git a/sync/engine/syncer_util.h b/sync/engine/syncer_util.h
index 108622cacf..ea947abcf3 100644
--- a/sync/engine/syncer_util.h
+++ b/sync/engine/syncer_util.h
@@ -48,6 +48,21 @@ UpdateAttemptResponse AttemptToUpdateEntry(
syncable::MutableEntry* const entry,
Cryptographer* cryptographer);
+// Returns the most accurate position information available in this update. It
+// prefers to use the unique_position() field, but will fall back to using the
+// int64-based position_in_parent if necessary.
+//
+// The suffix parameter is the unique bookmark tag for the item being updated.
+//
+// Will return an invalid position if no valid position can be constructed, or
+// if this type does not support positioning.
+UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
+ const std::string& suffix);
+
+// Fetch the cache_guid and item_id-based unique bookmark tag from an update.
+// Will return an empty string if someting unexpected happens.
+std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update);
+
// Pass in name to avoid redundant UTF8 conversion.
void UpdateServerFieldsFromUpdate(
syncable::MutableEntry* local_entry,
@@ -58,10 +73,6 @@ void UpdateServerFieldsFromUpdate(
void CreateNewEntry(syncable::WriteTransaction *trans,
const syncable::Id& id);
-void SplitServerInformationIntoNewEntry(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry);
-
// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
@@ -88,32 +99,6 @@ VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
const sync_pb::SyncEntity& update,
syncable::MutableEntry* target);
-// Append |item|, followed by a chain of its predecessors selected by
-// |inclusion_filter|, to the |commit_ids| vector and tag them as included by
-// storing in the set |inserted_items|. |inclusion_filter| (typically one of
-// IS_UNAPPLIED_UPDATE or IS_UNSYNCED) selects which type of predecessors to
-// include. Returns true if |item| was added, and false if it was already in
-// the list.
-//
-// Use AddPredecessorsThenItem instead of this method if you want the
-// item to be the last, rather than first, item appended.
-bool AddItemThenPredecessors(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids);
-
-// Exactly like AddItemThenPredecessors, except items are appended in the
-// reverse (and generally more useful) order: a chain of predecessors from
-// far to near, and finally the item.
-void AddPredecessorsThenItem(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids);
-
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
std::set<syncable::Id>* deleted_folders);
diff --git a/sync/internal_api/DEPS b/sync/internal_api/DEPS
index 394c49c14d..9eb47ef8e4 100644
--- a/sync/internal_api/DEPS
+++ b/sync/internal_api/DEPS
@@ -10,4 +10,5 @@ include_rules = [
"+sync/syncable",
"+sync/test",
"+sync/util",
+ "+third_party/zlib", # For UniquePosition compression
]
diff --git a/sync/internal_api/base_node.cc b/sync/internal_api/base_node.cc
index 4ae318ba9f..1e1513b1b7 100644
--- a/sync/internal_api/base_node.cc
+++ b/sync/internal_api/base_node.cc
@@ -72,7 +72,7 @@ bool BaseNode::DecryptIfNecessary() {
// Passwords have their own legacy encryption structure.
scoped_ptr<sync_pb::PasswordSpecificsData> data(DecryptPasswordSpecifics(
specifics, GetTransaction()->GetCryptographer()));
- if (!data.get()) {
+ if (!data) {
LOG(ERROR) << "Failed to decrypt password specifics.";
return false;
}
@@ -209,20 +209,13 @@ int64 BaseNode::GetSuccessorId() const {
}
int64 BaseNode::GetFirstChildId() const {
- syncable::Directory* dir = GetTransaction()->GetDirectory();
- syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
- syncable::Id id_string;
- // TODO(akalin): Propagate up the error further (see
- // http://crbug.com/100907).
- CHECK(dir->GetFirstChildId(trans,
- GetEntry()->Get(syncable::ID), &id_string));
+ syncable::Id id_string = GetEntry()->GetFirstChildId();
if (id_string.IsRoot())
return kInvalidId;
return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
}
int BaseNode::GetTotalNodeCount() const {
- syncable::Directory* dir = GetTransaction()->GetDirectory();
syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
int count = 1; // Start with one to include the node itself.
@@ -238,13 +231,14 @@ int BaseNode::GetTotalNodeCount() const {
syncable::Entry entry(trans, syncable::GET_BY_HANDLE, handle);
if (!entry.good())
continue;
- syncable::Id id = entry.Get(syncable::ID);
- syncable::Id child_id;
- if (dir->GetFirstChildId(trans, id, &child_id) && !child_id.IsRoot())
- stack.push(IdToMetahandle(trans, child_id));
syncable::Id successor_id = entry.GetSuccessorId();
if (!successor_id.IsRoot())
stack.push(IdToMetahandle(trans, successor_id));
+ if (!entry.Get(syncable::IS_DIR))
+ continue;
+ syncable::Id child_id = entry.GetFirstChildId();
+ if (!child_id.IsRoot())
+ stack.push(IdToMetahandle(trans, child_id));
}
return count;
}
@@ -261,21 +255,23 @@ DictionaryValue* BaseNode::GetSummaryAsValue() const {
DictionaryValue* BaseNode::GetDetailsAsValue() const {
DictionaryValue* node_info = GetSummaryAsValue();
node_info->SetString(
- "modificationTime",
- GetTimeDebugString(GetModificationTime()));
+ "modificationTime", GetTimeDebugString(GetModificationTime()));
node_info->SetString("parentId", base::Int64ToString(GetParentId()));
// Specifics are already in the Entry value, so no need to duplicate
// it here.
- node_info->SetString("externalId",
- base::Int64ToString(GetExternalId()));
- node_info->SetString("predecessorId",
- base::Int64ToString(GetPredecessorId()));
- node_info->SetString("successorId",
- base::Int64ToString(GetSuccessorId()));
- node_info->SetString("firstChildId",
- base::Int64ToString(GetFirstChildId()));
- node_info->Set("entry",
- GetEntry()->ToValue(GetTransaction()->GetCryptographer()));
+ node_info->SetString("externalId", base::Int64ToString(GetExternalId()));
+ if (GetEntry()->ShouldMaintainPosition() &&
+ !GetEntry()->Get(syncable::IS_DEL)) {
+ node_info->SetString("successorId", base::Int64ToString(GetSuccessorId()));
+ node_info->SetString(
+ "predecessorId", base::Int64ToString(GetPredecessorId()));
+ }
+ if (GetEntry()->Get(syncable::IS_DIR)) {
+ node_info->SetString(
+ "firstChildId", base::Int64ToString(GetFirstChildId()));
+ }
+ node_info->Set(
+ "entry", GetEntry()->ToValue(GetTransaction()->GetCryptographer()));
return node_info;
}
@@ -333,6 +329,12 @@ const sync_pb::SessionSpecifics& BaseNode::GetSessionSpecifics() const {
return GetEntitySpecifics().session();
}
+const sync_pb::ManagedUserSettingSpecifics&
+ BaseNode::GetManagedUserSettingSpecifics() const {
+ DCHECK_EQ(GetModelType(), MANAGED_USER_SETTINGS);
+ return GetEntitySpecifics().managed_user_setting();
+}
+
const sync_pb::DeviceInfoSpecifics& BaseNode::GetDeviceInfoSpecifics() const {
DCHECK_EQ(GetModelType(), DEVICE_INFO);
return GetEntitySpecifics().device_info();
diff --git a/sync/internal_api/change_reorder_buffer.cc b/sync/internal_api/change_reorder_buffer.cc
index 5358fa253d..0ddb6b35c3 100644
--- a/sync/internal_api/change_reorder_buffer.cc
+++ b/sync/internal_api/change_reorder_buffer.cc
@@ -154,8 +154,9 @@ bool ChangeReorderBuffer::GetAllChangesInTreeOrder(
CHECK_EQ(BaseNode::INIT_OK, node.InitByIdLookup(i->first));
// We only care about parents of entry's with position-sensitive models.
- if (ShouldMaintainPosition(node.GetEntry()->GetModelType())) {
+ if (node.GetEntry()->ShouldMaintainPosition()) {
parents_of_position_changes.insert(node.GetParentId());
+ traversal.ExpandToInclude(trans, node.GetParentId());
}
}
}
@@ -200,12 +201,7 @@ bool ChangeReorderBuffer::GetAllChangesInTreeOrder(
// There were ordering changes on the children of this parent, so
// enumerate all the children in the sibling order.
syncable::Entry parent(trans, syncable::GET_BY_HANDLE, next);
- syncable::Id id;
- if (!trans->directory()->GetFirstChildId(
- trans, parent.Get(syncable::ID), &id)) {
- *changes = ImmutableChangeRecordList();
- return false;
- }
+ syncable::Id id = parent.GetFirstChildId();
while (!id.IsRoot()) {
syncable::Entry child(trans, syncable::GET_BY_ID, id);
CHECK(child.good());
diff --git a/sync/internal_api/debug_info_event_listener.cc b/sync/internal_api/debug_info_event_listener.cc
index 107b10e0eb..4beef12457 100644
--- a/sync/internal_api/debug_info_event_listener.cc
+++ b/sync/internal_api/debug_info_event_listener.cc
@@ -14,7 +14,7 @@ DebugInfoEventListener::DebugInfoEventListener()
: events_dropped_(false),
cryptographer_has_pending_keys_(false),
cryptographer_ready_(false),
- weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+ weak_ptr_factory_(this) {
}
DebugInfoEventListener::~DebugInfoEventListener() {
@@ -43,26 +43,6 @@ void DebugInfoEventListener::OnSyncCycleCompleted(
sync_completed_event_info->mutable_caller_info()->set_notifications_enabled(
snapshot.notifications_enabled());
- // Log the sources and per-type payloads coalesced into this session.
- const std::vector<sessions::SyncSourceInfo>& snap_sources =
- snapshot.debug_info_sources_list();
- for (std::vector<sessions::SyncSourceInfo>::const_iterator source_iter =
- snap_sources.begin(); source_iter != snap_sources.end(); ++source_iter) {
- sync_pb::SourceInfo* pb_source_info =
- sync_completed_event_info->add_source_info();
-
- pb_source_info->set_source(source_iter->updates_source);
-
- for (ModelTypeInvalidationMap::const_iterator type_iter =
- source_iter->types.begin();
- type_iter != source_iter->types.end(); ++type_iter) {
- sync_pb::TypeHint* pb_type_hint = pb_source_info->add_type_hint();
- pb_type_hint->set_data_type_id(
- GetSpecificsFieldNumberFromModelType(type_iter->first));
- pb_type_hint->set_has_valid_hint(!type_iter->second.payload.empty());
- }
- }
-
AddEventToQueue(event_info);
}
diff --git a/sync/internal_api/http_bridge.cc b/sync/internal_api/http_bridge.cc
index df864bf68e..a063dce0ab 100644
--- a/sync/internal_api/http_bridge.cc
+++ b/sync/internal_api/http_bridge.cc
@@ -39,7 +39,7 @@ HttpBridge::RequestContextGetter::~RequestContextGetter() {}
net::URLRequestContext*
HttpBridge::RequestContextGetter::GetURLRequestContext() {
// Lazily create the context.
- if (!context_.get()) {
+ if (!context_) {
net::URLRequestContext* baseline_context =
baseline_context_getter_->GetURLRequestContext();
context_.reset(
@@ -58,16 +58,20 @@ HttpBridge::RequestContextGetter::GetNetworkTaskRunner() const {
HttpBridgeFactory::HttpBridgeFactory(
net::URLRequestContextGetter* baseline_context_getter,
- const std::string& user_agent)
+ const std::string& user_agent,
+ const NetworkTimeUpdateCallback& network_time_update_callback)
: request_context_getter_(
new HttpBridge::RequestContextGetter(
- baseline_context_getter, user_agent)) {}
+ baseline_context_getter, user_agent)),
+ network_time_update_callback_(network_time_update_callback) {
+}
HttpBridgeFactory::~HttpBridgeFactory() {
}
HttpPostProviderInterface* HttpBridgeFactory::Create() {
- HttpBridge* http = new HttpBridge(request_context_getter_);
+ HttpBridge* http = new HttpBridge(request_context_getter_,
+ network_time_update_callback_);
http->AddRef();
return http;
}
@@ -130,12 +134,15 @@ HttpBridge::URLFetchState::URLFetchState() : url_poster(NULL),
error_code(-1) {}
HttpBridge::URLFetchState::~URLFetchState() {}
-HttpBridge::HttpBridge(HttpBridge::RequestContextGetter* context_getter)
+HttpBridge::HttpBridge(
+ HttpBridge::RequestContextGetter* context_getter,
+ const NetworkTimeUpdateCallback& network_time_update_callback)
: context_getter_for_request_(context_getter),
network_task_runner_(
context_getter_for_request_->GetNetworkTaskRunner()),
created_on_loop_(MessageLoop::current()),
- http_post_completed_(false, false) {
+ http_post_completed_(false, false),
+ network_time_update_callback_(network_time_update_callback) {
}
HttpBridge::~HttpBridge() {
@@ -225,6 +232,7 @@ void HttpBridge::MakeAsynchronousPost() {
fetch_state_.url_poster->SetUploadData(content_type_, request_content_);
fetch_state_.url_poster->SetExtraRequestHeaders(extra_headers_);
fetch_state_.url_poster->SetLoadFlags(net::LOAD_DO_NOT_SEND_COOKIES);
+ fetch_state_.start_time = base::Time::Now();
fetch_state_.url_poster->Start();
}
@@ -285,6 +293,7 @@ void HttpBridge::OnURLFetchComplete(const net::URLFetcher* source) {
if (fetch_state_.aborted)
return;
+ fetch_state_.end_time = base::Time::Now();
fetch_state_.request_completed = true;
fetch_state_.request_succeeded =
(net::URLRequestStatus::SUCCESS == source->GetStatus().status());
@@ -299,6 +308,7 @@ void HttpBridge::OnURLFetchComplete(const net::URLFetcher* source) {
source->GetResponseAsString(&fetch_state_.response_content);
fetch_state_.response_headers = source->GetResponseHeaders();
+ UpdateNetworkTime();
// End of the line for url_poster_. It lives only on the IO loop.
// We defer deletion because we're inside a callback from a component of the
@@ -316,4 +326,22 @@ net::URLRequestContextGetter* HttpBridge::GetRequestContextGetterForTest()
return context_getter_for_request_;
}
+void HttpBridge::UpdateNetworkTime() {
+ std::string sane_time_str;
+ if (!fetch_state_.request_succeeded || fetch_state_.start_time.is_null() ||
+ fetch_state_.end_time < fetch_state_.start_time ||
+ !fetch_state_.response_headers->EnumerateHeader(NULL, "Sane-Time-Millis",
+ &sane_time_str)) {
+ return;
+ }
+
+ int64 sane_time_ms = 0;
+ if (base::StringToInt64(sane_time_str, &sane_time_ms)) {
+ network_time_update_callback_.Run(
+ base::Time::FromJsTime(sane_time_ms),
+ base::TimeDelta::FromMilliseconds(1),
+ fetch_state_.end_time - fetch_state_.start_time);
+ }
+}
+
} // namespace syncer
diff --git a/sync/internal_api/http_bridge_unittest.cc b/sync/internal_api/http_bridge_unittest.cc
index 2692480c73..ad5036725f 100644
--- a/sync/internal_api/http_bridge_unittest.cc
+++ b/sync/internal_api/http_bridge_unittest.cc
@@ -5,7 +5,7 @@
#include "base/message_loop_proxy.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
-#include "net/test/test_server.h"
+#include "net/test/spawned_test_server.h"
#include "net/url_request/test_url_fetcher_factory.h"
#include "net/url_request/url_fetcher_delegate.h"
#include "net/url_request/url_request_test_util.h"
@@ -23,8 +23,8 @@ const base::FilePath::CharType kDocRoot[] =
class SyncHttpBridgeTest : public testing::Test {
public:
SyncHttpBridgeTest()
- : test_server_(net::TestServer::TYPE_HTTP,
- net::TestServer::kLocalhost,
+ : test_server_(net::SpawnedTestServer::TYPE_HTTP,
+ net::SpawnedTestServer::kLocalhost,
base::FilePath(kDocRoot)),
fake_default_request_context_getter_(NULL),
bridge_for_race_test_(NULL),
@@ -55,7 +55,8 @@ class SyncHttpBridgeTest : public testing::Test {
HttpBridge* bridge = new HttpBridge(
new HttpBridge::RequestContextGetter(
fake_default_request_context_getter_,
- "user agent"));
+ "user agent"),
+ NetworkTimeUpdateCallback());
return bridge;
}
@@ -91,7 +92,7 @@ class SyncHttpBridgeTest : public testing::Test {
return fake_default_request_context_getter_;
}
- net::TestServer test_server_;
+ net::SpawnedTestServer test_server_;
base::Thread* io_thread() { return &io_thread_; }
@@ -121,7 +122,8 @@ class ShuntedHttpBridge : public HttpBridge {
SyncHttpBridgeTest* test, bool never_finishes)
: HttpBridge(
new HttpBridge::RequestContextGetter(
- baseline_context_getter, "user agent")),
+ baseline_context_getter, "user agent"),
+ NetworkTimeUpdateCallback()),
test_(test), never_finishes_(never_finishes) { }
protected:
virtual void MakeAsynchronousPost() OVERRIDE {
diff --git a/sync/internal_api/js_mutation_event_observer.cc b/sync/internal_api/js_mutation_event_observer.cc
index 2dd3e518f9..6d856200f7 100644
--- a/sync/internal_api/js_mutation_event_observer.cc
+++ b/sync/internal_api/js_mutation_event_observer.cc
@@ -16,7 +16,7 @@
namespace syncer {
JsMutationEventObserver::JsMutationEventObserver()
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {}
+ : weak_ptr_factory_(this) {}
JsMutationEventObserver::~JsMutationEventObserver() {
DCHECK(CalledOnValidThread());
diff --git a/sync/internal_api/js_sync_manager_observer_unittest.cc b/sync/internal_api/js_sync_manager_observer_unittest.cc
index ce097973b3..0084a4b2b1 100644
--- a/sync/internal_api/js_sync_manager_observer_unittest.cc
+++ b/sync/internal_api/js_sync_manager_observer_unittest.cc
@@ -83,7 +83,6 @@ TEST_F(JsSyncManagerObserverTest, OnSyncCycleCompleted) {
2,
7,
sessions::SyncSourceInfo(),
- std::vector<sessions::SyncSourceInfo>(),
false,
0,
base::Time::Now(),
diff --git a/sync/internal_api/public/base/model_type.h b/sync/internal_api/public/base/model_type.h
index ebf76653f2..268715a9b9 100644
--- a/sync/internal_api/public/base/model_type.h
+++ b/sync/internal_api/public/base/model_type.h
@@ -90,6 +90,11 @@ enum ModelType {
FAVICON_IMAGES,
// Favicon tracking information.
FAVICON_TRACKING,
+ // These preferences are synced before other user types and are never
+ // encrypted.
+ PRIORITY_PREFERENCES,
+ // Managed user settings.
+ MANAGED_USER_SETTINGS,
// ---- Proxy types ----
// Proxy types are excluded from the sync protocol, but are still considered
@@ -113,10 +118,7 @@ enum ModelType {
DEVICE_INFO,
// Flags to enable experimental features.
EXPERIMENTS,
- // These preferences are never encrypted so that they can be applied before
- // the encryption system is fully initialized.
- PRIORITY_PREFERENCES,
- LAST_CONTROL_MODEL_TYPE = PRIORITY_PREFERENCES,
+ LAST_CONTROL_MODEL_TYPE = EXPERIMENTS,
LAST_REAL_MODEL_TYPE = LAST_CONTROL_MODEL_TYPE,
@@ -158,10 +160,6 @@ SYNC_EXPORT_PRIVATE ModelType GetModelType(
SYNC_EXPORT ModelType GetModelTypeFromSpecifics(
const sync_pb::EntitySpecifics& specifics);
-// If this returns false, we shouldn't bother maintaining a position
-// value (sibling ordering) for this item.
-bool ShouldMaintainPosition(ModelType model_type);
-
// Protocol types are those types that have actual protocol buffer
// representations. This distinguishes them from Proxy types, which have no
// protocol representation and are never sent to the server.
@@ -179,6 +177,10 @@ SYNC_EXPORT bool IsUserSelectableType(ModelType model_type);
// This is the subset of UserTypes() that can be encrypted.
SYNC_EXPORT_PRIVATE ModelTypeSet EncryptableUserTypes();
+// This is the subset of UserTypes() that have priority over other types. These
+// types are synced before other user types and are never encrypted.
+SYNC_EXPORT ModelTypeSet PriorityUserTypes();
+
// Proxy types are placeholder types for handling implicitly enabling real
// types. They do not exist at the server, and are simply used for
// UI/Configuration logic.
@@ -276,8 +278,9 @@ bool RealModelTypeToNotificationType(ModelType model_type,
// Converts a notification type to a real model type. Returns true
// iff |notification_type| was the notification type of a real model
// type and |model_type| was filled in.
-bool NotificationTypeToRealModelType(const std::string& notification_type,
- ModelType* model_type);
+SYNC_EXPORT bool NotificationTypeToRealModelType(
+ const std::string& notification_type,
+ ModelType* model_type);
// Returns true if |model_type| is a real datatype
SYNC_EXPORT bool IsRealDataType(ModelType model_type);
diff --git a/sync/internal_api/public/base/model_type_invalidation_map_unittest.cc b/sync/internal_api/public/base/model_type_invalidation_map_unittest.cc
index f05147931c..772ee7eaf5 100644
--- a/sync/internal_api/public/base/model_type_invalidation_map_unittest.cc
+++ b/sync/internal_api/public/base/model_type_invalidation_map_unittest.cc
@@ -35,7 +35,7 @@ TEST_F(ModelTypeInvalidationMapTest, TypeInvalidationMapToValue) {
scoped_ptr<DictionaryValue> value(ModelTypeInvalidationMapToValue(states));
EXPECT_EQ(2u, value->size());
ExpectDictStringValue(states[BOOKMARKS].payload, *value, "Bookmarks");
- ExpectDictStringValue("", *value, "Apps");
+ ExpectDictStringValue(std::string(), *value, "Apps");
EXPECT_FALSE(value->HasKey("Preferences"));
}
diff --git a/sync/internal_api/public/base/ordinal_unittest.cc b/sync/internal_api/public/base/ordinal_unittest.cc
index 20a6d194ed..8c77d6d658 100644
--- a/sync/internal_api/public/base/ordinal_unittest.cc
+++ b/sync/internal_api/public/base/ordinal_unittest.cc
@@ -87,7 +87,7 @@ COMPILE_ASSERT(LargeOrdinal::kRadix == 256,
// IsValid() should return false for all of them.
TEST(Ordinal, Invalid) {
// Length criterion.
- EXPECT_FALSE(TestOrdinal("").IsValid());
+ EXPECT_FALSE(TestOrdinal(std::string()).IsValid());
EXPECT_FALSE(LongOrdinal("0001").IsValid());
const char kBeforeZero[] = { '0' - 1, '\0' };
diff --git a/sync/internal_api/public/base/unique_position.cc b/sync/internal_api/public/base/unique_position.cc
index 839fc82d27..6bce972768 100644
--- a/sync/internal_api/public/base/unique_position.cc
+++ b/sync/internal_api/public/base/unique_position.cc
@@ -5,12 +5,15 @@
#include "sync/internal_api/public/base/unique_position.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "base/string_number_conversions.h"
#include "sync/protocol/unique_position.pb.h"
+#include "third_party/zlib/zlib.h"
namespace syncer {
const size_t UniquePosition::kSuffixLength = 28;
+const size_t UniquePosition::kCompressBytesThreshold = 128;
// static.
bool UniquePosition::IsValidSuffix(const std::string& suffix) {
@@ -40,8 +43,32 @@ UniquePosition UniquePosition::CreateInvalid() {
// static.
UniquePosition UniquePosition::FromProto(const sync_pb::UniquePosition& proto) {
- UniquePosition result(proto.value());
- return result;
+ if (proto.has_value()) {
+ return UniquePosition(proto.value());
+ } else if (proto.has_compressed_value() && proto.has_uncompressed_length()) {
+ uLongf uncompressed_len = proto.uncompressed_length();
+ std::string uncompressed;
+
+ uncompressed.resize(uncompressed_len);
+ int result = uncompress(
+ reinterpret_cast<Bytef*>(string_as_array(&uncompressed)),
+ &uncompressed_len,
+ reinterpret_cast<const Bytef*>(proto.compressed_value().data()),
+ proto.compressed_value().size());
+ if (result != Z_OK) {
+ DLOG(ERROR) << "Unzip failed " << result;
+ return UniquePosition::CreateInvalid();
+ }
+ if (uncompressed_len != proto.uncompressed_length()) {
+ DLOG(ERROR)
+ << "Uncompressed length " << uncompressed_len
+ << " did not match specified length " << proto.uncompressed_length();
+ return UniquePosition::CreateInvalid();
+ }
+ return UniquePosition(uncompressed);
+ } else {
+ return UniquePosition::CreateInvalid();
+ }
}
// static.
@@ -61,7 +88,7 @@ UniquePosition UniquePosition::FromInt64(
UniquePosition UniquePosition::InitialPosition(
const std::string& suffix) {
DCHECK(IsValidSuffix(suffix));
- return UniquePosition("", suffix);
+ return UniquePosition(std::string(), suffix);
}
// static.
@@ -114,7 +141,42 @@ bool UniquePosition::Equals(const UniquePosition& other) const {
}
void UniquePosition::ToProto(sync_pb::UniquePosition* proto) const {
- proto->set_value(bytes_);
+ proto->Clear();
+ if (bytes_.size() < kCompressBytesThreshold) {
+ // If it's small, then just write it. This is the common case.
+ proto->set_value(bytes_);
+ } else {
+ // We've got a large one. Compress it.
+ proto->set_uncompressed_length(bytes_.size());
+ std::string* compressed = proto->mutable_compressed_value();
+
+ uLongf compressed_len = compressBound(bytes_.size());
+ compressed->resize(compressed_len);
+ int result = compress(reinterpret_cast<Bytef*>(string_as_array(compressed)),
+ &compressed_len,
+ reinterpret_cast<const Bytef*>(bytes_.data()),
+ bytes_.size());
+ if (result != Z_OK) {
+ NOTREACHED() << "Failed to compress position: " << result;
+ // Maybe we can write an uncompressed version?
+ proto->Clear();
+ proto->set_value(bytes_);
+ } else if (compressed_len >= bytes_.size()) {
+ // Oops, we made it bigger. Just write the uncompressed version instead.
+ proto->Clear();
+ proto->set_value(bytes_);
+ } else {
+ // Success! Don't forget to adjust the string's length.
+ compressed->resize(compressed_len);
+ }
+ }
+}
+
+void UniquePosition::SerializeToString(std::string* blob) const {
+ DCHECK(blob);
+ sync_pb::UniquePosition proto;
+ ToProto(&proto);
+ proto.SerializeToString(blob);
}
int64 UniquePosition::ToInt64() const {
@@ -140,6 +202,9 @@ bool UniquePosition::IsValid() const {
}
std::string UniquePosition::ToDebugString() const {
+ if (bytes_.empty())
+ return std::string("INVALID[]");
+
std::string debug_string = base::HexEncode(bytes_.data(), bytes_.length());
if (!IsValid()) {
debug_string = "INVALID[" + debug_string + "]";
@@ -165,7 +230,7 @@ std::string UniquePosition::FindSmallerWithSuffix(
if (suffix_zeroes > ref_zeroes) {
// Implies suffix < ref.
- return "";
+ return std::string();
}
if (suffix.substr(suffix_zeroes) < reference.substr(ref_zeroes)) {
@@ -200,7 +265,7 @@ std::string UniquePosition::FindGreaterWithSuffix(
if (suffix_FFs > ref_FFs) {
// Implies suffix > reference.
- return "";
+ return std::string();
}
if (suffix.substr(suffix_FFs) > reference.substr(ref_FFs)) {
@@ -234,7 +299,7 @@ std::string UniquePosition::FindBetweenWithSuffix(
// Sometimes our suffix puts us where we want to be.
if (before < suffix && suffix < after) {
- return "";
+ return std::string();
}
size_t i = 0;
diff --git a/sync/internal_api/public/base/unique_position.h b/sync/internal_api/public/base/unique_position.h
index 3fbd5af5f1..70b15d945c 100644
--- a/sync/internal_api/public/base/unique_position.h
+++ b/sync/internal_api/public/base/unique_position.h
@@ -41,6 +41,7 @@ namespace syncer {
class SYNC_EXPORT_PRIVATE UniquePosition {
public:
static const size_t kSuffixLength;
+ static const size_t kCompressBytesThreshold;
static bool IsValidSuffix(const std::string& suffix);
static bool IsValidBytes(const std::string& bytes);
@@ -49,6 +50,7 @@ class SYNC_EXPORT_PRIVATE UniquePosition {
static UniquePosition CreateInvalid();
// Converts from a 'sync_pb::UniquePosition' protobuf to a UniquePosition.
+ // This may return an invalid position if the parsing fails.
static UniquePosition FromProto(const sync_pb::UniquePosition& proto);
// Creates a position with the given suffix. Ordering among positions created
@@ -78,6 +80,9 @@ class SYNC_EXPORT_PRIVATE UniquePosition {
// Serializes the position's internal state to a protobuf.
void ToProto(sync_pb::UniquePosition* proto) const;
+ // Serializes the protobuf representation of this object as a string.
+ void SerializeToString(std::string* blob) const;
+
// Returns a human-readable representation of this item's internal state.
std::string ToDebugString() const;
diff --git a/sync/internal_api/public/base/unique_position_unittest.cc b/sync/internal_api/public/base/unique_position_unittest.cc
index 0f979c853f..80bf11838e 100644
--- a/sync/internal_api/public/base/unique_position_unittest.cc
+++ b/sync/internal_api/public/base/unique_position_unittest.cc
@@ -65,11 +65,15 @@ const UniquePosition kSmallPosition = FromBytes(
std::string(kSmallPositionLength - 1, '\x00') + '\x01' + '\xFF');
const UniquePosition kSmallPositionPlusOne = FromBytes(
std::string(kSmallPositionLength - 1, '\x00') + '\x02' + '\xFF');
+const UniquePosition kHugePosition = FromBytes(
+ std::string(UniquePosition::kCompressBytesThreshold, '\xFF') + '\xAB');
const std::string kMinSuffix =
std::string(UniquePosition::kSuffixLength - 1, '\x00') + '\x01';
const std::string kMaxSuffix(UniquePosition::kSuffixLength, '\xFF');
-const std::string kNormalSuffix(UniquePosition::kSuffixLength, '\xAB');
+const std::string kNormalSuffix(
+ "\x68\x44\x6C\x6B\x32\x58\x78\x34\x69\x70\x46\x34\x79\x49"
+ "\x44\x4F\x66\x4C\x58\x41\x31\x34\x68\x59\x56\x43\x6F\x3D");
::testing::AssertionResult LessThan(const char* m_expr,
const char* n_expr,
@@ -83,14 +87,60 @@ const std::string kNormalSuffix(UniquePosition::kSuffixLength, '\xAB');
<< " (" << m.ToDebugString() << " and " << n.ToDebugString() << ")";
}
-TEST_F(UniquePositionTest, SerializeAndDeserialize) {
- UniquePosition pos = kGenericPredecessor;
- sync_pb::UniquePosition proto;
+::testing::AssertionResult Equals(const char* m_expr,
+ const char* n_expr,
+ const UniquePosition &m,
+ const UniquePosition &n) {
+ if (m.Equals(n))
+ return ::testing::AssertionSuccess();
+
+ return ::testing::AssertionFailure()
+ << m_expr << " is not equal to " << n_expr
+ << " (" << m.ToDebugString() << " != " << n.ToDebugString() << ")";
+}
+
+// Test encoding and decoding of a small (uncompressed) position.
+TEST_F(UniquePositionTest, SerializeAndDeserializeSmallPosition) {
+ std::string serialized;
+
+ UniquePosition pos1 = kGenericPredecessor;
+ sync_pb::UniquePosition proto1;
+ pos1.ToProto(&proto1);
+
+ // Double-check that this test is testing what we think it tests.
+ EXPECT_TRUE(proto1.has_value());
+ EXPECT_FALSE(proto1.has_compressed_value());
+ EXPECT_FALSE(proto1.has_uncompressed_length());
+
+ proto1.SerializeToString(&serialized);
+
+ sync_pb::UniquePosition proto2;
+ proto2.ParseFromString(serialized);
+ UniquePosition pos2 = UniquePosition::FromProto(proto2);
+
+ EXPECT_PRED_FORMAT2(Equals, pos1, pos2);
+}
+
+// Test encoding and decoding of a large (compressed) position.
+TEST_F(UniquePositionTest, SerializeAndDeserializeLargePosition) {
+ std::string serialized;
+
+ UniquePosition pos1 = kHugePosition;
+ sync_pb::UniquePosition proto1;
+ pos1.ToProto(&proto1);
+
+ // Double-check that this test is testing what we think it tests.
+ EXPECT_FALSE(proto1.has_value());
+ EXPECT_TRUE(proto1.has_compressed_value());
+ EXPECT_TRUE(proto1.has_uncompressed_length());
+
+ proto1.SerializeToString(&serialized);
- pos.ToProto(&proto);
- UniquePosition deserialized = UniquePosition::FromProto(proto);
+ sync_pb::UniquePosition proto2;
+ proto2.ParseFromString(serialized);
+ UniquePosition pos2 = UniquePosition::FromProto(proto2);
- EXPECT_TRUE(pos.Equals(deserialized));
+ EXPECT_PRED_FORMAT2(Equals, pos1, pos2);
}
class RelativePositioningTest : public UniquePositionTest { };
diff --git a/sync/internal_api/public/base_node.h b/sync/internal_api/public/base_node.h
index 360292353a..7cf2edb2af 100644
--- a/sync/internal_api/public/base_node.h
+++ b/sync/internal_api/public/base_node.h
@@ -153,6 +153,11 @@ class SYNC_EXPORT BaseNode {
// data. Can only be called if GetModelType() == SESSIONS.
const sync_pb::SessionSpecifics& GetSessionSpecifics() const;
+ // Getter specific to the MANAGED_USER_SETTINGS datatype. Returns protobuf
+ // data. Can only be called if GetModelType() == MANAGED_USER_SETTINGS.
+ const sync_pb::ManagedUserSettingSpecifics&
+ GetManagedUserSettingSpecifics() const;
+
// Getter specific to the DEVICE_INFO datatype. Returns protobuf
// data. Can only be called if GetModelType() == DEVICE_INFO.
const sync_pb::DeviceInfoSpecifics& GetDeviceInfoSpecifics() const;
diff --git a/sync/internal_api/public/engine/passive_model_worker.h b/sync/internal_api/public/engine/passive_model_worker.h
index 4b0160666d..a6ea011dd7 100644
--- a/sync/internal_api/public/engine/passive_model_worker.h
+++ b/sync/internal_api/public/engine/passive_model_worker.h
@@ -11,7 +11,9 @@
#include "sync/internal_api/public/engine/model_safe_worker.h"
#include "sync/internal_api/public/util/syncer_error.h"
+namespace base {
class MessageLoop;
+}
namespace syncer {
@@ -20,7 +22,7 @@ namespace syncer {
// thread).
class SYNC_EXPORT PassiveModelWorker : public ModelSafeWorker {
public:
- explicit PassiveModelWorker(const MessageLoop* sync_loop);
+ explicit PassiveModelWorker(const base::MessageLoop* sync_loop);
// ModelSafeWorker implementation. Called on the sync thread.
virtual SyncerError DoWorkAndWaitUntilDone(
@@ -30,7 +32,7 @@ class SYNC_EXPORT PassiveModelWorker : public ModelSafeWorker {
private:
virtual ~PassiveModelWorker();
- const MessageLoop* const sync_loop_;
+ const base::MessageLoop* const sync_loop_;
DISALLOW_COPY_AND_ASSIGN(PassiveModelWorker);
};
diff --git a/sync/internal_api/public/http_bridge.h b/sync/internal_api/public/http_bridge.h
index 31863fdcd2..7830190948 100644
--- a/sync/internal_api/public/http_bridge.h
+++ b/sync/internal_api/public/http_bridge.h
@@ -8,12 +8,14 @@
#include <string>
#include "base/basictypes.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "googleurl/src/gurl.h"
+#include "net/base/network_time_notifier.h"
#include "net/url_request/url_fetcher_delegate.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_context_getter.h"
@@ -21,9 +23,12 @@
#include "sync/internal_api/public/http_post_provider_factory.h"
#include "sync/internal_api/public/http_post_provider_interface.h"
-class MessageLoop;
class HttpBridgeTest;
+namespace base {
+class MessageLoop;
+}
+
namespace net {
class HttpResponseHeaders;
class HttpUserAgentSettings;
@@ -32,6 +37,15 @@ class URLFetcher;
namespace syncer {
+// Callback for updating the network time.
+// Params:
+// const base::Time& network_time - the new network time.
+// const base::TimeDelta& resolution - how precise the reading is.
+// const base::TimeDelta& latency - the http request's latency.
+typedef base::Callback<void(const base::Time&,
+ const base::TimeDelta&,
+ const base::TimeDelta&)> NetworkTimeUpdateCallback;
+
// A bridge between the syncer and Chromium HTTP layers.
// Provides a way for the sync backend to use Chromium directly for HTTP
// requests rather than depending on a third party provider (e.g libcurl).
@@ -43,8 +57,6 @@ class SYNC_EXPORT_PRIVATE HttpBridge
public HttpPostProviderInterface,
public net::URLFetcherDelegate {
public:
- friend class SyncHttpBridgeTest;
-
// A request context used for HTTP requests bridged from the sync backend.
// A bridged RequestContext has a dedicated in-memory cookie store and does
// not use a cache. Thus the same type can be used for incognito mode.
@@ -99,7 +111,8 @@ class SYNC_EXPORT_PRIVATE HttpBridge
DISALLOW_COPY_AND_ASSIGN(RequestContextGetter);
};
- explicit HttpBridge(RequestContextGetter* context);
+ HttpBridge(RequestContextGetter* context,
+ const NetworkTimeUpdateCallback& network_time_update_callback);
// HttpPostProvider implementation.
virtual void SetExtraRequestHeaders(const char* headers) OVERRIDE;
@@ -133,6 +146,7 @@ class SYNC_EXPORT_PRIVATE HttpBridge
virtual void MakeAsynchronousPost();
private:
+ friend class SyncHttpBridgeTest;
friend class ::HttpBridgeTest;
// Called on the IO loop to issue the network request. The extra level
@@ -146,6 +160,8 @@ class SYNC_EXPORT_PRIVATE HttpBridge
// fetcher.
void DestroyURLFetcherOnIOThread(net::URLFetcher* fetcher);
+ void UpdateNetworkTime();
+
// Gets a customized net::URLRequestContext for bridged requests. See
// RequestContext definition for details.
const scoped_refptr<RequestContextGetter> context_getter_for_request_;
@@ -157,7 +173,7 @@ class SYNC_EXPORT_PRIVATE HttpBridge
// the network.
// This should be the main syncer thread (SyncerThread) which is what blocks
// on network IO through curl_easy_perform.
- MessageLoop* const created_on_loop_;
+ base::MessageLoop* const created_on_loop_;
// The URL to POST to.
GURL url_for_request_;
@@ -182,6 +198,11 @@ class SYNC_EXPORT_PRIVATE HttpBridge
// deleted on. We must manually delete url_poster_ on the IO loop.
net::URLFetcher* url_poster;
+ // Start and finish time of request. Set immediately before sending
+ // request and after receiving response.
+ base::Time start_time;
+ base::Time end_time;
+
// Used to support 'Abort' functionality.
bool aborted;
@@ -201,6 +222,9 @@ class SYNC_EXPORT_PRIVATE HttpBridge
mutable base::Lock fetch_state_lock_;
URLFetchState fetch_state_;
+ // Callback for updating network time.
+ NetworkTimeUpdateCallback network_time_update_callback_;
+
DISALLOW_COPY_AND_ASSIGN(HttpBridge);
};
@@ -208,7 +232,8 @@ class SYNC_EXPORT HttpBridgeFactory : public HttpPostProviderFactory {
public:
HttpBridgeFactory(
net::URLRequestContextGetter* baseline_context_getter,
- const std::string& user_agent);
+ const std::string& user_agent,
+ const NetworkTimeUpdateCallback& network_time_update_callback);
virtual ~HttpBridgeFactory();
// HttpPostProviderFactory:
@@ -223,6 +248,8 @@ class SYNC_EXPORT HttpBridgeFactory : public HttpPostProviderFactory {
const scoped_refptr<HttpBridge::RequestContextGetter>
request_context_getter_;
+ NetworkTimeUpdateCallback network_time_update_callback_;
+
DISALLOW_COPY_AND_ASSIGN(HttpBridgeFactory);
};
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot.cc b/sync/internal_api/public/sessions/sync_session_snapshot.cc
index b0e9facbcf..8c0c2b9df2 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot.cc
+++ b/sync/internal_api/public/sessions/sync_session_snapshot.cc
@@ -31,7 +31,6 @@ SyncSessionSnapshot::SyncSessionSnapshot(
int num_hierarchy_conflicts,
int num_server_conflicts,
const SyncSourceInfo& source,
- const std::vector<SyncSourceInfo>& debug_info_sources_list,
bool notifications_enabled,
size_t num_entries,
base::Time sync_start_time,
@@ -44,7 +43,6 @@ SyncSessionSnapshot::SyncSessionSnapshot(
num_hierarchy_conflicts_(num_hierarchy_conflicts),
num_server_conflicts_(num_server_conflicts),
source_(source),
- debug_info_sources_list_(debug_info_sources_list),
notifications_enabled_(notifications_enabled),
num_entries_(num_entries),
sync_start_time_(sync_start_time),
@@ -86,13 +84,6 @@ DictionaryValue* SyncSessionSnapshot::ToValue() const {
num_server_conflicts_);
value->SetInteger("numEntries", num_entries_);
value->Set("source", source_.ToValue());
- scoped_ptr<ListValue> sources_list(new ListValue());
- for (std::vector<SyncSourceInfo>::const_iterator i =
- debug_info_sources_list_.begin();
- i != debug_info_sources_list_.end(); ++i) {
- sources_list->Append(i->ToValue());
- }
- value->Set("sourcesList", sources_list.release());
value->SetBoolean("notificationsEnabled", notifications_enabled_);
scoped_ptr<DictionaryValue> counter_entries(new DictionaryValue());
@@ -147,11 +138,6 @@ SyncSourceInfo SyncSessionSnapshot::source() const {
return source_;
}
-const std::vector<SyncSourceInfo>&
-SyncSessionSnapshot::debug_info_sources_list() const {
- return debug_info_sources_list_;
-}
-
bool SyncSessionSnapshot::notifications_enabled() const {
return notifications_enabled_;
}
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot.h b/sync/internal_api/public/sessions/sync_session_snapshot.h
index be29d35c6c..3e1c20e70c 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot.h
+++ b/sync/internal_api/public/sessions/sync_session_snapshot.h
@@ -38,7 +38,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
int num_hierarchy_conflicts,
int num_server_conflicts,
const SyncSourceInfo& source,
- const std::vector<SyncSourceInfo>& debug_info_sources_list,
bool notifications_enabled,
size_t num_entries,
base::Time sync_start_time,
@@ -61,7 +60,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
int num_hierarchy_conflicts() const;
int num_server_conflicts() const;
SyncSourceInfo source() const;
- const std::vector<SyncSourceInfo>& debug_info_sources_list() const;
bool notifications_enabled() const;
size_t num_entries() const;
base::Time sync_start_time() const;
@@ -79,7 +77,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
int num_hierarchy_conflicts_;
int num_server_conflicts_;
SyncSourceInfo source_;
- std::vector<SyncSourceInfo> debug_info_sources_list_;
bool notifications_enabled_;
size_t num_entries_;
base::Time sync_start_time_;
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc b/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
index 9301ffd7fd..1d9f19875d 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
+++ b/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
@@ -47,11 +47,6 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
SyncSourceInfo source;
scoped_ptr<DictionaryValue> expected_source_value(source.ToValue());
- std::vector<SyncSourceInfo> debug_info_sources_list;
- debug_info_sources_list.push_back(source);
- scoped_ptr<ListValue> expected_sources_list_value(new ListValue());
- expected_sources_list_value->Append(source.ToValue());
-
SyncSessionSnapshot snapshot(model_neutral,
download_progress_markers,
kIsSilenced,
@@ -59,14 +54,13 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
kNumHierarchyConflicts,
kNumServerConflicts,
source,
- debug_info_sources_list,
false,
0,
base::Time::Now(),
std::vector<int>(MODEL_TYPE_COUNT,0),
std::vector<int>(MODEL_TYPE_COUNT, 0));
scoped_ptr<DictionaryValue> value(snapshot.ToValue());
- EXPECT_EQ(18u, value->size());
+ EXPECT_EQ(17u, value->size());
ExpectDictIntegerValue(model_neutral.num_successful_commits,
*value, "numSuccessfulCommits");
ExpectDictIntegerValue(model_neutral.num_successful_bookmark_commits,
@@ -93,7 +87,6 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
ExpectDictIntegerValue(kNumServerConflicts, *value,
"numServerConflicts");
ExpectDictDictionaryValue(*expected_source_value, *value, "source");
- ExpectDictListValue(*expected_sources_list_value, *value, "sourcesList");
ExpectDictBooleanValue(false, *value, "notificationsEnabled");
}
diff --git a/sync/internal_api/public/sync_manager.h b/sync/internal_api/public/sync_manager.h
index acd539418f..d754bf01a3 100644
--- a/sync/internal_api/public/sync_manager.h
+++ b/sync/internal_api/public/sync_manager.h
@@ -61,8 +61,12 @@ enum ConnectionStatus {
// Contains everything needed to talk to and identify a user account.
struct SyncCredentials {
+ // The email associated with this account.
std::string email;
+ // The raw authentication token's bytes.
std::string sync_token;
+ // (optional) The time at which the token was fetched/refreshed.
+ base::Time sync_token_time;
};
// SyncManager encapsulates syncable::Directory and serves as the parent of all
@@ -292,6 +296,8 @@ class SYNC_EXPORT SyncManager {
// |user_agent| is a 7-bit ASCII string suitable for use as the User-Agent
// HTTP header. Used internally when collecting stats to classify clients.
// |invalidator| is owned and used to listen for invalidations.
+ // |invalidator_client_id| is used to uniquely identify this client to the
+ // invalidation notification server.
// |restored_key_for_bootstrapping| is the key used to boostrap the
// cryptographer
// |keystore_encryption_enabled| determines whether we enable the keystore
@@ -312,6 +318,7 @@ class SYNC_EXPORT SyncManager {
ChangeDelegate* change_delegate,
const SyncCredentials& credentials,
scoped_ptr<Invalidator> invalidator,
+ const std::string& invalidator_client_id,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping,
scoped_ptr<InternalComponentsFactory> internal_components_factory,
diff --git a/sync/internal_api/public/test/fake_sync_manager.h b/sync/internal_api/public/test/fake_sync_manager.h
index 2d0f9dec39..25b28f66ba 100644
--- a/sync/internal_api/public/test/fake_sync_manager.h
+++ b/sync/internal_api/public/test/fake_sync_manager.h
@@ -57,6 +57,10 @@ class FakeSyncManager : public SyncManager {
// Returns the types that have most recently received a refresh request.
ModelTypeSet GetLastRefreshRequestTypes();
+ // Returns the most recent configuration reason since the last call to
+ // GetAndResetConfigureReason, or since startup if never called.
+ ConfigureReason GetAndResetConfigureReason();
+
// Posts a method to invalidate the given IDs on the sync thread.
void Invalidate(const ObjectIdInvalidationMap& invalidation_map);
@@ -81,6 +85,7 @@ class FakeSyncManager : public SyncManager {
ChangeDelegate* change_delegate,
const SyncCredentials& credentials,
scoped_ptr<Invalidator> invalidator,
+ const std::string& invalidator_client_id,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping,
scoped_ptr<InternalComponentsFactory> internal_components_factory,
@@ -157,6 +162,9 @@ class FakeSyncManager : public SyncManager {
// The types for which a refresh was most recently requested.
ModelTypeSet last_refresh_request_types_;
+ // The most recent configure reason.
+ ConfigureReason last_configure_reason_;
+
scoped_ptr<FakeSyncEncryptionHandler> fake_encryption_handler_;
TestUserShare test_user_share_;
diff --git a/sync/internal_api/public/util/experiments.h b/sync/internal_api/public/util/experiments.h
index 7194ab334e..c4b7dd5f28 100644
--- a/sync/internal_api/public/util/experiments.h
+++ b/sync/internal_api/public/util/experiments.h
@@ -12,8 +12,6 @@ namespace syncer {
const char kKeystoreEncryptionTag[] = "keystore_encryption";
const char kKeystoreEncryptionFlag[] = "sync-keystore-encryption";
const char kAutofillCullingTag[] = "autofill_culling";
-const char kFullHistorySyncTag[] = "history_delete_directives";
-const char kFullHistorySyncFlag[] = "full-history-sync";
const char kFaviconSyncTag[] = "favicon_sync";
const char kFaviconSyncFlag[] = "enable-sync-favicons";
@@ -21,14 +19,14 @@ const char kFaviconSyncFlag[] = "enable-sync-favicons";
struct Experiments {
Experiments() : keystore_encryption(false),
autofill_culling(false),
- full_history_sync(false),
- favicon_sync(false) {}
+ favicon_sync(false),
+ favicon_sync_limit(200) {}
bool Matches(const Experiments& rhs) {
return (keystore_encryption == rhs.keystore_encryption &&
autofill_culling == rhs.autofill_culling &&
- full_history_sync == rhs.full_history_sync &&
- favicon_sync == rhs.favicon_sync);
+ favicon_sync == rhs.favicon_sync &&
+ favicon_sync_limit == rhs.favicon_sync_limit);
}
// Enable keystore encryption logic and the new encryption UI.
@@ -37,11 +35,11 @@ struct Experiments {
// Enable deletion of expired autofill entries (if autofill sync is enabled).
bool autofill_culling;
- // Enable full history sync (and history delete directives) for this client.
- bool full_history_sync;
-
// Enable the favicons sync datatypes (favicon images and favicon tracking).
bool favicon_sync;
+
+ // The number of favicons that a client is permitted to sync.
+ int favicon_sync_limit;
};
} // namespace syncer
diff --git a/sync/internal_api/public/util/weak_handle_unittest.cc b/sync/internal_api/public/util/weak_handle_unittest.cc
index a7f0c1a092..fcfd7bd716 100644
--- a/sync/internal_api/public/util/weak_handle_unittest.cc
+++ b/sync/internal_api/public/util/weak_handle_unittest.cc
@@ -21,7 +21,7 @@ using ::testing::StrictMock;
class Base {
public:
- Base() : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {}
+ Base() : weak_ptr_factory_(this) {}
WeakHandle<Base> AsWeakHandle() {
return MakeWeakHandle(weak_ptr_factory_.GetWeakPtr());
diff --git a/sync/internal_api/public/write_node.h b/sync/internal_api/public/write_node.h
index 07c896b487..3cb4039a20 100644
--- a/sync/internal_api/public/write_node.h
+++ b/sync/internal_api/public/write_node.h
@@ -160,6 +160,11 @@ class SYNC_EXPORT WriteNode : public BaseNode {
// Should only be called if GetModelType() == SESSIONS.
void SetSessionSpecifics(const sync_pb::SessionSpecifics& specifics);
+ // Set the managed user setting specifics (name and value).
+ // Should only be called if GetModelType() == MANAGED_USER_SETTINGS.
+ void SetManagedUserSettingSpecifics(
+ const sync_pb::ManagedUserSettingSpecifics& specifics);
+
// Set the device info specifics.
// Should only be called if GetModelType() == DEVICE_INFO.
void SetDeviceInfoSpecifics(const sync_pb::DeviceInfoSpecifics& specifics);
diff --git a/sync/internal_api/sync_encryption_handler_impl.cc b/sync/internal_api/sync_encryption_handler_impl.cc
index 1a645bfcdb..71bf4d5213 100644
--- a/sync/internal_api/sync_encryption_handler_impl.cc
+++ b/sync/internal_api/sync_encryption_handler_impl.cc
@@ -136,7 +136,7 @@ std::string PackKeystoreBootstrapToken(
const std::string& current_keystore_key,
Encryptor* encryptor) {
if (current_keystore_key.empty())
- return "";
+ return std::string();
base::ListValue keystore_key_values;
for (size_t i = 0; i < old_keystore_keys.size(); ++i)
@@ -177,7 +177,7 @@ bool UnpackKeystoreBootstrapToken(
JSONStringValueSerializer json(&decrypted_keystore_bootstrap);
scoped_ptr<base::Value> deserialized_keystore_keys(
json.Deserialize(NULL, NULL));
- if (!deserialized_keystore_keys.get())
+ if (!deserialized_keystore_keys)
return false;
base::ListValue* internal_list_value = NULL;
if (!deserialized_keystore_keys->GetAsList(&internal_list_value))
@@ -210,7 +210,7 @@ SyncEncryptionHandlerImpl::SyncEncryptionHandlerImpl(
Encryptor* encryptor,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping)
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ : weak_ptr_factory_(this),
user_share_(user_share),
vault_unsafe_(encryptor, SensitiveTypes()),
encrypt_everything_(false),
@@ -1112,7 +1112,7 @@ void SyncEncryptionHandlerImpl::SetCustomPassphrase(
if (passphrase_type_ != KEYSTORE_PASSPHRASE) {
DVLOG(1) << "Failing to set a custom passphrase because one has already "
<< "been set.";
- FinishSetPassphrase(false, "", trans, nigori_node);
+ FinishSetPassphrase(false, std::string(), trans, nigori_node);
return;
}
@@ -1125,7 +1125,7 @@ void SyncEncryptionHandlerImpl::SetCustomPassphrase(
// if statement above. For the sake of safety though, we check for it in
// case a client is misbehaving.
LOG(ERROR) << "Failing to set custom passphrase because of pending keys.";
- FinishSetPassphrase(false, "", trans, nigori_node);
+ FinishSetPassphrase(false, std::string(), trans, nigori_node);
return;
}
diff --git a/sync/internal_api/sync_encryption_handler_impl_unittest.cc b/sync/internal_api/sync_encryption_handler_impl_unittest.cc
index 8abbd7edb0..919a65dd4d 100644
--- a/sync/internal_api/sync_encryption_handler_impl_unittest.cc
+++ b/sync/internal_api/sync_encryption_handler_impl_unittest.cc
@@ -91,7 +91,8 @@ class SyncEncryptionHandlerImplTest : public ::testing::Test {
encryption_handler_.reset(
new SyncEncryptionHandlerImpl(user_share(),
&encryptor_,
- "", "" /* bootstrap tokens */));
+ std::string(),
+ std::string() /* bootstrap tokens */));
encryption_handler_->AddObserver(&observer_);
}
@@ -347,7 +348,8 @@ TEST_F(SyncEncryptionHandlerImplTest, NigoriEncryptionTypes) {
StrictMock<SyncEncryptionHandlerObserverMock> observer2;
SyncEncryptionHandlerImpl handler2(user_share(),
&encryptor_,
- "", "" /* bootstrap tokens */);
+ std::string(),
+ std::string() /* bootstrap tokens */);
handler2.AddObserver(&observer2);
// Just set the sensitive types (shouldn't trigger any notifications).
@@ -611,9 +613,8 @@ TEST_F(SyncEncryptionHandlerImplTest, SetKeystoreMigratesAndUpdatesBootstrap) {
WriteTransaction trans(FROM_HERE, user_share());
EXPECT_FALSE(GetCryptographer()->is_initialized());
EXPECT_TRUE(encryption_handler()->NeedKeystoreKey(trans.GetWrappedTrans()));
- EXPECT_FALSE(
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(""),
- trans.GetWrappedTrans()));
+ EXPECT_FALSE(encryption_handler()->SetKeystoreKeys(
+ BuildEncryptionKeyProto(std::string()), trans.GetWrappedTrans()));
EXPECT_TRUE(encryption_handler()->NeedKeystoreKey(trans.GetWrappedTrans()));
}
Mock::VerifyAndClearExpectations(observer());
@@ -679,7 +680,7 @@ TEST_F(SyncEncryptionHandlerImplTest, SetKeystoreMigratesAndUpdatesBootstrap) {
// token.
SyncEncryptionHandlerImpl handler2(user_share(),
&encryptor_,
- "", // Cryptographer bootstrap.
+ std::string(), // Cryptographer bootstrap.
keystore_bootstrap);
{
diff --git a/sync/internal_api/sync_manager_impl.cc b/sync/internal_api/sync_manager_impl.cc
index bd93a3c2c9..931f2790e5 100644
--- a/sync/internal_api/sync_manager_impl.cc
+++ b/sync/internal_api/sync_manager_impl.cc
@@ -56,6 +56,7 @@ namespace syncer {
using sessions::SyncSessionContext;
using syncable::ImmutableWriteTransactionInfo;
using syncable::SPECIFICS;
+using syncable::UNIQUE_POSITION;
namespace {
@@ -121,6 +122,8 @@ class NudgeStrategy {
return ACCOMPANY_ONLY;
case PREFERENCES:
case SESSIONS:
+ case FAVICON_IMAGES:
+ case FAVICON_TRACKING:
return CUSTOM;
default:
return IMMEDIATE;
@@ -148,6 +151,8 @@ class NudgeStrategy {
kPreferencesNudgeDelayMilliseconds);
break;
case SESSIONS:
+ case FAVICON_IMAGES:
+ case FAVICON_TRACKING:
delay = core->scheduler()->GetSessionsCommitDelay();
break;
default:
@@ -163,7 +168,7 @@ class NudgeStrategy {
SyncManagerImpl::SyncManagerImpl(const std::string& name)
: name_(name),
- weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ weak_ptr_factory_(this),
change_delegate_(NULL),
initialized_(false),
observing_network_connectivity_changes_(false),
@@ -225,12 +230,9 @@ bool SyncManagerImpl::VisiblePositionsDiffer(
const syncable::EntryKernelMutation& mutation) const {
const syncable::EntryKernel& a = mutation.original;
const syncable::EntryKernel& b = mutation.mutated;
- // If the datatype isn't one where the browser model cares about position,
- // don't bother notifying that data model of position-only changes.
- if (!ShouldMaintainPosition(GetModelTypeFromSpecifics(b.ref(SPECIFICS)))) {
+ if (!b.ShouldMaintainPosition())
return false;
- }
- if (a.ref(syncable::NEXT_ID) != b.ref(syncable::NEXT_ID))
+ if (!a.ref(UNIQUE_POSITION).Equals(b.ref(UNIQUE_POSITION)))
return true;
if (a.ref(syncable::PARENT_ID) != b.ref(syncable::PARENT_ID))
return true;
@@ -340,6 +342,7 @@ void SyncManagerImpl::Init(
SyncManager::ChangeDelegate* change_delegate,
const SyncCredentials& credentials,
scoped_ptr<Invalidator> invalidator,
+ const std::string& invalidator_client_id,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping,
scoped_ptr<InternalComponentsFactory> internal_components_factory,
@@ -382,8 +385,9 @@ void SyncManagerImpl::Init(
sync_encryption_handler_->AddObserver(&debug_info_event_listener_);
sync_encryption_handler_->AddObserver(&js_sync_encryption_handler_observer_);
- base::FilePath absolute_db_path(database_path_);
- file_util::AbsolutePath(&absolute_db_path);
+ base::FilePath absolute_db_path = database_path_;
+ DCHECK(absolute_db_path.IsAbsolute());
+
scoped_ptr<syncable::DirectoryBackingStore> backing_store =
internal_components_factory->BuildDirectoryBackingStore(
credentials.email, absolute_db_path).Pass();
@@ -417,16 +421,11 @@ void SyncManagerImpl::Init(
std::string sync_id = directory()->cache_guid();
- // TODO(rlarocque): The invalidator client ID should be independent from the
- // sync client ID. See crbug.com/124142.
- const std::string invalidator_client_id = sync_id;
-
allstatus_.SetSyncId(sync_id);
allstatus_.SetInvalidatorClientId(invalidator_client_id);
DVLOG(1) << "Setting sync client ID: " << sync_id;
DVLOG(1) << "Setting invalidator client ID: " << invalidator_client_id;
- invalidator_->SetUniqueId(invalidator_client_id);
// Build a SyncSessionContext and store the worker in it.
DVLOG(1) << "Sync is bringing up SyncSessionContext.";
@@ -599,11 +598,14 @@ void SyncManagerImpl::UpdateCredentials(const SyncCredentials& credentials) {
DCHECK(!credentials.sync_token.empty());
observing_network_connectivity_changes_ = true;
- if (!connection_manager_->set_auth_token(credentials.sync_token))
+ if (!connection_manager_->SetAuthToken(credentials.sync_token,
+ credentials.sync_token_time))
return; // Auth token is known to be invalid, so exit early.
invalidator_->UpdateCredentials(credentials.email, credentials.sync_token);
scheduler_->OnCredentialsUpdated();
+
+ // TODO(zea): pass the credential age to the debug info event listener.
}
void SyncManagerImpl::UpdateEnabledTypes(ModelTypeSet enabled_types) {
@@ -656,7 +658,7 @@ void SyncManagerImpl::RemoveObserver(SyncManager::Observer* observer) {
void SyncManagerImpl::StopSyncingForShutdown(const base::Closure& callback) {
DVLOG(2) << "StopSyncingForShutdown";
scheduler_->RequestStop(callback);
- if (connection_manager_.get())
+ if (connection_manager_)
connection_manager_->TerminateAllIO();
}
@@ -671,7 +673,7 @@ void SyncManagerImpl::ShutdownOnSyncThread() {
scheduler_.reset();
session_context_.reset();
- if (sync_encryption_handler_.get()) {
+ if (sync_encryption_handler_) {
sync_encryption_handler_->RemoveObserver(&debug_info_event_listener_);
sync_encryption_handler_->RemoveObserver(this);
}
@@ -686,11 +688,11 @@ void SyncManagerImpl::ShutdownOnSyncThread() {
//
// TODO(akalin): Fix this behavior.
- if (invalidator_.get())
+ if (invalidator_)
invalidator_->UnregisterHandler(this);
invalidator_.reset();
- if (connection_manager_.get())
+ if (connection_manager_)
connection_manager_->RemoveListener(this);
connection_manager_.reset();
@@ -869,7 +871,7 @@ void SyncManagerImpl::SetExtraChangeRecordData(int64 id,
// Passwords must use their own legacy ExtraPasswordChangeRecordData.
scoped_ptr<sync_pb::PasswordSpecificsData> data(
DecryptPasswordSpecifics(original_specifics, cryptographer));
- if (!data.get()) {
+ if (!data) {
NOTREACHED();
return;
}
@@ -986,7 +988,7 @@ void SyncManagerImpl::OnSyncEngineEvent(const SyncEngineEvent& event) {
bool is_notifiable_commit =
(event.snapshot.model_neutral_state().num_successful_commits > 0);
if (is_notifiable_commit) {
- if (invalidator_.get()) {
+ if (invalidator_) {
const ObjectIdInvalidationMap& invalidation_map =
ModelTypeInvalidationMapToObjectIdInvalidationMap(
event.snapshot.source().types);
@@ -1291,7 +1293,7 @@ void SyncManagerImpl::OnIncomingInvalidation(
void SyncManagerImpl::RefreshTypes(ModelTypeSet types) {
DCHECK(thread_checker_.CalledOnValidThread());
const ModelTypeInvalidationMap& type_invalidation_map =
- ModelTypeSetToInvalidationMap(types, "");
+ ModelTypeSetToInvalidationMap(types, std::string());
if (type_invalidation_map.empty()) {
LOG(WARNING) << "Sync received refresh request with no types specified.";
} else {
@@ -1367,22 +1369,15 @@ bool SyncManagerImpl::ReceivedExperiment(Experiments* experiments) {
found_experiment = true;
}
- ReadNode full_history_sync_node(&trans);
- if (full_history_sync_node.InitByClientTagLookup(
- syncer::EXPERIMENTS,
- syncer::kFullHistorySyncTag) == BaseNode::INIT_OK &&
- full_history_sync_node.GetExperimentsSpecifics().
- history_delete_directives().enabled()) {
- experiments->full_history_sync = true;
- found_experiment = true;
- }
-
ReadNode favicon_sync_node(&trans);
if (favicon_sync_node.InitByClientTagLookup(
syncer::EXPERIMENTS,
- syncer::kFaviconSyncTag) == BaseNode::INIT_OK &&
- favicon_sync_node.GetExperimentsSpecifics().favicon_sync().enabled()) {
- experiments->favicon_sync = true;
+ syncer::kFaviconSyncTag) == BaseNode::INIT_OK) {
+ experiments->favicon_sync = favicon_sync_node.GetExperimentsSpecifics().
+ favicon_sync().enabled();
+ experiments->favicon_sync_limit =
+ favicon_sync_node.GetExperimentsSpecifics().favicon_sync().
+ favicon_sync_limit();
found_experiment = true;
}
diff --git a/sync/internal_api/sync_manager_impl.h b/sync/internal_api/sync_manager_impl.h
index d58a1f27bb..acc0175772 100644
--- a/sync/internal_api/sync_manager_impl.h
+++ b/sync/internal_api/sync_manager_impl.h
@@ -75,6 +75,7 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
SyncManager::ChangeDelegate* change_delegate,
const SyncCredentials& credentials,
scoped_ptr<Invalidator> invalidator,
+ const std::string& invalidator_client_id,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping,
scoped_ptr<InternalComponentsFactory> internal_components_factory,
@@ -219,11 +220,10 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
typedef std::map<std::string, JsMessageHandler> JsMessageHandlerMap;
// Determine if the parents or predecessors differ between the old and new
- // versions of an entry stored in |a| and |b|. Note that a node's index may
- // change without its NEXT_ID changing if the node at NEXT_ID also moved (but
- // the relative order is unchanged). To handle such cases, we rely on the
- // caller to treat a position update on any sibling as updating the positions
- // of all siblings.
+ // versions of an entry. Note that a node's index may change without its
+ // UNIQUE_POSITION changing if its sibling nodes were changed. To handle such
+ // cases, we rely on the caller to treat a position update on any sibling as
+ // updating the positions of all siblings.
bool VisiblePositionsDiffer(
const syncable::EntryKernelMutation& mutation) const;
diff --git a/sync/internal_api/sync_manager_impl_unittest.cc b/sync/internal_api/sync_manager_impl_unittest.cc
index 37416c0a90..8367725004 100644
--- a/sync/internal_api/sync_manager_impl_unittest.cc
+++ b/sync/internal_api/sync_manager_impl_unittest.cc
@@ -532,6 +532,8 @@ namespace {
void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
bool is_detailed) {
+ size_t expected_field_count = 4;
+
ExpectInt64Value(node.GetId(), value, "id");
{
bool is_folder = false;
@@ -539,28 +541,22 @@ void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
EXPECT_EQ(node.GetIsFolder(), is_folder);
}
ExpectDictStringValue(node.GetTitle(), value, "title");
- {
- ModelType expected_model_type = node.GetModelType();
- std::string type_str;
- EXPECT_TRUE(value.GetString("type", &type_str));
- if (expected_model_type >= FIRST_REAL_MODEL_TYPE) {
- ModelType model_type = ModelTypeFromString(type_str);
- EXPECT_EQ(expected_model_type, model_type);
- } else if (expected_model_type == TOP_LEVEL_FOLDER) {
- EXPECT_EQ("Top-level folder", type_str);
- } else if (expected_model_type == UNSPECIFIED) {
- EXPECT_EQ("Unspecified", type_str);
- } else {
- ADD_FAILURE();
- }
+
+ ModelType expected_model_type = node.GetModelType();
+ std::string type_str;
+ EXPECT_TRUE(value.GetString("type", &type_str));
+ if (expected_model_type >= FIRST_REAL_MODEL_TYPE) {
+ ModelType model_type = ModelTypeFromString(type_str);
+ EXPECT_EQ(expected_model_type, model_type);
+ } else if (expected_model_type == TOP_LEVEL_FOLDER) {
+ EXPECT_EQ("Top-level folder", type_str);
+ } else if (expected_model_type == UNSPECIFIED) {
+ EXPECT_EQ("Unspecified", type_str);
+ } else {
+ ADD_FAILURE();
}
+
if (is_detailed) {
- ExpectInt64Value(node.GetParentId(), value, "parentId");
- ExpectTimeValue(node.GetModificationTime(), value, "modificationTime");
- ExpectInt64Value(node.GetExternalId(), value, "externalId");
- ExpectInt64Value(node.GetPredecessorId(), value, "predecessorId");
- ExpectInt64Value(node.GetSuccessorId(), value, "successorId");
- ExpectInt64Value(node.GetFirstChildId(), value, "firstChildId");
{
scoped_ptr<base::DictionaryValue> expected_entry(
node.GetEntry()->ToValue(NULL));
@@ -568,10 +564,27 @@ void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
EXPECT_TRUE(value.Get("entry", &entry));
EXPECT_TRUE(base::Value::Equals(entry, expected_entry.get()));
}
- EXPECT_EQ(11u, value.size());
- } else {
- EXPECT_EQ(4u, value.size());
+
+ ExpectInt64Value(node.GetParentId(), value, "parentId");
+ ExpectTimeValue(node.GetModificationTime(), value, "modificationTime");
+ ExpectInt64Value(node.GetExternalId(), value, "externalId");
+ expected_field_count += 4;
+
+ if (value.HasKey("predecessorId")) {
+ ExpectInt64Value(node.GetPredecessorId(), value, "predecessorId");
+ expected_field_count++;
+ }
+ if (value.HasKey("successorId")) {
+ ExpectInt64Value(node.GetSuccessorId(), value, "successorId");
+ expected_field_count++;
+ }
+ if (value.HasKey("firstChildId")) {
+ ExpectInt64Value(node.GetFirstChildId(), value, "firstChildId");
+ expected_field_count++;
+ }
}
+
+ EXPECT_EQ(expected_field_count, value.size());
}
} // namespace
@@ -581,7 +594,7 @@ TEST_F(SyncApiTest, BaseNodeGetSummaryAsValue) {
ReadNode node(&trans);
node.InitByRootLookup();
scoped_ptr<base::DictionaryValue> details(node.GetSummaryAsValue());
- if (details.get()) {
+ if (details) {
CheckNodeValue(node, *details, false);
} else {
ADD_FAILURE();
@@ -593,7 +606,7 @@ TEST_F(SyncApiTest, BaseNodeGetDetailsAsValue) {
ReadNode node(&trans);
node.InitByRootLookup();
scoped_ptr<base::DictionaryValue> details(node.GetDetailsAsValue());
- if (details.get()) {
+ if (details) {
CheckNodeValue(node, *details, true);
} else {
ADD_FAILURE();
@@ -710,7 +723,7 @@ class TestHttpPostProviderInterface : public HttpPostProviderInterface {
}
virtual const std::string GetResponseHeaderValue(
const std::string& name) const OVERRIDE {
- return "";
+ return std::string();
}
virtual void Abort() OVERRIDE {}
};
@@ -807,19 +820,25 @@ class SyncManagerTest : public testing::Test,
GetModelSafeRoutingInfo(&routing_info);
// Takes ownership of |fake_invalidator_|.
- sync_manager_.Init(temp_dir_.path(),
- WeakHandle<JsEventHandler>(),
- "bogus", 0, false,
- scoped_ptr<HttpPostProviderFactory>(
- new TestHttpPostProviderFactory()),
- workers, &extensions_activity_monitor_, this,
- credentials,
- scoped_ptr<Invalidator>(fake_invalidator_),
- "", "", // bootstrap tokens
- scoped_ptr<InternalComponentsFactory>(GetFactory()),
- &encryptor_,
- &handler_,
- NULL);
+ sync_manager_.Init(
+ temp_dir_.path(),
+ WeakHandle<JsEventHandler>(),
+ "bogus",
+ 0,
+ false,
+ scoped_ptr<HttpPostProviderFactory>(new TestHttpPostProviderFactory()),
+ workers,
+ &extensions_activity_monitor_,
+ this,
+ credentials,
+ scoped_ptr<Invalidator>(fake_invalidator_),
+ "fake_invalidator_client_id",
+ std::string(),
+ std::string(), // bootstrap tokens
+ scoped_ptr<InternalComponentsFactory>(GetFactory()),
+ &encryptor_,
+ &handler_,
+ NULL);
sync_manager_.GetEncryptionHandler()->AddObserver(&encryption_observer_);
@@ -1167,9 +1186,9 @@ class SyncManagerGetNodesByIdTest : public SyncManagerTest {
base::ListValue args;
base::ListValue* ids = new base::ListValue();
args.Append(ids);
- ids->Append(new base::StringValue(""));
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
+ ids->Append(new base::StringValue(std::string()));
+ SendJsMessage(
+ message_name, JsArgList(&args), reply_handler.AsWeakHandle());
}
{
@@ -1259,9 +1278,9 @@ TEST_F(SyncManagerTest, GetChildNodeIdsFailure) {
{
base::ListValue args;
- args.Append(new base::StringValue(""));
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
+ args.Append(new base::StringValue(std::string()));
+ SendJsMessage(
+ "getChildNodeIds", JsArgList(&args), reply_handler.AsWeakHandle());
}
{
@@ -1543,12 +1562,12 @@ TEST_F(SyncManagerTest, EncryptDataTypesWithData) {
// Next batch_size nodes are a different type and on their own.
for (; i < 2*batch_size; ++i) {
MakeNode(sync_manager_.GetUserShare(), SESSIONS,
- base::StringPrintf("%"PRIuS"", i));
+ base::StringPrintf("%" PRIuS "", i));
}
// Last batch_size nodes are a third type that will not need encryption.
for (; i < 3*batch_size; ++i) {
MakeNode(sync_manager_.GetUserShare(), THEMES,
- base::StringPrintf("%"PRIuS"", i));
+ base::StringPrintf("%" PRIuS "", i));
}
{
@@ -2660,6 +2679,57 @@ TEST_F(SyncManagerTest, SetNonBookmarkTitleWithEncryption) {
}
}
+// Ensure that titles are truncated to 255 bytes, and attempting to reset
+// them to their longer version does not set IS_UNSYNCED.
+TEST_F(SyncManagerTest, SetLongTitle) {
+ const int kNumChars = 512;
+ const std::string kClientTag = "tag";
+ std::string title(kNumChars, '0');
+ sync_pb::EntitySpecifics entity_specifics;
+ entity_specifics.mutable_preference()->set_name("name");
+ entity_specifics.mutable_preference()->set_value("value");
+ MakeServerNode(sync_manager_.GetUserShare(),
+ PREFERENCES,
+ "short_title",
+ syncable::GenerateSyncableHash(PREFERENCES,
+ kClientTag),
+ entity_specifics);
+ // New node shouldn't start off unsynced.
+ EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
+
+ // Manually change to the long title. Should set is_unsynced.
+ {
+ WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
+ WriteNode node(&trans);
+ EXPECT_EQ(BaseNode::INIT_OK,
+ node.InitByClientTagLookup(PREFERENCES, kClientTag));
+ node.SetTitle(UTF8ToWide(title));
+ EXPECT_EQ(node.GetTitle(), title.substr(0, 255));
+ }
+ EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
+
+ // Manually change to the same title. Should not set is_unsynced.
+ {
+ WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
+ WriteNode node(&trans);
+ EXPECT_EQ(BaseNode::INIT_OK,
+ node.InitByClientTagLookup(PREFERENCES, kClientTag));
+ node.SetTitle(UTF8ToWide(title));
+ EXPECT_EQ(node.GetTitle(), title.substr(0, 255));
+ }
+ EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
+
+ // Manually change to new title. Should set is_unsynced.
+ {
+ WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
+ WriteNode node(&trans);
+ EXPECT_EQ(BaseNode::INIT_OK,
+ node.InitByClientTagLookup(PREFERENCES, kClientTag));
+ node.SetTitle(UTF8ToWide("title2"));
+ }
+ EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
+}
+
// Create an encrypted entry when the cryptographer doesn't think the type is
// marked for encryption. Ensure reads/writes don't break and don't unencrypt
// the data.
diff --git a/sync/internal_api/syncapi_server_connection_manager_unittest.cc b/sync/internal_api/syncapi_server_connection_manager_unittest.cc
index f90c5e8a09..a0fe420474 100644
--- a/sync/internal_api/syncapi_server_connection_manager_unittest.cc
+++ b/sync/internal_api/syncapi_server_connection_manager_unittest.cc
@@ -45,7 +45,7 @@ class BlockingHttpPost : public HttpPostProviderInterface {
}
virtual const std::string GetResponseHeaderValue(
const std::string& name) const OVERRIDE {
- return "";
+ return std::string();
}
virtual void Abort() OVERRIDE {
wait_for_abort_.Signal();
diff --git a/sync/internal_api/test/fake_sync_manager.cc b/sync/internal_api/test/fake_sync_manager.cc
index 57c6c776ac..674c5be10e 100644
--- a/sync/internal_api/test/fake_sync_manager.cc
+++ b/sync/internal_api/test/fake_sync_manager.cc
@@ -30,7 +30,8 @@ FakeSyncManager::FakeSyncManager(ModelTypeSet initial_sync_ended_types,
ModelTypeSet configure_fail_types) :
initial_sync_ended_types_(initial_sync_ended_types),
progress_marker_types_(progress_marker_types),
- configure_fail_types_(configure_fail_types) {
+ configure_fail_types_(configure_fail_types),
+ last_configure_reason_(CONFIGURE_REASON_UNKNOWN) {
fake_encryption_handler_.reset(new FakeSyncEncryptionHandler());
}
@@ -54,6 +55,12 @@ ModelTypeSet FakeSyncManager::GetAndResetEnabledTypes() {
return enabled_types;
}
+ConfigureReason FakeSyncManager::GetAndResetConfigureReason() {
+ ConfigureReason reason = last_configure_reason_;
+ last_configure_reason_ = CONFIGURE_REASON_UNKNOWN;
+ return reason;
+}
+
void FakeSyncManager::Invalidate(
const ObjectIdInvalidationMap& invalidation_map) {
if (!sync_task_runner_->PostTask(
@@ -97,6 +104,7 @@ void FakeSyncManager::Init(
ChangeDelegate* change_delegate,
const SyncCredentials& credentials,
scoped_ptr<Invalidator> invalidator,
+ const std::string& invalidator_client_id,
const std::string& restored_key_for_bootstrapping,
const std::string& restored_keystore_key_for_bootstrapping,
scoped_ptr<InternalComponentsFactory> internal_components_factory,
@@ -189,6 +197,7 @@ void FakeSyncManager::ConfigureSyncer(
const ModelSafeRoutingInfo& new_routing_info,
const base::Closure& ready_task,
const base::Closure& retry_task) {
+ last_configure_reason_ = reason;
ModelTypeSet enabled_types = GetRoutingInfoTypes(new_routing_info);
ModelTypeSet disabled_types = Difference(
ModelTypeSet::All(), enabled_types);
diff --git a/sync/internal_api/test/test_entry_factory.cc b/sync/internal_api/test/test_entry_factory.cc
index e53c4582e6..f9d7d49c69 100644
--- a/sync/internal_api/test/test_entry_factory.cc
+++ b/sync/internal_api/test/test_entry_factory.cc
@@ -99,13 +99,6 @@ void TestEntryFactory::CreateUnsyncedItem(
WriteTransaction trans(FROM_HERE, UNITTEST, directory_);
- Id predecessor_id;
- if (model_type == BOOKMARKS) {
- bool lookup_result = directory_->GetLastChildIdForTest(
- &trans, parent_id, &predecessor_id);
- DCHECK(lookup_result);
- }
-
MutableEntry entry(&trans, syncable::CREATE, model_type, parent_id, name);
DCHECK(entry.good());
entry.Put(syncable::ID, item_id);
@@ -119,12 +112,6 @@ void TestEntryFactory::CreateUnsyncedItem(
AddDefaultFieldValue(model_type, &default_specifics);
entry.Put(syncable::SPECIFICS, default_specifics);
- // Bookmarks get inserted at the end of the list.
- if (model_type == BOOKMARKS) {
- bool put_result = entry.PutPredecessor(predecessor_id);
- DCHECK(put_result);
- }
-
if (item_id.ServerKnows()) {
entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
entry.Put(syncable::SERVER_IS_DIR, false);
@@ -177,12 +164,6 @@ int64 TestEntryFactory::CreateSyncedItem(
entry.Put(syncable::IS_DEL, false);
entry.Put(syncable::PARENT_ID, parent_id);
- // TODO(sync): Place bookmarks at the end of the list?
- if (!entry.PutPredecessor(TestIdFactory::root())) {
- NOTREACHED();
- return syncable::kInvalidMetaHandle;
- }
-
entry.Put(syncable::SERVER_VERSION, GetNextRevision());
entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
entry.Put(syncable::SERVER_NON_UNIQUE_NAME, name);
diff --git a/sync/internal_api/test/test_user_share.cc b/sync/internal_api/test/test_user_share.cc
index 9434617704..331807a1cf 100644
--- a/sync/internal_api/test/test_user_share.cc
+++ b/sync/internal_api/test/test_user_share.cc
@@ -19,7 +19,7 @@ namespace syncer {
TestUserShare::TestUserShare() : dir_maker_(new TestDirectorySetterUpper()) {}
TestUserShare::~TestUserShare() {
- if (user_share_.get())
+ if (user_share_)
ADD_FAILURE() << "Should have called TestUserShare::TearDown()";
}
diff --git a/sync/internal_api/write_node.cc b/sync/internal_api/write_node.cc
index 6ab752cdcf..db461d6cdb 100644
--- a/sync/internal_api/write_node.cc
+++ b/sync/internal_api/write_node.cc
@@ -4,6 +4,7 @@
#include "sync/internal_api/public/write_node.h"
+#include "base/string_util.h"
#include "base/utf_string_conversions.h"
#include "base/values.h"
#include "sync/internal_api/public/base_transaction.h"
@@ -57,6 +58,7 @@ void WriteNode::SetTitle(const std::wstring& title) {
new_legal_title = kEncryptedString;
} else {
SyncAPINameToServerName(WideToUTF8(title), &new_legal_title);
+ TruncateUTF8ToByteSize(new_legal_title, 255, &new_legal_title);
}
std::string current_legal_title;
@@ -184,6 +186,13 @@ void WriteNode::SetSessionSpecifics(
SetEntitySpecifics(entity_specifics);
}
+void WriteNode::SetManagedUserSettingSpecifics(
+ const sync_pb::ManagedUserSettingSpecifics& new_value) {
+ sync_pb::EntitySpecifics entity_specifics;
+ entity_specifics.mutable_managed_user_setting()->CopyFrom(new_value);
+ SetEntitySpecifics(entity_specifics);
+}
+
void WriteNode::SetDeviceInfoSpecifics(
const sync_pb::DeviceInfoSpecifics& new_value) {
sync_pb::EntitySpecifics entity_specifics;
diff --git a/sync/notifier/fake_invalidation_state_tracker.cc b/sync/notifier/fake_invalidation_state_tracker.cc
index a228922b8c..6e147fe408 100644
--- a/sync/notifier/fake_invalidation_state_tracker.cc
+++ b/sync/notifier/fake_invalidation_state_tracker.cc
@@ -49,6 +49,7 @@ void FakeInvalidationStateTracker::Forget(const ObjectIdSet& ids) {
void FakeInvalidationStateTracker::SetInvalidatorClientId(
const std::string& client_id) {
+ Clear();
invalidator_client_id_ = client_id;
}
@@ -65,6 +66,12 @@ std::string FakeInvalidationStateTracker::GetBootstrapData() const {
return bootstrap_data_;
}
+void FakeInvalidationStateTracker::Clear() {
+ invalidator_client_id_ = "";
+ state_map_ = InvalidationStateMap();
+ bootstrap_data_ = "";
+}
+
void FakeInvalidationStateTracker::GenerateAckHandles(
const ObjectIdSet& ids,
const scoped_refptr<base::TaskRunner>& task_runner,
diff --git a/sync/notifier/fake_invalidation_state_tracker.h b/sync/notifier/fake_invalidation_state_tracker.h
index 79606ee320..b43699bbd2 100644
--- a/sync/notifier/fake_invalidation_state_tracker.h
+++ b/sync/notifier/fake_invalidation_state_tracker.h
@@ -31,6 +31,7 @@ class FakeInvalidationStateTracker
virtual std::string GetInvalidatorClientId() const OVERRIDE;
virtual void SetBootstrapData(const std::string& data) OVERRIDE;
virtual std::string GetBootstrapData() const OVERRIDE;
+ virtual void Clear() OVERRIDE;
virtual void GenerateAckHandles(
const ObjectIdSet& ids,
const scoped_refptr<base::TaskRunner>& task_runner,
diff --git a/sync/notifier/fake_invalidator.cc b/sync/notifier/fake_invalidator.cc
index c8216af75e..088d239b99 100644
--- a/sync/notifier/fake_invalidator.cc
+++ b/sync/notifier/fake_invalidator.cc
@@ -19,10 +19,6 @@ ObjectIdSet FakeInvalidator::GetRegisteredIds(
return registrar_.GetRegisteredIds(handler);
}
-const std::string& FakeInvalidator::GetUniqueId() const {
- return unique_id_;
-}
-
const std::string& FakeInvalidator::GetCredentialsEmail() const {
return email_;
}
@@ -67,10 +63,6 @@ InvalidatorState FakeInvalidator::GetInvalidatorState() const {
return registrar_.GetInvalidatorState();
}
-void FakeInvalidator::SetUniqueId(const std::string& unique_id) {
- unique_id_ = unique_id;
-}
-
void FakeInvalidator::UpdateCredentials(
const std::string& email, const std::string& token) {
email_ = email;
diff --git a/sync/notifier/fake_invalidator.h b/sync/notifier/fake_invalidator.h
index e20fcc02da..87380d0c97 100644
--- a/sync/notifier/fake_invalidator.h
+++ b/sync/notifier/fake_invalidator.h
@@ -37,7 +37,6 @@ class FakeInvalidator : public Invalidator {
virtual void Acknowledge(const invalidation::ObjectId& id,
const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void SetUniqueId(const std::string& unique_id) OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
virtual void SendInvalidation(
@@ -45,7 +44,6 @@ class FakeInvalidator : public Invalidator {
private:
InvalidatorRegistrar registrar_;
- std::string unique_id_;
std::string state_;
std::string email_;
std::string token_;
diff --git a/sync/notifier/fake_invalidator_unittest.cc b/sync/notifier/fake_invalidator_unittest.cc
index de1bb7189c..d8cae840b2 100644
--- a/sync/notifier/fake_invalidator_unittest.cc
+++ b/sync/notifier/fake_invalidator_unittest.cc
@@ -21,6 +21,7 @@ class FakeInvalidatorTestDelegate {
}
void CreateInvalidator(
+ const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
diff --git a/sync/notifier/invalidation_notifier.cc b/sync/notifier/invalidation_notifier.cc
index 30e5df30f1..695705c8a6 100644
--- a/sync/notifier/invalidation_notifier.cc
+++ b/sync/notifier/invalidation_notifier.cc
@@ -19,6 +19,7 @@ namespace syncer {
InvalidationNotifier::InvalidationNotifier(
scoped_ptr<notifier::PushClient> push_client,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
@@ -27,6 +28,7 @@ InvalidationNotifier::InvalidationNotifier(
initial_invalidation_state_map_(initial_invalidation_state_map),
invalidation_state_tracker_(invalidation_state_tracker),
client_info_(client_info),
+ invalidator_client_id_(invalidator_client_id),
invalidation_bootstrap_data_(invalidation_bootstrap_data),
invalidation_listener_(&tick_clock_, push_client.Pass()) {
}
@@ -63,19 +65,12 @@ InvalidatorState InvalidationNotifier::GetInvalidatorState() const {
return registrar_.GetInvalidatorState();
}
-void InvalidationNotifier::SetUniqueId(const std::string& unique_id) {
- DCHECK(CalledOnValidThread());
- client_id_ = unique_id;
- DVLOG(1) << "Setting unique ID to " << unique_id;
- CHECK(!client_id_.empty());
-}
-
void InvalidationNotifier::UpdateCredentials(
const std::string& email, const std::string& token) {
if (state_ == STOPPED) {
invalidation_listener_.Start(
base::Bind(&invalidation::CreateInvalidationClient),
- client_id_, client_info_, invalidation_bootstrap_data_,
+ invalidator_client_id_, client_info_, invalidation_bootstrap_data_,
initial_invalidation_state_map_,
invalidation_state_tracker_,
this);
diff --git a/sync/notifier/invalidation_notifier.h b/sync/notifier/invalidation_notifier.h
index 602ba73d35..269511f153 100644
--- a/sync/notifier/invalidation_notifier.h
+++ b/sync/notifier/invalidation_notifier.h
@@ -43,6 +43,7 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
// |invalidation_state_tracker| must be initialized.
InvalidationNotifier(
scoped_ptr<notifier::PushClient> push_client,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
@@ -59,7 +60,6 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
virtual void Acknowledge(const invalidation::ObjectId& id,
const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void SetUniqueId(const std::string& unique_id) OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
virtual void SendInvalidation(
@@ -95,7 +95,7 @@ class SYNC_EXPORT_PRIVATE InvalidationNotifier
const std::string client_info_;
// The client ID to pass to |invalidation_listener_|.
- std::string client_id_;
+ const std::string invalidator_client_id_;
// The initial bootstrap data to pass to |invalidation_listener_|.
const std::string invalidation_bootstrap_data_;
diff --git a/sync/notifier/invalidation_notifier_unittest.cc b/sync/notifier/invalidation_notifier_unittest.cc
index b1856bacc8..6bb92762d5 100644
--- a/sync/notifier/invalidation_notifier_unittest.cc
+++ b/sync/notifier/invalidation_notifier_unittest.cc
@@ -33,6 +33,7 @@ class InvalidationNotifierTestDelegate {
}
void CreateInvalidator(
+ const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
@@ -40,6 +41,7 @@ class InvalidationNotifierTestDelegate {
invalidator_.reset(
new InvalidationNotifier(
scoped_ptr<notifier::PushClient>(new notifier::FakePushClient()),
+ invalidator_client_id,
InvalidationStateMap(),
initial_state,
MakeWeakHandle(invalidation_state_tracker),
diff --git a/sync/notifier/invalidation_state_tracker.h b/sync/notifier/invalidation_state_tracker.h
index 58a5bc8a08..e3e5bd21a9 100644
--- a/sync/notifier/invalidation_state_tracker.h
+++ b/sync/notifier/invalidation_state_tracker.h
@@ -75,6 +75,9 @@ class InvalidationStateTracker {
virtual void SetBootstrapData(const std::string& data) = 0;
virtual std::string GetBootstrapData() const = 0;
+ // Erases invalidation versions, client ID, and state stored on disk.
+ virtual void Clear() = 0;
+
// Used for generating our own local ack handles. Generates a new ack handle
// for each object id in |ids|. The result is returned via |callback| posted
// to |task_runner|.
diff --git a/sync/notifier/invalidator.h b/sync/notifier/invalidator.h
index 7854b2817d..a0f881e839 100644
--- a/sync/notifier/invalidator.h
+++ b/sync/notifier/invalidator.h
@@ -78,11 +78,6 @@ class SYNC_EXPORT Invalidator {
// the updated state.
virtual InvalidatorState GetInvalidatorState() const = 0;
- // SetUniqueId must be called once, before any call to
- // UpdateCredentials. |unique_id| should be a non-empty globally
- // unique string.
- virtual void SetUniqueId(const std::string& unique_id) = 0;
-
// The observers won't be notified of any notifications until
// UpdateCredentials is called at least once. It can be called more than
// once.
diff --git a/sync/notifier/invalidator_factory.cc b/sync/notifier/invalidator_factory.cc
index 3f09f5f9d6..6b69924887 100644
--- a/sync/notifier/invalidator_factory.cc
+++ b/sync/notifier/invalidator_factory.cc
@@ -6,7 +6,9 @@
#include <string>
+#include "base/base64.h"
#include "base/logging.h"
+#include "base/rand_util.h"
#include "jingle/notifier/listener/push_client.h"
#include "sync/notifier/invalidator.h"
#include "sync/notifier/non_blocking_invalidator.h"
@@ -17,6 +19,7 @@ namespace {
Invalidator* CreateDefaultInvalidator(
const notifier::NotifierOptions& notifier_options,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
@@ -28,14 +31,24 @@ Invalidator* CreateDefaultInvalidator(
// on this behaviour. See crbug.com/97780.
return new P2PInvalidator(
notifier::PushClient::CreateDefault(notifier_options),
+ invalidator_client_id,
NOTIFY_ALL);
}
return new NonBlockingInvalidator(
- notifier_options, initial_invalidation_state_map,
+ notifier_options, invalidator_client_id, initial_invalidation_state_map,
invalidation_bootstrap_data, invalidation_state_tracker, client_info);
}
+std::string GenerateInvalidatorClientId() {
+ // Generate a GUID with 128 bits worth of base64-encoded randomness.
+ // This format is similar to that of sync's cache_guid.
+ const int kGuidBytes = 128 / 8;
+ std::string guid;
+ base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
+ return guid;
+}
+
} // namespace
// TODO(akalin): Remove the dependency on jingle if OS_ANDROID is defined.
@@ -45,16 +58,28 @@ InvalidatorFactory::InvalidatorFactory(
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker)
: notifier_options_(notifier_options),
- client_info_(client_info),
- initial_invalidation_state_map_(
- invalidation_state_tracker.get() ?
- invalidation_state_tracker->GetAllInvalidationStates() :
- InvalidationStateMap()),
- invalidation_bootstrap_data_(
- invalidation_state_tracker.get() ?
- invalidation_state_tracker->GetBootstrapData() :
- std::string()),
- invalidation_state_tracker_(invalidation_state_tracker) {
+ client_info_(client_info) {
+ if (!invalidation_state_tracker) {
+ return;
+ }
+
+ // TODO(rlarocque): This is not the most obvious place for client ID
+ // generation code. We should try to find a better place for it when we
+ // refactor the invalidator into its own service.
+ if (invalidation_state_tracker->GetInvalidatorClientId().empty()) {
+ // This also clears any existing state. We can't reuse old invalidator
+ // state with the new ID anyway.
+ invalidation_state_tracker->SetInvalidatorClientId(
+ GenerateInvalidatorClientId());
+ }
+
+ initial_invalidation_state_map_ =
+ invalidation_state_tracker->GetAllInvalidationStates();
+ invalidator_client_id_ =
+ invalidation_state_tracker->GetInvalidatorClientId();
+ invalidation_bootstrap_data_ = invalidation_state_tracker->GetBootstrapData();
+ invalidation_state_tracker_ = WeakHandle<InvalidationStateTracker>(
+ invalidation_state_tracker);
}
InvalidatorFactory::~InvalidatorFactory() {
@@ -67,10 +92,16 @@ Invalidator* InvalidatorFactory::CreateInvalidator() {
return NULL;
#else
return CreateDefaultInvalidator(notifier_options_,
+ invalidator_client_id_,
initial_invalidation_state_map_,
invalidation_bootstrap_data_,
invalidation_state_tracker_,
client_info_);
#endif
}
+
+std::string InvalidatorFactory::GetInvalidatorClientId() const {
+ return invalidator_client_id_;
+}
+
} // namespace syncer
diff --git a/sync/notifier/invalidator_factory.h b/sync/notifier/invalidator_factory.h
index f7bd3e0f1c..782850eb49 100644
--- a/sync/notifier/invalidator_factory.h
+++ b/sync/notifier/invalidator_factory.h
@@ -36,13 +36,18 @@ class SYNC_EXPORT InvalidatorFactory {
// factory from which it was created. Can be called on any thread.
Invalidator* CreateInvalidator();
+ // Returns the unique ID that was (or will be) passed to the invalidator.
+ std::string GetInvalidatorClientId() const;
+
private:
const notifier::NotifierOptions notifier_options_;
+
+ // Some of these should be const, but can't be set up in member initializers.
+ InvalidationStateMap initial_invalidation_state_map_;
const std::string client_info_;
- const InvalidationStateMap initial_invalidation_state_map_;
- const std::string invalidation_bootstrap_data_;
- const WeakHandle<InvalidationStateTracker>
- invalidation_state_tracker_;
+ std::string invalidator_client_id_;
+ std::string invalidation_bootstrap_data_;
+ WeakHandle<InvalidationStateTracker> invalidation_state_tracker_;
};
} // namespace syncer
diff --git a/sync/notifier/invalidator_registrar_unittest.cc b/sync/notifier/invalidator_registrar_unittest.cc
index fe4b409244..070c134360 100644
--- a/sync/notifier/invalidator_registrar_unittest.cc
+++ b/sync/notifier/invalidator_registrar_unittest.cc
@@ -52,10 +52,6 @@ class RegistrarInvalidator : public Invalidator {
return registrar_.GetInvalidatorState();
}
- virtual void SetUniqueId(const std::string& unique_id) OVERRIDE {
- // Do nothing.
- }
-
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE {
// Do nothing.
@@ -81,6 +77,7 @@ class RegistrarInvalidatorTestDelegate {
}
void CreateInvalidator(
+ const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
diff --git a/sync/notifier/invalidator_test_template.h b/sync/notifier/invalidator_test_template.h
index 72d8c634a1..0353000422 100644
--- a/sync/notifier/invalidator_test_template.h
+++ b/sync/notifier/invalidator_test_template.h
@@ -101,11 +101,11 @@ class InvalidatorTest : public testing::Test {
}
Invalidator* CreateAndInitializeInvalidator() {
- this->delegate_.CreateInvalidator("fake_initial_state",
+ this->delegate_.CreateInvalidator("fake_invalidator_client_id",
+ "fake_initial_state",
this->fake_tracker_.AsWeakPtr());
Invalidator* const invalidator = this->delegate_.GetInvalidator();
- invalidator->SetUniqueId("fake_id");
this->delegate_.WaitForInvalidator();
invalidator->UpdateCredentials("foo@bar.com", "fake_token");
this->delegate_.WaitForInvalidator();
diff --git a/sync/notifier/non_blocking_invalidator.cc b/sync/notifier/non_blocking_invalidator.cc
index 32ffd9e9fa..a67b1962ff 100644
--- a/sync/notifier/non_blocking_invalidator.cc
+++ b/sync/notifier/non_blocking_invalidator.cc
@@ -30,6 +30,7 @@ class NonBlockingInvalidator::Core
// Helpers called on I/O thread.
void Initialize(
const notifier::NotifierOptions& notifier_options,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
@@ -38,7 +39,6 @@ class NonBlockingInvalidator::Core
void UpdateRegisteredIds(const ObjectIdSet& ids);
void Acknowledge(const invalidation::ObjectId& id,
const AckHandle& ack_handle);
- void SetUniqueId(const std::string& unique_id);
void UpdateCredentials(const std::string& email, const std::string& token);
// InvalidationHandler implementation (all called on I/O thread by
@@ -72,6 +72,7 @@ NonBlockingInvalidator::Core::~Core() {
void NonBlockingInvalidator::Core::Initialize(
const notifier::NotifierOptions& notifier_options,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
@@ -85,6 +86,7 @@ void NonBlockingInvalidator::Core::Initialize(
invalidation_notifier_.reset(
new InvalidationNotifier(
notifier::PushClient::CreateDefaultOnIOThread(notifier_options),
+ invalidator_client_id,
initial_invalidation_state_map,
invalidation_bootstrap_data,
invalidation_state_tracker,
@@ -110,11 +112,6 @@ void NonBlockingInvalidator::Core::Acknowledge(const invalidation::ObjectId& id,
invalidation_notifier_->Acknowledge(id, ack_handle);
}
-void NonBlockingInvalidator::Core::SetUniqueId(const std::string& unique_id) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_->SetUniqueId(unique_id);
-}
-
void NonBlockingInvalidator::Core::UpdateCredentials(const std::string& email,
const std::string& token) {
DCHECK(network_task_runner_->BelongsToCurrentThread());
@@ -138,12 +135,13 @@ void NonBlockingInvalidator::Core::OnIncomingInvalidation(
NonBlockingInvalidator::NonBlockingInvalidator(
const notifier::NotifierOptions& notifier_options,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
invalidation_state_tracker,
const std::string& client_info)
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ : weak_ptr_factory_(this),
core_(
new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()))),
parent_task_runner_(
@@ -156,6 +154,7 @@ NonBlockingInvalidator::NonBlockingInvalidator(
&NonBlockingInvalidator::Core::Initialize,
core_.get(),
notifier_options,
+ invalidator_client_id,
initial_invalidation_state_map,
invalidation_bootstrap_data,
invalidation_state_tracker,
@@ -217,16 +216,6 @@ InvalidatorState NonBlockingInvalidator::GetInvalidatorState() const {
return registrar_.GetInvalidatorState();
}
-void NonBlockingInvalidator::SetUniqueId(const std::string& unique_id) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&NonBlockingInvalidator::Core::SetUniqueId,
- core_.get(), unique_id))) {
- NOTREACHED();
- }
-}
-
void NonBlockingInvalidator::UpdateCredentials(const std::string& email,
const std::string& token) {
DCHECK(parent_task_runner_->BelongsToCurrentThread());
diff --git a/sync/notifier/non_blocking_invalidator.h b/sync/notifier/non_blocking_invalidator.h
index 0da2e4c140..9c366fe950 100644
--- a/sync/notifier/non_blocking_invalidator.h
+++ b/sync/notifier/non_blocking_invalidator.h
@@ -38,6 +38,7 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
// |invalidation_state_tracker| must be initialized.
NonBlockingInvalidator(
const notifier::NotifierOptions& notifier_options,
+ const std::string& invalidator_client_id,
const InvalidationStateMap& initial_invalidation_state_map,
const std::string& invalidation_bootstrap_data,
const WeakHandle<InvalidationStateTracker>&
@@ -54,7 +55,6 @@ class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
virtual void Acknowledge(const invalidation::ObjectId& id,
const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void SetUniqueId(const std::string& unique_id) OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
virtual void SendInvalidation(
diff --git a/sync/notifier/non_blocking_invalidator_unittest.cc b/sync/notifier/non_blocking_invalidator_unittest.cc
index 0a38fb31cd..a143aca06e 100644
--- a/sync/notifier/non_blocking_invalidator_unittest.cc
+++ b/sync/notifier/non_blocking_invalidator_unittest.cc
@@ -33,6 +33,7 @@ class NonBlockingInvalidatorTestDelegate {
}
void CreateInvalidator(
+ const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
@@ -47,6 +48,7 @@ class NonBlockingInvalidatorTestDelegate {
invalidator_.reset(
new NonBlockingInvalidator(
invalidator_options,
+ invalidator_client_id,
InvalidationStateMap(),
initial_state,
MakeWeakHandle(invalidation_state_tracker),
diff --git a/sync/notifier/p2p_invalidator.cc b/sync/notifier/p2p_invalidator.cc
index e71d1e3c08..78257575d8 100644
--- a/sync/notifier/p2p_invalidator.cc
+++ b/sync/notifier/p2p_invalidator.cc
@@ -40,7 +40,7 @@ std::string P2PNotificationTargetToString(P2PNotificationTarget target) {
return kNotifyAll;
default:
NOTREACHED();
- return "";
+ return std::string();
}
}
@@ -137,8 +137,10 @@ bool P2PNotificationData::ResetFromString(const std::string& str) {
}
P2PInvalidator::P2PInvalidator(scoped_ptr<notifier::PushClient> push_client,
+ const std::string& invalidator_client_id,
P2PNotificationTarget send_notification_target)
: push_client_(push_client.Pass()),
+ invalidator_client_id_(invalidator_client_id),
logged_in_(false),
notifications_enabled_(false),
send_notification_target_(send_notification_target) {
@@ -169,7 +171,9 @@ void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
ObjectIdLessThan());
registrar_.UpdateRegisteredIds(handler, ids);
const P2PNotificationData notification_data(
- unique_id_, NOTIFY_SELF, ObjectIdSetToInvalidationMap(new_ids, ""));
+ invalidator_client_id_,
+ NOTIFY_SELF,
+ ObjectIdSetToInvalidationMap(new_ids, std::string()));
SendNotificationData(notification_data);
}
@@ -189,11 +193,6 @@ InvalidatorState P2PInvalidator::GetInvalidatorState() const {
return registrar_.GetInvalidatorState();
}
-void P2PInvalidator::SetUniqueId(const std::string& unique_id) {
- DCHECK(thread_checker_.CalledOnValidThread());
- unique_id_ = unique_id;
-}
-
void P2PInvalidator::UpdateCredentials(
const std::string& email, const std::string& token) {
DCHECK(thread_checker_.CalledOnValidThread());
@@ -215,7 +214,7 @@ void P2PInvalidator::SendInvalidation(
const ObjectIdInvalidationMap& invalidation_map) {
DCHECK(thread_checker_.CalledOnValidThread());
const P2PNotificationData notification_data(
- unique_id_, send_notification_target_, invalidation_map);
+ invalidator_client_id_, send_notification_target_, invalidation_map);
SendNotificationData(notification_data);
}
@@ -226,8 +225,10 @@ void P2PInvalidator::OnNotificationsEnabled() {
registrar_.UpdateInvalidatorState(INVALIDATIONS_ENABLED);
if (just_turned_on) {
const P2PNotificationData notification_data(
- unique_id_, NOTIFY_SELF,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(), ""));
+ invalidator_client_id_,
+ NOTIFY_SELF,
+ ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
+ std::string()));
SendNotificationData(notification_data);
}
}
@@ -258,12 +259,13 @@ void P2PInvalidator::OnIncomingNotification(
if (!notification_data.ResetFromString(notification.data)) {
LOG(WARNING) << "Could not parse notification data from "
<< notification.data;
- notification_data =
- P2PNotificationData(
- unique_id_, NOTIFY_ALL,
- ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(), ""));
+ notification_data = P2PNotificationData(
+ invalidator_client_id_,
+ NOTIFY_ALL,
+ ObjectIdSetToInvalidationMap(registrar_.GetAllRegisteredIds(),
+ std::string()));
}
- if (!notification_data.IsTargeted(unique_id_)) {
+ if (!notification_data.IsTargeted(invalidator_client_id_)) {
DVLOG(1) << "Not a target of the notification -- "
<< "not emitting notification";
return;
diff --git a/sync/notifier/p2p_invalidator.h b/sync/notifier/p2p_invalidator.h
index 952299f6f8..a56521e09c 100644
--- a/sync/notifier/p2p_invalidator.h
+++ b/sync/notifier/p2p_invalidator.h
@@ -93,6 +93,7 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
// to send notifications to all clients except for the one that triggered the
// notification. See crbug.com/97780.
P2PInvalidator(scoped_ptr<notifier::PushClient> push_client,
+ const std::string& invalidator_client_id,
P2PNotificationTarget send_notification_target);
virtual ~P2PInvalidator();
@@ -105,7 +106,6 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
virtual void Acknowledge(const invalidation::ObjectId& id,
const AckHandle& ack_handle) OVERRIDE;
virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void SetUniqueId(const std::string& unique_id) OVERRIDE;
virtual void UpdateCredentials(
const std::string& email, const std::string& token) OVERRIDE;
virtual void SendInvalidation(
@@ -131,7 +131,7 @@ class SYNC_EXPORT_PRIVATE P2PInvalidator
// The push client.
scoped_ptr<notifier::PushClient> push_client_;
// Our unique ID.
- std::string unique_id_;
+ std::string invalidator_client_id_;
// Whether we have called UpdateCredentials() yet.
bool logged_in_;
bool notifications_enabled_;
diff --git a/sync/notifier/p2p_invalidator_unittest.cc b/sync/notifier/p2p_invalidator_unittest.cc
index 31a180e007..6acc3d1703 100644
--- a/sync/notifier/p2p_invalidator_unittest.cc
+++ b/sync/notifier/p2p_invalidator_unittest.cc
@@ -27,6 +27,7 @@ class P2PInvalidatorTestDelegate {
}
void CreateInvalidator(
+ const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
@@ -36,6 +37,7 @@ class P2PInvalidatorTestDelegate {
invalidator_.reset(
new P2PInvalidator(
scoped_ptr<notifier::PushClient>(fake_push_client_),
+ invalidator_client_id,
NOTIFY_OTHERS));
}
@@ -67,7 +69,7 @@ class P2PInvalidatorTestDelegate {
void TriggerOnIncomingInvalidation(
const ObjectIdInvalidationMap& invalidation_map) {
const P2PNotificationData notification_data(
- "", NOTIFY_ALL, invalidation_map);
+ std::string(), NOTIFY_ALL, invalidation_map);
notifier::Notification notification;
notification.channel = kSyncP2PNotificationChannel;
notification.data = notification_data.ToString();
@@ -84,7 +86,8 @@ class P2PInvalidatorTest : public testing::Test {
protected:
P2PInvalidatorTest()
: next_sent_notification_to_reflect_(0) {
- delegate_.CreateInvalidator("fake_state",
+ delegate_.CreateInvalidator("sender",
+ "fake_state",
base::WeakPtr<InvalidationStateTracker>());
delegate_.GetInvalidator()->RegisterHandler(&fake_handler_);
}
@@ -157,7 +160,7 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataIsTargeted) {
// default-constructed P2PNotificationData.
TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
const P2PNotificationData notification_data;
- EXPECT_TRUE(notification_data.IsTargeted(""));
+ EXPECT_TRUE(notification_data.IsTargeted(std::string()));
EXPECT_FALSE(notification_data.IsTargeted("other1"));
EXPECT_FALSE(notification_data.IsTargeted("other2"));
EXPECT_TRUE(notification_data.GetIdInvalidationMap().empty());
@@ -176,7 +179,8 @@ TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
TEST_F(P2PInvalidatorTest, P2PNotificationDataNonDefault) {
const ObjectIdInvalidationMap& invalidation_map =
ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)), "");
+ ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)),
+ std::string());
const P2PNotificationData notification_data(
"sender", NOTIFY_ALL, invalidation_map);
EXPECT_TRUE(notification_data.IsTargeted("sender"));
@@ -214,8 +218,6 @@ TEST_F(P2PInvalidatorTest, NotificationsBasic) {
invalidator->UpdateRegisteredIds(&fake_handler_,
ModelTypeSetToObjectIdSet(enabled_types));
- invalidator->SetUniqueId("sender");
-
const char kEmail[] = "foo@bar.com";
const char kToken[] = "token";
invalidator->UpdateCredentials(kEmail, kToken);
@@ -246,7 +248,8 @@ TEST_F(P2PInvalidatorTest, NotificationsBasic) {
{
const ObjectIdInvalidationMap& invalidation_map =
ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)), "");
+ ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)),
+ std::string());
invalidator->SendInvalidation(invalidation_map);
}
@@ -263,8 +266,8 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
const ModelTypeSet expected_types(THEMES);
const ObjectIdInvalidationMap& invalidation_map =
- ObjectIdSetToInvalidationMap(
- ModelTypeSetToObjectIdSet(changed_types), "");
+ ObjectIdSetToInvalidationMap(ModelTypeSetToObjectIdSet(changed_types),
+ std::string());
P2PInvalidator* const invalidator = delegate_.GetInvalidator();
notifier::FakePushClient* const push_client = delegate_.GetPushClient();
@@ -272,7 +275,6 @@ TEST_F(P2PInvalidatorTest, SendNotificationData) {
invalidator->UpdateRegisteredIds(&fake_handler_,
ModelTypeSetToObjectIdSet(enabled_types));
- invalidator->SetUniqueId("sender");
invalidator->UpdateCredentials("foo@bar.com", "fake_token");
ReflectSentNotifications();
diff --git a/sync/notifier/push_client_channel.cc b/sync/notifier/push_client_channel.cc
index 4e8f78e886..b5f31c14a1 100644
--- a/sync/notifier/push_client_channel.cc
+++ b/sync/notifier/push_client_channel.cc
@@ -81,7 +81,7 @@ void PushClientChannel::OnNotificationsDisabled(
void PushClientChannel::OnIncomingNotification(
const notifier::Notification& notification) {
- if (!incoming_receiver_.get()) {
+ if (!incoming_receiver_) {
DLOG(ERROR) << "No receiver for incoming notification";
return;
}
diff --git a/sync/notifier/push_client_channel_unittest.cc b/sync/notifier/push_client_channel_unittest.cc
index ffe90e7943..d017e1b9fd 100644
--- a/sync/notifier/push_client_channel_unittest.cc
+++ b/sync/notifier/push_client_channel_unittest.cc
@@ -73,7 +73,7 @@ TEST_F(PushClientChannelTest, EncodeDecode) {
TEST_F(PushClientChannelTest, EncodeDecodeNoContext) {
const notifier::Notification& notification =
PushClientChannel::EncodeMessageForTest(
- kMessage, "", kSchedulingHash);
+ kMessage, std::string(), kSchedulingHash);
std::string message;
std::string service_context = kServiceContext;
int64 scheduling_hash = kSchedulingHash + 1;
diff --git a/sync/notifier/sync_invalidation_listener.cc b/sync/notifier/sync_invalidation_listener.cc
index d4af58932a..ddb9e35184 100644
--- a/sync/notifier/sync_invalidation_listener.cc
+++ b/sync/notifier/sync_invalidation_listener.cc
@@ -31,11 +31,10 @@ SyncInvalidationListener::Delegate::~Delegate() {}
SyncInvalidationListener::SyncInvalidationListener(
base::TickClock* tick_clock,
scoped_ptr<notifier::PushClient> push_client)
- : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
- ack_tracker_(tick_clock, ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ : weak_ptr_factory_(this),
+ ack_tracker_(tick_clock, this),
push_client_(push_client.get()),
- sync_system_resources_(push_client.Pass(),
- ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ sync_system_resources_(push_client.Pass(), this),
delegate_(NULL),
ticl_state_(DEFAULT_INVALIDATION_ERROR),
push_client_state_(DEFAULT_INVALIDATION_ERROR) {
@@ -103,16 +102,6 @@ void SyncInvalidationListener::Start(
registration_manager_.reset(
new RegistrationManager(invalidation_client_.get()));
- // TODO(rlarocque): This call exists as part of an effort to move the
- // invalidator's ID out of sync. It writes the provided (sync-managed) ID to
- // storage that lives on the UI thread. Once this has been in place for a
- // milestone or two, we can remove it and start looking for invalidator client
- // IDs exclusively in the InvalidationStateTracker. See crbug.com/124142.
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetInvalidatorClientId,
- client_id);
-
// Set up reminders for any invalidations that have not been locally
// acknowledged.
ObjectIdSet unacknowledged_ids;
@@ -139,7 +128,7 @@ void SyncInvalidationListener::UpdateRegisteredIds(const ObjectIdSet& ids) {
// |ticl_state_| can go to INVALIDATIONS_ENABLED even without a
// working XMPP connection (as observed by us), so check it instead
// of GetState() (see http://crbug.com/139424).
- if (ticl_state_ == INVALIDATIONS_ENABLED && registration_manager_.get()) {
+ if (ticl_state_ == INVALIDATIONS_ENABLED && registration_manager_) {
DoRegistrationUpdate();
}
}
@@ -404,7 +393,7 @@ AckTracker* SyncInvalidationListener::GetAckTrackerForTest() {
void SyncInvalidationListener::Stop() {
DCHECK(CalledOnValidThread());
- if (!invalidation_client_.get()) {
+ if (!invalidation_client_) {
return;
}
diff --git a/sync/notifier/sync_invalidation_listener_unittest.cc b/sync/notifier/sync_invalidation_listener_unittest.cc
index 44c172532b..dba4728620 100644
--- a/sync/notifier/sync_invalidation_listener_unittest.cc
+++ b/sync/notifier/sync_invalidation_listener_unittest.cc
@@ -148,7 +148,7 @@ class FakeDelegate : public SyncInvalidationListener::Delegate {
std::string GetPayload(const ObjectId& id) const {
ObjectIdInvalidationMap::const_iterator it = invalidations_.find(id);
- return (it == invalidations_.end()) ? "" : it->second.payload;
+ return (it == invalidations_.end()) ? std::string() : it->second.payload;
}
InvalidatorState GetInvalidatorState() const {
@@ -277,11 +277,6 @@ class SyncInvalidationListenerTest : public testing::Test {
MakeWeakHandle(fake_tracker_.AsWeakPtr()),
&fake_delegate_);
DCHECK(fake_invalidation_client_);
-
- // TODO(rlarocque): This is necessary for the deferred write of the client
- // ID to take place. We can remove this statement when we remove the
- // WriteInvalidatorClientId test. See crbug.com/124142.
- message_loop_.RunUntilIdle();
}
void StopClient() {
@@ -424,14 +419,6 @@ class SyncInvalidationListenerTest : public testing::Test {
FakeDelegate fake_delegate_;
};
-// Verify the client ID is written to the state tracker on client start.
-// TODO(rlarocque): Remove this test when migration code is removed.
-// See crbug.com/124142.
-TEST_F(SyncInvalidationListenerTest, WriteInvalidatorClientId) {
- // The client is started by the harness, so we don't have to do anything here.
- EXPECT_EQ(kClientId, GetInvalidatorClientId());
-}
-
// Write a new state to the client. It should propagate to the
// tracker.
TEST_F(SyncInvalidationListenerTest, WriteState) {
diff --git a/sync/notifier/sync_system_resources.cc b/sync/notifier/sync_system_resources.cc
index 53286579af..386bffe660 100644
--- a/sync/notifier/sync_system_resources.cc
+++ b/sync/notifier/sync_system_resources.cc
@@ -61,7 +61,7 @@ void SyncLogger::SetSystemResources(invalidation::SystemResources* resources) {
}
SyncInvalidationScheduler::SyncInvalidationScheduler()
- : ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
+ : weak_factory_(this),
created_on_loop_(MessageLoop::current()),
is_started_(false),
is_stopped_(false) {
@@ -181,14 +181,15 @@ void SyncStorage::SetSystemResources(
void SyncStorage::RunAndDeleteWriteKeyCallback(
invalidation::WriteKeyCallback* callback) {
- callback->Run(invalidation::Status(invalidation::Status::SUCCESS, ""));
+ callback->Run(
+ invalidation::Status(invalidation::Status::SUCCESS, std::string()));
delete callback;
}
void SyncStorage::RunAndDeleteReadKeyCallback(
invalidation::ReadKeyCallback* callback, const std::string& value) {
callback->Run(std::make_pair(
- invalidation::Status(invalidation::Status::SUCCESS, ""),
+ invalidation::Status(invalidation::Status::SUCCESS, std::string()),
value));
delete callback;
}
diff --git a/sync/notifier/sync_system_resources_unittest.cc b/sync/notifier/sync_system_resources_unittest.cc
index 77423b3fb8..cb406da304 100644
--- a/sync/notifier/sync_system_resources_unittest.cc
+++ b/sync/notifier/sync_system_resources_unittest.cc
@@ -168,9 +168,10 @@ TEST_F(SyncSystemResourcesTest, WriteState) {
EXPECT_CALL(mock_storage_callback, Run(_))
.WillOnce(SaveArg<0>(&results));
sync_system_resources_.storage()->WriteKey(
- "", "state", mock_storage_callback.CreateCallback());
+ std::string(), "state", mock_storage_callback.CreateCallback());
message_loop_.RunUntilIdle();
- EXPECT_EQ(invalidation::Status(invalidation::Status::SUCCESS, ""), results);
+ EXPECT_EQ(invalidation::Status(invalidation::Status::SUCCESS, std::string()),
+ results);
}
} // namespace
diff --git a/sync/protocol/experiments_specifics.proto b/sync/protocol/experiments_specifics.proto
index 2779837e67..f834cb91c0 100644
--- a/sync/protocol/experiments_specifics.proto
+++ b/sync/protocol/experiments_specifics.proto
@@ -27,9 +27,11 @@ message AutofillCullingFlags {
optional bool enabled = 1;
}
-// Whether the favicon sync datatypes are enabled.
+// Whether the favicon sync datatypes are enabled, and what parameters
+// they should operate under.
message FaviconSyncFlags {
optional bool enabled = 1;
+ optional int32 favicon_sync_limit = 2 [default = 200];
}
// Contains one flag or set of related flags. Each node of the experiments type
diff --git a/sync/protocol/get_updates_caller_info.proto b/sync/protocol/get_updates_caller_info.proto
index 86fe9c83fb..a54232489f 100644
--- a/sync/protocol/get_updates_caller_info.proto
+++ b/sync/protocol/get_updates_caller_info.proto
@@ -10,6 +10,9 @@ option retain_unknown_fields = true;
package sync_pb;
message GetUpdatesCallerInfo {
+ // This message was deprecated in M28. The preferred represenation of this
+ // information is now the GetUpdatesOrigin enum, which is defined in
+ // sync_enums.proto.
enum GetUpdatesSource {
UNKNOWN = 0; // The source was not set by the caller.
FIRST_UPDATE = 1; // First request after browser restart. Not to
diff --git a/sync/protocol/managed_user_setting_specifics.proto b/sync/protocol/managed_user_setting_specifics.proto
new file mode 100644
index 0000000000..fcd1d96426
--- /dev/null
+++ b/sync/protocol/managed_user_setting_specifics.proto
@@ -0,0 +1,21 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Sync protocol datatype extension for managed user settings.
+
+// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
+// any fields in this file.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+option retain_unknown_fields = true;
+
+package sync_pb;
+
+// Properties of managed user setting sync objects.
+message ManagedUserSettingSpecifics {
+ optional string name = 1;
+ optional string value = 2;
+}
diff --git a/sync/protocol/priority_preference_specifics.proto b/sync/protocol/priority_preference_specifics.proto
index e52e0befa0..97b554e3ed 100644
--- a/sync/protocol/priority_preference_specifics.proto
+++ b/sync/protocol/priority_preference_specifics.proto
@@ -14,8 +14,9 @@ option retain_unknown_fields = true;
package sync_pb;
+import "preference_specifics.proto";
+
// Properties of a synced priority preference.
message PriorityPreferenceSpecifics {
- optional string name = 1;
- optional string value = 2;
+ optional PreferenceSpecifics preference = 1;
}
diff --git a/sync/protocol/proto_enum_conversions.cc b/sync/protocol/proto_enum_conversions.cc
index ef1d812433..f0095a760b 100644
--- a/sync/protocol/proto_enum_conversions.cc
+++ b/sync/protocol/proto_enum_conversions.cc
@@ -87,6 +87,23 @@ const char* GetUpdatesSourceString(
return "";
}
+const char* GetUpdatesOriginString(
+ sync_pb::SyncEnums::GetUpdatesOrigin origin) {
+ ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, GetUpdatesOrigin,
+ UNKNOWN_ORIGIN, GU_TRIGGER);
+ switch (origin) {
+ ENUM_CASE(sync_pb::SyncEnums, UNKNOWN_ORIGIN);
+ ENUM_CASE(sync_pb::SyncEnums, PERIODIC);
+ ENUM_CASE(sync_pb::SyncEnums, NEWLY_SUPPORTED_DATATYPE);
+ ENUM_CASE(sync_pb::SyncEnums, MIGRATION);
+ ENUM_CASE(sync_pb::SyncEnums, NEW_CLIENT);
+ ENUM_CASE(sync_pb::SyncEnums, RECONFIGURATION);
+ ENUM_CASE(sync_pb::SyncEnums, GU_TRIGGER);
+ }
+ NOTREACHED();
+ return "";
+}
+
const char* GetResponseTypeString(
sync_pb::CommitResponse::ResponseType response_type) {
ASSERT_ENUM_BOUNDS(sync_pb::CommitResponse, ResponseType, SUCCESS,
diff --git a/sync/protocol/proto_enum_conversions.h b/sync/protocol/proto_enum_conversions.h
index a0278589b0..2f29ed5ed5 100644
--- a/sync/protocol/proto_enum_conversions.h
+++ b/sync/protocol/proto_enum_conversions.h
@@ -33,6 +33,9 @@ SYNC_EXPORT_PRIVATE const char* GetPageTransitionRedirectTypeString(
SYNC_EXPORT const char* GetUpdatesSourceString(
sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source);
+SYNC_EXPORT const char* GetUpdatesOriginString(
+ sync_pb::SyncEnums::GetUpdatesOrigin origin);
+
SYNC_EXPORT_PRIVATE const char* GetResponseTypeString(
sync_pb::CommitResponse::ResponseType response_type);
diff --git a/sync/protocol/proto_value_conversions.cc b/sync/protocol/proto_value_conversions.cc
index ebb4d9568a..6963eaf4b0 100644
--- a/sync/protocol/proto_value_conversions.cc
+++ b/sync/protocol/proto_value_conversions.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/string_number_conversions.h"
#include "base/values.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/app_notification_specifics.pb.h"
#include "sync/protocol/app_setting_specifics.pb.h"
#include "sync/protocol/app_specifics.pb.h"
@@ -35,6 +36,7 @@
#include "sync/protocol/synced_notification_specifics.pb.h"
#include "sync/protocol/theme_specifics.pb.h"
#include "sync/protocol/typed_url_specifics.pb.h"
+#include "sync/protocol/unique_position.pb.h"
namespace syncer {
@@ -312,14 +314,6 @@ base::DictionaryValue* BookmarkSpecificsToValue(
return value;
}
-base::DictionaryValue* PriorityPreferenceSpecificsToValue(
- const sync_pb::PriorityPreferenceSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(name);
- SET_STR(value);
- return value;
-}
-
base::DictionaryValue* DeviceInfoSpecificsToValue(
const sync_pb::DeviceInfoSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -338,13 +332,26 @@ base::DictionaryValue* DictionarySpecificsToValue(
return value;
}
+namespace {
+
+DictionaryValue* FaviconSyncFlagsToValue(
+ const sync_pb::FaviconSyncFlags& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_BOOL(enabled);
+ SET_INT32(favicon_sync_limit);
+ return value;
+}
+
+}
+
base::DictionaryValue* ExperimentsSpecificsToValue(
const sync_pb::ExperimentsSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
SET_EXPERIMENT_ENABLED_FIELD(keystore_encryption);
SET_EXPERIMENT_ENABLED_FIELD(history_delete_directives);
SET_EXPERIMENT_ENABLED_FIELD(autofill_culling);
- SET_EXPERIMENT_ENABLED_FIELD(favicon_sync);
+ if (proto.has_favicon_sync())
+ SET(favicon_sync, FaviconSyncFlagsToValue);
return value;
}
@@ -408,6 +415,14 @@ base::DictionaryValue* HistoryDeleteDirectiveSpecificsToValue(
return value;
}
+base::DictionaryValue* ManagedUserSettingSpecificsToValue(
+ const sync_pb::ManagedUserSettingSpecifics& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR(name);
+ SET_STR(value);
+ return value;
+}
+
base::DictionaryValue* NigoriSpecificsToValue(
const sync_pb::NigoriSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -450,6 +465,13 @@ base::DictionaryValue* PreferenceSpecificsToValue(
return value;
}
+base::DictionaryValue* PriorityPreferenceSpecificsToValue(
+ const sync_pb::PriorityPreferenceSpecifics& specifics) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_FIELD(preference, PreferenceSpecificsToValue);
+ return value;
+}
+
base::DictionaryValue* SyncedNotificationSpecificsToValue(
const sync_pb::SyncedNotificationSpecifics& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -529,6 +551,7 @@ base::DictionaryValue* EntitySpecificsToValue(
SET_FIELD(favicon_image, FaviconImageSpecificsToValue);
SET_FIELD(favicon_tracking, FaviconTrackingSpecificsToValue);
SET_FIELD(history_delete_directive, HistoryDeleteDirectiveSpecificsToValue);
+ SET_FIELD(managed_user_setting, ManagedUserSettingSpecificsToValue);
SET_FIELD(nigori, NigoriSpecificsToValue);
SET_FIELD(password, PasswordSpecificsToValue);
SET_FIELD(preference, PreferenceSpecificsToValue);
@@ -543,6 +566,12 @@ base::DictionaryValue* EntitySpecificsToValue(
namespace {
+StringValue* UniquePositionToStringValue(
+ const sync_pb::UniquePosition& proto) {
+ UniquePosition pos = UniquePosition::FromProto(proto);
+ return new StringValue(pos.ToDebugString());
+}
+
base::DictionaryValue* SyncEntityToValue(const sync_pb::SyncEntity& proto,
bool include_specifics) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -557,6 +586,7 @@ base::DictionaryValue* SyncEntityToValue(const sync_pb::SyncEntity& proto,
SET_INT64(sync_timestamp);
SET_STR(server_defined_unique_tag);
SET_INT64(position_in_parent);
+ SET(unique_position, UniquePositionToStringValue);
SET_STR(insert_after_item_id);
SET_BOOL(deleted);
SET_STR(originator_cache_guid);
@@ -600,6 +630,17 @@ base::DictionaryValue* CommitMessageToValue(
return value;
}
+base::DictionaryValue* GetUpdateTriggersToValue(
+ const sync_pb::GetUpdateTriggers& proto) {
+ base::DictionaryValue* value = new base::DictionaryValue();
+ SET_STR_REP(notification_hint);
+ SET_BOOL(client_dropped_hints);
+ SET_BOOL(invalidations_out_of_sync);
+ SET_INT64(local_modification_nudges);
+ SET_INT64(datatype_refresh_nudges);
+ return value;
+}
+
base::DictionaryValue* DataTypeProgressMarkerToValue(
const sync_pb::DataTypeProgressMarker& proto) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -607,6 +648,7 @@ base::DictionaryValue* DataTypeProgressMarkerToValue(
SET_BYTES(token);
SET_INT64(timestamp_token_for_migration);
SET_STR(notification_hint);
+ SET(get_update_triggers, GetUpdateTriggersToValue);
return value;
}
@@ -628,6 +670,7 @@ base::DictionaryValue* GetUpdatesMessageToValue(
SET_BOOL(streaming);
SET_BOOL(need_encryption_key);
SET_BOOL(create_mobile_bookmarks_folder);
+ SET_ENUM(get_updates_origin, GetUpdatesOriginString);
return value;
}
diff --git a/sync/protocol/proto_value_conversions.h b/sync/protocol/proto_value_conversions.h
index 66dd684495..5a98758e02 100644
--- a/sync/protocol/proto_value_conversions.h
+++ b/sync/protocol/proto_value_conversions.h
@@ -41,6 +41,7 @@ class FaviconTrackingSpecifics;
class GlobalIdDirective;
class HistoryDeleteDirectiveSpecifics;
class KeystoreEncryptionFlagsSpecifics;
+class ManagedUserSettingSpecifics;
class NigoriSpecifics;
class PasswordSpecifics;
class PasswordSpecificsData;
@@ -166,6 +167,9 @@ SYNC_EXPORT base::DictionaryValue* HistoryDeleteDirectiveSpecificsToValue(
const sync_pb::HistoryDeleteDirectiveSpecifics&
history_delete_directive_specifics);
+SYNC_EXPORT_PRIVATE base::DictionaryValue* ManagedUserSettingSpecificsToValue(
+ const sync_pb::ManagedUserSettingSpecifics& managed_user_setting_specifics);
+
SYNC_EXPORT_PRIVATE base::DictionaryValue* NigoriSpecificsToValue(
const sync_pb::NigoriSpecifics& nigori_specifics);
diff --git a/sync/protocol/proto_value_conversions_unittest.cc b/sync/protocol/proto_value_conversions_unittest.cc
index 6258fa6842..fe2750cb7a 100644
--- a/sync/protocol/proto_value_conversions_unittest.cc
+++ b/sync/protocol/proto_value_conversions_unittest.cc
@@ -23,6 +23,7 @@
#include "sync/protocol/extension_specifics.pb.h"
#include "sync/protocol/favicon_image_specifics.pb.h"
#include "sync/protocol/favicon_tracking_specifics.pb.h"
+#include "sync/protocol/managed_user_setting_specifics.pb.h"
#include "sync/protocol/nigori_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/preference_specifics.pb.h"
@@ -52,7 +53,7 @@ TEST_F(ProtoValueConversionsTest, ProtoChangeCheck) {
// If this number changes, that means we added or removed a data
// type. Don't forget to add a unit test for {New
// type}SpecificsToValue below.
- EXPECT_EQ(26, MODEL_TYPE_COUNT);
+ EXPECT_EQ(27, MODEL_TYPE_COUNT);
// We'd also like to check if we changed any field in our messages.
// However, that's hard to do: sizeof could work, but it's
@@ -173,6 +174,10 @@ TEST_F(ProtoValueConversionsTest, HistoryDeleteDirectiveSpecificsToValue) {
TestSpecificsToValue(HistoryDeleteDirectiveSpecificsToValue);
}
+TEST_F(ProtoValueConversionsTest, ManagedUserSettingSpecificsToValue) {
+ TestSpecificsToValue(ManagedUserSettingSpecificsToValue);
+}
+
TEST_F(ProtoValueConversionsTest, NigoriSpecificsToValue) {
TestSpecificsToValue(NigoriSpecificsToValue);
}
@@ -231,6 +236,7 @@ TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
SET_FIELD(favicon_image);
SET_FIELD(favicon_tracking);
SET_FIELD(history_delete_directive);
+ SET_FIELD(managed_user_setting);
SET_FIELD(nigori);
SET_FIELD(password);
SET_FIELD(preference);
diff --git a/sync/protocol/sync.proto b/sync/protocol/sync.proto
index 37e445522a..b449e5282d 100644
--- a/sync/protocol/sync.proto
+++ b/sync/protocol/sync.proto
@@ -32,6 +32,7 @@ import "favicon_tracking_specifics.proto";
import "get_updates_caller_info.proto";
import "history_delete_directive_specifics.proto";
import "nigori_specifics.proto";
+import "managed_user_setting_specifics.proto";
import "password_specifics.proto";
import "preference_specifics.proto";
import "priority_preference_specifics.proto";
@@ -114,6 +115,7 @@ message EntitySpecifics {
optional DictionarySpecifics dictionary = 170540;
optional FaviconTrackingSpecifics favicon_tracking = 181534;
optional FaviconImageSpecifics favicon_image = 182019;
+ optional ManagedUserSettingSpecifics managed_user_setting = 186662;
}
message SyncEntity {
@@ -396,6 +398,72 @@ message CommitMessage {
optional ClientConfigParams config_params = 4;
};
+// This message communicates additional per-type information related to
+// requests with origin GU_TRIGGER. This message is not relevant when any
+// other origin value is used.
+// Introduced in M28.
+message GetUpdateTriggers {
+ // An opaque-to-the-client string of bytes, received through a notification,
+ // that the server may interpret as a hint about the location of the latest
+ // version of the data for this type.
+ //
+ // Note that this will eventually replace the 'optional' field of the same
+ // name defined in the progress marker, but the client and server should
+ // support both until it's safe to deprecate the old one.
+ //
+ // This field was introduced in M28.
+ repeated string notification_hint = 1;
+
+ // This flag is set if the client was forced to drop hints because the number
+ // of queued hints exceeded its limit. The oldest hints will be discarded
+ // first. Introduced in M28.
+ optional bool client_dropped_hints = 2;
+
+ // This flag is set if the invalidation server reports that it may have
+ // dropped some invalidations at some point. The client will also drop any
+ // locally cached hints that are older than the server-did-drop notification.
+ //
+ // TODO(sync): Determine the format for this.
+ //
+ // optional bool server_dropped_hints = 6;
+
+ // This flag is set when the client suspects that its list of invalidation
+ // hints may be incomplete. This may be the case if:
+ // - The client is syncing for the first time.
+ // - The client has just restarted and it was unable to keep track of
+ // invalidations that were received prior to the restart.
+ // - The client's connection to the invalidation server is currently or
+ // was recently broken.
+ //
+ // It's difficult to provide more details here. This is implemented by
+ // setting the flag to false whenever anything that might adversely affect
+ // notifications happens (eg. a crash, restart on a platform that doesn't
+ // support invalidation ack-tracking, transient invalidation error) and is
+ // unset only after we've experienced one successful sync cycle while
+ // notifications were enabled.
+ //
+ // This flag was introduced in M28.
+ optional bool invalidations_out_of_sync = 3;
+
+ // This counts the number of times the syncer has been asked to commit
+ // changes for this type since the last successful sync cycle. The number of
+ // nudges may not be related to the actual number of items modified. It
+ // often correlates with the number of user actions, but that's not always
+ // the case.
+ // Introduced in M28.
+ optional int64 local_modification_nudges = 4;
+
+ // This counts the number of times the syncer has been explicitly asked to
+ // fetch updates for this type since the last successful sync cycle. These
+ // explicit refresh requests should be relatively rare on most platforms, and
+ // associated with user actions. For example, at the time of this writing
+ // the most common (only?) source of refresh requests is when a user opens
+ // the new tab page on a platform that does not support sessions
+ // invalidations.
+ // Introduced in M28.
+ optional int64 datatype_refresh_nudges = 5;
+}
+
message DataTypeProgressMarker {
// An integer identifying the data type whose progress is tracked by this
// marker. The legitimate values of this field correspond to the protobuf
@@ -436,7 +504,13 @@ message DataTypeProgressMarker {
// An opaque-to-the-client string of bytes, received through a notification,
// that the server may interpret as a hint about the location of the latest
// version of the data for this type.
+ //
+ // Deprecated in M28. We should use the repeated field version in the
+ // PerClientTypeState instead.
optional string notification_hint = 4;
+
+ // This field will be included only in GetUpdates with origin GU_TRIGGER.
+ optional GetUpdateTriggers get_update_triggers = 5;
}
message GetUpdatesMessage {
@@ -450,6 +524,8 @@ message GetUpdatesMessage {
optional int64 from_timestamp = 1;
// Indicates the reason for the GetUpdatesMessage.
+ // Deprecated in M28. We should eventually rely on GetUpdatesOrigin instead.
+ // Newer clients will support both systems during the transition period.
optional GetUpdatesCallerInfo caller_info = 2;
// Indicates whether related folders should be fetched.
@@ -476,6 +552,10 @@ message GetUpdatesMessage {
// Per-datatype progress marker. If present, the server will ignore
// the values of requested_types and from_timestamp, using this instead.
+ //
+ // With the exception of certain configuration or initial sync requests, the
+ // client should include one instance of this field for each enabled data
+ // type.
repeated DataTypeProgressMarker from_progress_marker = 6;
// Indicates whether the response should be sent in chunks. This may be
@@ -497,6 +577,11 @@ message GetUpdatesMessage {
// Whether to create the mobile bookmarks folder if it's not
// already created. Should be set to true only by mobile clients.
optional bool create_mobile_bookmarks_folder = 1000 [default = false];
+
+ // This value is an udpated version of the GetUpdatesCallerInfo's
+ // GetUpdatesSource. It describes the reason for the GetUpdate request.
+ // Introduced in M28.
+ optional SyncEnums.GetUpdatesOrigin get_updates_origin = 9;
};
message AuthenticateMessage {
diff --git a/sync/protocol/sync_enums.proto b/sync/protocol/sync_enums.proto
index 30921a545f..a928ca2172 100644
--- a/sync/protocol/sync_enums.proto
+++ b/sync/protocol/sync_enums.proto
@@ -114,4 +114,34 @@ message SyncEnums {
TYPE_PHONE = 6;
TYPE_TABLET = 7;
}
+
+ // This is the successor to GetUpdatesSource. It merges the "normal mode"
+ // values (LOCAL, NOTIFICATION and DATATYPE_REFRESH), which were never really
+ // mutually exclusive to being with, into the GU_TRIGGER value. It also
+ // drops support for some old values that are not supported by newer clients.
+ //
+ // Mind the gaps: Some values are intentionally unused because we want to
+ // keep the values in sync with GetUpdatesSource as much as possible. Please
+ // don't add any values < 12 unless there's a good reason for it.
+ //
+ // Introduced in M28.
+ enum GetUpdatesOrigin {
+ UNKNOWN_ORIGIN = 0; // The source was not set by the caller.
+ PERIODIC = 4; // The source of the update was periodic polling.
+ NEWLY_SUPPORTED_DATATYPE = 7; // The client is in configuration mode
+ // because it's syncing all datatypes, and
+ // support for a new datatype was recently
+ // released via a software auto-update.
+ MIGRATION = 8; // The client is in configuration mode because a
+ // MIGRATION_DONE error previously returned by the
+ // server necessitated resynchronization.
+ NEW_CLIENT = 9; // The client is in configuration mode because the
+ // user enabled sync for the first time. Not to be
+ // confused with FIRST_UPDATE.
+ RECONFIGURATION = 10; // The client is in configuration mode because the
+ // user opted to sync a different set of datatypes.
+ GU_TRIGGER = 12; // The client is in 'normal' mode. It may have several
+ // reasons for requesting an update. See the per-type
+ // GetUpdateTriggers message for more details.
+ }
}
diff --git a/sync/protocol/synced_notification_data.proto b/sync/protocol/synced_notification_data.proto
index fed7ee7d90..72c612d725 100644
--- a/sync/protocol/synced_notification_data.proto
+++ b/sync/protocol/synced_notification_data.proto
@@ -63,8 +63,8 @@ message SyncedNotification {
// The creator of the notification.
optional SyncedNotificationCreator creator = 3;
- // TODO(petewil): This won't build. Import the relevant protobuf.
- // optional MapData client_data = 4;
+ // Client specific data.
+ optional MapData client_data = 4;
}
message CoalescedSyncedNotification {
@@ -103,4 +103,28 @@ message CoalescedSyncedNotification {
message SyncedNotificationList {
repeated CoalescedSyncedNotification coalesced_notification = 1;
-} \ No newline at end of file
+}
+
+// MapData, Data, and ListData are used to sending aribitrary payloads
+// between instances of applications using Synced Notifications. The
+// schema atop MapData will be defined by the client application.
+message MapData {
+ message Entry {
+ optional string key = 1;
+ optional Data value = 2;
+ };
+ repeated Entry entry = 1;
+};
+
+message Data {
+ optional bool boolean_value = 1;
+ optional int32 int_value = 2;
+ optional double float_value = 3;
+ optional string string_value = 4;
+ optional ListData list_value = 5;
+ optional MapData map_value = 6;
+};
+
+message ListData {
+ repeated Data value = 1;
+}; \ No newline at end of file
diff --git a/sync/protocol/synced_notification_render.proto b/sync/protocol/synced_notification_render.proto
index ce5a7215ec..46d890f491 100644
--- a/sync/protocol/synced_notification_render.proto
+++ b/sync/protocol/synced_notification_render.proto
@@ -16,70 +16,89 @@ package sync_pb;
// Data that is used directly by endpoints to render notifications in the case
// where no "native" app can handle the notification.
message SyncedNotificationRenderInfo {
-
- // Render information for the collapsed (summary) view of a coalesced
- // notification.
- message CollapsedInfo {
- message SimpleCollapsedLayout {
- // Application icon.
- optional SyncedNotificationImage app_icon = 1;
-
- // Profile image(s) of the notification creator(s) to show in the
- // collapsed UI.
- repeated SyncedNotificationProfileImage profile_image = 2;
-
- // Heading - often the name(s) of the notification creator(s).
- optional string heading = 3;
-
- // Description - often the action that generated the notification.
- optional string description = 4;
- }
- optional SimpleCollapsedLayout simple_collapsed_layout = 1;
-
- // The creation time of the notification in microseconds since the UNIX
- // epoch.
- optional uint64 creation_timestamp_usec = 2;
-
- // The default destination target.
- optional SyncedNotificationDestination default_destination = 3;
-
- // Secondary destinations and actions grouped into a message to account for
- // ordering.
- message Target {
- optional SyncedNotificationDestination destination = 1;
- optional SyncedNotificationAction action = 2;
- }
- repeated Target target = 4;
- }
+ // Render information for the collapsed (summary) view of a notification.
optional CollapsedInfo collapsed_info = 1;
- // Render information for the expanded (detail) view of a coalesced
- // notification.
- message ExpandedInfo {
- message SimpleExpandedLayout {
- // Title - often the title of the underlying entity referred to by the
- // notification(s).
- optional string title = 1;
-
- // Text content - often a snippet of text from the underlying entity
- // reference or the notification.
- optional string text = 2;
-
- // Media.
- message Media {
- // TOOD(jro): Do we need other media types?
- optional SyncedNotificationImage image = 1;
- }
- repeated Media media = 3;
- }
- optional SimpleExpandedLayout simple_expanded_layout = 1;
-
- // Collapsed information for each notification in the coalesced group.
- repeated CollapsedInfo collapsed_info = 2;
- }
+ // Render information for the expanded view of a notification.
optional ExpandedInfo expanded_info = 2;
}
+// Render information for the collapsed (summary) view of a coalesced
+// notification.
+message CollapsedInfo {
+ optional SimpleCollapsedLayout simple_collapsed_layout = 1;
+
+ // The creation time of the notification in microseconds since the UNIX
+ // epoch.
+ optional uint64 creation_timestamp_usec = 2;
+
+ // The default destination target.
+ optional SyncedNotificationDestination default_destination = 3;
+
+ repeated Target target = 4;
+}
+
+// Render information for the expanded (detail) view of a coalesced
+// notification.
+message ExpandedInfo {
+ optional SimpleExpandedLayout simple_expanded_layout = 1;
+
+ // Collapsed information for each notification in the coalesced group.
+ repeated CollapsedInfo collapsed_info = 2;
+}
+
+message SimpleCollapsedLayout {
+ // Application icon.
+ optional SyncedNotificationImage app_icon = 1;
+
+ // Profile image(s) of the notification creator(s) to show in the
+ // collapsed UI.
+ repeated SyncedNotificationProfileImage profile_image = 2;
+
+ // Heading - often the name(s) of the notification creator(s).
+ optional string heading = 3;
+
+ // Description - often the action that generated the notification.
+ optional string description = 4;
+}
+
+message SimpleExpandedLayout {
+ // Title - often the title of the underlying entity referred to by the
+ // notification(s).
+ optional string title = 1;
+
+ // Text content - often a snippet of text from the underlying entity
+ // reference or the notification.
+ optional string text = 2;
+
+ repeated Media media = 3;
+
+ // Profile image, usually this is the creator of the referenced entity.
+ optional SyncedNotificationProfileImage profile_image = 4;
+
+ // A set of targets for actions the user can take, or destinations the
+ // viewer can be taken to. Usually these relate to the referenced entity.
+ repeated Target target = 5;
+}
+
+// Media.
+message Media {
+ // TOOD(jro): Do we need other media types?
+ optional SyncedNotificationImage image = 1;
+}
+
+// Secondary destinations and actions grouped into a message to account for
+// ordering.
+message Target {
+ // URL that the user will be taken to by clicking on the notification.
+ optional SyncedNotificationDestination destination = 1;
+ // URI to POST if the user clicks on a button.
+ optional SyncedNotificationAction action = 2;
+
+ // A key to identify this target within a group of targets.
+ optional string target_key = 3;
+}
+
// A Destination is a target URL that the user can be taken to by clicking on or
// selecting the notification or part thereof.
message SyncedNotificationDestination {
@@ -101,8 +120,8 @@ message SyncedNotificationDestination {
// action button associated with the notification on native mobile, a link, or
// even the notification card itself.
message SyncedNotificationAction {
- // The description for the Action.
- optional string text = 1;
+ // The description for the Action.
+ optional string text = 1;
// The icon to use for the Action.
optional SyncedNotificationImage icon = 2;
@@ -133,4 +152,4 @@ message SyncedNotificationProfileImage {
optional string oid = 2;
// Name to display for this image.
optional string display_name = 3;
-} \ No newline at end of file
+}
diff --git a/sync/protocol/unique_position.proto b/sync/protocol/unique_position.proto
index c0743126d5..4402ba0e37 100644
--- a/sync/protocol/unique_position.proto
+++ b/sync/protocol/unique_position.proto
@@ -24,5 +24,15 @@ package sync_pb;
// Items under the same parent are positioned relative to each other by a
// lexicographic comparison of their UniquePosition values.
message UniquePosition {
+ // The uncompressed string of bytes representing the position.
optional bytes value = 1;
+
+ // The client may choose to write a compressed position to this field instead
+ // of populating the 'value' above. If it chooses to use compression, the
+ // 'value' field above must be empty. The position value will be compressed
+ // with gzip and stored in the compressed_value field. The position's
+ // uncompressed length must be specified and written to the
+ // uncompressed_length field.
+ optional bytes compressed_value = 2;
+ optional uint64 uncompressed_length = 3;
}
diff --git a/sync/sessions/nudge_tracker.cc b/sync/sessions/nudge_tracker.cc
new file mode 100644
index 0000000000..714b67d13f
--- /dev/null
+++ b/sync/sessions/nudge_tracker.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/nudge_tracker.h"
+#include "sync/protocol/sync.pb.h"
+
+namespace syncer {
+namespace sessions {
+
+NudgeTracker::NudgeTracker() { }
+
+NudgeTracker::~NudgeTracker() { }
+
+void NudgeTracker::CoalesceSources(const SyncSourceInfo& source) {
+ CoalesceStates(source.types, &source_info_.types);
+ source_info_.updates_source = source.updates_source;
+}
+
+bool NudgeTracker::IsEmpty() {
+ return source_info_.types.empty();
+}
+
+void NudgeTracker::Reset() {
+ source_info_ = SyncSourceInfo();
+}
+
+// TODO(rlarocque): This function often reports incorrect results. However, it
+// is compatible with the "classic" behaviour. We would need to make the nudge
+// tracker stop overwriting its own information (ie. fix crbug.com/231693)
+// before we could even try to report correct results. The main issue is that
+// notification and local modification nudges may overlap with each other
+// in such a way that we lose track of which types were or were not locally
+// modified.
+ModelTypeSet NudgeTracker::GetLocallyModifiedTypes() const {
+ ModelTypeSet locally_modified;
+
+ if (source_info_.updates_source != sync_pb::GetUpdatesCallerInfo::LOCAL) {
+ return locally_modified;
+ }
+
+ for (ModelTypeInvalidationMap::const_iterator i = source_info_.types.begin();
+ i != source_info().types.end(); ++i) {
+ locally_modified.Put(i->first);
+ }
+ return locally_modified;
+}
+
+} // namespace sessions
+} // namespace syncer
diff --git a/sync/sessions/nudge_tracker.h b/sync/sessions/nudge_tracker.h
new file mode 100644
index 0000000000..0128ce096e
--- /dev/null
+++ b/sync/sessions/nudge_tracker.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class to track the outstanding work required to bring the client back into
+// sync with the server.
+#ifndef SYNC_SESSIONS_NUDGE_TRACKER_H_
+#define SYNC_SESSIONS_NUDGE_TRACKER_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "sync/base/sync_export.h"
+#include "sync/internal_api/public/sessions/sync_source_info.h"
+
+namespace syncer {
+namespace sessions {
+
+struct SyncSourceInfo;
+
+class SYNC_EXPORT_PRIVATE NudgeTracker {
+ public:
+ NudgeTracker();
+ ~NudgeTracker();
+
+ // Merges in the information from another nudge.
+ void CoalesceSources(const SyncSourceInfo& source);
+
+ // Returns true if there are no unserviced nudges.
+ bool IsEmpty();
+
+ // Clear all unserviced nudges.
+ void Reset();
+
+ // Returns the coalesced source info.
+ const SyncSourceInfo& source_info() const {
+ return source_info_;
+ }
+
+ // Returns the set of locally modified types, according to our tracked source
+ // infos. The result is often wrong; see implementation comment for details.
+ ModelTypeSet GetLocallyModifiedTypes() const;
+
+ private:
+ // Merged source info for the nudge(s).
+ SyncSourceInfo source_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(NudgeTracker);
+};
+
+} // namespace sessions
+} // namespace syncer
+
+#endif // SYNC_SESSIONS_NUDGE_TRACKER_H_
diff --git a/sync/sessions/nudge_tracker_unittest.cc b/sync/sessions/nudge_tracker_unittest.cc
new file mode 100644
index 0000000000..192a737c93
--- /dev/null
+++ b/sync/sessions/nudge_tracker_unittest.cc
@@ -0,0 +1,112 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/nudge_tracker.h"
+
+#include "sync/internal_api/public/base/model_type_invalidation_map.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+ModelTypeSet ParamsMeaningAllEnabledTypes() {
+ ModelTypeSet request_params(BOOKMARKS, AUTOFILL);
+ return request_params;
+}
+
+ModelTypeSet ParamsMeaningJustOneEnabledType() {
+ return ModelTypeSet(AUTOFILL);
+}
+
+} // namespace
+
+namespace sessions {
+
+TEST(NudgeTrackerTest, CoalesceSources) {
+ ModelTypeInvalidationMap one_type =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningJustOneEnabledType(),
+ std::string());
+ ModelTypeInvalidationMap all_types =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ sessions::SyncSourceInfo source_one(
+ sync_pb::GetUpdatesCallerInfo::NOTIFICATION, one_type);
+ sessions::SyncSourceInfo source_two(
+ sync_pb::GetUpdatesCallerInfo::LOCAL, all_types);
+
+ NudgeTracker tracker;
+ EXPECT_TRUE(tracker.IsEmpty());
+
+ tracker.CoalesceSources(source_one);
+ EXPECT_EQ(source_one.updates_source, tracker.source_info().updates_source);
+
+ tracker.CoalesceSources(source_two);
+ EXPECT_EQ(source_two.updates_source, tracker.source_info().updates_source);
+}
+
+TEST(NudgeTrackerTest, LocallyModifiedTypes_WithInvalidationFirst) {
+ ModelTypeInvalidationMap one_type =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningJustOneEnabledType(),
+ std::string());
+ ModelTypeInvalidationMap all_types =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ sessions::SyncSourceInfo source_one(
+ sync_pb::GetUpdatesCallerInfo::NOTIFICATION, all_types);
+ sessions::SyncSourceInfo source_two(
+ sync_pb::GetUpdatesCallerInfo::LOCAL, one_type);
+
+ NudgeTracker tracker;
+ EXPECT_TRUE(tracker.IsEmpty());
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Empty());
+
+ tracker.CoalesceSources(source_one);
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Empty());
+
+ tracker.CoalesceSources(source_two);
+ // TODO: This result is wrong, but that's how the code has always been. A
+ // local invalidation for a single type should mean that we have only one
+ // locally modified source. It should not "inherit" the list of data types
+ // from the previous source.
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Equals(
+ ParamsMeaningAllEnabledTypes()));
+}
+
+TEST(NudgeTrackerTest, LocallyModifiedTypes_WithInvalidationSecond) {
+ ModelTypeInvalidationMap one_type =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningJustOneEnabledType(),
+ std::string());
+ ModelTypeInvalidationMap all_types =
+ ModelTypeSetToInvalidationMap(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ sessions::SyncSourceInfo source_one(
+ sync_pb::GetUpdatesCallerInfo::LOCAL, one_type);
+ sessions::SyncSourceInfo source_two(
+ sync_pb::GetUpdatesCallerInfo::NOTIFICATION, all_types);
+
+ NudgeTracker tracker;
+ EXPECT_TRUE(tracker.IsEmpty());
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Empty());
+
+ tracker.CoalesceSources(source_one);
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Equals(
+ ParamsMeaningJustOneEnabledType()));
+
+ tracker.CoalesceSources(source_two);
+
+ // TODO: This result is wrong, but that's how the code has always been.
+ // The receipt of an invalidation should have no effect on the set of
+ // locally modified types.
+ EXPECT_TRUE(tracker.GetLocallyModifiedTypes().Empty());
+}
+
+} // namespace sessions
+} // namespace syncer
diff --git a/sync/sessions/sync_session.cc b/sync/sessions/sync_session.cc
index 58cf11a3c5..2e82dae0f3 100644
--- a/sync/sessions/sync_session.cc
+++ b/sync/sessions/sync_session.cc
@@ -23,17 +23,10 @@ SyncSession::SyncSession(
source_(source),
delegate_(delegate) {
status_controller_.reset(new StatusController());
- debug_info_sources_list_.push_back(source_);
}
SyncSession::~SyncSession() {}
-void SyncSession::CoalesceSources(const SyncSourceInfo& source) {
- debug_info_sources_list_.push_back(source);
- CoalesceStates(source.types, &source_.types);
- source_.updates_source = source.updates_source;
-}
-
SyncSessionSnapshot SyncSession::TakeSnapshot() const {
syncable::Directory* dir = context_->directory();
@@ -56,7 +49,6 @@ SyncSessionSnapshot SyncSession::TakeSnapshot() const {
status_controller_->num_hierarchy_conflicts(),
status_controller_->num_server_conflicts(),
source_,
- debug_info_sources_list_,
context_->notifications_enabled(),
dir->GetEntriesCount(),
status_controller_->sync_start_time(),
diff --git a/sync/sessions/sync_session.h b/sync/sessions/sync_session.h
index 126d57f55b..763bd3bc47 100644
--- a/sync/sessions/sync_session.h
+++ b/sync/sessions/sync_session.h
@@ -101,10 +101,6 @@ class SYNC_EXPORT_PRIVATE SyncSession {
// Builds and sends a snapshot to the session context's listeners.
void SendEventNotification(SyncEngineEvent::EventCause cause);
- // Overwrite the sync update source with the most recent and merge the
- // type/state map.
- void CoalesceSources(const SyncSourceInfo& source);
-
// TODO(akalin): Split this into context() and mutable_context().
SyncSessionContext* context() const { return context_; }
Delegate* delegate() const { return delegate_; }
@@ -124,10 +120,6 @@ class SYNC_EXPORT_PRIVATE SyncSession {
// The source for initiating this sync session.
SyncSourceInfo source_;
- // A list of sources for sessions that have been merged with this one.
- // Currently used only for logging.
- std::vector<SyncSourceInfo> debug_info_sources_list_;
-
// The delegate for this session, must never be NULL.
Delegate* const delegate_;
diff --git a/sync/sessions/sync_session_unittest.cc b/sync/sessions/sync_session_unittest.cc
index c3c4d69e66..118e64bd2d 100644
--- a/sync/sessions/sync_session_unittest.cc
+++ b/sync/sessions/sync_session_unittest.cc
@@ -172,26 +172,6 @@ TEST_F(SyncSessionTest, MoreToDownloadIfGotNoChangesRemaining) {
EXPECT_TRUE(status()->download_updates_succeeded());
}
-TEST_F(SyncSessionTest, CoalesceSources) {
- ModelTypeInvalidationMap one_type =
- ModelTypeSetToInvalidationMap(
- ParamsMeaningJustOneEnabledType(),
- std::string());
- ModelTypeInvalidationMap all_types =
- ModelTypeSetToInvalidationMap(
- ParamsMeaningAllEnabledTypes(),
- std::string());
- SyncSourceInfo source_one(sync_pb::GetUpdatesCallerInfo::PERIODIC, one_type);
- SyncSourceInfo source_two(sync_pb::GetUpdatesCallerInfo::LOCAL, all_types);
-
- SyncSession session(context_.get(), this, source_one);
-
- session.CoalesceSources(source_two);
-
- EXPECT_EQ(source_two.updates_source, session.source().updates_source);
- EXPECT_THAT(all_types, Eq(session.source().types));
-}
-
TEST_F(SyncSessionTest, MakeTypeInvalidationMapFromBitSet) {
ModelTypeSet types;
std::string payload = "test";
diff --git a/sync/sync_core.gypi b/sync/sync_core.gypi
index 716f2af521..9c311eea12 100644
--- a/sync/sync_core.gypi
+++ b/sync/sync_core.gypi
@@ -68,8 +68,6 @@
'engine/sync_scheduler.h',
'engine/sync_scheduler_impl.cc',
'engine/sync_scheduler_impl.h',
- 'engine/sync_session_job.cc',
- 'engine/sync_session_job.h',
'engine/syncer.cc',
'engine/syncer.h',
'engine/syncer_command.cc',
@@ -104,6 +102,8 @@
'protocol/sync_protocol_error.cc',
'protocol/sync_protocol_error.h',
'sessions/debug_info_getter.h',
+ 'sessions/nudge_tracker.cc',
+ 'sessions/nudge_tracker.h',
'sessions/ordered_commit_set.cc',
'sessions/ordered_commit_set.h',
'sessions/status_controller.cc',
@@ -137,7 +137,12 @@
'syncable/nigori_util.h',
'syncable/on_disk_directory_backing_store.cc',
'syncable/on_disk_directory_backing_store.h',
+ 'syncable/parent_child_index.cc',
+ 'syncable/parent_child_index.h',
+ 'syncable/scoped_index_updater.h',
'syncable/scoped_kernel_lock.h',
+ 'syncable/scoped_parent_child_index_updater.cc',
+ 'syncable/scoped_parent_child_index_updater.h',
'syncable/syncable-inl.h',
'syncable/syncable_base_transaction.cc',
'syncable/syncable_base_transaction.h',
diff --git a/sync/sync_internal_api.gypi b/sync/sync_internal_api.gypi
index 04c36a6232..9016a80562 100644
--- a/sync/sync_internal_api.gypi
+++ b/sync/sync_internal_api.gypi
@@ -13,6 +13,7 @@
'../base/base.gyp:base',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../net/net.gyp:net',
+ '../third_party/zlib/zlib.gyp:zlib',
],
'sources': [
'internal_api/base_node.cc',
diff --git a/sync/sync_proto.gypi b/sync/sync_proto.gypi
index f56a36ad7e..04f5e35490 100644
--- a/sync/sync_proto.gypi
+++ b/sync/sync_proto.gypi
@@ -28,6 +28,7 @@
'protocol/get_updates_caller_info.proto',
'protocol/history_delete_directive_specifics.proto',
'protocol/nigori_specifics.proto',
+ 'protocol/managed_user_setting_specifics.proto',
'protocol/password_specifics.proto',
'protocol/preference_specifics.proto',
'protocol/priority_preference_specifics.proto',
diff --git a/sync/sync_tests.gypi b/sync/sync_tests.gypi
index 059c930154..f7e2b64782 100644
--- a/sync/sync_tests.gypi
+++ b/sync/sync_tests.gypi
@@ -243,15 +243,12 @@
'engine/apply_control_data_updates_unittest.cc',
'engine/apply_updates_and_resolve_conflicts_command_unittest.cc',
'engine/backoff_delay_provider_unittest.cc',
- 'engine/build_commit_command_unittest.cc',
'engine/download_updates_command_unittest.cc',
'engine/model_changing_syncer_command_unittest.cc',
'engine/process_commit_response_command_unittest.cc',
'engine/process_updates_command_unittest.cc',
'engine/store_timestamps_command_unittest.cc',
- 'engine/sync_session_job_unittest.cc',
'engine/sync_scheduler_unittest.cc',
- 'engine/sync_scheduler_whitebox_unittest.cc',
'engine/syncer_proto_util_unittest.cc',
'engine/syncer_unittest.cc',
'engine/throttled_data_type_tracker_unittest.cc',
@@ -261,12 +258,14 @@
'js/sync_js_controller_unittest.cc',
'protocol/proto_enum_conversions_unittest.cc',
'protocol/proto_value_conversions_unittest.cc',
+ 'sessions/nudge_tracker_unittest.cc',
'sessions/ordered_commit_set_unittest.cc',
'sessions/status_controller_unittest.cc',
'sessions/sync_session_unittest.cc',
'syncable/directory_backing_store_unittest.cc',
'syncable/model_type_unittest.cc',
'syncable/nigori_util_unittest.cc',
+ 'syncable/parent_child_index_unittest.cc',
'syncable/syncable_enum_conversions_unittest.cc',
'syncable/syncable_id_unittest.cc',
'syncable/syncable_unittest.cc',
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index a9e0f65b99..a844e66e4c 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -4,17 +4,18 @@
#include "sync/syncable/directory.h"
+#include "base/base64.h"
#include "base/debug/trace_event.h"
-#include "base/perftimer.h"
#include "base/stl_util.h"
#include "base/string_number_conversions.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_index_updater.h"
+#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
@@ -36,7 +37,6 @@ void InitializeIndexEntry(EntryKernel* entry,
index->insert(entry);
}
}
-
}
// static
@@ -44,29 +44,6 @@ bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
return !a->ref(UNIQUE_CLIENT_TAG).empty();
}
-bool ParentIdAndHandleIndexer::Comparator::operator() (
- const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
- if (cmp != 0)
- return cmp < 0;
-
- const NodeOrdinal& a_position = a->ref(SERVER_ORDINAL_IN_PARENT);
- const NodeOrdinal& b_position = b->ref(SERVER_ORDINAL_IN_PARENT);
- if (!a_position.Equals(b_position))
- return a_position.LessThan(b_position);
-
- cmp = a->ref(ID).compare(b->ref(ID));
- return cmp < 0;
-}
-
-// static
-bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
- // This index excludes deleted items and the root item. The root
- // item is excluded so that it doesn't show up as a child of itself.
- return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
-}
-
// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
FILE_PATH_LITERAL("SyncData.sqlite3");
@@ -116,7 +93,7 @@ Directory::Kernel::Kernel(
name(name),
metahandles_index(new Directory::MetahandlesIndex),
ids_index(new Directory::IdsIndex),
- parent_id_child_index(new Directory::ParentIdChildIndex),
+ parent_child_index(new ParentChildIndex),
client_tag_index(new Directory::ClientTagIndex),
unsynced_metahandles(new MetahandleSet),
dirty_metahandles(new MetahandleSet),
@@ -135,7 +112,7 @@ Directory::Kernel::~Kernel() {
delete unsynced_metahandles;
delete dirty_metahandles;
delete metahandles_to_purge;
- delete parent_id_child_index;
+ delete parent_child_index;
delete client_tag_index;
delete ids_index;
STLDeleteElements(metahandles_index);
@@ -181,8 +158,8 @@ void Directory::InitializeIndices() {
MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
for (; it != kernel_->metahandles_index->end(); ++it) {
EntryKernel* entry = *it;
- InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
- kernel_->parent_id_child_index);
+ if (ParentChildIndex::ShouldInclude(entry))
+ kernel_->parent_child_index->Insert(entry);
InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
const int64 metahandle = entry->ref(META_HANDLE);
@@ -367,8 +344,8 @@ bool Directory::InsertEntry(WriteTransaction* trans,
trans))
return false;
- if (!entry->ref(IS_DEL)) {
- if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
+ if (ParentChildIndex::ShouldInclude(entry)) {
+ if (!SyncAssert(kernel_->parent_child_index->Insert(entry),
FROM_HERE,
error,
trans)) {
@@ -399,8 +376,8 @@ bool Directory::ReindexId(WriteTransaction* trans,
{
// Update the indices that depend on the ID field.
ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
- kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater updater_b(lock, entry,
+ kernel_->parent_child_index);
entry->put(ID, new_id);
}
return true;
@@ -413,8 +390,8 @@ bool Directory::ReindexParentId(WriteTransaction* trans,
{
// Update the indices that depend on the PARENT_ID field.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
- kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater index_updater(lock, entry,
+ kernel_->parent_child_index);
entry->put(PARENT_ID, new_parent_id);
}
return true;
@@ -550,7 +527,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
// Might not be in it
num_erased = kernel_->client_tag_index->erase(entry);
DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
- if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
+ if (!SyncAssert(!kernel_->parent_child_index->Contains(entry),
FROM_HERE,
"Deleted entry still present",
(&trans)))
@@ -589,8 +566,6 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
// Note the dance around incrementing |it|, since we sometimes erase().
if ((IsRealDataType(local_type) && types.Has(local_type)) ||
(IsRealDataType(server_type) && types.Has(server_type))) {
- if (!UnlinkEntryFromOrder(*it, &trans, &lock, DATA_TYPE_PURGE))
- return false;
int64 handle = (*it)->ref(META_HANDLE);
kernel_->metahandles_to_purge->insert(handle);
@@ -606,8 +581,8 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
num_erased =
kernel_->unapplied_update_metahandles[server_type].erase(handle);
DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
- num_erased = kernel_->parent_id_child_index->erase(entry);
- DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
+ if (kernel_->parent_child_index->Contains(entry))
+ kernel_->parent_child_index->Remove(entry);
kernel_->metahandles_index->erase(it++);
if ((types_to_journal.Has(local_type) ||
@@ -730,14 +705,6 @@ bool Directory::InitialSyncEndedForType(
return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}
-template <class T> void Directory::TestAndSet(
- T* kernel_data, const T* data_to_set) {
- if (*kernel_data != *data_to_set) {
- *kernel_data = *data_to_set;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- }
-}
-
string Directory::store_birthday() const {
ScopedKernelLock lock(this);
return kernel_->persisted_info.store_birthday;
@@ -1030,76 +997,6 @@ void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
invariant_check_level_ = check_level;
}
-bool Directory::UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock,
- UnlinkReason unlink_reason) {
- if (!SyncAssert(!trans || this == trans->directory(),
- FROM_HERE,
- "Transaction not pointing to the right directory",
- trans))
- return false;
- Id old_previous = entry->ref(PREV_ID);
- Id old_next = entry->ref(NEXT_ID);
-
- entry->put(NEXT_ID, entry->ref(ID));
- entry->put(PREV_ID, entry->ref(ID));
- entry->mark_dirty(kernel_->dirty_metahandles);
-
- if (!old_previous.IsRoot()) {
- if (old_previous == old_next) {
- // Note previous == next doesn't imply previous == next == Get(ID). We
- // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
- // and deleted before receiving the server ID in the commit response.
- if (!SyncAssert(
- (old_next == entry->ref(ID)) || !old_next.ServerKnows(),
- FROM_HERE,
- "Encounteered inconsistent entry while deleting",
- trans)) {
- return false;
- }
- return true; // Done if we were already self-looped (hence unlinked).
- }
- EntryKernel* previous_entry = GetEntryById(old_previous, lock);
- ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
- // TODO(tim): Multiple asserts here for bug 101039 investigation.
- if (type == AUTOFILL) {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous autofill entry",
- trans)) {
- return false;
- }
- } else {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous entry",
- trans)) {
- return false;
- }
- }
- if (unlink_reason == NODE_MANIPULATION)
- trans->SaveOriginal(previous_entry);
- previous_entry->put(NEXT_ID, old_next);
- previous_entry->mark_dirty(kernel_->dirty_metahandles);
- }
-
- if (!old_next.IsRoot()) {
- EntryKernel* next_entry = GetEntryById(old_next, lock);
- if (!SyncAssert(next_entry != NULL,
- FROM_HERE,
- "Could not find next entry",
- trans)) {
- return false;
- }
- if (unlink_reason == NODE_MANIPULATION)
- trans->SaveOriginal(next_entry);
- next_entry->put(PREV_ID, old_previous);
- next_entry->mark_dirty(kernel_->dirty_metahandles);
- }
- return true;
-}
-
int64 Directory::NextMetahandle() {
ScopedKernelLock lock(this);
int64 metahandle = (kernel_->next_metahandle)++;
@@ -1121,171 +1018,161 @@ Id Directory::NextId() {
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
ScopedKernelLock lock(this);
- return (GetPossibleFirstChild(lock, id) != NULL);
+ return kernel_->parent_child_index->GetChildren(id) != NULL;
}
-bool Directory::GetFirstChildId(BaseTransaction* trans,
- const Id& parent_id,
- Id* first_child_id) {
+Id Directory::GetFirstChildId(BaseTransaction* trans,
+ const EntryKernel* parent) {
+ DCHECK(parent);
+ DCHECK(parent->ref(IS_DIR));
+
ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
- if (!entry) {
- *first_child_id = Id();
- return true;
- }
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(parent->ref(ID));
- // Walk to the front of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(PREV_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(PREV_ID), &lock);
- if (!entry) {
- *first_child_id = Id();
- return false;
- }
- }
- *first_child_id = entry->ref(ID);
- return true;
+ // We're expected to return root if there are no children.
+ if (!children)
+ return Id();
+
+ return (*children->begin())->ref(ID);
}
-bool Directory::GetLastChildIdForTest(
- BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
+syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
- if (!entry) {
- *last_child_id = Id();
- return true;
- }
- // Walk to the back of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(NEXT_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(NEXT_ID), &lock);
- if (!entry) {
- *last_child_id = Id();
- return false;
- }
+ DCHECK(ParentChildIndex::ShouldInclude(e));
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+ DCHECK(children && !children->empty());
+ OrderedChildSet::const_iterator i = children->find(e);
+ DCHECK(i != children->end());
+
+ if (i == children->begin()) {
+ return Id();
+ } else {
+ i--;
+ return (*i)->ref(ID);
}
-
- *last_child_id = entry->ref(ID);
- return true;
}
-Id Directory::ComputePrevIdFromServerPosition(
- const EntryKernel* entry,
- const syncable::Id& parent_id) {
+syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
ScopedKernelLock lock(this);
- // Find the natural insertion point in the parent_id_child_index, and
- // work back from there, filtering out ineligible candidates.
- ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(
- lock,
- parent_id,
- NodeOrdinalToInt64(entry->ref(SERVER_ORDINAL_IN_PARENT)),
- entry->ref(ID));
- ParentIdChildIndex::iterator first_sibling =
- GetParentChildIndexLowerBound(lock, parent_id);
-
- while (sibling != first_sibling) {
- --sibling;
- EntryKernel* candidate = *sibling;
-
- // The item itself should never be in the range under consideration.
- DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
-
- // Ignore unapplied updates -- they might not even be server-siblings.
- if (candidate->ref(IS_UNAPPLIED_UPDATE))
- continue;
+ DCHECK(ParentChildIndex::ShouldInclude(e));
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+ DCHECK(children && !children->empty());
+ OrderedChildSet::const_iterator i = children->find(e);
+ DCHECK(i != children->end());
+
+ i++;
+ if (i == children->end()) {
+ return Id();
+ } else {
+ return (*i)->ref(ID);
+ }
+}
- // We can't trust the SERVER_ fields of unsynced items, but they are
- // potentially legitimate local predecessors. In the case where
- // |update_item| and an unsynced item wind up in the same insertion
- // position, we need to choose how to order them. The following check puts
- // the unapplied update first; removing it would put the unsynced item(s)
- // first.
- if (candidate->ref(IS_UNSYNCED))
- continue;
+// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
+// items as siblings of items that do not maintain positions. It is required
+// only for tests. See crbug.com/178282.
+void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
+ DCHECK(!e->ref(IS_DEL));
+ if (!e->ShouldMaintainPosition()) {
+ DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
+ return;
+ }
+ std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
+ DCHECK(!suffix.empty());
- // Skip over self-looped items, which are not valid predecessors. This
- // shouldn't happen in practice, but is worth defending against.
- if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
- !candidate->ref(PREV_ID).IsRoot()) {
- NOTREACHED();
- continue;
+ // Remove our item from the ParentChildIndex and remember to re-add it later.
+ ScopedKernelLock lock(this);
+ ScopedParentChildIndexUpdater updater(lock, e, kernel_->parent_child_index);
+
+ // Note: The ScopedParentChildIndexUpdater will update this set for us as we
+ // leave this function.
+ const OrderedChildSet* siblings =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+
+ if (!siblings) {
+ // This parent currently has no other children.
+ DCHECK(predecessor->ref(ID).IsRoot());
+ UniquePosition pos = UniquePosition::InitialPosition(suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
+ }
+
+ if (predecessor->ref(ID).IsRoot()) {
+ // We have at least one sibling, and we're inserting to the left of them.
+ UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
+
+ UniquePosition pos;
+ if (!successor_pos.IsValid()) {
+ // If all our successors are of non-positionable types, just create an
+ // initial position. We arbitrarily choose to sort invalid positions to
+ // the right of the valid positions.
+ //
+ // We really shouldn't need to support this. See TODO above.
+ pos = UniquePosition::InitialPosition(suffix);
+ } else {
+ DCHECK(!siblings->empty());
+ pos = UniquePosition::Before(successor_pos, suffix);
}
- return candidate->ref(ID);
+
+ e->put(UNIQUE_POSITION, pos);
+ return;
}
- // This item will be the first in the sibling order.
- return Id();
-}
-Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking) {
- kernel_->needle.put(PARENT_ID, parent_id);
- kernel_->needle.put(SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(position_in_parent));
- kernel_->needle.put(ID, item_id_for_tiebreaking);
- return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
-}
+ // We can't support placing an item after an invalid position. Fortunately,
+ // the tests don't exercise this particular case. We should not support
+ // siblings with invalid positions at all. See TODO above.
+ DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
+
+ OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
+ DCHECK(neighbour != siblings->end());
+
+ ++neighbour;
+ if (neighbour == siblings->end()) {
+ // Inserting at the end of the list.
+ UniquePosition pos = UniquePosition::After(
+ predecessor->ref(UNIQUE_POSITION),
+ suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
+ }
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // Peg the parent ID, and use the least values for the remaining
- // index variables.
- return LocateInParentChildIndex(lock, parent_id,
- std::numeric_limits<int64>::min(),
- Id::GetLeastIdForLexicographicComparison());
-}
+ EntryKernel* successor = *neighbour;
+
+ // Another mixed valid and invalid position case. This one could be supported
+ // in theory, but we're trying to deprecate support for siblings with and
+ // without valid positions. See TODO above.
+ DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // The upper bound of |parent_id|'s range is the lower
- // bound of |++parent_id|'s range.
- return GetParentChildIndexLowerBound(lock,
- parent_id.GetLexicographicSuccessor());
+ // Finally, the normal case: inserting between two elements.
+ UniquePosition pos = UniquePosition::Between(
+ predecessor->ref(UNIQUE_POSITION),
+ successor->ref(UNIQUE_POSITION),
+ suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
}
+// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
const Id& parent_id,
Directory::ChildHandles* result) {
- typedef ParentIdChildIndex::iterator iterator;
- CHECK(result);
- for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
- end = GetParentChildIndexUpperBound(lock, parent_id);
- i != end; ++i) {
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(parent_id);
+ if (!children)
+ return;
+
+ for (OrderedChildSet::const_iterator i = children->begin();
+ i != children->end(); ++i) {
DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
result->push_back((*i)->ref(META_HANDLE));
}
}
-EntryKernel* Directory::GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id) {
- // We can use the server positional ordering as a hint because it's generally
- // in sync with the local (linked-list) positional ordering, and we have an
- // index on it.
- ParentIdChildIndex::iterator candidate =
- GetParentChildIndexLowerBound(lock, parent_id);
- ParentIdChildIndex::iterator end_range =
- GetParentChildIndexUpperBound(lock, parent_id);
- for (; candidate != end_range; ++candidate) {
- EntryKernel* entry = *candidate;
- // Filter out self-looped items, which are temporarily not in the child
- // ordering.
- if (entry->ref(PREV_ID).IsRoot() ||
- entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
- return entry;
- }
- }
- // There were no children in the linked list.
- return NULL;
-}
-
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
: scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index 51ceaf82bd..61c5522491 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -17,6 +17,7 @@
#include "sync/syncable/dir_open_result.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/parent_child_index.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/syncable_delete_journal.h"
@@ -86,21 +87,6 @@ struct ClientTagIndexer {
static bool ShouldInclude(const EntryKernel* a);
};
-// This index contains EntryKernels ordered by parent ID and metahandle.
-// It allows efficient lookup of the children of a given parent.
-struct ParentIdAndHandleIndexer {
- // This index is of the parent ID and metahandle. We use a custom
- // comparator.
- class Comparator {
- public:
- bool operator() (const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const;
- };
-
- // This index does not include deleted items.
- static bool ShouldInclude(const EntryKernel* a);
-};
-
// Given an Indexer providing the semantics of an index, defines the
// set type used to actually contain the index.
template <typename Indexer>
@@ -108,13 +94,6 @@ struct Index {
typedef std::set<EntryKernel*, typename Indexer::Comparator> Set;
};
-// Reason for unlinking.
-enum UnlinkReason {
- NODE_MANIPULATION, // To be used by any operation manipulating the linked
- // list.
- DATA_TYPE_PURGE // To be used when purging a dataype.
-};
-
enum InvariantCheckLevel {
OFF = 0, // No checking.
VERIFY_CHANGES = 1, // Checks only mutated entries. Does not check hierarchy.
@@ -126,9 +105,7 @@ class SYNC_EXPORT Directory {
friend class Entry;
friend class MutableEntry;
friend class ReadTransaction;
- friend class ReadTransactionWithoutDB;
friend class ScopedKernelLock;
- friend class ScopedKernelUnlock;
friend class WriteTransaction;
friend class SyncableDirectoryTest;
friend class syncer::TestUserShare;
@@ -233,9 +210,8 @@ class SYNC_EXPORT Directory {
void Close();
int64 NextMetahandle();
- // Always returns a negative id. Positive client ids are generated
- // by the server only.
- Id NextId();
+ // Returns a negative integer unique to this client.
+ syncable::Id NextId();
bool good() const { return NULL != kernel_; }
@@ -320,13 +296,6 @@ class SYNC_EXPORT Directory {
const Id& new_parent_id);
void ClearDirtyMetahandles();
- // These don't do semantic checking.
- // The semantic checking is implemented higher up.
- bool UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock,
- UnlinkReason unlink_reason);
-
DirOpenResult OpenImpl(
const std::string& name,
DirectoryChangeDelegate* delegate,
@@ -337,8 +306,6 @@ class SYNC_EXPORT Directory {
// before calling.
EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
- template <class T> void TestAndSet(T* kernel_data, const T* data_to_set);
-
public:
typedef std::vector<int64> ChildHandles;
@@ -361,23 +328,27 @@ class SYNC_EXPORT Directory {
// and fill in |*first_child_id| with its id. Fills in a root Id if
// parent has no children. Returns true if the first child was
// successfully found, or false if an error was encountered.
- bool GetFirstChildId(BaseTransaction* trans, const Id& parent_id,
- Id* first_child_id) WARN_UNUSED_RESULT;
+ Id GetFirstChildId(BaseTransaction* trans, const EntryKernel* parent);
- // Find the last child in the positional ordering under a parent,
- // and fill in |*first_child_id| with its id. Fills in a root Id if
- // parent has no children. Returns true if the first child was
- // successfully found, or false if an error was encountered.
- bool GetLastChildIdForTest(BaseTransaction* trans, const Id& parent_id,
- Id* last_child_id) WARN_UNUSED_RESULT;
+ // These functions allow one to fetch the next or previous item under
+ // the same folder. Returns the "root" ID if there is no predecessor
+ // or successor.
+ //
+ // TODO(rlarocque): These functions are used mainly for tree traversal. We
+ // should replace these with an iterator API. See crbug.com/178275.
+ syncable::Id GetPredecessorId(EntryKernel*);
+ syncable::Id GetSuccessorId(EntryKernel*);
- // Compute a local predecessor position for |update_item|. The position
- // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
- // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
- // children of |parent_id|.
- Id ComputePrevIdFromServerPosition(
- const EntryKernel* update_item,
- const syncable::Id& parent_id);
+ // Places |e| as a successor to |predecessor|. If |predecessor| is NULL,
+ // |e| will be placed as the left-most item in its folder.
+ //
+ // Both |e| and |predecessor| must be valid entries under the same parent.
+ //
+ // TODO(rlarocque): This function includes limited support for placing items
+ // with valid positions (ie. Bookmarks) as siblings of items that have no set
+ // ordering (ie. Autofill items). This support is required only for tests,
+ // and should be removed. See crbug.com/178282.
+ void PutPredecessor(EntryKernel* e, EntryKernel* predecessor);
// SaveChanges works by taking a consistent snapshot of the current Directory
// state and indices (by deep copy) under a ReadTransaction, passing this
@@ -486,12 +457,9 @@ class SYNC_EXPORT Directory {
Directory& operator = (const Directory&);
public:
+ // These contain all items, including IS_DEL items.
typedef Index<MetahandleIndexer>::Set MetahandlesIndex;
typedef Index<IdIndexer>::Set IdsIndex;
- // All entries in memory must be in both the MetahandlesIndex and
- // the IdsIndex, but only non-deleted entries will be the
- // ParentIdChildIndex.
- typedef Index<ParentIdAndHandleIndexer>::Set ParentIdChildIndex;
// Contains both deleted and existing entries with tags.
// We can't store only existing tags because the client would create
@@ -538,7 +506,11 @@ class SYNC_EXPORT Directory {
MetahandlesIndex* metahandles_index;
// Entries indexed by id
IdsIndex* ids_index;
- ParentIdChildIndex* parent_id_child_index;
+
+ // Contains non-deleted items, indexed according to parent and position
+ // within parent. Protected by the ScopedKernelLock.
+ ParentChildIndex* parent_child_index;
+
ClientTagIndex* client_tag_index;
// So we don't have to create an EntryKernel every time we want to
// look something up in an index. Needle in haystack metaphor.
@@ -583,36 +555,11 @@ class SYNC_EXPORT Directory {
const WeakHandle<TransactionObserver> transaction_observer;
};
- // Helper method used to do searches on |parent_id_child_index|.
- ParentIdChildIndex::iterator LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking);
-
- // Return an iterator to the beginning of the range of the children of
- // |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexLowerBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
- // Return an iterator to just past the end of the range of the
- // children of |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexUpperBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
// Append the handles of the children of |parent_id| to |result|.
void AppendChildHandles(
const ScopedKernelLock& lock,
const Id& parent_id, Directory::ChildHandles* result);
- // Return a pointer to what is probably (but not certainly) the
- // first child of |parent_id|, or NULL if |parent_id| definitely has
- // no children.
- EntryKernel* GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id);
-
Kernel* kernel_;
scoped_ptr<DirectoryBackingStore> store_;
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index 06839574b0..7e2fd5a8d4 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -10,13 +10,8 @@
#include "base/base64.h"
#include "base/debug/trace_event.h"
-#include "base/file_util.h"
-#include "base/hash_tables.h"
#include "base/logging.h"
-#include "base/metrics/histogram.h"
#include "base/rand_util.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
#include "base/stringprintf.h"
#include "base/time.h"
#include "sql/connection.h"
@@ -27,6 +22,7 @@
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_columns.h"
+#include "sync/syncable/syncable_util.h"
#include "sync/util/time.h"
using std::string;
@@ -39,7 +35,7 @@ namespace syncable {
static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
-const int32 kCurrentDBVersion = 85;
+const int32 kCurrentDBVersion = 86;
// Iterate over the fields of |entry| and bind each to |statement| for
// updating. Returns the number of args bound.
@@ -64,13 +60,14 @@ void BindFields(const EntryKernel& entry,
for ( ; i < STRING_FIELDS_END; ++i) {
statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
}
- std::string temp;
for ( ; i < PROTO_FIELDS_END; ++i) {
+ std::string temp;
entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
statement->BindBlob(index++, temp.data(), temp.length());
}
- for( ; i < ORDINAL_FIELDS_END; ++i) {
- temp = entry.ref(static_cast<OrdinalField>(i)).ToInternalValue();
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
+ std::string temp;
+ entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
statement->BindBlob(index++, temp.data(), temp.length());
}
}
@@ -104,19 +101,18 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
statement->ColumnBlob(i), statement->ColumnByteLength(i));
}
- for( ; i < ORDINAL_FIELDS_END; ++i) {
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
std::string temp;
statement->ColumnBlobAsString(i, &temp);
- NodeOrdinal unpacked_ord(temp);
- // Its safe to assume that an invalid ordinal is a sign that
- // some external corruption has occurred. Return NULL to force
- // a re-download of the sync data.
- if(!unpacked_ord.IsValid()) {
- DVLOG(1) << "Unpacked invalid ordinal. Signaling that the DB is corrupt";
+ sync_pb::UniquePosition proto;
+ if (!proto.ParseFromString(temp)) {
+ DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
return scoped_ptr<EntryKernel>(NULL);
}
- kernel->mutable_ref(static_cast<OrdinalField>(i)) = unpacked_ord;
+
+ kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
+ UniquePosition::FromProto(proto);
}
return kernel.Pass();
}
@@ -400,6 +396,13 @@ bool DirectoryBackingStore::InitializeTables() {
version_on_disk = 85;
}
+ // Version 86 migration converts bookmarks to the unique positioning system.
+ // It also introduces a new field to store a unique ID for each bookmark.
+ if (version_on_disk == 85) {
+ if (MigrateVersion85To86())
+ version_on_disk = 86;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -961,7 +964,8 @@ bool DirectoryBackingStore::MigrateVersion76To77() {
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601). Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
+// http://stackoverflow.com/questions/5398557/
+// java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
@@ -1085,7 +1089,7 @@ bool DirectoryBackingStore::MigrateVersion83To84() {
}
bool DirectoryBackingStore::MigrateVersion84To85() {
- // Version 84 removes the initial_sync_ended flag.
+ // Version 85 removes the initial_sync_ended flag.
if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
return false;
if (!CreateModelsTable())
@@ -1101,6 +1105,131 @@ bool DirectoryBackingStore::MigrateVersion84To85() {
return true;
}
+bool DirectoryBackingStore::MigrateVersion85To86() {
+ // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
+ // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
+ // and SERVER_UNIQUE_POSITION.
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "server_unique_position BLOB")) {
+ return false;
+ }
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "unique_position BLOB")) {
+ return false;
+ }
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "unique_bookmark_tag VARCHAR")) {
+ return false;
+ }
+
+ // Fetch the cache_guid from the DB, because we don't otherwise have access to
+ // it from here.
+ sql::Statement get_cache_guid(db_->GetUniqueStatement(
+ "SELECT cache_guid FROM share_info"));
+ if (!get_cache_guid.Step()) {
+ return false;
+ }
+ std::string cache_guid = get_cache_guid.ColumnString(0);
+ DCHECK(!get_cache_guid.Step());
+ DCHECK(get_cache_guid.Succeeded());
+
+ sql::Statement get(db_->GetUniqueStatement(
+ "SELECT "
+ " metahandle, "
+ " id, "
+ " specifics, "
+ " is_dir, "
+ " unique_server_tag, "
+ " server_ordinal_in_parent "
+ "FROM metas"));
+
+ // Note that we set both the local and server position based on the server
+  // position. We will lose any unsynced local position changes. Unfortunately,
+ // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
+  // can't be translated into a UNIQUE_POSITION in a reliable way.
+ sql::Statement put(db_->GetCachedStatement(
+ SQL_FROM_HERE,
+ "UPDATE metas SET"
+ " server_unique_position = ?,"
+ " unique_position = ?,"
+ " unique_bookmark_tag = ?"
+ "WHERE metahandle = ?"));
+
+ while (get.Step()) {
+ int64 metahandle = get.ColumnInt64(0);
+
+ std::string id_string;
+ get.ColumnBlobAsString(1, &id_string);
+
+ sync_pb::EntitySpecifics specifics;
+ specifics.ParseFromArray(
+ get.ColumnBlob(2), get.ColumnByteLength(2));
+
+ bool is_dir = get.ColumnBool(3);
+
+ std::string server_unique_tag = get.ColumnString(4);
+
+ std::string ordinal_string;
+ get.ColumnBlobAsString(5, &ordinal_string);
+ NodeOrdinal ordinal(ordinal_string);
+
+
+ std::string unique_bookmark_tag;
+
+ // We only maintain positions for bookmarks that are not server-defined
+ // top-level folders.
+ UniquePosition position;
+ if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
+ && !(is_dir && !server_unique_tag.empty())) {
+ if (id_string.at(0) == 'c') {
+ // We found an uncommitted item. This is rare, but fortunate. This
+ // means we can set the bookmark tag according to the originator client
+ // item ID and originator cache guid, because (unlike the other case) we
+ // know that this client is the originator.
+ unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
+ cache_guid,
+ id_string.substr(1));
+ } else {
+ // If we've already committed the item, then we don't know who the
+ // originator was. We do not have access to the originator client item
+ // ID and originator cache guid at this point.
+ //
+ // We will base our hash entirely on the server ID instead. This is
+ // incorrect, but at least all clients that undergo this migration step
+ // will be incorrect in the same way.
+ //
+ // To get everyone back into a synced state, we will update the bookmark
+ // tag according to the originator_cache_guid and originator_item_id
+ // when we see updates for this item. That should ensure that commonly
+ // modified items will end up with the proper tag values eventually.
+ unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
+ std::string(), // cache_guid left intentionally blank.
+ id_string.substr(1));
+ }
+
+ int64 int_position = NodeOrdinalToInt64(ordinal);
+ position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
+ } else {
+ // Leave bookmark_tag and position at their default (invalid) values.
+ }
+
+ std::string position_blob;
+ position.SerializeToString(&position_blob);
+ put.BindBlob(0, position_blob.data(), position_blob.length());
+ put.BindBlob(1, position_blob.data(), position_blob.length());
+ put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
+ put.BindInt64(3, metahandle);
+
+ if (!put.Run())
+ return false;
+ put.Reset(true);
+ }
+
+ SetVersion(86);
+ needs_column_refresh_ = true;
+ return true;
+}
+
bool DirectoryBackingStore::CreateTables() {
DVLOG(1) << "First run, creating tables";
// Create two little tables share_version and share_info
@@ -1140,7 +1269,7 @@ bool DirectoryBackingStore::CreateTables() {
"?);")); // bag_of_chips
s.BindString(0, dir_name_); // id
s.BindString(1, dir_name_); // name
- s.BindString(2, ""); // store_birthday
+ s.BindString(2, std::string()); // store_birthday
// TODO(akalin): Remove this unused db_create_version field. (Or
// actually use it for something.) http://crbug.com/118356
s.BindString(3, "Unknown"); // db_create_version
@@ -1165,13 +1294,10 @@ bool DirectoryBackingStore::CreateTables() {
const int64 now = TimeToProtoTime(base::Time::Now());
sql::Statement s(db_->GetUniqueStatement(
"INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) "
- "VALUES ( \"r\", 1, 1, ?, ?, ?)"));
+ "( id, metahandle, is_dir, ctime, mtime ) "
+ "VALUES ( \"r\", 1, 1, ?, ? )"));
s.BindInt64(0, now);
s.BindInt64(1, now);
- const std::string ord =
- NodeOrdinal::CreateInitialOrdinal().ToInternalValue();
- s.BindBlob(2, ord.data(), ord.length());
if (!s.Run())
return false;
@@ -1273,8 +1399,8 @@ bool DirectoryBackingStore::CreateShareInfoTableVersion71(
}
// This function checks to see if the given list of Metahandles has any nodes
-// whose PREV_ID, PARENT_ID or NEXT_ID values refer to ID values that do not
-// actually exist. Returns true on success.
+// whose PARENT_ID values refer to ID values that do not actually exist.
+// Returns true on success.
bool DirectoryBackingStore::VerifyReferenceIntegrity(
const syncable::MetahandlesIndex &index) {
TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
@@ -1295,10 +1421,10 @@ bool DirectoryBackingStore::VerifyReferenceIntegrity(
for (MetahandlesIndex::const_iterator it = index.begin();
it != index.end(); ++it) {
EntryKernel* entry = *it;
- bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
- bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
- is_ok = is_ok && prev_exists && parent_exists && next_exists;
+ if (!parent_exists) {
+ return false;
+ }
}
return is_ok;
}
@@ -1317,7 +1443,7 @@ bool DirectoryBackingStore::LoadEntriesInternal(const std::string& table,
while (s.Step()) {
scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
// A null kernel is evidence of external data corruption.
- if (!kernel.get())
+ if (!kernel)
return false;
bucket->insert(kernel.release());
}
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
index 3ee884487d..1b0b7211aa 100644
--- a/sync/syncable/directory_backing_store.h
+++ b/sync/syncable/directory_backing_store.h
@@ -172,6 +172,7 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
bool MigrateVersion82To83();
bool MigrateVersion83To84();
bool MigrateVersion84To85();
+ bool MigrateVersion85To86();
scoped_ptr<sql::Connection> db_;
sql::Statement save_meta_statment_;
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
index a9da2ca04c..6b5f5aff11 100644
--- a/sync/syncable/directory_backing_store_unittest.cc
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -72,15 +72,16 @@ class MigrationTest : public testing::TestWithParam<int> {
void SetUpVersion83Database(sql::Connection* connection);
void SetUpVersion84Database(sql::Connection* connection);
void SetUpVersion85Database(sql::Connection* connection);
+ void SetUpVersion86Database(sql::Connection* connection);
void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
- SetUpVersion85Database(connection); // Prepopulates data.
+ SetUpVersion86Database(connection); // Prepopulates data.
scoped_ptr<TestDirectoryBackingStore> dbs(
new TestDirectoryBackingStore(GetUsername(), connection));
+ ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
ASSERT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
}
private:
@@ -2426,6 +2427,96 @@ void MigrationTest::SetUpVersion85Database(sql::Connection* connection) {
ASSERT_TRUE(connection->CommitTransaction());
}
+void MigrationTest::SetUpVersion86Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',86);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB,"
+ " transaction_version BIGINT default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,b"
+ "ase_version bigint default -1,server_version bigint default 0,local_e"
+ "xternal_id bigint default 0,transaction_version bigint default 0,mtim"
+ "e bigint default 0,server_mtime bigint default 0,ctime bigint default"
+ " 0,server_ctime bigint default 0,id varchar(255) default 'r',parent_i"
+ "d varchar(255) default 'r',server_parent_id varchar(255) default 'r',"
+ "is_unsynced bit default 0,is_unapplied_update bit default 0,is_del bi"
+ "t default 0,is_dir bit default 0,server_is_dir bit default 0,server_i"
+ "s_del bit default 0,non_unique_name varchar,server_non_unique_name va"
+ "rchar(255),unique_server_tag varchar,unique_client_tag varchar,specif"
+ "ics blob,server_specifics blob,base_server_specifics blob,server_uniq"
+ "ue_position blob,unique_position blob,unique_bookmark_tag blob);"
+ "INSERT INTO 'metas' VALUES(1,-1,0,0,0,"
+ META_PROTO_TIMES_VALS(1)
+ ",'r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,"
+ "X'',X'',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(6,694,694,6,0,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9',0,0,0,1,1,0,'T"
+ "he Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000',NULL,X'"
+ "',X'',X'');"
+ "INSERT INTO 'metas' VALUES(7,663,663,0,0,"
+ META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r',0,0,0,1,1,0,'Google Chro"
+ "me','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(8,664,664,0,0,"
+ META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7',0,0,0,1,1,0,'B"
+ "ookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2"
+ "881000',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(9,665,665,1,0,"
+ META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8',0,0,0,1,1,0,'B"
+ "ookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C288100"
+ "0',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(10,666,666,2,0,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8',0,0,0,1,1,0,"
+ "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,X'C2881000"
+ "',X'C2881000',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(11,683,683,8,0,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6',0,0,0,0,0,0,"
+ "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,NU"
+ "LL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1206"
+ "414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756D2E6"
+ "F72672F6F7468657212084146414756415346',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(12,685,685,9,0,"
+ META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6',0,0,0,1,1,0,"
+ "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000'"
+ ",NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(13,687,687,10,0,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6',0,0,0,0,0,0"
+ ",'ICANN | Internet Corporation for Assigned Names and Numbers','ICANN"
+ " | Internet Corporation for Assigned Names and Numbers',NULL,NULL,X'C"
+ "28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504E47415846"
+ "3041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
+ "20744414146415346',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(14,692,692,11,0,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6',0,0,0,0,0,0"
+ ",'The WebKit Open Source Project','The WebKit Open Source Project',NU"
+ "LL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E475"
+ "8',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259"
+ "',NULL,X'',X'',X'');"
+ "CREATE TABLE deleted_metas (metahandle bigint primary key ON CONFLICT"
+ " FAIL,base_version bigint default -1,server_version bigint default 0,"
+ "local_external_id bigint default 0,transaction_version bigint default"
+ " 0,mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
+ "default 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defau"
+ "lt 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,s"
+ "erver_is_del bit default 0,non_unique_name varchar,server_non_unique_"
+ "name varchar(255),unique_server_tag varchar,unique_client_tag varchar"
+ ",specifics blob,server_specifics blob,base_server_specifics blob,serv"
+ "er_unique_position blob,unique_position blob,unique_bookmark_tag blob"
+ ");"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birt"
+ "hday TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
+ "fault -2, cache_guid TEXT, notification_state BLOB, bag_of_chips BLOB"
+ ");"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.or"
+ "g','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-13107"
+ "8,'9010788312004066376x-6609234393368420856x',NULL,NULL);"));
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2812,34 +2903,6 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion80To81) {
ASSERT_EQ(expected_ordinal, actual_ordinal);
}
-TEST_F(DirectoryBackingStoreTest, DetectInvalidOrdinal) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion81Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(81, dbs->GetVersion());
-
- // Insert row with bad ordinal.
- const int64 now = TimeToProtoTime(base::Time::Now());
- sql::Statement s(connection.GetUniqueStatement(
- "INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) "
- "VALUES( \"c-invalid\", 9999, 1, ?, ?, \" \")"));
- s.BindInt64(0, now);
- s.BindInt64(1, now);
- ASSERT_TRUE(s.Run());
-
- // Trying to unpack this entry should signal that the DB is corrupted.
- MetahandlesIndex entry_bucket;
- JournalIndex delete_journals;;
- STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
- Directory::KernelLoadInfo kernel_load_info;
- ASSERT_EQ(FAILED_DATABASE_CORRUPT,
- dbs->Load(&entry_bucket, &delete_journals, &kernel_load_info));
-}
-
TEST_F(DirectoryBackingStoreTest, MigrateVersion81To82) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2897,6 +2960,130 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion84To85) {
ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
}
+TEST_F(DirectoryBackingStoreTest, MigrateVersion85To86) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion85Database(&connection);
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "next_id"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "prev_id"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_position"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "server_unique_position"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_TRUE(dbs->MigrateVersion85To86());
+ EXPECT_EQ(86, dbs->GetVersion());
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_position"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "server_unique_position"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->RefreshColumns());
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "next_id"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "prev_id"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
+
+ {
+ MetahandlesIndex metas;
+ STLElementDeleter<MetahandlesIndex> deleter(&metas);
+ dbs->LoadEntries(&metas);
+
+ EntryKernel needle;
+
+ // Grab a bookmark and examine it.
+ needle.put(META_HANDLE, 5);
+ MetahandlesIndex::iterator i = metas.find(&needle);
+ ASSERT_FALSE(i == metas.end());
+ EntryKernel* bm = *i;
+ ASSERT_EQ(bm->ref(ID).value(), "s_ID_5");
+
+ EXPECT_TRUE(bm->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(bm->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_EQ(UniquePosition::kSuffixLength,
+ bm->ref(UNIQUE_BOOKMARK_TAG).length());
+
+ // Grab a non-bookmark and examine it.
+ needle.put(META_HANDLE, 1);
+ MetahandlesIndex::iterator j = metas.find(&needle);
+ ASSERT_FALSE(j == metas.end());
+ EntryKernel* root = *j;
+ ASSERT_EQ(root->ref(ID).value(), "r");
+
+ EXPECT_FALSE(root->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(root->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(root->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure we didn't mistake the bookmark root node for a real bookmark.
+ needle.put(META_HANDLE, 8);
+ MetahandlesIndex::iterator k = metas.find(&needle);
+ ASSERT_FALSE(k == metas.end());
+ EntryKernel* bm_root = *k;
+ ASSERT_EQ(bm_root->ref(ID).value(), "s_ID_8");
+ ASSERT_EQ(bm_root->ref(UNIQUE_SERVER_TAG), "google_chrome_bookmarks");
+
+ EXPECT_FALSE(bm_root->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(bm_root->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(bm_root->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure we didn't assign positions to server-created folders, either.
+ needle.put(META_HANDLE, 10);
+ MetahandlesIndex::iterator l = metas.find(&needle);
+ ASSERT_FALSE(l == metas.end());
+ EntryKernel* perm_folder = *l;
+ ASSERT_EQ(perm_folder->ref(ID).value(), "s_ID_10");
+ ASSERT_EQ(perm_folder->ref(UNIQUE_SERVER_TAG), "other_bookmarks");
+
+ EXPECT_FALSE(perm_folder->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(perm_folder->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(perm_folder->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure that the syncable::Directory and the migration code agree on
+ // which items should or should not have unique position values. This test
+ // may become obsolete if the directory's definition of that function
+ // changes, but, until then, this is a useful test.
+ for (MetahandlesIndex::iterator it = metas.begin();
+ it != metas.end(); it++) {
+ SCOPED_TRACE((*it)->ref(ID));
+ if ((*it)->ShouldMaintainPosition()) {
+ EXPECT_TRUE((*it)->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE((*it)->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE((*it)->ref(UNIQUE_BOOKMARK_TAG).empty());
+ } else {
+ EXPECT_FALSE((*it)->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE((*it)->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE((*it)->ref(UNIQUE_BOOKMARK_TAG).empty());
+ }
+ }
+ }
+}
+
+TEST_F(DirectoryBackingStoreTest, DetectInvalidPosition) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion86Database(&connection);
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_EQ(86, dbs->GetVersion());
+
+ // Insert row with bad position.
+ sql::Statement s(connection.GetUniqueStatement(
+ "INSERT INTO metas "
+ "( id, metahandle, is_dir, ctime, mtime,"
+ " unique_position, server_unique_position) "
+ "VALUES('c-invalid', 9999, 1, 0, 0, 'BAD_POS', 'BAD_POS')"));
+ ASSERT_TRUE(s.Run());
+
+ // Trying to unpack this entry should signal that the DB is corrupted.
+ MetahandlesIndex entry_bucket;
+ JournalIndex delete_journals;;
+ STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
+ Directory::KernelLoadInfo kernel_load_info;
+ ASSERT_EQ(FAILED_DATABASE_CORRUPT,
+ dbs->Load(&entry_bucket, &delete_journals, &kernel_load_info));
+}
+
TEST_P(MigrationTest, ToCurrentVersion) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2955,6 +3142,12 @@ TEST_P(MigrationTest, ToCurrentVersion) {
case 84:
SetUpVersion84Database(&connection);
break;
+ case 85:
+ SetUpVersion85Database(&connection);
+ break;
+ case 86:
+ SetUpVersion86Database(&connection);
+ break;
default:
// If you see this error, it may mean that you've increased the
// database version number but you haven't finished adding unit tests
@@ -3169,7 +3362,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
}
INSTANTIATE_TEST_CASE_P(DirectoryBackingStore, MigrationTest,
- testing::Range(67, kCurrentDBVersion));
+ testing::Range(67, kCurrentDBVersion + 1));
TEST_F(DirectoryBackingStoreTest, ModelTypeIds) {
ModelTypeSet protocol_types = ProtocolTypes();
diff --git a/sync/syncable/entry.cc b/sync/syncable/entry.cc
index c6cc4f0bc0..2d98c6fdff 100644
--- a/sync/syncable/entry.cc
+++ b/sync/syncable/entry.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "base/json/string_escape.h"
+#include "base/string_util.h"
#include "sync/syncable/blob.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/syncable_base_transaction.h"
@@ -41,10 +42,6 @@ Directory* Entry::dir() const {
return basetrans_->directory();
}
-Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
- return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
-}
-
DictionaryValue* Entry::ToValue(Cryptographer* cryptographer) const {
DictionaryValue* entry_info = new DictionaryValue();
entry_info->SetBoolean("good", good());
@@ -96,11 +93,19 @@ ModelType Entry::GetModelType() const {
}
Id Entry::GetPredecessorId() const {
- return kernel_->ref(PREV_ID);
+ return dir()->GetPredecessorId(kernel_);
}
Id Entry::GetSuccessorId() const {
- return kernel_->ref(NEXT_ID);
+ return dir()->GetSuccessorId(kernel_);
+}
+
+Id Entry::GetFirstChildId() const {
+ return dir()->GetFirstChildId(basetrans_, kernel_);
+}
+
+bool Entry::ShouldMaintainPosition() const {
+ return kernel_->ShouldMaintainPosition();
}
std::ostream& operator<<(std::ostream& s, const Blob& blob) {
@@ -142,9 +147,9 @@ std::ostream& operator<<(std::ostream& os, const Entry& entry) {
&escaped_str);
os << g_metas_columns[i].name << ": " << escaped_str << ", ";
}
- for ( ; i < ORDINAL_FIELDS_END; ++i) {
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<OrdinalField>(i)).ToDebugString()
+ << kernel->ref(static_cast<UniquePositionField>(i)).ToDebugString()
<< ", ";
}
os << "TempFlags: ";
diff --git a/sync/syncable/entry.h b/sync/syncable/entry.h
index e1c8717a2e..f9d3f90cc9 100644
--- a/sync/syncable/entry.h
+++ b/sync/syncable/entry.h
@@ -93,7 +93,7 @@ class SYNC_EXPORT Entry {
DCHECK(kernel_);
return kernel_->ref(field);
}
- inline const NodeOrdinal& Get(OrdinalField field) const {
+ inline const UniquePosition& Get(UniquePositionField field) const {
DCHECK(kernel_);
return kernel_->ref(field);
}
@@ -107,6 +107,7 @@ class SYNC_EXPORT Entry {
Id GetPredecessorId() const;
Id GetSuccessorId() const;
+ Id GetFirstChildId() const;
inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
DCHECK(kernel_);
@@ -118,18 +119,16 @@ class SYNC_EXPORT Entry {
return kernel_->ref(ID).IsRoot();
}
+ // Returns true if this is an entry that is expected to maintain a certain
+ // sort ordering relative to its siblings under the same parent.
+ bool ShouldMaintainPosition() const;
+
Directory* dir() const;
const EntryKernel GetKernelCopy() const {
return *kernel_;
}
- // Compute a local predecessor position for |update_item|, based on its
- // absolute server position. The returned ID will be a valid predecessor
- // under SERVER_PARENT_ID that is consistent with the
- // SERVER_POSITION_IN_PARENT ordering.
- Id ComputePrevIdFromServerPosition(const Id& parent_id) const;
-
// Dumps all entry info into a DictionaryValue and returns it.
// Transfers ownership of the DictionaryValue to the caller.
base::DictionaryValue* ToValue(Cryptographer* cryptographer) const;
diff --git a/sync/syncable/entry_kernel.cc b/sync/syncable/entry_kernel.cc
index 5b21612139..01f2cae96d 100644
--- a/sync/syncable/entry_kernel.cc
+++ b/sync/syncable/entry_kernel.cc
@@ -21,6 +21,20 @@ EntryKernel::EntryKernel() : dirty_(false) {
EntryKernel::~EntryKernel() {}
+ModelType EntryKernel::GetModelType() const {
+ ModelType specifics_type = GetModelTypeFromSpecifics(ref(SPECIFICS));
+ if (specifics_type != UNSPECIFIED)
+ return specifics_type;
+ if (ref(ID).IsRoot())
+ return TOP_LEVEL_FOLDER;
+ // Loose check for server-created top-level folders that aren't
+ // bound to a particular model type.
+ if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
+ return TOP_LEVEL_FOLDER;
+
+ return UNSPECIFIED;
+}
+
ModelType EntryKernel::GetServerModelType() const {
ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS));
if (specifics_type != UNSPECIFIED)
@@ -35,6 +49,13 @@ ModelType EntryKernel::GetServerModelType() const {
return UNSPECIFIED;
}
+bool EntryKernel::ShouldMaintainPosition() const {
+ // We maintain positions for all bookmarks, except those that are
+ // server-created top-level folders.
+ return (GetModelTypeFromSpecifics(ref(SPECIFICS)) == BOOKMARKS) &&
+ !(!ref(UNIQUE_SERVER_TAG).empty() && ref(IS_DIR));
+}
+
namespace {
// Utility function to loop through a set of enum values and add the
@@ -96,10 +117,6 @@ base::StringValue* IdToValue(const Id& id) {
return id.ToValue();
}
-base::StringValue* OrdinalToValue(const NodeOrdinal& ord) {
- return new base::StringValue(ord.ToDebugString());
-}
-
base::FundamentalValue* BooleanToValue(bool bool_val) {
return new base::FundamentalValue(bool_val);
}
@@ -108,6 +125,10 @@ base::StringValue* StringToValue(const std::string& str) {
return new base::StringValue(str);
}
+base::StringValue* UniquePositionToValue(const UniquePosition& pos) {
+ return new base::StringValue(pos.ToDebugString());
+}
+
} // namespace
base::DictionaryValue* EntryKernel::ToValue(
@@ -160,10 +181,10 @@ base::DictionaryValue* EntryKernel::ToValue(
SetEncryptableProtoValues(*this, cryptographer, kernel_info,
PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
- // Ordinal fields
+ // UniquePosition fields
SetFieldValues(*this, kernel_info,
- &GetOrdinalFieldString, &OrdinalToValue,
- ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
+ &GetUniquePositionFieldString, &UniquePositionToValue,
+ UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
// Bit temps.
SetFieldValues(*this, kernel_info,
diff --git a/sync/syncable/entry_kernel.h b/sync/syncable/entry_kernel.h
index e9cf828a59..16a6e9707c 100644
--- a/sync/syncable/entry_kernel.h
+++ b/sync/syncable/entry_kernel.h
@@ -11,7 +11,7 @@
#include "base/values.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/immutable.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/metahandle_set.h"
@@ -83,9 +83,6 @@ enum IdField {
ID = ID_FIELDS_BEGIN,
PARENT_ID,
SERVER_PARENT_ID,
-
- PREV_ID,
- NEXT_ID,
ID_FIELDS_END
};
@@ -127,6 +124,7 @@ enum StringField {
// identifies a singleton instance.
UNIQUE_SERVER_TAG, // Tagged by the server
UNIQUE_CLIENT_TAG, // Tagged by the client
+ UNIQUE_BOOKMARK_TAG, // Client tags for bookmark items
STRING_FIELDS_END,
};
@@ -146,21 +144,21 @@ enum ProtoField {
enum {
PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN,
- ORDINAL_FIELDS_BEGIN = PROTO_FIELDS_END
+ UNIQUE_POSITION_FIELDS_BEGIN = PROTO_FIELDS_END
};
-enum OrdinalField {
- // An Ordinal that identifies the relative ordering of this object
- // among its siblings.
- SERVER_ORDINAL_IN_PARENT = ORDINAL_FIELDS_BEGIN,
- ORDINAL_FIELDS_END
+enum UniquePositionField {
+ SERVER_UNIQUE_POSITION = UNIQUE_POSITION_FIELDS_BEGIN,
+ UNIQUE_POSITION,
+ UNIQUE_POSITION_FIELDS_END
};
enum {
- ORDINAL_FIELDS_COUNT = ORDINAL_FIELDS_END - ORDINAL_FIELDS_BEGIN,
- FIELD_COUNT = ORDINAL_FIELDS_END - BEGIN_FIELDS,
+ UNIQUE_POSITION_FIELDS_COUNT =
+ UNIQUE_POSITION_FIELDS_END - UNIQUE_POSITION_FIELDS_BEGIN,
+ FIELD_COUNT = UNIQUE_POSITION_FIELDS_END - BEGIN_FIELDS,
// Past this point we have temporaries, stored in memory only.
- BEGIN_TEMPS = ORDINAL_FIELDS_END,
+ BEGIN_TEMPS = UNIQUE_POSITION_FIELDS_END,
BIT_TEMPS_BEGIN = BEGIN_TEMPS,
};
@@ -184,7 +182,7 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
int64 int64_fields[INT64_FIELDS_COUNT];
base::Time time_fields[TIME_FIELDS_COUNT];
Id id_fields[ID_FIELDS_COUNT];
- NodeOrdinal ordinal_fields[ORDINAL_FIELDS_COUNT];
+ UniquePosition unique_position_fields[UNIQUE_POSITION_FIELDS_COUNT];
std::bitset<BIT_FIELDS_COUNT> bit_fields;
std::bitset<BIT_TEMPS_COUNT> bit_temps;
@@ -252,8 +250,8 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
}
- inline void put(OrdinalField field, const NodeOrdinal& value) {
- ordinal_fields[field - ORDINAL_FIELDS_BEGIN] = value;
+ inline void put(UniquePositionField field, const UniquePosition& value) {
+ unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN] = value;
}
inline void put(BitTemp field, bool value) {
bit_temps[field - BIT_TEMPS_BEGIN] = value;
@@ -290,8 +288,8 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
return specifics_fields[field - PROTO_FIELDS_BEGIN];
}
- inline const NodeOrdinal& ref(OrdinalField field) const {
- return ordinal_fields[field - ORDINAL_FIELDS_BEGIN];
+ inline const UniquePosition& ref(UniquePositionField field) const {
+ return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
}
inline bool ref(BitTemp field) const {
return bit_temps[field - BIT_TEMPS_BEGIN];
@@ -307,11 +305,13 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline Id& mutable_ref(IdField field) {
return id_fields[field - ID_FIELDS_BEGIN];
}
- inline NodeOrdinal& mutable_ref(OrdinalField field) {
- return ordinal_fields[field - ORDINAL_FIELDS_BEGIN];
+ inline UniquePosition& mutable_ref(UniquePositionField field) {
+ return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
}
+ ModelType GetModelType() const;
ModelType GetServerModelType() const;
+ bool ShouldMaintainPosition() const;
// Dumps all kernel info into a DictionaryValue and returns it.
// Transfers ownership of the DictionaryValue to the caller.
diff --git a/sync/syncable/in_memory_directory_backing_store.cc b/sync/syncable/in_memory_directory_backing_store.cc
index 94513549b5..f130fb5308 100644
--- a/sync/syncable/in_memory_directory_backing_store.cc
+++ b/sync/syncable/in_memory_directory_backing_store.cc
@@ -8,7 +8,9 @@ namespace syncer {
namespace syncable {
InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
- const std::string& dir_name) : DirectoryBackingStore(dir_name) {
+ const std::string& dir_name)
+ : DirectoryBackingStore(dir_name),
+ consistent_cache_guid_requested_(false) {
}
DirOpenResult InMemoryDirectoryBackingStore::Load(
@@ -23,6 +25,13 @@ DirOpenResult InMemoryDirectoryBackingStore::Load(
if (!InitializeTables())
return FAILED_OPEN_DATABASE;
+ if (consistent_cache_guid_requested_) {
+ if (!db_->Execute("UPDATE share_info "
+ "SET cache_guid = 'IrcjZ2jyzHDV9Io4+zKcXQ=='")) {
+ return FAILED_OPEN_DATABASE;
+ }
+ }
+
if (!DropDeletedEntries())
return FAILED_DATABASE_CORRUPT;
if (!LoadEntries(entry_bucket))
diff --git a/sync/syncable/in_memory_directory_backing_store.h b/sync/syncable/in_memory_directory_backing_store.h
index 846d96af71..3afa0516ad 100644
--- a/sync/syncable/in_memory_directory_backing_store.h
+++ b/sync/syncable/in_memory_directory_backing_store.h
@@ -29,7 +29,13 @@ class SYNC_EXPORT_PRIVATE InMemoryDirectoryBackingStore
JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
+ void request_consistent_cache_guid() {
+ consistent_cache_guid_requested_ = true;
+ }
+
private:
+ bool consistent_cache_guid_requested_;
+
DISALLOW_COPY_AND_ASSIGN(InMemoryDirectoryBackingStore);
};
diff --git a/sync/syncable/model_type.cc b/sync/syncable/model_type.cc
index 37bdd361ac..140d7ab523 100644
--- a/sync/syncable/model_type.cc
+++ b/sync/syncable/model_type.cc
@@ -101,6 +101,9 @@ void AddDefaultFieldValue(ModelType datatype,
case FAVICON_TRACKING:
specifics->mutable_favicon_tracking();
break;
+ case MANAGED_USER_SETTINGS:
+ specifics->mutable_managed_user_setting();
+ break;
default:
NOTREACHED() << "No known extension for model type.";
}
@@ -187,6 +190,8 @@ int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
return sync_pb::EntitySpecifics::kFaviconImageFieldNumber;
case FAVICON_TRACKING:
return sync_pb::EntitySpecifics::kFaviconTrackingFieldNumber;
+ case MANAGED_USER_SETTINGS:
+ return sync_pb::EntitySpecifics::kManagedUserSettingFieldNumber;
default:
NOTREACHED() << "No known extension for model type.";
return 0;
@@ -303,11 +308,10 @@ ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
if (specifics.has_favicon_tracking())
return FAVICON_TRACKING;
- return UNSPECIFIED;
-}
+ if (specifics.has_managed_user_setting())
+ return MANAGED_USER_SETTINGS;
-bool ShouldMaintainPosition(ModelType model_type) {
- return model_type == BOOKMARKS;
+ return UNSPECIFIED;
}
ModelTypeSet ProtocolTypes() {
@@ -353,6 +357,11 @@ ModelTypeSet EncryptableUserTypes() {
encryptable_user_types.Remove(HISTORY_DELETE_DIRECTIVES);
// Synced notifications are not encrypted since the server must see changes.
encryptable_user_types.Remove(SYNCED_NOTIFICATIONS);
+ // Priority preferences are not encrypted because they might be synced before
+ // encryption is ready.
+ encryptable_user_types.RemoveAll(PriorityUserTypes());
+ // Managed user settings are not encrypted since they are set server-side.
+ encryptable_user_types.Remove(MANAGED_USER_SETTINGS);
// Proxy types have no sync representation and are therefore not encrypted.
// Note however that proxy types map to one or more protocol types, which
// may or may not be encrypted themselves.
@@ -360,6 +369,10 @@ ModelTypeSet EncryptableUserTypes() {
return encryptable_user_types;
}
+ModelTypeSet PriorityUserTypes() {
+ return ModelTypeSet(PRIORITY_PREFERENCES);
+}
+
ModelTypeSet ControlTypes() {
ModelTypeSet set;
// TODO(sync): We should be able to build the actual enumset's internal
@@ -369,9 +382,6 @@ ModelTypeSet ControlTypes() {
set.Put(ModelTypeFromInt(i));
}
- // TODO(albertb): Re-enable this when the server supports it.
- set.Remove(PRIORITY_PREFERENCES);
-
return set;
}
@@ -440,6 +450,8 @@ const char* ModelTypeToString(ModelType model_type) {
return "Favicon Images";
case FAVICON_TRACKING:
return "Favicon Tracking";
+ case MANAGED_USER_SETTINGS:
+ return "Managed User Settings";
case PROXY_TABS:
return "Tabs";
default:
@@ -507,6 +519,8 @@ int ModelTypeToHistogramInt(ModelType model_type) {
return 24;
case PROXY_TABS:
return 25;
+ case MANAGED_USER_SETTINGS:
+ return 26;
// Silence a compiler warning.
case MODEL_TYPE_COUNT:
return 0;
@@ -523,7 +537,7 @@ base::StringValue* ModelTypeToValue(ModelType model_type) {
return new base::StringValue("Unspecified");
}
NOTREACHED();
- return new base::StringValue("");
+ return new base::StringValue(std::string());
}
ModelType ModelTypeFromValue(const base::Value& value) {
@@ -588,6 +602,8 @@ ModelType ModelTypeFromString(const std::string& model_type_string) {
return FAVICON_IMAGES;
else if (model_type_string == "Favicon Tracking")
return FAVICON_TRACKING;
+ else if (model_type_string == "Managed User Settings")
+ return MANAGED_USER_SETTINGS;
else if (model_type_string == "Tabs")
return PROXY_TABS;
else
@@ -676,6 +692,8 @@ std::string ModelTypeToRootTag(ModelType type) {
return "google_chrome_favicon_images";
case FAVICON_TRACKING:
return "google_chrome_favicon_tracking";
+ case MANAGED_USER_SETTINGS:
+ return "google_chrome_managed_user_settings";
case PROXY_TABS:
return std::string();
default:
@@ -713,6 +731,7 @@ const char kPriorityPreferenceNotificationType[] = "PRIORITY_PREFERENCE";
const char kDictionaryNotificationType[] = "DICTIONARY";
const char kFaviconImageNotificationType[] = "FAVICON_IMAGE";
const char kFaviconTrackingNotificationType[] = "FAVICON_TRACKING";
+const char kManagedUserSettingNotificationType[] = "MANAGED_USER_SETTING";
} // namespace
bool RealModelTypeToNotificationType(ModelType model_type,
@@ -787,6 +806,9 @@ bool RealModelTypeToNotificationType(ModelType model_type,
case FAVICON_TRACKING:
*notification_type = kFaviconTrackingNotificationType;
return true;
+ case MANAGED_USER_SETTINGS:
+ *notification_type = kManagedUserSettingNotificationType;
+ return true;
default:
break;
}
@@ -865,6 +887,9 @@ bool NotificationTypeToRealModelType(const std::string& notification_type,
} else if (notification_type == kFaviconTrackingNotificationType) {
*model_type = FAVICON_TRACKING;
return true;
+ } else if (notification_type == kManagedUserSettingNotificationType) {
+ *model_type = MANAGED_USER_SETTINGS;
+ return true;
}
*model_type = UNSPECIFIED;
return false;
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 70b2a268ae..7c91135ffe 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -5,10 +5,11 @@
#include "sync/syncable/mutable_entry.h"
#include "base/memory/scoped_ptr.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/scoped_index_updater.h"
#include "sync/syncable/scoped_kernel_lock.h"
+#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_util.h"
@@ -36,7 +37,6 @@ void MutableEntry::Init(WriteTransaction* trans,
kernel->put(MTIME, now);
// We match the database defaults here
kernel->put(BASE_VERSION, CHANGES_VERSION);
- kernel->put(SERVER_ORDINAL_IN_PARENT, NodeOrdinal::CreateInitialOrdinal());
// Normally the SPECIFICS setting code is wrapped in logic to deal with
// unknown fields and encryption. Since all we want to do here is ensure that
@@ -63,8 +63,19 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
: Entry(trans),
write_transaction_(trans) {
Init(trans, model_type, parent_id, name);
- bool insert_result = trans->directory()->InsertEntry(trans, kernel_);
- DCHECK(insert_result);
+ // We need to have a valid position ready before we can index the item.
+ if (model_type == BOOKMARKS) {
+ // Base the tag off of our cache-guid and local "c-" style ID.
+ std::string unique_tag = syncable::GenerateSyncableBookmarkHash(
+ trans->directory()->cache_guid(), Get(ID).GetServerId());
+ kernel_->put(UNIQUE_BOOKMARK_TAG, unique_tag);
+ kernel_->put(UNIQUE_POSITION, UniquePosition::InitialPosition(unique_tag));
+ } else {
+ DCHECK(!ShouldMaintainPosition());
+ }
+
+ bool result = trans->directory()->InsertEntry(trans, kernel_);
+ DCHECK(result);
}
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
@@ -80,7 +91,6 @@ MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
kernel->put(ID, id);
kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
- kernel->put(SERVER_ORDINAL_IN_PARENT, NodeOrdinal::CreateInitialOrdinal());
kernel->put(IS_DEL, true);
// We match the database defaults here
kernel->put(BASE_VERSION, CHANGES_VERSION);
@@ -118,10 +128,6 @@ bool MutableEntry::PutIsDel(bool is_del) {
return true;
}
if (is_del) {
- if (!UnlinkFromOrder()) {
- return false;
- }
-
// If the server never knew about this item and it's deleted then we don't
// need to keep it around. Unsetting IS_UNSYNCED will:
// - Ensure that the item is never committed to the server.
@@ -139,20 +145,13 @@ bool MutableEntry::PutIsDel(bool is_del) {
ScopedKernelLock lock(dir());
// Some indices don't include deleted items and must be updated
// upon a value change.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
- dir()->kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater updater(lock, kernel_,
+ dir()->kernel_->parent_child_index);
kernel_->put(IS_DEL, is_del);
kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
- if (!is_del)
- // Restores position to the 0th index.
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
-
return true;
}
@@ -185,11 +184,12 @@ bool MutableEntry::Put(IdField field, const Id& value) {
if (!dir()->ReindexId(write_transaction(), kernel_, value))
return false;
} else if (PARENT_ID == field) {
- PutParentIdPropertyOnly(value); // Makes sibling order inconsistent.
- // Fixes up the sibling order inconsistency.
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
+ PutParentIdPropertyOnly(value);
+ if (!Get(IS_DEL)) {
+ if (!PutPredecessor(Id())) {
+ // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
+ NOTREACHED();
+ }
}
} else {
kernel_->put(field, value);
@@ -199,15 +199,16 @@ bool MutableEntry::Put(IdField field, const Id& value) {
return true;
}
-bool MutableEntry::Put(OrdinalField field, const NodeOrdinal& value) {
+bool MutableEntry::Put(UniquePositionField field, const UniquePosition& value) {
DCHECK(kernel_);
- DCHECK(value.IsValid());
write_transaction_->SaveOriginal(kernel_);
if(!kernel_->ref(field).Equals(value)) {
+ // We should never overwrite a valid position with an invalid one.
+ DCHECK(value.IsValid());
ScopedKernelLock lock(dir());
- if (SERVER_ORDINAL_IN_PARENT == field) {
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(
- lock, kernel_, dir()->kernel_->parent_id_child_index);
+ if (UNIQUE_POSITION == field) {
+ ScopedParentChildIndexUpdater updater(
+ lock, kernel_, dir()->kernel_->parent_child_index);
kernel_->put(field, value);
} else {
kernel_->put(field, value);
@@ -375,67 +376,34 @@ bool MutableEntry::Put(IndexedBitField field, bool value) {
return true;
}
-bool MutableEntry::UnlinkFromOrder() {
- ScopedKernelLock lock(dir());
- return dir()->UnlinkEntryFromOrder(kernel_,
- write_transaction(),
- &lock,
- NODE_MANIPULATION);
+void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
+ // This unique tag will eventually be used as the unique suffix when adjusting
+ // this bookmark's position. Let's make sure it's a valid suffix.
+ if (!UniquePosition::IsValidSuffix(tag)) {
+ NOTREACHED();
+ return;
+ }
+
+ if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() &&
+ tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
+ // There is only one scenario where our tag is expected to change. That
+ // scenario occurs when our current tag is a non-correct tag assigned during
+ // the UniquePosition migration.
+ std::string migration_generated_tag =
+ GenerateSyncableBookmarkHash(std::string(),
+ kernel_->ref(ID).GetServerId());
+ DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
+ }
+
+ kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
+ kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
- if (!UnlinkFromOrder())
+ MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id);
+ if (!predecessor.good())
return false;
-
- if (Get(IS_DEL)) {
- DCHECK(predecessor_id.IsNull());
- return true;
- }
-
- // TODO(ncarter): It should be possible to not maintain position for
- // non-bookmark items. However, we'd need to robustly handle all possible
- // permutations of setting IS_DEL and the SPECIFICS to identify the
- // object type; or else, we'd need to add a ModelType to the
- // MutableEntry's Create ctor.
- // if (!ShouldMaintainPosition()) {
- // return false;
- // }
-
- // This is classic insert-into-doubly-linked-list from CS 101 and your last
- // job interview. An "IsRoot" Id signifies the head or tail.
- Id successor_id;
- if (!predecessor_id.IsRoot()) {
- MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
- if (!predecessor.good()) {
- LOG(ERROR) << "Predecessor is not good : "
- << predecessor_id.GetServerId();
- return false;
- }
- if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor_id = predecessor.GetSuccessorId();
- predecessor.Put(NEXT_ID, Get(ID));
- } else {
- syncable::Directory* dir = trans()->directory();
- if (!dir->GetFirstChildId(trans(), Get(PARENT_ID), &successor_id)) {
- return false;
- }
- }
- if (!successor_id.IsRoot()) {
- MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
- if (!successor.good()) {
- LOG(ERROR) << "Successor is not good: "
- << successor_id.GetServerId();
- return false;
- }
- if (successor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor.Put(PREV_ID, Get(ID));
- }
- DCHECK(predecessor_id != Get(ID));
- DCHECK(successor_id != Get(ID));
- Put(PREV_ID, predecessor_id);
- Put(NEXT_ID, successor_id);
+ dir()->PutPredecessor(kernel_, predecessor.kernel_);
return true;
}
diff --git a/sync/syncable/mutable_entry.h b/sync/syncable/mutable_entry.h
index 51cd794545..134e4a9b8f 100644
--- a/sync/syncable/mutable_entry.h
+++ b/sync/syncable/mutable_entry.h
@@ -7,7 +7,6 @@
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/metahandle_set.h"
@@ -52,7 +51,7 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
bool Put(Int64Field field, const int64& value);
bool Put(TimeField field, const base::Time& value);
bool Put(IdField field, const Id& value);
- bool Put(OrdinalField field, const NodeOrdinal& value);
+ bool Put(UniquePositionField field, const UniquePosition& value);
// Do a simple property-only update if the PARENT_ID field. Use with caution.
//
@@ -75,6 +74,8 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
}
bool Put(IndexedBitField field, bool value);
+ void PutUniqueBookmarkTag(const std::string& tag);
+
// Sets the position of this item, and updates the entry kernels of the
// adjacent siblings so that list invariants are maintained. Returns false
// and fails if |predecessor_id| does not identify a sibling. Pass the root
diff --git a/sync/syncable/nigori_util.cc b/sync/syncable/nigori_util.cc
index 4888f66ef3..5c88316c99 100644
--- a/sync/syncable/nigori_util.cc
+++ b/sync/syncable/nigori_util.cc
@@ -108,12 +108,7 @@ bool VerifyDataTypeEncryptionForTest(
}
std::queue<Id> to_visit;
- Id id_string;
- if (!trans->directory()->GetFirstChildId(
- trans, type_root.Get(ID), &id_string)) {
- NOTREACHED();
- return false;
- }
+ Id id_string = type_root.GetFirstChildId();
to_visit.push(id_string);
while (!to_visit.empty()) {
id_string = to_visit.front();
@@ -127,12 +122,7 @@ bool VerifyDataTypeEncryptionForTest(
return false;
}
if (child.Get(IS_DIR)) {
- Id child_id_string;
- if (!trans->directory()->GetFirstChildId(
- trans, child.Get(ID), &child_id_string)) {
- NOTREACHED();
- return false;
- }
+ Id child_id_string = child.GetFirstChildId();
// Traverse the children.
to_visit.push(child_id_string);
}
@@ -252,7 +242,7 @@ void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
bool encrypt_everything,
sync_pb::NigoriSpecifics* nigori) {
nigori->set_encrypt_everything(encrypt_everything);
- COMPILE_ASSERT(26 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(27 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
nigori->set_encrypt_bookmarks(
encrypted_types.Has(BOOKMARKS));
nigori->set_encrypt_preferences(
@@ -286,7 +276,7 @@ ModelTypeSet GetEncryptedTypesFromNigori(
return ModelTypeSet::All();
ModelTypeSet encrypted_types;
- COMPILE_ASSERT(26 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
+ COMPILE_ASSERT(27 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
if (nigori.encrypt_bookmarks())
encrypted_types.Put(BOOKMARKS);
if (nigori.encrypt_preferences())
diff --git a/sync/syncable/parent_child_index.cc b/sync/syncable/parent_child_index.cc
new file mode 100644
index 0000000000..71fb92e411
--- /dev/null
+++ b/sync/syncable/parent_child_index.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/parent_child_index.h"
+
+#include "base/stl_util.h"
+
+#include "sync/syncable/entry_kernel.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace syncer {
+namespace syncable {
+
+bool ChildComparator::operator()(
+ const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ const UniquePosition& a_pos = a->ref(UNIQUE_POSITION);
+ const UniquePosition& b_pos = b->ref(UNIQUE_POSITION);
+
+ if (a_pos.IsValid() && b_pos.IsValid()) {
+ // Position is important to this type.
+ return a_pos.LessThan(b_pos);
+ } else if (a_pos.IsValid() && !b_pos.IsValid()) {
+ // TODO(rlarocque): Remove this case.
+ // An item with valid position as sibling of one with invalid position.
+ // We should not support this, but the tests rely on it. For now, just
+ // move all invalid position items to the right.
+ return true;
+ } else if (!a_pos.IsValid() && b_pos.IsValid()) {
+ // TODO(rlarocque): Remove this case.
+ // Mirror of the above case.
+ return false;
+ } else {
+ // Position doesn't matter.
+ DCHECK(!a_pos.IsValid());
+ DCHECK(!b_pos.IsValid());
+ return a->ref(ID) < b->ref(ID);
+ }
+}
+
+ParentChildIndex::ParentChildIndex() {
+}
+
+ParentChildIndex::~ParentChildIndex() {
+ STLDeleteContainerPairSecondPointers(
+ parent_children_map_.begin(), parent_children_map_.end());
+}
+
+bool ParentChildIndex::ShouldInclude(const EntryKernel* entry) {
+ // This index excludes deleted items and the root item. The root
+ // item is excluded so that it doesn't show up as a child of itself.
+ return !entry->ref(IS_DEL) && !entry->ref(ID).IsRoot();
+}
+
+bool ParentChildIndex::Insert(EntryKernel* entry) {
+ DCHECK(ShouldInclude(entry));
+
+ const syncable::Id& parent_id = entry->ref(PARENT_ID);
+ OrderedChildSet* children = NULL;
+ ParentChildrenMap::iterator i = parent_children_map_.find(parent_id);
+ if (i != parent_children_map_.end()) {
+ children = i->second;
+ } else {
+ children = new OrderedChildSet();
+ parent_children_map_.insert(std::make_pair(parent_id, children));
+ }
+
+ return children->insert(entry).second;
+}
+
+// Like the other containers used to help support the syncable::Directory, this
+// one does not own any EntryKernels. This function removes references to the
+// given EntryKernel but does not delete it.
+void ParentChildIndex::Remove(EntryKernel* e) {
+ ParentChildrenMap::iterator parent =
+ parent_children_map_.find(e->ref(PARENT_ID));
+ DCHECK(parent != parent_children_map_.end());
+
+ OrderedChildSet* children = parent->second;
+ OrderedChildSet::iterator j = children->find(e);
+ DCHECK(j != children->end());
+
+ children->erase(j);
+ if (children->empty()) {
+ delete children;
+ parent_children_map_.erase(parent);
+ }
+}
+
+bool ParentChildIndex::Contains(EntryKernel* e) const {
+ const syncable::Id& parent_id = e->ref(PARENT_ID);
+ ParentChildrenMap::const_iterator parent =
+ parent_children_map_.find(parent_id);
+ if (parent == parent_children_map_.end()) {
+ return false;
+ }
+ const OrderedChildSet* children = parent->second;
+ DCHECK(children && !children->empty());
+ return children->count(e) > 0;
+}
+
+const OrderedChildSet* ParentChildIndex::GetChildren(const syncable::Id& id) {
+ ParentChildrenMap::iterator parent = parent_children_map_.find(id);
+ if (parent == parent_children_map_.end()) {
+ return NULL;
+ }
+
+ // A successful lookup implies at least some children exist.
+ DCHECK(!parent->second->empty());
+ return parent->second;
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/sync/syncable/parent_child_index.h b/sync/syncable/parent_child_index.h
new file mode 100644
index 0000000000..fd0f2e89c8
--- /dev/null
+++ b/sync/syncable/parent_child_index.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX
+#define SYNC_SYNCABLE_PARENT_CHILD_INDEX
+
+#include <map>
+#include <set>
+
+#include "base/basictypes.h"
+#include "sync/base/sync_export.h"
+
+namespace syncer {
+namespace syncable {
+
+struct EntryKernel;
+class Id;
+class ParentChildIndex;
+
+// A node ordering function.
+struct SYNC_EXPORT_PRIVATE ChildComparator {
+ bool operator() (const EntryKernel* a, const EntryKernel* b) const;
+};
+
+// An ordered set of nodes.
+typedef std::set<EntryKernel*, ChildComparator> OrderedChildSet;
+
+// Container that tracks parent-child relationships.
+// Provides fast lookup of all items under a given parent.
+class SYNC_EXPORT_PRIVATE ParentChildIndex {
+ public:
+ ParentChildIndex();
+ ~ParentChildIndex();
+
+ // Returns whether or not this entry belongs in the index.
+ // True for all non-deleted, non-root entries.
+ static bool ShouldInclude(const EntryKernel* e);
+
+ // Inserts a given child into the index.
+ bool Insert(EntryKernel* e);
+
+ // Removes a given child from the index.
+ void Remove(EntryKernel* e);
+
+ // Returns true if this item is in the index as a child.
+ bool Contains(EntryKernel* e) const;
+
+ // Returns all children of the entry with the given Id. Returns NULL if the
+ // node has no children or the Id does not identify a valid directory node.
+ const OrderedChildSet* GetChildren(const Id& id);
+
+ private:
+ typedef std::map<syncable::Id, OrderedChildSet*> ParentChildrenMap;
+
+ // A map of parent IDs to children.
+ // Parents with no children are not included in this map.
+ ParentChildrenMap parent_children_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParentChildIndex);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX
diff --git a/sync/syncable/parent_child_index_unittest.cc b/sync/syncable/parent_child_index_unittest.cc
new file mode 100644
index 0000000000..5ae9d27bd8
--- /dev/null
+++ b/sync/syncable/parent_child_index_unittest.cc
@@ -0,0 +1,344 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/parent_child_index.h"
+
+#include <list>
+
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "sync/syncable/entry_kernel.h"
+#include "sync/syncable/syncable_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+namespace syncable {
+
+namespace {
+
+static const std::string kCacheGuid = "8HhNIHlEOCGQbIAALr9QEg==";
+
+class ParentChildIndexTest : public testing::Test {
+ public:
+ virtual void TearDown() {
+ // To make memory management easier, we take ownership of all EntryKernels
+ // returned by our factory methods and delete them here.
+ STLDeleteElements(&owned_entry_kernels_);
+ }
+
+ // Unfortunately, we can't use the regular Entry factory methods, because the
+ // ParentChildIndex deals in EntryKernels.
+
+ static syncable::Id GetBookmarkRootId() {
+ return syncable::Id::CreateFromServerId("bookmark_folder");
+ }
+
+ static syncable::Id GetBookmarkId(int n) {
+ return syncable::Id::CreateFromServerId("b" + base::IntToString(n));
+ }
+
+ static syncable::Id GetClientUniqueId(int n) {
+ return syncable::Id::CreateFromServerId("c" + base::IntToString(n));
+ }
+
+ EntryKernel* MakeRoot() {
+ // Mimics the root node.
+ EntryKernel* root = new EntryKernel();
+ root->put(META_HANDLE, 1);
+ root->put(BASE_VERSION, -1);
+ root->put(SERVER_VERSION, 0);
+ root->put(IS_DIR, true);
+ root->put(ID, syncable::Id());
+ root->put(PARENT_ID, syncable::Id());
+ root->put(SERVER_PARENT_ID, syncable::Id());
+
+ owned_entry_kernels_.push_back(root);
+ return root;
+ }
+
+ EntryKernel* MakeBookmarkRoot() {
+ // Mimics a server-created bookmark folder.
+ EntryKernel* folder = new EntryKernel;
+ folder->put(META_HANDLE, 1);
+ folder->put(BASE_VERSION, 9);
+ folder->put(SERVER_VERSION, 9);
+ folder->put(IS_DIR, true);
+ folder->put(ID, GetBookmarkRootId());
+ folder->put(SERVER_PARENT_ID, syncable::Id());
+ folder->put(PARENT_ID, syncable::Id());
+ folder->put(UNIQUE_SERVER_TAG, "google_chrome_bookmarks");
+
+ owned_entry_kernels_.push_back(folder);
+ return folder;
+ }
+
+ EntryKernel* MakeBookmark(int n, int pos, bool is_dir) {
+ // Mimics a regular bookmark or folder.
+ EntryKernel* bm = new EntryKernel();
+ bm->put(META_HANDLE, n);
+ bm->put(BASE_VERSION, 10);
+ bm->put(SERVER_VERSION, 10);
+ bm->put(IS_DIR, is_dir);
+ bm->put(ID, GetBookmarkId(n));
+ bm->put(PARENT_ID, GetBookmarkRootId());
+ bm->put(SERVER_PARENT_ID, GetBookmarkRootId());
+
+ bm->put(UNIQUE_BOOKMARK_TAG,
+ syncable::GenerateSyncableBookmarkHash(kCacheGuid,
+ bm->ref(ID).GetServerId()));
+
+ UniquePosition unique_pos =
+ UniquePosition::FromInt64(pos, bm->ref(UNIQUE_BOOKMARK_TAG));
+ bm->put(UNIQUE_POSITION, unique_pos);
+ bm->put(SERVER_UNIQUE_POSITION, unique_pos);
+
+ owned_entry_kernels_.push_back(bm);
+ return bm;
+ }
+
+ EntryKernel* MakeUniqueClientItem(int n) {
+ EntryKernel* item = new EntryKernel();
+ item->put(META_HANDLE, n);
+ item->put(BASE_VERSION, 10);
+ item->put(SERVER_VERSION, 10);
+ item->put(IS_DIR, false);
+ item->put(ID, GetClientUniqueId(n));
+ item->put(PARENT_ID, syncable::Id());
+ item->put(SERVER_PARENT_ID, syncable::Id());
+ item->put(UNIQUE_CLIENT_TAG, base::IntToString(n));
+
+ owned_entry_kernels_.push_back(item);
+ return item;
+ }
+
+ ParentChildIndex index_;
+
+ private:
+ std::list<EntryKernel*> owned_entry_kernels_;
+};
+
+TEST_F(ParentChildIndexTest, TestRootNode) {
+ EntryKernel* root = MakeRoot();
+ EXPECT_FALSE(ParentChildIndex::ShouldInclude(root));
+}
+
+TEST_F(ParentChildIndexTest, TestBookmarkRootFolder) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ EXPECT_TRUE(ParentChildIndex::ShouldInclude(bm_folder));
+}
+
+// Tests iteration over a set of siblings.
+TEST_F(ParentChildIndexTest, ChildInsertionAndIteration) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Make some folder and non-folder entries.
+ EntryKernel* b1 = MakeBookmark(1, 1, false);
+ EntryKernel* b2 = MakeBookmark(2, 2, false);
+ EntryKernel* b3 = MakeBookmark(3, 3, true);
+ EntryKernel* b4 = MakeBookmark(4, 4, false);
+
+ // Insert them out-of-order to test different cases.
+ index_.Insert(b3); // Only child.
+ index_.Insert(b4); // Right-most child.
+ index_.Insert(b1); // Left-most child.
+ index_.Insert(b2); // Between existing items.
+
+ // Double-check they've been added.
+ EXPECT_TRUE(index_.Contains(b1));
+ EXPECT_TRUE(index_.Contains(b2));
+ EXPECT_TRUE(index_.Contains(b3));
+ EXPECT_TRUE(index_.Contains(b4));
+
+ // Check the ordering.
+ const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(children);
+ ASSERT_EQ(children->size(), 4UL);
+ OrderedChildSet::const_iterator it = children->begin();
+ EXPECT_EQ(*it, b1);
+ it++;
+ EXPECT_EQ(*it, b2);
+ it++;
+ EXPECT_EQ(*it, b3);
+ it++;
+ EXPECT_EQ(*it, b4);
+ it++;
+ EXPECT_TRUE(it == children->end());
+}
+
+// Tests iteration when hierarchy is involved.
+TEST_F(ParentChildIndexTest, ChildInsertionAndIterationWithHierarchy) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Just below the root, we have folders f1 and f2.
+ EntryKernel* f1 = MakeBookmark(1, 1, false);
+ EntryKernel* f2 = MakeBookmark(2, 2, false);
+ EntryKernel* f3 = MakeBookmark(3, 3, false);
+
+ // Under folder f1, we have two bookmarks.
+ EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
+ f1_b1->put(PARENT_ID, GetBookmarkId(1));
+ EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
+ f1_b2->put(PARENT_ID, GetBookmarkId(1));
+
+ // Under folder f2, there is one bookmark.
+ EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
+ f2_b1->put(PARENT_ID, GetBookmarkId(2));
+
+ // Under folder f3, there is nothing.
+
+ // Insert in a strange order, because we can.
+ index_.Insert(f1_b2);
+ index_.Insert(f2);
+ index_.Insert(f2_b1);
+ index_.Insert(f1);
+ index_.Insert(f1_b1);
+ index_.Insert(f3);
+
+ OrderedChildSet::const_iterator it;
+
+ // Iterate over children of the bookmark root.
+ const OrderedChildSet* top_children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(top_children);
+ ASSERT_EQ(top_children->size(), 3UL);
+ it = top_children->begin();
+ EXPECT_EQ(*it, f1);
+ it++;
+ EXPECT_EQ(*it, f2);
+ it++;
+ EXPECT_EQ(*it, f3);
+ it++;
+ EXPECT_TRUE(it == top_children->end());
+
+ // Iterate over children of the first folder.
+ const OrderedChildSet* f1_children = index_.GetChildren(GetBookmarkId(1));
+ ASSERT_TRUE(f1_children);
+ ASSERT_EQ(f1_children->size(), 2UL);
+ it = f1_children->begin();
+ EXPECT_EQ(*it, f1_b1);
+ it++;
+ EXPECT_EQ(*it, f1_b2);
+ it++;
+ EXPECT_TRUE(it == f1_children->end());
+
+ // Iterate over children of the second folder.
+ const OrderedChildSet* f2_children = index_.GetChildren(GetBookmarkId(2));
+ ASSERT_TRUE(f2_children);
+ ASSERT_EQ(f2_children->size(), 1UL);
+ it = f2_children->begin();
+ EXPECT_EQ(*it, f2_b1);
+ it++;
+ EXPECT_TRUE(it == f2_children->end());
+
+ // Check for children of the third folder.
+ const OrderedChildSet* f3_children = index_.GetChildren(GetBookmarkId(3));
+ EXPECT_FALSE(f3_children);
+}
+
+// Tests removing items.
+TEST_F(ParentChildIndexTest, RemoveWithHierarchy) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Just below the root, we have folders f1, f2 and f3.
+ EntryKernel* f1 = MakeBookmark(1, 1, false);
+ EntryKernel* f2 = MakeBookmark(2, 2, false);
+ EntryKernel* f3 = MakeBookmark(3, 3, false);
+
+ // Under folder f1, we have two bookmarks.
+ EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
+ f1_b1->put(PARENT_ID, GetBookmarkId(1));
+ EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
+ f1_b2->put(PARENT_ID, GetBookmarkId(1));
+
+ // Under folder f2, there is one bookmark.
+ EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
+ f2_b1->put(PARENT_ID, GetBookmarkId(2));
+
+ // Under folder f3, there is nothing.
+
+ // Insert in any order.
+ index_.Insert(f2_b1);
+ index_.Insert(f3);
+ index_.Insert(f1_b2);
+ index_.Insert(f1);
+ index_.Insert(f2);
+ index_.Insert(f1_b1);
+
+ // Check that all are in the index.
+ EXPECT_TRUE(index_.Contains(f1));
+ EXPECT_TRUE(index_.Contains(f2));
+ EXPECT_TRUE(index_.Contains(f3));
+ EXPECT_TRUE(index_.Contains(f1_b1));
+ EXPECT_TRUE(index_.Contains(f1_b2));
+ EXPECT_TRUE(index_.Contains(f2_b1));
+
+ // Remove them all in any order.
+ index_.Remove(f3);
+ EXPECT_FALSE(index_.Contains(f3));
+ index_.Remove(f1_b2);
+ EXPECT_FALSE(index_.Contains(f1_b2));
+ index_.Remove(f2_b1);
+ EXPECT_FALSE(index_.Contains(f2_b1));
+ index_.Remove(f1);
+ EXPECT_FALSE(index_.Contains(f1));
+ index_.Remove(f2);
+ EXPECT_FALSE(index_.Contains(f2));
+ index_.Remove(f1_b1);
+ EXPECT_FALSE(index_.Contains(f1_b1));
+}
+
+// Test that involves two non-ordered items.
+TEST_F(ParentChildIndexTest, UnorderedChildren) {
+ // Make two unique client tag items under the root node.
+ EntryKernel* u1 = MakeUniqueClientItem(1);
+ EntryKernel* u2 = MakeUniqueClientItem(2);
+
+ EXPECT_FALSE(u1->ShouldMaintainPosition());
+ EXPECT_FALSE(u2->ShouldMaintainPosition());
+
+ index_.Insert(u1);
+ index_.Insert(u2);
+
+ const OrderedChildSet* children = index_.GetChildren(syncable::Id());
+ EXPECT_EQ(children->count(u1), 1UL);
+ EXPECT_EQ(children->count(u2), 1UL);
+ EXPECT_EQ(children->size(), 2UL);
+}
+
+// Test ordered and non-ordered entries under the same parent.
+// TODO(rlarocque): We should not need to support this.
+TEST_F(ParentChildIndexTest, OrderedAndUnorderedChildren) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ EntryKernel* b1 = MakeBookmark(1, 1, false);
+ EntryKernel* b2 = MakeBookmark(2, 2, false);
+ EntryKernel* u1 = MakeUniqueClientItem(1);
+ u1->put(PARENT_ID, GetBookmarkRootId());
+
+ index_.Insert(b1);
+ index_.Insert(u1);
+ index_.Insert(b2);
+
+ const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(children);
+ EXPECT_EQ(children->size(), 3UL);
+
+ // Ensure that the non-positionable item is moved to the far right.
+ OrderedChildSet::const_iterator it = children->begin();
+ EXPECT_EQ(*it, b1);
+ it++;
+ EXPECT_EQ(*it, b2);
+ it++;
+ EXPECT_EQ(*it, u1);
+ it++;
+ EXPECT_TRUE(it == children->end());
+}
+
+} // namespace
+} // namespace syncable
+} // namespace syncer
+
diff --git a/sync/syncable/scoped_parent_child_index_updater.cc b/sync/syncable/scoped_parent_child_index_updater.cc
new file mode 100644
index 0000000000..0dc3e95332
--- /dev/null
+++ b/sync/syncable/scoped_parent_child_index_updater.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/scoped_parent_child_index_updater.h"
+
+#include "sync/syncable/parent_child_index.h"
+
+namespace syncer {
+namespace syncable {
+
+ScopedParentChildIndexUpdater::ScopedParentChildIndexUpdater(
+ ScopedKernelLock& proof_of_lock,
+ EntryKernel* entry,
+ ParentChildIndex* index) : entry_(entry), index_(index) {
+ if (ParentChildIndex::ShouldInclude(entry_)) {
+ index_->Remove(entry_);
+ }
+}
+
+ScopedParentChildIndexUpdater::~ScopedParentChildIndexUpdater() {
+ if (ParentChildIndex::ShouldInclude(entry_)) {
+ index_->Insert(entry_);
+ }
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/sync/syncable/scoped_parent_child_index_updater.h b/sync/syncable/scoped_parent_child_index_updater.h
new file mode 100644
index 0000000000..89385feb98
--- /dev/null
+++ b/sync/syncable/scoped_parent_child_index_updater.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
+#define SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
+
+#include "base/basictypes.h"
+#include "sync/base/sync_export.h"
+
+namespace syncer {
+namespace syncable {
+
+class ParentChildIndex;
+class ScopedKernelLock;
+struct EntryKernel;
+
+// Temporarily removes an item from the ParentChildIndex and re-adds it when
+// this object goes out of scope.
+class ScopedParentChildIndexUpdater {
+ public:
+ ScopedParentChildIndexUpdater(ScopedKernelLock& proof_of_lock,
+ EntryKernel* entry,
+ ParentChildIndex* index);
+ ~ScopedParentChildIndexUpdater();
+
+ private:
+ EntryKernel* entry_;
+ ParentChildIndex* index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedParentChildIndexUpdater);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
diff --git a/sync/syncable/syncable_columns.h b/sync/syncable/syncable_columns.h
index 6eda058778..9d45c7404a 100644
--- a/sync/syncable/syncable_columns.h
+++ b/sync/syncable/syncable_columns.h
@@ -37,8 +37,6 @@ static const ColumnSpec g_metas_columns[] = {
{"id", "varchar(255) default \"r\""},
{"parent_id", "varchar(255) default \"r\""},
{"server_parent_id", "varchar(255) default \"r\""},
- {"prev_id", "varchar(255) default \"r\""},
- {"next_id", "varchar(255) default \"r\""},
//////////////////////////////////////
// bits
{"is_unsynced", "bit default 0"},
@@ -53,14 +51,16 @@ static const ColumnSpec g_metas_columns[] = {
{"server_non_unique_name", "varchar(255)"},
{"unique_server_tag", "varchar"},
{"unique_client_tag", "varchar"},
+ {"unique_bookmark_tag", "varchar"},
//////////////////////////////////////
// Blobs (serialized protos).
{"specifics", "blob"},
{"server_specifics", "blob"},
{"base_server_specifics", "blob"},
//////////////////////////////////////
- // Blobs (ordinals).
- {"server_ordinal_in_parent", "blob"},
+ // Blobs (positions).
+ {"server_unique_position", "blob"},
+ {"unique_position", "blob"},
};
// At least enforce that there are equal number of column names and fields.
diff --git a/sync/syncable/syncable_enum_conversions.cc b/sync/syncable/syncable_enum_conversions.cc
index b4425a6750..8f27912604 100644
--- a/sync/syncable/syncable_enum_conversions.cc
+++ b/sync/syncable/syncable_enum_conversions.cc
@@ -72,14 +72,12 @@ const char* GetTimeFieldString(TimeField time_field) {
}
const char* GetIdFieldString(IdField id_field) {
- ASSERT_ENUM_BOUNDS(ID, NEXT_ID,
+ ASSERT_ENUM_BOUNDS(ID, SERVER_PARENT_ID,
ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
switch (id_field) {
ENUM_CASE(ID);
ENUM_CASE(PARENT_ID);
ENUM_CASE(SERVER_PARENT_ID);
- ENUM_CASE(PREV_ID);
- ENUM_CASE(NEXT_ID);
case ID_FIELDS_END: break;
}
NOTREACHED();
@@ -122,13 +120,14 @@ const char* GetBitFieldString(BitField bit_field) {
}
const char* GetStringFieldString(StringField string_field) {
- ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_CLIENT_TAG,
+ ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_BOOKMARK_TAG,
STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
switch (string_field) {
ENUM_CASE(NON_UNIQUE_NAME);
ENUM_CASE(SERVER_NON_UNIQUE_NAME);
ENUM_CASE(UNIQUE_SERVER_TAG);
ENUM_CASE(UNIQUE_CLIENT_TAG);
+ ENUM_CASE(UNIQUE_BOOKMARK_TAG);
case STRING_FIELDS_END: break;
}
NOTREACHED();
@@ -148,12 +147,14 @@ const char* GetProtoFieldString(ProtoField proto_field) {
return "";
}
-const char* GetOrdinalFieldString(OrdinalField ordinal_field) {
- ASSERT_ENUM_BOUNDS(SERVER_ORDINAL_IN_PARENT, SERVER_ORDINAL_IN_PARENT,
- ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
- switch(ordinal_field) {
- ENUM_CASE(SERVER_ORDINAL_IN_PARENT);
- case ORDINAL_FIELDS_END: break;
+const char* GetUniquePositionFieldString(UniquePositionField position_field) {
+ ASSERT_ENUM_BOUNDS(SERVER_UNIQUE_POSITION, UNIQUE_POSITION,
+ UNIQUE_POSITION_FIELDS_BEGIN,
+ UNIQUE_POSITION_FIELDS_END - 1);
+ switch(position_field) {
+ ENUM_CASE(SERVER_UNIQUE_POSITION);
+ ENUM_CASE(UNIQUE_POSITION);
+ case UNIQUE_POSITION_FIELDS_END: break;
}
NOTREACHED();
return "";
diff --git a/sync/syncable/syncable_enum_conversions.h b/sync/syncable/syncable_enum_conversions.h
index 3727420909..12f6428591 100644
--- a/sync/syncable/syncable_enum_conversions.h
+++ b/sync/syncable/syncable_enum_conversions.h
@@ -41,8 +41,8 @@ SYNC_EXPORT_PRIVATE const char* GetStringFieldString(StringField string_field);
SYNC_EXPORT_PRIVATE const char* GetProtoFieldString(ProtoField proto_field);
-SYNC_EXPORT_PRIVATE const char* GetOrdinalFieldString(
- OrdinalField ordinal_field);
+SYNC_EXPORT_PRIVATE const char* GetUniquePositionFieldString(
+ UniquePositionField position_field);
SYNC_EXPORT_PRIVATE const char* GetBitTempString(BitTemp bit_temp);
diff --git a/sync/syncable/syncable_enum_conversions_unittest.cc b/sync/syncable/syncable_enum_conversions_unittest.cc
index eac4a37743..f74d1301ae 100644
--- a/sync/syncable/syncable_enum_conversions_unittest.cc
+++ b/sync/syncable/syncable_enum_conversions_unittest.cc
@@ -77,9 +77,10 @@ TEST_F(SyncableEnumConversionsTest, GetProtoFieldString) {
GetProtoFieldString, PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
}
-TEST_F(SyncableEnumConversionsTest, GetOrdinalFieldString) {
+TEST_F(SyncableEnumConversionsTest, GetUniquePositionFieldString) {
TestEnumStringFunction(
- GetOrdinalFieldString, ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
+ GetUniquePositionFieldString,
+ UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
}
TEST_F(SyncableEnumConversionsTest, GetBitTempString) {
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index b4772b9f1f..e5562aa9f9 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -19,7 +19,6 @@
#include "base/test/values_test_util.h"
#include "base/threading/platform_thread.h"
#include "base/values.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/syncable/directory_backing_store.h"
#include "sync/syncable/directory_change_delegate.h"
@@ -52,7 +51,7 @@ class SyncableKernelTest : public testing::Test {};
TEST_F(SyncableKernelTest, ToValue) {
EntryKernel kernel;
scoped_ptr<DictionaryValue> value(kernel.ToValue(NULL));
- if (value.get()) {
+ if (value) {
// Not much to check without repeating the ToValue() code.
EXPECT_TRUE(value->HasKey("isDirty"));
// The extra +2 is for "isDirty" and "serverModelType".
@@ -230,10 +229,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry e(&rtrans, GET_BY_ID, id);
ASSERT_FALSE(e.good()); // Hasn't been written yet.
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
+ EXPECT_TRUE(root.GetFirstChildId().IsRoot());
}
{
@@ -254,10 +253,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry child(&rtrans, GET_BY_HANDLE, written_metahandle);
ASSERT_TRUE(child.good());
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_TRUE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_EQ(e.Get(ID), child_id);
+ EXPECT_EQ(e.Get(ID), root.GetFirstChildId());
}
{
@@ -273,10 +272,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry e(&rtrans, GET_BY_ID, id);
ASSERT_TRUE(e.good());
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
+ EXPECT_TRUE(root.GetFirstChildId().IsRoot());
}
dir.SaveChanges();
@@ -417,6 +416,32 @@ TEST_F(SyncableGeneralTest, ToValue) {
dir.SaveChanges();
}
+// Test that the bookmark tag generation algorithm remains unchanged.
+TEST_F(SyncableGeneralTest, BookmarkTagTest) {
+ InMemoryDirectoryBackingStore* store = new InMemoryDirectoryBackingStore("x");
+
+ // The two inputs that form the bookmark tag are the directory's cache_guid
+ // and its next_id value. We don't need to take any action to ensure
+ // consistent next_id values, but we do need to explicitly request that our
+ // InMemoryDirectoryBackingStore always return the same cache_guid.
+ store->request_consistent_cache_guid();
+
+ Directory dir(store, &handler_, NULL, NULL, NULL);
+ ASSERT_EQ(OPENED, dir.Open("x", &delegate_, NullTransactionObserver()));
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry bm(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "bm");
+ bm.Put(IS_UNSYNCED, true);
+
+ // If this assertion fails, that might indicate that the algorithm used to
+ // generate bookmark tags has been modified. This could have implications
+ // for bookmark ordering. Please make sure you know what you're doing if
+ // you intend to make such a change.
+ ASSERT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=", bm.Get(UNIQUE_BOOKMARK_TAG));
+ }
+}
+
// A test fixture for syncable::Directory. Uses an in-memory database to keep
// the unit tests fast.
class SyncableDirectoryTest : public testing::Test {
@@ -437,7 +462,7 @@ class SyncableDirectoryTest : public testing::Test {
}
virtual void TearDown() {
- if (dir_.get())
+ if (dir_)
dir_->SaveChanges();
dir_.reset();
}
@@ -1496,12 +1521,19 @@ TEST_F(SyncableDirectoryTest, OldClientLeftUnsyncedDeletedLocalItem) {
}
}
-TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
+TEST_F(SyncableDirectoryTest, PositionWithNullSurvivesSaveAndReload) {
TestIdFactory id_factory;
Id null_child_id;
const char null_cstr[] = "\0null\0test";
std::string null_str(null_cstr, arraysize(null_cstr) - 1);
- NodeOrdinal null_ord = NodeOrdinal(null_str);
+ // Pad up to the minimum length with 0x7f characters, then add a string that
+ // contains a few NULLs to the end. This is slightly wrong, since the suffix
+ // part of a UniquePosition shouldn't contain NULLs, but it's good enough for
+ // this test.
+ std::string suffix =
+ std::string(UniquePosition::kSuffixLength - null_str.length(), '\x7f')
+ + null_str;
+ UniquePosition null_pos = UniquePosition::FromInt64(10, suffix);
{
WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
@@ -1512,7 +1544,8 @@ TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
MutableEntry child(&trans, CREATE, BOOKMARKS, parent.Get(ID), "child");
child.Put(IS_UNSYNCED, true);
- child.Put(SERVER_ORDINAL_IN_PARENT, null_ord);
+ child.Put(UNIQUE_POSITION, null_pos);
+ child.Put(SERVER_UNIQUE_POSITION, null_pos);
null_child_id = child.Get(ID);
}
@@ -1524,9 +1557,10 @@ TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
Entry null_ordinal_child(&trans, GET_BY_ID, null_child_id);
EXPECT_TRUE(
- null_ord.Equals(null_ordinal_child.Get(SERVER_ORDINAL_IN_PARENT)));
+ null_pos.Equals(null_ordinal_child.Get(UNIQUE_POSITION)));
+ EXPECT_TRUE(
+ null_pos.Equals(null_ordinal_child.Get(SERVER_UNIQUE_POSITION)));
}
-
}
// An OnDirectoryBackingStore that can be set to always fail SaveChanges.
@@ -1879,13 +1913,13 @@ TEST_F(OnDiskSyncableDirectoryTest,
update_post_save.ref((ProtoField)i).SerializeAsString())
<< "Blob field #" << i << " changed during save/load";
}
- for ( ; i < ORDINAL_FIELDS_END; ++i) {
- EXPECT_EQ(create_pre_save.ref((OrdinalField)i).ToInternalValue(),
- create_post_save.ref((OrdinalField)i).ToInternalValue())
- << "Blob field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((OrdinalField)i).ToInternalValue(),
- update_post_save.ref((OrdinalField)i).ToInternalValue())
- << "Blob field #" << i << " changed during save/load";
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
+ EXPECT_TRUE(create_pre_save.ref((UniquePositionField)i).Equals(
+ create_post_save.ref((UniquePositionField)i)))
+ << "Position field #" << i << " changed during save/load";
+ EXPECT_TRUE(update_pre_save.ref((UniquePositionField)i).Equals(
+ update_post_save.ref((UniquePositionField)i)))
+ << "Position field #" << i << " changed during save/load";
}
}
@@ -2219,7 +2253,7 @@ TEST_F(SyncableClientTagTest, TestClientTagClear) {
WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
MutableEntry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
EXPECT_TRUE(me.good());
- me.Put(UNIQUE_CLIENT_TAG, "");
+ me.Put(UNIQUE_CLIENT_TAG, std::string());
}
{
ReadTransaction trans(FROM_HERE, dir_.get());
diff --git a/sync/syncable/syncable_util.cc b/sync/syncable/syncable_util.cc
index 857fc85a66..6d7f12612f 100644
--- a/sync/syncable/syncable_util.cc
+++ b/sync/syncable/syncable_util.cc
@@ -66,27 +66,13 @@ void ChangeEntryIDAndUpdateChildren(
while (i != children.end()) {
MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
CHECK(child_entry.good());
- // Use the unchecked setter here to avoid touching the child's NEXT_ID
- // and PREV_ID fields (which Put(PARENT_ID) would normally do to
- // maintain linked-list invariants). In this case, NEXT_ID and PREV_ID
- // among the children will be valid after the loop, since we update all
- // the children at once.
+ // Use the unchecked setter here to avoid touching the child's
+ // UNIQUE_POSITION field. In this case, UNIQUE_POSITION among the
+ // children will be valid after the loop, since we update all the children
+ // at once.
child_entry.PutParentIdPropertyOnly(new_id);
}
}
- // Update Id references on the previous and next nodes in the sibling
- // order. Do this by reinserting into the linked list; the first
- // step in PutPredecessor is to Unlink from the existing order, which
- // will overwrite the stale Id value from the adjacent nodes.
- if (entry->GetPredecessorId() == entry->GetSuccessorId() &&
- entry->GetPredecessorId() == old_id) {
- // We just need a shallow update to |entry|'s fields since it is already
- // self looped.
- entry->Put(NEXT_ID, new_id);
- entry->Put(PREV_ID, new_id);
- } else {
- entry->PutPredecessor(entry->GetPredecessorId());
- }
}
// Function to handle runtime failures on syncable code. Rather than crashing,
@@ -119,5 +105,12 @@ std::string GenerateSyncableHash(
return encode_output;
}
+std::string GenerateSyncableBookmarkHash(
+ const std::string originator_cache_guid,
+ const std::string originator_client_item_id) {
+ return syncable::GenerateSyncableHash(
+ BOOKMARKS, originator_cache_guid + originator_client_item_id);
+}
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/syncable/syncable_util.h b/sync/syncable/syncable_util.h
index 4ea8a6f6c4..465324d346 100644
--- a/sync/syncable/syncable_util.h
+++ b/sync/syncable/syncable_util.h
@@ -44,6 +44,13 @@ SYNC_EXPORT_PRIVATE int GetUnsyncedEntries(BaseTransaction* trans,
SYNC_EXPORT_PRIVATE std::string GenerateSyncableHash(
ModelType model_type, const std::string& client_tag);
+// A helper for generating the bookmark type's tag. This is required in more
+// than one place, so we define the algorithm here to make sure the
+// implementation is consistent.
+SYNC_EXPORT_PRIVATE std::string GenerateSyncableBookmarkHash(
+ const std::string originator_cache_guid,
+ const std::string originator_client_item_id);
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/syncable/syncable_write_transaction.cc b/sync/syncable/syncable_write_transaction.cc
index 788d92205a..5b884e7f35 100644
--- a/sync/syncable/syncable_write_transaction.cc
+++ b/sync/syncable/syncable_write_transaction.cc
@@ -175,7 +175,7 @@ std::string WriterTagToString(WriterTag writer_tag) {
ENUM_CASE(SYNCAPI);
};
NOTREACHED();
- return "";
+ return std::string();
}
#undef ENUM_CASE
diff --git a/sync/test/android/javatests/src/org/chromium/sync/test/util/MockAccountManager.java b/sync/test/android/javatests/src/org/chromium/sync/test/util/MockAccountManager.java
index d293918d78..9c4a41d8f2 100644
--- a/sync/test/android/javatests/src/org/chromium/sync/test/util/MockAccountManager.java
+++ b/sync/test/android/javatests/src/org/chromium/sync/test/util/MockAccountManager.java
@@ -8,6 +8,7 @@ import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
+import android.accounts.AuthenticatorDescription;
import android.accounts.AuthenticatorException;
import android.accounts.OperationCanceledException;
import android.app.Activity;
@@ -294,6 +295,14 @@ public class MockAccountManager implements AccountManagerDelegate {
}
}
+ @Override
+ public AuthenticatorDescription[] getAuthenticatorTypes() {
+ AuthenticatorDescription googleAuthenticator = new AuthenticatorDescription(
+ AccountManagerHelper.GOOGLE_ACCOUNT_TYPE, "p1", 0, 0, 0, 0);
+
+ return new AuthenticatorDescription[] { googleAuthenticator };
+ }
+
public void prepareAllowAppPermission(Account account, String authTokenType) {
addPreparedAppPermission(new AccountAuthTokenPreparation(account, authTokenType, true));
}
diff --git a/sync/test/engine/mock_connection_manager.cc b/sync/test/engine/mock_connection_manager.cc
index 3fd4a3218a..33371e9ff9 100644
--- a/sync/test/engine/mock_connection_manager.cc
+++ b/sync/test/engine/mock_connection_manager.cc
@@ -32,6 +32,7 @@ namespace syncer {
using syncable::WriteTransaction;
static char kValidAuthToken[] = "AuthToken";
+static char kCacheGuid[] = "kqyg7097kro6GSUod+GSg==";
MockConnectionManager::MockConnectionManager(syncable::Directory* directory)
: ServerConnectionManager("unused", 0, false),
@@ -42,7 +43,6 @@ MockConnectionManager::MockConnectionManager(syncable::Directory* directory)
store_birthday_("Store BDay!"),
store_birthday_sent_(false),
client_stuck_(false),
- commit_time_rename_prepended_string_(""),
countdown_to_postbuffer_fail_(0),
directory_(directory),
mid_commit_observer_(NULL),
@@ -55,7 +55,7 @@ MockConnectionManager::MockConnectionManager(syncable::Directory* directory)
use_legacy_bookmarks_protocol_(false),
num_get_updates_requests_(0) {
SetNewTimestamp(0);
- set_auth_token(kValidAuthToken);
+ SetAuthToken(kValidAuthToken, base::Time());
}
MockConnectionManager::~MockConnectionManager() {
@@ -343,6 +343,19 @@ sync_pb::SyncEntity* MockConnectionManager::AddUpdateMeta(
ent->set_mtime(sync_ts);
ent->set_ctime(1);
ent->set_position_in_parent(GeneratePositionInParent());
+
+ // This isn't perfect, but it works well enough. This is an update, which
+ // means the ID is a server ID, which means it never changes. By making
+ // kCacheGuid also never change, we guarantee that the same item always has
+ // the same originator_cache_guid and originator_client_item_id.
+ //
+ // Unfortunately, neither this class nor the tests that use it explicitly
+ track sync entities, so supporting proper cache guids and client item IDs
+ // would require major refactoring. The ID used here ought to be the "c-"
+ // style ID that was sent up on the commit.
+ ent->set_originator_cache_guid(kCacheGuid);
+ ent->set_originator_client_item_id(id);
+
return ent;
}
@@ -395,9 +408,20 @@ sync_pb::SyncEntity* MockConnectionManager::AddUpdateFromLastCommit() {
last_commit_response().entryresponse(0).version());
ent->set_id_string(
last_commit_response().entryresponse(0).id_string());
+
+ // This is the same hack as in AddUpdateMeta. See the comment in that
+ // function for more information.
+ ent->set_originator_cache_guid(kCacheGuid);
+ ent->set_originator_client_item_id(
+ last_commit_response().entryresponse(0).id_string());
+
+ if (last_sent_commit().entries(0).has_unique_position()) {
+ ent->mutable_unique_position()->CopyFrom(
+ last_sent_commit().entries(0).unique_position());
+ }
+
// Tests don't currently care about the following:
- // originator_cache_guid, originator_client_item_id, parent_id_string,
- // name, non_unique_name.
+ // parent_id_string, name, non_unique_name.
}
return GetMutableLastUpdate();
}
@@ -528,7 +552,7 @@ void MockConnectionManager::ProcessGetUpdates(
update_queue_.pop_front();
- if (gu_client_command_.get()) {
+ if (gu_client_command_) {
response->mutable_client_command()->CopyFrom(*gu_client_command_.get());
}
}
@@ -615,7 +639,7 @@ void MockConnectionManager::ProcessCommit(
}
commit_responses_.push_back(new CommitResponse(*commit_response));
- if (commit_client_command_.get()) {
+ if (commit_client_command_) {
response_buffer->mutable_client_command()->CopyFrom(
*commit_client_command_.get());
}
diff --git a/sync/test/engine/mock_connection_manager.h b/sync/test/engine/mock_connection_manager.h
index 53b950bd4b..f4f98822d2 100644
--- a/sync/test/engine/mock_connection_manager.h
+++ b/sync/test/engine/mock_connection_manager.h
@@ -18,6 +18,7 @@
#include "sync/engine/net/server_connection_manager.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/base/model_type_invalidation_map.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/sync.pb.h"
namespace syncer {
diff --git a/sync/test/engine/syncer_command_test.h b/sync/test/engine/syncer_command_test.h
index 7e3ff6eb6e..fa51d8221d 100644
--- a/sync/test/engine/syncer_command_test.h
+++ b/sync/test/engine/syncer_command_test.h
@@ -107,7 +107,7 @@ class SyncerCommandTestBase : public testing::Test,
// Create a session with the provided source.
sessions::SyncSession* session(const sessions::SyncSourceInfo& source) {
- if (!session_.get()) {
+ if (!session_) {
std::vector<ModelSafeWorker*> workers = GetWorkers();
session_.reset(new sessions::SyncSession(context(), delegate(), source));
}
diff --git a/sync/test/local_sync_test_server.cc b/sync/test/local_sync_test_server.cc
index a11234f340..4f9242b4a0 100644
--- a/sync/test/local_sync_test_server.cc
+++ b/sync/test/local_sync_test_server.cc
@@ -9,20 +9,22 @@
#include "base/string_number_conversions.h"
#include "base/values.h"
#include "net/test/python_utils.h"
-#include "net/test/test_server.h"
+#include "net/test/spawned_test_server.h"
namespace syncer {
LocalSyncTestServer::LocalSyncTestServer()
- : LocalTestServer(net::TestServer::TYPE_HTTP, // Sync uses the HTTP scheme.
- net::TestServer::kLocalhost,
- base::FilePath()),
+ : LocalTestServer(
+ net::SpawnedTestServer::TYPE_HTTP, // Sync uses the HTTP scheme.
+ net::SpawnedTestServer::kLocalhost,
+ base::FilePath()),
xmpp_port_(0) {}
LocalSyncTestServer::LocalSyncTestServer(uint16 port, uint16 xmpp_port)
- : LocalTestServer(net::TestServer::TYPE_HTTP, // Sync uses the HTTP scheme.
- net::TestServer::kLocalhost,
- base::FilePath()),
+ : LocalTestServer(
+ net::SpawnedTestServer::TYPE_HTTP, // Sync uses the HTTP scheme.
+ net::SpawnedTestServer::kLocalhost,
+ base::FilePath()),
xmpp_port_(xmpp_port) {
SetPort(port);
}
diff --git a/sync/test/test_directory_backing_store.h b/sync/test/test_directory_backing_store.h
index cbbe16e324..54fe49edd9 100644
--- a/sync/test/test_directory_backing_store.h
+++ b/sync/test/test_directory_backing_store.h
@@ -48,7 +48,8 @@ class TestDirectoryBackingStore : public DirectoryBackingStore {
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion82To83);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion83To84);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion84To85);
- FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DetectInvalidOrdinal);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion85To86);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DetectInvalidPosition);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
diff --git a/sync/tools/null_invalidation_state_tracker.cc b/sync/tools/null_invalidation_state_tracker.cc
index 084630c99f..192060bf97 100644
--- a/sync/tools/null_invalidation_state_tracker.cc
+++ b/sync/tools/null_invalidation_state_tracker.cc
@@ -43,6 +43,12 @@ void NullInvalidationStateTracker::SetInvalidatorClientId(
}
std::string NullInvalidationStateTracker::GetInvalidatorClientId() const {
+ // The caller of this function is probably looking for an ID it can use to
+ // identify this client as the originator of some notifiable change. It does
+ // this so the invalidation server can prevent it from being notified of its
+ // own changes. This invalidation state tracker doesn't remember its ID, so
+ // it can't support this feature.
+ NOTREACHED() << "This state tracker does not support reflection-blocking";
return std::string();
}
@@ -56,6 +62,10 @@ void NullInvalidationStateTracker::SetBootstrapData(const std::string& data) {
LOG(INFO) << "Setting bootstrap data to: " << base64_data;
}
+void NullInvalidationStateTracker::Clear() {
+ // We have no members to clear.
+}
+
void NullInvalidationStateTracker::GenerateAckHandles(
const ObjectIdSet& ids,
const scoped_refptr<base::TaskRunner>& task_runner,
diff --git a/sync/tools/null_invalidation_state_tracker.h b/sync/tools/null_invalidation_state_tracker.h
index c6274e96dc..ce05c3327a 100644
--- a/sync/tools/null_invalidation_state_tracker.h
+++ b/sync/tools/null_invalidation_state_tracker.h
@@ -30,6 +30,8 @@ class NullInvalidationStateTracker
virtual std::string GetBootstrapData() const OVERRIDE;
virtual void SetBootstrapData(const std::string& data) OVERRIDE;
+ virtual void Clear() OVERRIDE;
+
virtual void GenerateAckHandles(
const ObjectIdSet& ids,
const scoped_refptr<base::TaskRunner>& task_runner,
diff --git a/sync/tools/sync_client.cc b/sync/tools/sync_client.cc
index 5fb0a0cbf5..43b63f5283 100644
--- a/sync/tools/sync_client.cc
+++ b/sync/tools/sync_client.cc
@@ -88,7 +88,7 @@ class MyTestURLRequestContextGetter : public net::TestURLRequestContextGetter {
virtual net::TestURLRequestContext* GetURLRequestContext() OVERRIDE {
// Construct |context_| lazily so it gets constructed on the right
// thread (the IO thread).
- if (!context_.get())
+ if (!context_)
context_.reset(new MyTestURLRequestContext());
return context_.get();
}
@@ -311,7 +311,9 @@ int SyncClientMain(int argc, char* argv[]) {
// TODO(akalin): Replace this with just the context getter once
// HttpPostProviderFactory is removed.
scoped_ptr<HttpPostProviderFactory> post_factory(
- new HttpBridgeFactory(context_getter, kUserAgent));
+ new HttpBridgeFactory(context_getter,
+ kUserAgent,
+ NetworkTimeUpdateCallback()));
// Used only when committing bookmarks, so it's okay to leave this
// as NULL.
ExtensionsActivityMonitor* extensions_activity_monitor = NULL;
@@ -338,6 +340,7 @@ int SyncClientMain(int argc, char* argv[]) {
credentials,
scoped_ptr<Invalidator>(
invalidator_factory.CreateInvalidator()),
+ invalidator_factory.GetInvalidatorClientId(),
kRestoredKeyForBootstrapping,
kRestoredKeystoreKeyForBootstrapping,
scoped_ptr<InternalComponentsFactory>(
diff --git a/sync/tools/sync_listen_notifications.cc b/sync/tools/sync_listen_notifications.cc
index 834b04049e..7a0624a0ec 100644
--- a/sync/tools/sync_listen_notifications.cc
+++ b/sync/tools/sync_listen_notifications.cc
@@ -98,7 +98,7 @@ class MyTestURLRequestContextGetter : public net::TestURLRequestContextGetter {
virtual net::TestURLRequestContext* GetURLRequestContext() OVERRIDE {
// Construct |context_| lazily so it gets constructed on the right
// thread (the IO thread).
- if (!context_.get())
+ if (!context_)
context_.reset(new MyTestURLRequestContext());
return context_.get();
}
@@ -201,8 +201,6 @@ int SyncListenNotificationsMain(int argc, char* argv[]) {
invalidator_factory.CreateInvalidator());
NotificationPrinter notification_printer;
- const char kUniqueId[] = "fake_unique_id";
- invalidator->SetUniqueId(kUniqueId);
invalidator->UpdateCredentials(email, token);
// Listen for notifications for all known types.
diff --git a/sync/tools/testserver/chromiumsync.py b/sync/tools/testserver/chromiumsync.py
index d33ed016f7..fbd3c5b6c2 100644
--- a/sync/tools/testserver/chromiumsync.py
+++ b/sync/tools/testserver/chromiumsync.py
@@ -31,9 +31,11 @@ import extension_specifics_pb2
import favicon_image_specifics_pb2
import favicon_tracking_specifics_pb2
import history_delete_directive_specifics_pb2
+import managed_user_setting_specifics_pb2
import nigori_specifics_pb2
import password_specifics_pb2
import preference_specifics_pb2
+import priority_preference_specifics_pb2
import search_engine_specifics_pb2
import session_specifics_pb2
import sync_pb2
@@ -59,9 +61,11 @@ ALL_TYPES = (
EXPERIMENTS,
EXTENSIONS,
HISTORY_DELETE_DIRECTIVE,
+ MANAGED_USER_SETTING,
NIGORI,
PASSWORD,
PREFERENCE,
+ PRIORITY_PREFERENCE,
SEARCH_ENGINE,
SESSION,
SYNCED_NOTIFICATION,
@@ -69,7 +73,7 @@ ALL_TYPES = (
TYPED_URL,
EXTENSION_SETTINGS,
FAVICON_IMAGES,
- FAVICON_TRACKING) = range(23)
+ FAVICON_TRACKING) = range(25)
# An enumeration on the frequency at which the server should send errors
# to the client. This would be specified by the url that triggers the error.
@@ -100,9 +104,11 @@ SYNC_TYPE_TO_DESCRIPTOR = {
FAVICON_IMAGES: SYNC_TYPE_FIELDS['favicon_image'],
FAVICON_TRACKING: SYNC_TYPE_FIELDS['favicon_tracking'],
HISTORY_DELETE_DIRECTIVE: SYNC_TYPE_FIELDS['history_delete_directive'],
+ MANAGED_USER_SETTING: SYNC_TYPE_FIELDS['managed_user_setting'],
NIGORI: SYNC_TYPE_FIELDS['nigori'],
PASSWORD: SYNC_TYPE_FIELDS['password'],
PREFERENCE: SYNC_TYPE_FIELDS['preference'],
+ PRIORITY_PREFERENCE: SYNC_TYPE_FIELDS['priority_preference'],
SEARCH_ENGINE: SYNC_TYPE_FIELDS['search_engine'],
SESSION: SYNC_TYPE_FIELDS['session'],
SYNCED_NOTIFICATION: SYNC_TYPE_FIELDS["synced_notification"],
@@ -488,12 +494,18 @@ class SyncDataModel(object):
name='Favicon Tracking',
parent_tag=ROOT_ID,
sync_type=FAVICON_TRACKING),
+ PermanentItem('google_chrome_managed_user_settings',
+ name='Managed User Settings',
+ parent_tag=ROOT_ID, sync_type=MANAGED_USER_SETTING),
PermanentItem('google_chrome_nigori', name='Nigori',
parent_tag=ROOT_ID, sync_type=NIGORI),
PermanentItem('google_chrome_passwords', name='Passwords',
parent_tag=ROOT_ID, sync_type=PASSWORD),
PermanentItem('google_chrome_preferences', name='Preferences',
parent_tag=ROOT_ID, sync_type=PREFERENCE),
+ PermanentItem('google_chrome_priority_preferences',
+ name='Priority Preferences',
+ parent_tag=ROOT_ID, sync_type=PRIORITY_PREFERENCE),
PermanentItem('google_chrome_synced_notifications',
name='Synced Notifications',
parent_tag=ROOT_ID, sync_type=SYNCED_NOTIFICATION),
@@ -624,12 +636,14 @@ class SyncDataModel(object):
was changed and Chrome now sends up the absolute position. The server
must store a position_in_parent value and must not maintain
insert_after_item_id.
+ Starting in Jan 2013, the client will also send up a unique_position field
+ which should be saved and returned on subsequent GetUpdates.
Args:
entry: The entry for which to write a position. Its ID field are
- assumed to be server IDs. This entry will have its parent_id_string
- and position_in_parent fields updated; its insert_after_item_id field
- will be cleared.
+ assumed to be server IDs. This entry will have its parent_id_string,
+ position_in_parent and unique_position fields updated; its
+ insert_after_item_id field will be cleared.
parent_id: The ID of the entry intended as the new parent.
"""
@@ -944,12 +958,14 @@ class SyncDataModel(object):
entry = MakeTombstone(entry.id_string)
else:
# Comments in sync.proto detail how the representation of positional
- # ordering works: either the 'insert_after_item_id' field or the
- # 'position_in_parent' field may determine the sibling order during
- # Commit operations. The 'position_in_parent' field provides an absolute
- # ordering in GetUpdates contexts. Here we assume the client will
- # always send a valid position_in_parent (this is the newer style), and
- # we ignore insert_after_item_id (an older style).
+ # ordering works.
+ #
+ # We've almost fully deprecated the 'insert_after_item_id' field.
+ # The 'position_in_parent' field is also deprecated, but as of Jan 2013
+ # is still in common use. The 'unique_position' field is the latest
+ # and greatest in positioning technology.
+ #
+ # This server supports 'position_in_parent' and 'unique_position'.
self._WritePosition(entry, entry.parent_id_string)
# Preserve the originator info, which the client is not required to send
diff --git a/sync/tools/testserver/chromiumsync_test.py b/sync/tools/testserver/chromiumsync_test.py
index e56c04b047..8c2acb7103 100755
--- a/sync/tools/testserver/chromiumsync_test.py
+++ b/sync/tools/testserver/chromiumsync_test.py
@@ -569,7 +569,7 @@ class SyncDataModelTest(unittest.TestCase):
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
- def testCreateSyncedBookmaks(self):
+ def testCreateSyncedBookmarks(self):
version1, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0))
id_string = self.model._MakeCurrentId(chromiumsync.BOOKMARK,
diff --git a/sync/util/cryptographer.cc b/sync/util/cryptographer.cc
index 61b5e6328e..0fed51e167 100644
--- a/sync/util/cryptographer.cc
+++ b/sync/util/cryptographer.cc
@@ -110,12 +110,12 @@ std::string Cryptographer::DecryptToString(
NigoriMap::const_iterator it = nigoris_.find(encrypted.key_name());
if (nigoris_.end() == it) {
NOTREACHED() << "Cannot decrypt message";
- return std::string(""); // Caller should have called CanDecrypt(encrypt).
+ return std::string(); // Caller should have called CanDecrypt(encrypt).
}
std::string plaintext;
if (!it->second->Decrypt(encrypted.blob(), &plaintext)) {
- return std::string("");
+ return std::string();
}
return plaintext;
@@ -271,18 +271,18 @@ bool Cryptographer::GetBootstrapToken(std::string* token) const {
std::string Cryptographer::UnpackBootstrapToken(
const std::string& token) const {
if (token.empty())
- return "";
+ return std::string();
std::string encrypted_data;
if (!base::Base64Decode(token, &encrypted_data)) {
DLOG(WARNING) << "Could not decode token.";
- return "";
+ return std::string();
}
std::string unencrypted_token;
if (!encryptor_->DecryptString(encrypted_data, &unencrypted_token)) {
DLOG(WARNING) << "Decryption of bootstrap token failed.";
- return "";
+ return std::string();
}
return unencrypted_token;
}
@@ -328,15 +328,15 @@ bool Cryptographer::KeybagIsStale(
std::string Cryptographer::GetDefaultNigoriKey() const {
if (!is_initialized())
- return "";
+ return std::string();
NigoriMap::const_iterator iter = nigoris_.find(default_nigori_name_);
if (iter == nigoris_.end())
- return "";
+ return std::string();
sync_pb::NigoriKey key;
if (!iter->second->ExportKeys(key.mutable_user_key(),
key.mutable_encryption_key(),
key.mutable_mac_key()))
- return "";
+ return std::string();
return key.SerializeAsString();
}
diff --git a/sync/util/data_type_histogram.h b/sync/util/data_type_histogram.h
index c911ce42ba..c2d8542e74 100644
--- a/sync/util/data_type_histogram.h
+++ b/sync/util/data_type_histogram.h
@@ -105,6 +105,9 @@
case ::syncer::FAVICON_TRACKING: \
PER_DATA_TYPE_MACRO("FaviconTracking"); \
break; \
+ case ::syncer::MANAGED_USER_SETTINGS: \
+ PER_DATA_TYPE_MACRO("ManagedUserSetting"); \
+ break; \
case ::syncer::PROXY_TABS :\
PER_DATA_TYPE_MACRO("Tabs"); \
break; \
diff --git a/sync/util/get_session_name_ios.mm b/sync/util/get_session_name_ios.mm
index f9c101b033..ff9e619a70 100644
--- a/sync/util/get_session_name_ios.mm
+++ b/sync/util/get_session_name_ios.mm
@@ -6,7 +6,7 @@
#import <UIKit/UIKit.h>
-#include "base/sys_string_conversions.h"
+#include "base/strings/sys_string_conversions.h"
namespace syncer {
namespace internal {
diff --git a/sync/util/get_session_name_mac.mm b/sync/util/get_session_name_mac.mm
index 7f5940b5a0..b67bbd7474 100644
--- a/sync/util/get_session_name_mac.mm
+++ b/sync/util/get_session_name_mac.mm
@@ -9,7 +9,7 @@
#include "base/mac/scoped_cftyperef.h"
#include "base/string_util.h"
-#include "base/sys_string_conversions.h"
+#include "base/strings/sys_string_conversions.h"
namespace syncer {
namespace internal {