| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 # Use of this source code is governed by a BSD-style license that can be | |
| 3 # found in the LICENSE file. | |
| 4 | |
| 5 """An implementation of the server side of the Chromium sync protocol. | |
| 6 | |
| 7 The details of the protocol are described mostly by comments in the protocol | |
| 8 buffer definition at chrome/browser/sync/protocol/sync.proto. | |
| 9 """ | |
| 10 | |
| 12 import copy | |
| 13 import operator | |
| 14 import pickle | |
| 15 import random | |
| 16 import string | |
| 17 import sys | |
| 18 import threading | |
| 19 import time | |
| 20 import urlparse | |
| 21 | |
| 22 import app_notification_specifics_pb2 | |
| 23 import app_setting_specifics_pb2 | |
| 24 import app_specifics_pb2 | |
| 25 import autofill_specifics_pb2 | |
| 26 import bookmark_specifics_pb2 | |
| 27 import extension_setting_specifics_pb2 | |
| 28 import extension_specifics_pb2 | |
| 29 import get_updates_caller_info_pb2 | |
| 30 import history_delete_directive_specifics_pb2 | |
| 31 import nigori_specifics_pb2 | |
| 32 import password_specifics_pb2 | |
| 33 import preference_specifics_pb2 | |
| 34 import search_engine_specifics_pb2 | |
| 35 import session_specifics_pb2 | |
| 36 import sync_enums_pb2 | |
| 37 import sync_pb2 | |
| 38 import synced_notification_specifics_pb2 | |
| 39 import theme_specifics_pb2 | |
| 40 import typed_url_specifics_pb2 | |
| 41 | |
| 42 # An enumeration of the various kinds of data that can be synced. | |
| 43 # Over the wire, this enumeration is not used: a sync object's type is | |
| 44 # inferred by which EntitySpecifics field it has. But in the context | |
| 45 # of a program, it is useful to have an enumeration. | |
| 46 ALL_TYPES = ( | |
| 47 TOP_LEVEL, # The type of the 'Google Chrome' folder. | |
| 48 APPS, | |
| 49 APP_NOTIFICATION, | |
| 50 APP_SETTINGS, | |
| 51 AUTOFILL, | |
| 52 AUTOFILL_PROFILE, | |
| 53 BOOKMARK, | |
| 54 DEVICE_INFO, | |
| 55 EXPERIMENTS, | |
| 56 EXTENSIONS, | |
| 57 HISTORY_DELETE_DIRECTIVE, | |
| 58 NIGORI, | |
| 59 PASSWORD, | |
| 60 PREFERENCE, | |
| 61 SEARCH_ENGINE, | |
| 62 SESSION, | |
| 63 SYNCED_NOTIFICATION, | |
| 64 THEME, | |
| 65 TYPED_URL, | |
| 66 EXTENSION_SETTINGS) = range(20) | |
| 67 | |
| 68 # An enumeration of the frequency at which the server should send errors | |
| 69 # to the client. The frequency is specified by the URL that triggers the error. | |
| 70 # Note: This enum should be kept in the same order as the enum in sync_test.h. | |
| 71 SYNC_ERROR_FREQUENCY = ( | |
| 72 ERROR_FREQUENCY_NONE, | |
| 73 ERROR_FREQUENCY_ALWAYS, | |
| 74 ERROR_FREQUENCY_TWO_THIRDS) = range(3) | |
| 75 | |
| 76 # Well-known server tag of the top level 'Google Chrome' folder. | |
| 77 TOP_LEVEL_FOLDER_TAG = 'google_chrome' | |
| 78 | |
| 79 # Given a sync type from ALL_TYPES, find the FieldDescriptor corresponding | |
| 80 # to that datatype. Note that TOP_LEVEL has no such field. | |
| 81 SYNC_TYPE_FIELDS = sync_pb2.EntitySpecifics.DESCRIPTOR.fields_by_name | |
| 82 SYNC_TYPE_TO_DESCRIPTOR = { | |
| 83 APP_NOTIFICATION: SYNC_TYPE_FIELDS['app_notification'], | |
| 84 APP_SETTINGS: SYNC_TYPE_FIELDS['app_setting'], | |
| 85 APPS: SYNC_TYPE_FIELDS['app'], | |
| 86 AUTOFILL: SYNC_TYPE_FIELDS['autofill'], | |
| 87 AUTOFILL_PROFILE: SYNC_TYPE_FIELDS['autofill_profile'], | |
| 88 BOOKMARK: SYNC_TYPE_FIELDS['bookmark'], | |
| 89 DEVICE_INFO: SYNC_TYPE_FIELDS['device_info'], | |
| 90 EXPERIMENTS: SYNC_TYPE_FIELDS['experiments'], | |
| 91 EXTENSION_SETTINGS: SYNC_TYPE_FIELDS['extension_setting'], | |
| 92 EXTENSIONS: SYNC_TYPE_FIELDS['extension'], | |
| 93 HISTORY_DELETE_DIRECTIVE: SYNC_TYPE_FIELDS['history_delete_directive'], | |
| 94 NIGORI: SYNC_TYPE_FIELDS['nigori'], | |
| 95 PASSWORD: SYNC_TYPE_FIELDS['password'], | |
| 96 PREFERENCE: SYNC_TYPE_FIELDS['preference'], | |
| 97 SEARCH_ENGINE: SYNC_TYPE_FIELDS['search_engine'], | |
| 98 SESSION: SYNC_TYPE_FIELDS['session'], | |
| 99 SYNCED_NOTIFICATION: SYNC_TYPE_FIELDS['synced_notification'], | |
| 100 THEME: SYNC_TYPE_FIELDS['theme'], | |
| 101 TYPED_URL: SYNC_TYPE_FIELDS['typed_url'], | |
| 102 } | |
| 103 | |
| 104 # The parent ID used to indicate a top-level node. | |
| 105 ROOT_ID = '0' | |
| 106 | |
| 107 # Unix time epoch in struct_time format. The tuple corresponds to UTC Wednesday | |
| 108 # Jan 1 1970, 00:00:00, non-dst. | |
| 109 UNIX_TIME_EPOCH = (1970, 1, 1, 0, 0, 0, 3, 1, 0) | |
| 110 | |
| 111 # The number of characters in the server-generated encryption key. | |
| 112 KEYSTORE_KEY_LENGTH = 16 | |
| 113 | |
| 114 # The hashed client tag for the keystore encryption experiment node. | |
| 115 KEYSTORE_ENCRYPTION_EXPERIMENT_TAG = "pis8ZRzh98/MKLtVEio2mr42LQA=" | |
| 116 | |
| 117 class Error(Exception): | |
| 118 """Error class for this module.""" | |
| 119 | |
| 120 | |
| 121 class ProtobufDataTypeFieldNotUnique(Error): | |
| 122 """An entry should not have more than one data type present.""" | |
| 123 | |
| 124 | |
| 125 class DataTypeIdNotRecognized(Error): | |
| 126 """The requested data type is not recognized.""" | |
| 127 | |
| 128 | |
| 129 class MigrationDoneError(Error): | |
| 130 """A server-side migration occurred; clients must re-sync some datatypes. | |
| 131 | |
| 132 Attributes: | |
| 133 datatypes: a list of the datatypes (python enum) needing migration. | |
| 134 """ | |
| 135 | |
| 136 def __init__(self, datatypes): | |
| 137 self.datatypes = datatypes | |
| 138 | |
| 139 | |
| 140 class StoreBirthdayError(Error): | |
| 141 """The client sent a birthday that doesn't correspond to this server.""" | |
| 142 | |
| 143 | |
| 144 class TransientError(Error): | |
| 145 """The client would be sent a transient error.""" | |
| 146 | |
| 147 | |
| 148 class SyncInducedError(Error): | |
| 149 """The client would be sent an error.""" | |
| 150 | |
| 151 | |
| 152 class InducedErrorFrequencyNotDefined(Error): | |
| 153 """The error frequency defined is not handled.""" | |
| 154 | |
| 155 | |
| 156 def GetEntryType(entry): | |
| 157 """Extract the sync type from a SyncEntry. | |
| 158 | |
| 159 Args: | |
| 160 entry: A SyncEntity protobuf object whose type to determine. | |
| 161 Returns: | |
| 162 An enum value from ALL_TYPES if the entry's type can be determined, or None | |
| 163 if the type cannot be determined. | |
| 164 Raises: | |
| 165 ProtobufDataTypeFieldNotUnique: More than one type was indicated by | |
| 166 the entry. | |
| 167 """ | |
| 168 if entry.server_defined_unique_tag == TOP_LEVEL_FOLDER_TAG: | |
| 169 return TOP_LEVEL | |
| 170 entry_types = GetEntryTypesFromSpecifics(entry.specifics) | |
| 171 if not entry_types: | |
| 172 return None | |
| 173 | |
| 174 # If there is more than one, either there's a bug, or else the caller | |
| 175 # should use GetEntryTypes. | |
| 176 if len(entry_types) > 1: | |
| 177 raise ProtobufDataTypeFieldNotUnique | |
| 178 return entry_types[0] | |
| 179 | |
| 180 | |
| 181 def GetEntryTypesFromSpecifics(specifics): | |
| 182 """Determine the sync types indicated by an EntitySpecifics's field(s). | |
| 183 | |
| 184 If the specifics have more than one recognized data type field (as commonly | |
| 185 happens with the requested_types field of GetUpdatesMessage), all types | |
| 186 will be returned. Callers must handle the possibility of the returned | |
| 187 value having more than one item. | |
| 188 | |
| 189 Args: | |
| 190 specifics: An EntitySpecifics protobuf message whose fields to | |
| 191 enumerate. | |
| 192 Returns: | |
| 193 A list of the sync types (values from ALL_TYPES) associated with each | |
| 194 recognized field of the specifics message. | |
| 195 """ | |
| 196 return [data_type for data_type, field_descriptor | |
| 197 in SYNC_TYPE_TO_DESCRIPTOR.iteritems() | |
| 198 if specifics.HasField(field_descriptor.name)] | |
| 199 | |
| 200 | |
| 201 def SyncTypeToProtocolDataTypeId(data_type): | |
| 202 """Convert from a sync type (python enum) to the protocol's data type id.""" | |
| 203 return SYNC_TYPE_TO_DESCRIPTOR[data_type].number | |
| 204 | |
| 205 | |
| 206 def ProtocolDataTypeIdToSyncType(protocol_data_type_id): | |
| 207 """Convert from the protocol's data type id to a sync type (python enum).""" | |
| 208 for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems(): | |
| 209 if field_descriptor.number == protocol_data_type_id: | |
| 210 return data_type | |
| 211 raise DataTypeIdNotRecognized | |
| 212 | |
| 213 | |
| 214 def DataTypeStringToSyncTypeLoose(data_type_string): | |
| 215 """Converts a human-readable string to a sync type (python enum). | |
| 216 | |
| 217 Capitalization and pluralization don't matter; this function is appropriate | |
| 218 for values that might have been typed by a human being; e.g., command-line | |
| 219 flags or query parameters. | |
| 220 """ | |
| 221 if data_type_string.isdigit(): | |
| 222 return ProtocolDataTypeIdToSyncType(int(data_type_string)) | |
| 223 name = data_type_string.lower().rstrip('s') | |
| 224 for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems(): | |
| 225 if field_descriptor.name.lower().rstrip('s') == name: | |
| 226 return data_type | |
| 227 raise DataTypeIdNotRecognized | |
| 228 | |
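| # For example (a sketch of the loose matching above): 'Bookmarks', | |
| # 'bookmark', and 'BOOKMARKS' all resolve to BOOKMARK, since the input is | |
| # lowercased and trailing 's' characters are stripped before comparing | |
| # against the EntitySpecifics field names. | |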
| 229 | |
| 230 def MakeNewKeystoreKey(): | |
| 231 """Returns a new random keystore key.""" | |
| 232 return ''.join(random.choice(string.ascii_uppercase + string.digits) | |
| 233 for x in xrange(KEYSTORE_KEY_LENGTH)) | |
| 234 | |
| 235 | |
| 236 def SyncTypeToString(data_type): | |
| 237 """Formats a sync type enum (from ALL_TYPES) to a human-readable string.""" | |
| 238 return SYNC_TYPE_TO_DESCRIPTOR[data_type].name | |
| 239 | |
| 240 | |
| 241 def CallerInfoToString(caller_info_source): | |
| 242 """Formats a GetUpdatesSource enum value to a readable string.""" | |
| 243 return get_updates_caller_info_pb2.GetUpdatesCallerInfo \ | |
| 244 .DESCRIPTOR.enum_types_by_name['GetUpdatesSource'] \ | |
| 245 .values_by_number[caller_info_source].name | |
| 246 | |
| 247 | |
| 248 def ShortDatatypeListSummary(data_types): | |
| 249 """Formats compactly a list of sync types (python enums) for human eyes. | |
| 250 | |
| 251 This function is intended for use by logging. If the list of datatypes | |
| 252 contains almost all of the values, the return value will be expressed | |
| 253 in terms of the datatypes that aren't set. | |
| 254 """ | |
| 255 included = set(data_types) - set([TOP_LEVEL]) | |
| 256 if not included: | |
| 257 return 'nothing' | |
| 258 excluded = set(ALL_TYPES) - included - set([TOP_LEVEL]) | |
| 259 if not excluded: | |
| 260 return 'everything' | |
| 261 simple_text = '+'.join(sorted([SyncTypeToString(x) for x in included])) | |
| 262 all_but_text = 'all except %s' % ( | |
| 263 '+'.join(sorted([SyncTypeToString(x) for x in excluded]))) | |
| 264 if len(included) < len(excluded) or len(simple_text) <= len(all_but_text): | |
| 265 return simple_text | |
| 266 else: | |
| 267 return all_but_text | |
| 268 | |
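| # Illustrative outputs (the names are EntitySpecifics field names): | |
| # ShortDatatypeListSummary([AUTOFILL, BOOKMARK]) => 'autofill+bookmark' | |
| # ShortDatatypeListSummary(ALL_TYPES) => 'everything' | |
| # ShortDatatypeListSummary(set(ALL_TYPES) - set([THEME])) | |
| # => 'all except theme' | |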
| 269 | |
| 270 def GetDefaultEntitySpecifics(data_type): | |
| 271 """Get an EntitySpecifics having a sync type's default field value.""" | |
| 272 specifics = sync_pb2.EntitySpecifics() | |
| 273 if data_type in SYNC_TYPE_TO_DESCRIPTOR: | |
| 274 descriptor = SYNC_TYPE_TO_DESCRIPTOR[data_type] | |
| 275 getattr(specifics, descriptor.name).SetInParent() | |
| 276 return specifics | |
| 277 | |
| 278 | |
| 279 class PermanentItem(object): | |
| 280 """A specification of one server-created permanent item. | |
| 281 | |
| 282 Attributes: | |
| 283 tag: A known-to-the-client value that uniquely identifies a server-created | |
| 284 permanent item. | |
| 285 name: The human-readable display name for this item. | |
| 286 parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates | |
| 287 a top-level item. Otherwise, this must be the tag value of some other | |
| 288 server-created permanent item. | |
| 289 sync_type: A value from ALL_TYPES, giving the datatype of this permanent | |
| 290 item. This controls which types of client GetUpdates requests will | |
| 291 cause the permanent item to be created and returned. | |
| 292 create_by_default: Whether the permanent item is created at startup. | |
| 293 Defaults to True. Non-default permanent items are those created only | |
| 294 when a client explicitly tells the server to do so. | |
| 296 """ | |
| 297 | |
| 298 def __init__(self, tag, name, parent_tag, sync_type, create_by_default=True): | |
| 299 self.tag = tag | |
| 300 self.name = name | |
| 301 self.parent_tag = parent_tag | |
| 302 self.sync_type = sync_type | |
| 303 self.create_by_default = create_by_default | |
| 304 | |
| 305 | |
| 306 class MigrationHistory(object): | |
| 307 """A record of the migration events associated with an account. | |
| 308 | |
| 309 Each migration event invalidates one or more datatypes on all clients | |
| 310 that had synced the datatype before the event. Such clients will continue | |
| 311 to receive MigrationDone errors until they throw away their progress and | |
| 312 re-sync that datatype from the beginning. | |
| 313 """ | |
| 314 def __init__(self): | |
| 315 self._migrations = {} | |
| 316 for datatype in ALL_TYPES: | |
| 317 self._migrations[datatype] = [1] | |
| 318 self._next_migration_version = 2 | |
| 319 | |
| 320 def GetLatestVersion(self, datatype): | |
| 321 return self._migrations[datatype][-1] | |
| 322 | |
| 323 def CheckAllCurrent(self, versions_map): | |
| 324 """Raises an error if any the provided versions are out of date. | |
| 325 | |
| 326 This function intentionally returns migrations in the order that they were | |
| 327 triggered. Doing it this way allows the client to queue up two migrations | |
| 328 in a row, so the second one is received while responding to the first. | |
| 329 | |
| 330 Arguments: | |
| 331 versions_map: a map whose keys are datatypes and whose values are versions. | |
| 332 | |
| 333 Raises: | |
| 334 MigrationDoneError: if a mismatch is found. | |
| 335 """ | |
| 336 problems = {} | |
| 337 for datatype, client_migration in versions_map.iteritems(): | |
| 338 for server_migration in self._migrations[datatype]: | |
| 339 if client_migration < server_migration: | |
| 340 problems.setdefault(server_migration, []).append(datatype) | |
| 341 if problems: | |
| 342 raise MigrationDoneError(problems[min(problems.keys())]) | |
| 343 | |
| 344 def Bump(self, datatypes): | |
| 345 """Add a record of a migration, to cause errors on future requests.""" | |
| 346 for datatype in datatypes: | |
| 347 self._migrations[datatype].append(self._next_migration_version) | |
| 348 self._next_migration_version += 1 | |
| 349 | |
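| # A sketch of the versioning above: every datatype starts at migration | |
| # version 1, and each Bump() hands out the next global version number. | |
| # history = MigrationHistory() | |
| # history.GetLatestVersion(BOOKMARK) # => 1 | |
| # history.Bump([BOOKMARK]) # BOOKMARK moves to version 2 | |
| # history.CheckAllCurrent({BOOKMARK: 1}) # raises MigrationDoneError | |
| # history.CheckAllCurrent({BOOKMARK: 2}) # passes | |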
| 350 | |
| 351 class UpdateSieve(object): | |
| 352 """A filter to remove items the client has already seen.""" | |
| 353 def __init__(self, request, migration_history=None): | |
| 354 self._original_request = request | |
| 355 self._state = {} | |
| 356 self._migration_history = migration_history or MigrationHistory() | |
| 357 self._migration_versions_to_check = {} | |
| 358 if request.from_progress_marker: | |
| 359 for marker in request.from_progress_marker: | |
| 360 data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id) | |
| 361 if marker.HasField('timestamp_token_for_migration'): | |
| 362 timestamp = marker.timestamp_token_for_migration | |
| 363 if timestamp: | |
| 364 self._migration_versions_to_check[data_type] = 1 | |
| 365 elif marker.token: | |
| 366 (timestamp, version) = pickle.loads(marker.token) | |
| 367 self._migration_versions_to_check[data_type] = version | |
| 368 elif marker.HasField('token'): | |
| 369 timestamp = 0 | |
| 370 else: | |
| 371 raise ValueError('No timestamp information in progress marker.') | |
| 373 self._state[data_type] = timestamp | |
| 374 elif request.HasField('from_timestamp'): | |
| 375 for data_type in GetEntryTypesFromSpecifics(request.requested_types): | |
| 376 self._state[data_type] = request.from_timestamp | |
| 377 self._migration_versions_to_check[data_type] = 1 | |
| 378 if self._state: | |
| 379 self._state[TOP_LEVEL] = min(self._state.itervalues()) | |
| 380 | |
| 381 def SummarizeRequest(self): | |
| 382 timestamps = {} | |
| 383 for data_type, timestamp in self._state.iteritems(): | |
| 384 if data_type == TOP_LEVEL: | |
| 385 continue | |
| 386 timestamps.setdefault(timestamp, []).append(data_type) | |
| 387 return ', '.join('<%s>@%d' % (ShortDatatypeListSummary(types), stamp) | |
| 388 for stamp, types in sorted(timestamps.iteritems())) | |
| 389 | |
| 390 def CheckMigrationState(self): | |
| 391 self._migration_history.CheckAllCurrent(self._migration_versions_to_check) | |
| 392 | |
| 393 def ClientWantsItem(self, item): | |
| 394 """Return true if the client hasn't already seen an item.""" | |
| 395 return self._state.get(GetEntryType(item), sys.maxint) < item.version | |
| 396 | |
| 397 def HasAnyTimestamp(self): | |
| 398 """Return true if at least one datatype was requested.""" | |
| 399 return bool(self._state) | |
| 400 | |
| 401 def GetMinTimestamp(self): | |
| 402 """Return true the smallest timestamp requested across all datatypes.""" | |
| 403 return min(self._state.itervalues()) | |
| 404 | |
| 405 def GetFirstTimeTypes(self): | |
| 406 """Return a list of datatypes requesting updates from timestamp zero.""" | |
| 407 return [datatype for datatype, timestamp in self._state.iteritems() | |
| 408 if timestamp == 0] | |
| 409 | |
| 410 def SaveProgress(self, new_timestamp, get_updates_response): | |
| 411 """Write the new_timestamp or new_progress_marker fields to a response.""" | |
| 412 if self._original_request.from_progress_marker: | |
| 413 for data_type, old_timestamp in self._state.iteritems(): | |
| 414 if data_type == TOP_LEVEL: | |
| 415 continue | |
| 416 new_marker = sync_pb2.DataTypeProgressMarker() | |
| 417 new_marker.data_type_id = SyncTypeToProtocolDataTypeId(data_type) | |
| 418 final_stamp = max(old_timestamp, new_timestamp) | |
| 419 final_migration = self._migration_history.GetLatestVersion(data_type) | |
| 420 new_marker.token = pickle.dumps((final_stamp, final_migration)) | |
| 421 if new_marker not in self._original_request.from_progress_marker: | |
| 422 get_updates_response.new_progress_marker.add().MergeFrom(new_marker) | |
| 423 elif self._original_request.HasField('from_timestamp'): | |
| 424 if self._original_request.from_timestamp < new_timestamp: | |
| 425 get_updates_response.new_timestamp = new_timestamp | |
| 426 | |
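| # The progress-marker token written above is opaque to the client: it is | |
| # simply a pickle of the (timestamp, migration_version) pair. For example, | |
| # pickle.loads(marker.token) == (15, 1) means the client has every change | |
| # through version 15 and knows migration version 1 for that datatype. | |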
| 427 | |
| 428 class SyncDataModel(object): | |
| 429 """Models the account state of one sync user.""" | |
| 430 _BATCH_SIZE = 100 | |
| 431 | |
| 432 # Specify all the permanent items that a model might need. | |
| 433 _PERMANENT_ITEM_SPECS = [ | |
| 434 PermanentItem('google_chrome_apps', name='Apps', | |
| 435 parent_tag=ROOT_ID, sync_type=APPS), | |
| 436 PermanentItem('google_chrome_app_notifications', name='App Notifications', | |
| 437 parent_tag=ROOT_ID, sync_type=APP_NOTIFICATION), | |
| 438 PermanentItem('google_chrome_app_settings', | |
| 439 name='App Settings', | |
| 440 parent_tag=ROOT_ID, sync_type=APP_SETTINGS), | |
| 441 PermanentItem('google_chrome_bookmarks', name='Bookmarks', | |
| 442 parent_tag=ROOT_ID, sync_type=BOOKMARK), | |
| 443 PermanentItem('bookmark_bar', name='Bookmark Bar', | |
| 444 parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK), | |
| 447 PermanentItem('other_bookmarks', name='Other Bookmarks', | |
| 448 parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK), | |
| 449 PermanentItem('synced_bookmarks', name='Synced Bookmarks', | |
| 450 parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK, | |
| 451 create_by_default=False), | |
| 452 PermanentItem('google_chrome_autofill', name='Autofill', | |
| 453 parent_tag=ROOT_ID, sync_type=AUTOFILL), | |
| 454 PermanentItem('google_chrome_autofill_profiles', name='Autofill Profiles', | |
| 455 parent_tag=ROOT_ID, sync_type=AUTOFILL_PROFILE), | |
| 456 PermanentItem('google_chrome_device_info', name='Device Info', | |
| 457 parent_tag=ROOT_ID, sync_type=DEVICE_INFO), | |
| 458 PermanentItem('google_chrome_experiments', name='Experiments', | |
| 459 parent_tag=ROOT_ID, sync_type=EXPERIMENTS), | |
| 460 PermanentItem('google_chrome_extension_settings', | |
| 461 name='Extension Settings', | |
| 462 parent_tag=ROOT_ID, sync_type=EXTENSION_SETTINGS), | |
| 463 PermanentItem('google_chrome_extensions', name='Extensions', | |
| 464 parent_tag=ROOT_ID, sync_type=EXTENSIONS), | |
| 465 PermanentItem('google_chrome_history_delete_directives', | |
| 466 name='History Delete Directives', | |
| 467 parent_tag=ROOT_ID, | |
| 468 sync_type=HISTORY_DELETE_DIRECTIVE), | |
| 469 PermanentItem('google_chrome_nigori', name='Nigori', | |
| 470 parent_tag=ROOT_ID, sync_type=NIGORI), | |
| 471 PermanentItem('google_chrome_passwords', name='Passwords', | |
| 472 parent_tag=ROOT_ID, sync_type=PASSWORD), | |
| 473 PermanentItem('google_chrome_preferences', name='Preferences', | |
| 474 parent_tag=ROOT_ID, sync_type=PREFERENCE), | |
| 475 PermanentItem('google_chrome_synced_notifications', name='Synced Notifications', | |
| 476 parent_tag=ROOT_ID, sync_type=SYNCED_NOTIFICATION), | |
| 477 PermanentItem('google_chrome_search_engines', name='Search Engines', | |
| 478 parent_tag=ROOT_ID, sync_type=SEARCH_ENGINE), | |
| 479 PermanentItem('google_chrome_sessions', name='Sessions', | |
| 480 parent_tag=ROOT_ID, sync_type=SESSION), | |
| 481 PermanentItem('google_chrome_themes', name='Themes', | |
| 482 parent_tag=ROOT_ID, sync_type=THEME), | |
| 483 PermanentItem('google_chrome_typed_urls', name='Typed URLs', | |
| 484 parent_tag=ROOT_ID, sync_type=TYPED_URL), | |
| 485 ] | |
| 486 | |
| 487 def __init__(self): | |
| 488 # Monotonically increasing version number. The next object change will | |
| 489 # take on this value + 1. | |
| 490 self._version = 0 | |
| 491 | |
| 492 # The definitive copy of this client's items: a map from ID string to a | |
| 493 # SyncEntity protocol buffer. | |
| 494 self._entries = {} | |
| 495 | |
| 496 self.ResetStoreBirthday() | |
| 497 | |
| 498 self.migration_history = MigrationHistory() | |
| 499 | |
| 500 self.induced_error = sync_pb2.ClientToServerResponse.Error() | |
| 501 self.induced_error_frequency = 0 | |
| 502 self.sync_count_before_errors = 0 | |
| 503 | |
| 504 self._keys = [MakeNewKeystoreKey()] | |
| 505 | |
| 506 def _SaveEntry(self, entry): | |
| 507 """Insert or update an entry in the change log, and give it a new version. | |
| 508 | |
| 509 The ID fields of this entry are assumed to be valid server IDs. This | |
| 510 entry will be updated with a new version number and sync_timestamp. | |
| 511 | |
| 512 Args: | |
| 513 entry: The entry to be added or updated. | |
| 514 """ | |
| 515 self._version += 1 | |
| 516 # Maintain a global (rather than per-item) sequence number and use it | |
| 517 # both as the per-entry version as well as the update-progress timestamp. | |
| 518 # This simulates the behavior of the original server implementation. | |
| 519 entry.version = self._version | |
| 520 entry.sync_timestamp = self._version | |
| 521 | |
| 522 # Preserve the originator info, which the client is not required to send | |
| 523 # when updating. | |
| 524 base_entry = self._entries.get(entry.id_string) | |
| 525 if base_entry: | |
| 526 entry.originator_cache_guid = base_entry.originator_cache_guid | |
| 527 entry.originator_client_item_id = base_entry.originator_client_item_id | |
| 528 | |
| 529 self._entries[entry.id_string] = copy.deepcopy(entry) | |
| 530 | |
| 531 def _ServerTagToId(self, tag): | |
| 532 """Determine the server ID from a server-unique tag. | |
| 533 | |
| 534 The resulting value is guaranteed not to collide with the other ID | |
| 535 generation methods. | |
| 536 | |
| 537 Args: | |
| 539 tag: The unique, known-to-the-client tag of a server-generated item. | |
| 540 Returns: | |
| 541 The string value of the computed server ID. | |
| 542 """ | |
| 543 if not tag or tag == ROOT_ID: | |
| 544 return tag | |
| 545 spec = [x for x in self._PERMANENT_ITEM_SPECS if x.tag == tag][0] | |
| 546 return self._MakeCurrentId(spec.sync_type, '<server tag>%s' % tag) | |
| 547 | |
| 548 def _ClientTagToId(self, datatype, tag): | |
| 549 """Determine the server ID from a client-unique tag. | |
| 550 | |
| 551 The resulting value is guaranteed not to collide with the other ID | |
| 552 generation methods. | |
| 553 | |
| 554 Args: | |
| 555 datatype: The sync type (python enum) of the identified object. | |
| 556 tag: The unique, opaque-to-the-server tag of a client-tagged item. | |
| 557 Returns: | |
| 558 The string value of the computed server ID. | |
| 559 """ | |
| 560 return self._MakeCurrentId(datatype, '<client tag>%s' % tag) | |
| 561 | |
| 562 def _ClientIdToId(self, datatype, client_guid, client_item_id): | |
| 563 """Compute a unique server ID from a client-local ID tag. | |
| 564 | |
| 565 The resulting value is guaranteed not to collide with the other ID | |
| 566 generation methods. | |
| 567 | |
| 568 Args: | |
| 569 datatype: The sync type (python enum) of the identified object. | |
| 570 client_guid: A globally unique ID that identifies the client which | |
| 571 created this item. | |
| 572 client_item_id: An ID that uniquely identifies this item on the client | |
| 573 which created it. | |
| 574 Returns: | |
| 575 The string value of the computed server ID. | |
| 576 """ | |
| 577 # Using the client ID info is not required here (we could instead generate | |
| 578 # a random ID), but it's useful for debugging. | |
| 579 return self._MakeCurrentId(datatype, | |
| 580 '<server ID originally>%s/%s' % (client_guid, client_item_id)) | |
| 581 | |
| 582 def _MakeCurrentId(self, datatype, inner_id): | |
| 583 return '%d^%d^%s' % (datatype, | |
| 584 self.migration_history.GetLatestVersion(datatype), | |
| 585 inner_id) | |
| 586 | |
| 587 def _ExtractIdInfo(self, id_string): | |
| 588 if not id_string or id_string == ROOT_ID: | |
| 589 return None | |
| 590 datatype_string, separator, remainder = id_string.partition('^') | |
| 591 migration_version_string, separator, inner_id = remainder.partition('^') | |
| 592 return (int(datatype_string), int(migration_version_string), inner_id) | |
| 593 | |
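| # A sketch of the ID format produced and parsed above (BOOKMARK == 6, | |
| # assuming bookmarks are still at migration version 1): | |
| # self._MakeCurrentId(BOOKMARK, '<client tag>abc') => '6^1^<client tag>abc' | |
| # self._ExtractIdInfo('6^1^<client tag>abc') => (6, 1, '<client tag>abc') | |
| | |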
| 594 def _WritePosition(self, entry, parent_id): | |
| 595 """Ensure the entry has an absolute, numeric position and parent_id. | |
| 596 | |
| 597 Historically, clients would specify positions using the predecessor-based | |
| 598 references in the insert_after_item_id field; starting July 2011, this | |
| 599 was changed and Chrome now sends up the absolute position. The server | |
| 600 must store a position_in_parent value and must not maintain | |
| 601 insert_after_item_id. | |
| 602 | |
| 603 Args: | |
| 604 entry: The entry for which to write a position. Its ID fields are | |
| 605 assumed to be server IDs. This entry will have its parent_id_string | |
| 606 and position_in_parent fields updated; its insert_after_item_id field | |
| 607 will be cleared. | |
| 608 parent_id: The ID of the entry intended as the new parent. | |
| 609 """ | |
| 610 | |
| 611 entry.parent_id_string = parent_id | |
| 612 if not entry.HasField('position_in_parent'): | |
| 613 entry.position_in_parent = 1337 # A debuggable, distinctive default. | |
| 614 entry.ClearField('insert_after_item_id') | |
| 615 | |
| 616 def _ItemExists(self, id_string): | |
| 617 """Determine whether an item exists in the changelog.""" | |
| 618 return id_string in self._entries | |
| 619 | |
| 620 def _CreatePermanentItem(self, spec): | |
| 621 """Create one permanent item from its spec, if it doesn't exist. | |
| 622 | |
| 623 The resulting item is added to the changelog. | |
| 624 | |
| 625 Args: | |
| 626 spec: A PermanentItem object holding the properties of the item to create. | |
| 627 """ | |
| 628 id_string = self._ServerTagToId(spec.tag) | |
| 629 if self._ItemExists(id_string): | |
| 630 return | |
| 631 print 'Creating permanent item: %s' % spec.name | |
| 632 entry = sync_pb2.SyncEntity() | |
| 633 entry.id_string = id_string | |
| 634 entry.non_unique_name = spec.name | |
| 635 entry.name = spec.name | |
| 636 entry.server_defined_unique_tag = spec.tag | |
| 637 entry.folder = True | |
| 638 entry.deleted = False | |
| 639 entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type)) | |
| 640 self._WritePosition(entry, self._ServerTagToId(spec.parent_tag)) | |
| 641 self._SaveEntry(entry) | |
| 642 | |
| 643 def _CreateDefaultPermanentItems(self, requested_types): | |
| 644 """Ensure creation of all default permanent items for a given set of types. | |
| 645 | |
| 646 Args: | |
| 647 requested_types: A list of sync data types from ALL_TYPES. | |
| 648 Default permanent items will be created only for these types. | |
| 649 """ | |
| 650 for spec in self._PERMANENT_ITEM_SPECS: | |
| 651 if spec.sync_type in requested_types and spec.create_by_default: | |
| 652 self._CreatePermanentItem(spec) | |
| 653 | |
| 654 def ResetStoreBirthday(self): | |
| 655 """Resets the store birthday to a random value.""" | |
| 656 # TODO(nick): uuid.uuid1() is better, but python 2.5 only. | |
| 657 self.store_birthday = '%0.30f' % random.random() | |
| 658 | |
| 659 def StoreBirthday(self): | |
| 660 """Gets the store birthday.""" | |
| 661 return self.store_birthday | |
| 662 | |
| 663 def GetChanges(self, sieve): | |
| 664 """Get entries which have changed, oldest first. | |
| 665 | |
| 666 The returned entries are limited to being _BATCH_SIZE many. The entries | |
| 667 are returned in strict version order. | |
| 668 | |
| 669 Args: | |
| 670 sieve: An update sieve to use to filter out updates the client | |
| 671 has already seen. | |
| 672 Returns: | |
| 673 A tuple of (version, entries, changes_remaining). Version is a new | |
| 674 timestamp value, which should be used as the starting point for the | |
| 675 next query. Entries is the batch of entries meeting the current | |
| 676 timestamp query. Changes_remaining indicates the number of changes | |
| 677 left on the server after this batch. | |
| 678 """ | |
| 679 if not sieve.HasAnyTimestamp(): | |
| 680 return (0, [], 0) | |
| 681 min_timestamp = sieve.GetMinTimestamp() | |
| 682 self._CreateDefaultPermanentItems(sieve.GetFirstTimeTypes()) | |
| 683 change_log = sorted(self._entries.values(), | |
| 684 key=operator.attrgetter('version')) | |
| 685 new_changes = [x for x in change_log if x.version > min_timestamp] | |
| 686 # Pick batch_size new changes, and then filter them. This matches | |
| 687 # the RPC behavior of the production sync server. | |
| 688 batch = new_changes[:self._BATCH_SIZE] | |
| 689 if not batch: | |
| 690 # Client is up to date. | |
| 691 return (min_timestamp, [], 0) | |
| 692 | |
| 693 # Restrict batch to requested types. Tombstones are untyped | |
| 694 # and will always get included. | |
| 695 filtered = [copy.deepcopy(item) for item in batch | |
| 696 if item.deleted or sieve.ClientWantsItem(item)] | |
| 697 | |
| 698 # The new client timestamp is the timestamp of the last item in the | |
| 699 # batch, even if that item was filtered out. | |
| 700 return (batch[-1].version, filtered, len(new_changes) - len(batch)) | |
| 701 | |
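| # For example, with 250 pending changes and a fresh client (timestamp 0), | |
| # the first call returns (<version of the 100th change>, <those 100 | |
| # changes, minus any the sieve filters out>, 150). | |
| | |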
| 702 def GetKeystoreKeys(self): | |
| 703 """Returns the encryption keys for this account.""" | |
| 704 print "Returning encryption keys: %s" % self._keys | |
| 705 return self._keys | |
| 706 | |
| 707 def _CopyOverImmutableFields(self, entry): | |
| 708 """Preserve immutable fields by copying pre-commit state. | |
| 709 | |
| 710 Args: | |
| 711 entry: A sync entity from the client. | |
| 712 """ | |
| 713 if entry.id_string in self._entries: | |
| 714 if self._entries[entry.id_string].HasField( | |
| 715 'server_defined_unique_tag'): | |
| 716 entry.server_defined_unique_tag = ( | |
| 717 self._entries[entry.id_string].server_defined_unique_tag) | |
| 718 | |
| 719 def _CheckVersionForCommit(self, entry): | |
| 720 """Perform an optimistic concurrency check on the version number. | |
| 721 | |
| 722 Clients are only allowed to commit if they report having seen the most | |
| 723 recent version of an object. | |
| 724 | |
| 725 Args: | |
| 726 entry: A sync entity from the client. It is assumed that ID fields | |
| 727 have been converted to server IDs. | |
| 728 Returns: | |
| 729 A boolean value indicating whether the client's version matches the | |
| 730 newest server version for the given entry. | |
| 731 """ | |
| 732 if entry.id_string in self._entries: | |
| 733 # Allow edits/deletes if the version matches, and any undeletion. | |
| 734 return (self._entries[entry.id_string].version == entry.version or | |
| 735 self._entries[entry.id_string].deleted) | |
| 736 else: | |
| 737 # Allow unknown ID only if the client thinks it's new too. | |
| 738 return entry.version == 0 | |
| 739 | |
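| # Examples of the optimistic concurrency rule above: | |
| # - client commits version 5, server copy is at version 5 -> allowed. | |
| # - client commits version 5, server copy is at version 6 -> rejected, | |
| # unless the server copy is a tombstone (undeletion is allowed). | |
| # - client commits version 0 under an ID unknown to the server -> allowed | |
| # (a genuinely new item). | |
| | |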
| 740 def _CheckParentIdForCommit(self, entry): | |
| 741 """Check that the parent ID referenced in a SyncEntity actually exists. | |
| 742 | |
| 743 Args: | |
| 744 entry: A sync entity from the client. It is assumed that ID fields | |
| 745 have been converted to server IDs. | |
| 746 Returns: | |
| 747 A boolean value indicating whether the entity's parent ID is an object | |
| 748 that actually exists (and is not deleted) in the current account state. | |
| 749 """ | |
| 750 if entry.parent_id_string == ROOT_ID: | |
| 751 # This is generally allowed. | |
| 752 return True | |
| 753 if entry.parent_id_string not in self._entries: | |
| 754 print 'Warning: Client sent unknown ID. Should never happen.' | |
| 755 return False | |
| 756 if entry.parent_id_string == entry.id_string: | |
| 757 print 'Warning: Client sent circular reference. Should never happen.' | |
| 758 return False | |
| 759 if self._entries[entry.parent_id_string].deleted: | |
| 760 # This can happen in a race condition between two clients. | |
| 761 return False | |
| 762 if not self._entries[entry.parent_id_string].folder: | |
| 763 print 'Warning: Client sent non-folder parent. Should never happen.' | |
| 764 return False | |
| 765 return True | |
| 766 | |
| 767 def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session): | |
| 768 """Convert ID fields in a client sync entry to server IDs. | |
| 769 | |
| 770 A commit batch sent by a client may contain new items for which the | |
| 771 server has not generated IDs yet. And within a commit batch, later | |
| 772 items are allowed to refer to earlier items. This method will | |
| 773 generate server IDs for new items, as well as rewrite references | |
| 774 to items whose server IDs were generated earlier in the batch. | |
| 775 | |
| 776 Args: | |
| 777 entry: The client sync entry to modify. | |
| 778 cache_guid: The globally unique ID of the client that sent this | |
| 779 commit request. | |
| 780 commit_session: A dictionary mapping the original IDs to the new server | |
| 781 IDs, for any items committed earlier in the batch. | |
| 782 """ | |
| 783 if entry.version == 0: | |
| 784 data_type = GetEntryType(entry) | |
| 785 if entry.HasField('client_defined_unique_tag'): | |
| 786 # When present, this should determine the item's ID. | |
| 787 new_id = self._ClientTagToId(data_type, entry.client_defined_unique_tag) | |
| 788 else: | |
| 789 new_id = self._ClientIdToId(data_type, cache_guid, entry.id_string) | |
| 790 entry.originator_cache_guid = cache_guid | |
| 791 entry.originator_client_item_id = entry.id_string | |
| 792 commit_session[entry.id_string] = new_id # Remember the remapping. | |
| 793 entry.id_string = new_id | |
| 794 if entry.parent_id_string in commit_session: | |
| 795 entry.parent_id_string = commit_session[entry.parent_id_string] | |
| 796 if entry.insert_after_item_id in commit_session: | |
| 797 entry.insert_after_item_id = commit_session[entry.insert_after_item_id] | |
| 798 | |
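| # A sketch of the remapping: if a client commits a new folder under local | |
| # ID 'c1' followed by a child whose parent_id_string is 'c1', the folder | |
| # gets a fresh server ID first, commit_session records {'c1': <server ID>}, | |
| # and the child's parent reference is rewritten when its turn comes. | |
| | |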
| 799 def ValidateCommitEntries(self, entries): | |
| 800 """Raise an exception if a commit batch contains any global errors. | |
| 801 | |
| 802 Arguments: | |
| 803 entries: an iterable containing commit-form SyncEntity protocol buffers. | |
| 804 | |
| 805 Raises: | |
| 806 MigrationDoneError: if any of the entries reference a recently-migrated | |
| 807 datatype. | |
| 808 """ | |
| 809 server_ids_in_commit = set() | |
| 810 local_ids_in_commit = set() | |
| 811 for entry in entries: | |
| 812 if entry.version: | |
| 813 server_ids_in_commit.add(entry.id_string) | |
| 814 else: | |
| 815 local_ids_in_commit.add(entry.id_string) | |
| 816 if entry.HasField('parent_id_string'): | |
| 817 if entry.parent_id_string not in local_ids_in_commit: | |
| 818 server_ids_in_commit.add(entry.parent_id_string) | |
| 819 | |
| 820 versions_present = {} | |
| 821 for server_id in server_ids_in_commit: | |
| 822 parsed = self._ExtractIdInfo(server_id) | |
| 823 if parsed: | |
| 824 datatype, version, _ = parsed | |
| 825 versions_present.setdefault(datatype, []).append(version) | |
| 826 | |
| 827 self.migration_history.CheckAllCurrent( | |
| 828 dict((k, min(v)) for k, v in versions_present.iteritems())) | |
| 829 | |
| 830 def CommitEntry(self, entry, cache_guid, commit_session): | |
| 831 """Attempt to commit one entry to the user's account. | |
| 832 | |
| 833 Args: | |
| 834 entry: A SyncEntity protobuf representing desired object changes. | |
| 835 cache_guid: A string value uniquely identifying the client; this | |
| 836 is used for ID generation and will determine the originator_cache_guid | |
| 837 if the entry is new. | |
| 838 commit_session: A dictionary mapping client IDs to server IDs for any | |
| 839 objects committed earlier this session. If the entry gets a new ID | |
| 840 during commit, the change will be recorded here. | |
| 841 Returns: | |
| 842 A SyncEntity reflecting the post-commit value of the entry, or None | |
| 843 if the entry was not committed due to an error. | |
| 844 """ | |
| 845 entry = copy.deepcopy(entry) | |
| 846 | |
| 847 # Generate server IDs for this entry, and write generated server IDs | |
| 848 # from earlier entries into the message's fields, as appropriate. The | |
| 849 # ID generation state is stored in 'commit_session'. | |
| 850 self._RewriteIdsAsServerIds(entry, cache_guid, commit_session) | |
| 851 | |
| 852 # Perform the optimistic concurrency check on the entry's version number. | |
| 853 # Clients are not allowed to commit unless they indicate that they've seen | |
| 854 # the most recent version of an object. | |
| 855 if not self._CheckVersionForCommit(entry): | |
| 856 return None | |
| 857 | |
| 858 # Check the validity of the parent ID; it must exist at this point. | |
| 859 # TODO(nick): Implement cycle detection and resolution. | |
| 860 if not self._CheckParentIdForCommit(entry): | |
| 861 return None | |
| 862 | |
| 863 self._CopyOverImmutableFields(entry) | |
| 864 | |
| 865 # At this point, the commit is definitely going to happen. | |
| 866 | |
| 867 # Deletion works by storing a limited record for an entry, called a | |
| 868 # tombstone. A sync server must track deleted IDs forever, since it does | |
| 869 # not keep track of client knowledge (there's no deletion ACK event). | |
| 870 if entry.deleted: | |
| 871 def MakeTombstone(id_string): | |
| 872 """Make a tombstone entry that will replace the entry being deleted. | |
| 873 | |
| 874 Args: | |
| 875 id_string: The ID of the SyncEntity to be deleted. | |
| 876 Returns: | |
| 877 A new SyncEntity reflecting the fact that the entry is deleted. | |
| 878 """ | |
| 879 # Only the ID, version and deletion state are preserved on a tombstone. | |
| 880 # TODO(nick): Does the production server not preserve the type? Not | |
| 881 # doing so means that tombstones cannot be filtered based on | |
| 882 # requested_types at GetUpdates time. | |
| 883 tombstone = sync_pb2.SyncEntity() | |
| 884 tombstone.id_string = id_string | |
| 885 tombstone.deleted = True | |
| 886 tombstone.name = '' | |
| 887 return tombstone | |
| 888 | |
| 889 def IsChild(child_id): | |
| 890 """Check if a SyncEntity is a child of entry, or any of its children. | |
| 891 | |
| 892 Args: | |
| 893 child_id: The ID of the SyncEntity that is a possible child of entry. | |
| 894 Returns: | |
| 895 True if it is a child; false otherwise. | |
| 896 """ | |
| 897 if child_id not in self._entries: | |
| 898 return False | |
| 899 if self._entries[child_id].parent_id_string == entry.id_string: | |
| 900 return True | |
| 901 return IsChild(self._entries[child_id].parent_id_string) | |
| 902 | |
| 903 # Identify any children entry might have. | |
| 904 child_ids = [child.id_string for child in self._entries.itervalues() | |
| 905 if IsChild(child.id_string)] | |
| 906 | |
| 907 # Mark all children that were identified as deleted. | |
| 908 for child_id in child_ids: | |
| 909 self._SaveEntry(MakeTombstone(child_id)) | |
| 910 | |
| 911 # Delete entry itself. | |
| 912 entry = MakeTombstone(entry.id_string) | |
| 913 else: | |
| 914 # Comments in sync.proto detail how the representation of positional | |
| 915 # ordering works: either the 'insert_after_item_id' field or the | |
| 916 # 'position_in_parent' field may determine the sibling order during | |
| 917 # Commit operations. The 'position_in_parent' field provides an absolute | |
| 918 # ordering in GetUpdates contexts. Here we assume the client will | |
| 919 # always send a valid position_in_parent (this is the newer style), and | |
| 920 # we ignore insert_after_item_id (an older style). | |
| 921 self._WritePosition(entry, entry.parent_id_string) | |
| 922 | |
| 923 # Preserve the originator info, which the client is not required to send | |
| 924 # when updating. | |
| 925 base_entry = self._entries.get(entry.id_string) | |
| 926 if base_entry and not entry.HasField('originator_cache_guid'): | |
| 927 entry.originator_cache_guid = base_entry.originator_cache_guid | |
| 928 entry.originator_client_item_id = base_entry.originator_client_item_id | |
| 929 | |
| 930 # Store the current time since the Unix epoch in milliseconds. | |
| 931 entry.mtime = (int((time.mktime(time.gmtime()) - | |
| 932 time.mktime(UNIX_TIME_EPOCH))*1000)) | |
| 933 | |
| 934 # Commit the change. This also updates the version number. | |
| 935 self._SaveEntry(entry) | |
| 936 return entry | |
| 937 | |
| 938 def _RewriteVersionInId(self, id_string): | |
| 939 """Rewrites an ID so that its migration version becomes current.""" | |
| 940 parsed_id = self._ExtractIdInfo(id_string) | |
| 941 if not parsed_id: | |
| 942 return id_string | |
| 943 datatype, old_migration_version, inner_id = parsed_id | |
| 944 return self._MakeCurrentId(datatype, inner_id) | |
| 945 | |
| 946 def TriggerMigration(self, datatypes): | |
| 947 """Cause a migration to occur for a set of datatypes on this account. | |
| 948 | |
| 949 Clients will see the MIGRATION_DONE error for these datatypes until they | |
| 950 resync them. | |
| 951 """ | |
| 952 self.migration_history.Bump(datatypes) | |
| 953 all_entries = self._entries.values() | |
| 954 self._entries.clear() | |
| 955 for entry in all_entries: | |
| 956 new_id = self._RewriteVersionInId(entry.id_string) | |
| 957 entry.id_string = new_id | |
| 958 if entry.HasField('parent_id_string'): | |
| 959 entry.parent_id_string = self._RewriteVersionInId( | |
| 960 entry.parent_id_string) | |
| 961 self._entries[entry.id_string] = entry | |
| 962 | |
| 963 def TriggerSyncTabFavicons(self): | |
| 964 """Set the 'sync_tab_favicons' field to this account's nigori node. | |
| 965 | |
| 966 If the field is not currently set, will write a new nigori node entry | |
| 967 with the field set. Else does nothing. | |
| 968 """ | |
| 969 | |
| 970 nigori_tag = "google_chrome_nigori" | |
| 971 nigori_original = self._entries.get(self._ServerTagToId(nigori_tag)) | |
| 972 if (nigori_original.specifics.nigori.sync_tab_favicons): | |
| 973 return | |
| 974 nigori_new = copy.deepcopy(nigori_original) | |
| 975 nigori_new.specifics.nigori.sync_tab_favicons = True | |
| 976 self._SaveEntry(nigori_new) | |
| 977 | |
| 978 def TriggerCreateSyncedBookmarks(self): | |
| 979 """Create the Synced Bookmarks folder under the Bookmarks permanent item. | |
| 980 | |
| 981 Clients will then receive the Synced Bookmarks folder on future | |
| 982 GetUpdates, and new bookmarks can be added within the Synced Bookmarks | |
| 983 folder. | |
| 984 """ | |
| 985 | |
| 986 synced_bookmarks_spec, = [spec for spec in self._PERMANENT_ITEM_SPECS | |
| 987 if spec.name == "Synced Bookmarks"] | |
| 988 self._CreatePermanentItem(synced_bookmarks_spec) | |
| 989 | |
| 990 def TriggerEnableKeystoreEncryption(self): | |
| 991 """Create the keystore_encryption experiment entity and enable it. | |
| 992 | |
| 993 A new entity within the EXPERIMENTS datatype is created with the unique | |
| 994 client tag "keystore_encryption" if it doesn't already exist. The | |
| 995 keystore_encryption message is then filled with |enabled| set to true. | |
| 996 """ | |
| 997 | |
| 998 experiment_id = self._ServerTagToId("google_chrome_experiments") | |
| 999 keystore_encryption_id = self._ClientTagToId( | |
| 1000 EXPERIMENTS, | |
| 1001 KEYSTORE_ENCRYPTION_EXPERIMENT_TAG) | |
| 1002 keystore_entry = self._entries.get(keystore_encryption_id) | |
| 1003 if keystore_entry is None: | |
| 1004 keystore_entry = sync_pb2.SyncEntity() | |
| 1005 keystore_entry.id_string = keystore_encryption_id | |
| 1006 keystore_entry.name = "Keystore Encryption" | |
| 1007 keystore_entry.client_defined_unique_tag = ( | |
| 1008 KEYSTORE_ENCRYPTION_EXPERIMENT_TAG) | |
| 1009 keystore_entry.folder = False | |
| 1010 keystore_entry.deleted = False | |
| 1011 keystore_entry.specifics.CopyFrom(GetDefaultEntitySpecifics(EXPERIMENTS)) | |
| 1012 self._WritePosition(keystore_entry, experiment_id) | |
| 1013 | |
| 1014 keystore_entry.specifics.experiments.keystore_encryption.enabled = True | |
| 1015 | |
| 1016 self._SaveEntry(keystore_entry) | |
| 1017 | |
| 1018 def TriggerRotateKeystoreKeys(self): | |
| 1019 """Rotate the current set of keystore encryption keys. | |
| 1020 | |
| 1021 |self._keys| will have a new random encryption key appended to it. We touch | |
| 1022 the nigori node so that each client will receive the new encryption keys | |
| 1023 only once. | |
| 1024 """ | |
| 1025 | |
| 1026 # Add a new encryption key. | |
| 1027 self._keys.append(MakeNewKeystoreKey()) | |
| 1028 | |
| 1029 # Increment the nigori node's timestamp, so clients will get the new keys | |
| 1030 # on their next GetUpdates (any time the nigori node is sent back, we also | |
| 1031 # send back the keystore keys). | |
| 1032 nigori_tag = "google_chrome_nigori" | |
| 1033 self._SaveEntry(self._entries.get(self._ServerTagToId(nigori_tag))) | |
| 1034 | |
| 1035 def SetInducedError(self, error, error_frequency, | |
| 1036 sync_count_before_errors): | |
| 1037 self.induced_error = error | |
| 1038 self.induced_error_frequency = error_frequency | |
| 1039 self.sync_count_before_errors = sync_count_before_errors | |
| 1040 | |
| 1041 def GetInducedError(self): | |
| 1042 return self.induced_error | |
| 1043 | |
| 1044 | |
| 1045 class TestServer(object): | |
| 1046 """An object to handle requests for one (and only one) Chrome Sync account. | |
| 1047 | |
| 1048 TestServer consumes the sync command messages that are the outermost | |
| 1049 layers of the protocol, performs the corresponding actions on its | |
| 1050 SyncDataModel, and constructs an appropriate response message. | |
| 1051 """ | |
| 1052 | |
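| # Typical use (a sketch; the HTTP plumbing lives outside this class): | |
| # server = TestServer() | |
| # code, reply = server.HandleCommand(raw_url, raw_request_bytes) | |
| # # ... write |reply| back to the client with HTTP status |code|. | |
| | |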
| 1053 def __init__(self): | |
| 1054 # The implementation supports exactly one account; its state is here. | |
| 1055 self.account = SyncDataModel() | |
| 1056 self.account_lock = threading.Lock() | |
| 1057 # Clients that have talked to us: a map from the full client ID | |
| 1058 # to its nickname. | |
| 1059 self.clients = {} | |
| 1060 self.client_name_generator = ('+' * times + chr(c) | |
| 1061 for times in xrange(0, sys.maxint) for c in xrange(ord('A'), ord('Z') + 1)) | |
| 1062 self.transient_error = False | |
| 1063 self.sync_count = 0 | |
| 1064 | |
| 1065 def GetShortClientName(self, query): | |
| 1066 parsed = urlparse.parse_qs(query[query.find('?')+1:]) | |
| 1067 client_id = parsed.get('client_id') | |
| 1068 if not client_id: | |
| 1069 return '?' | |
| 1070 client_id = client_id[0] | |
| 1071 if client_id not in self.clients: | |
| 1072 self.clients[client_id] = self.client_name_generator.next() | |
| 1073 return self.clients[client_id] | |
| 1074 | |
| 1075 def CheckStoreBirthday(self, request): | |
| 1076 """Raises StoreBirthdayError if the request's birthday is a mismatch.""" | |
| 1077 if not request.HasField('store_birthday'): | |
| 1078 return | |
| 1079 if self.account.StoreBirthday() != request.store_birthday: | |
| 1080 raise StoreBirthdayError | |
| 1081 | |
| 1082 def CheckTransientError(self): | |
| 1083 """Raises TransientError if transient_error variable is set.""" | |
| 1084 if self.transient_error: | |
| 1085 raise TransientError | |
| 1086 | |
| 1087 def CheckSendError(self): | |
| 1088 """Raises SyncInducedError if needed.""" | |
| 1089 if (self.account.induced_error.error_type != | |
| 1090 sync_enums_pb2.SyncEnums.UNKNOWN): | |
| 1091 # ERROR_FREQUENCY_ALWAYS means every request returns the given error. | |
| 1092 if self.account.induced_error_frequency == ERROR_FREQUENCY_ALWAYS: | |
| 1093 raise SyncInducedError | |
| 1094 # ERROR_FREQUENCY_TWO_THIRDS means the FIRST two of every three | |
| 1095 # requests return an error. Don't switch the order of failures: there | |
| 1096 # are test cases that rely on the first two being the failures rather | |
| 1097 # than the last two. | |
| 1098 elif (self.account.induced_error_frequency == | |
| 1099 ERROR_FREQUENCY_TWO_THIRDS): | |
| 1100 if (((self.sync_count - | |
| 1101 self.account.sync_count_before_errors) % 3) != 0): | |
| 1102 raise SyncInducedError | |
| 1103 else: | |
| 1104 raise InducedErrorFrequencyNotDefined | |
| 1105 | |
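| # E.g. if sync_count_before_errors == 10, requests 11 and 12 raise | |
| # ((11 - 10) % 3 == 1 and (12 - 10) % 3 == 2) while request 13 succeeds | |
| # ((13 - 10) % 3 == 0); the pattern then repeats. | |
| | |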
| 1106 def HandleMigrate(self, path): | |
| 1107 query = urlparse.urlparse(path)[4] | |
| 1108 code = 200 | |
| 1109 self.account_lock.acquire() | |
| 1110 try: | |
| 1111 datatypes = [DataTypeStringToSyncTypeLoose(x) | |
| 1112 for x in urlparse.parse_qs(query).get('type',[])] | |
| 1113 if datatypes: | |
| 1114 self.account.TriggerMigration(datatypes) | |
| 1115 response = 'Migrated datatypes %s' % ( | |
| 1116 ' and '.join(SyncTypeToString(x).upper() for x in datatypes)) | |
| 1117 else: | |
| 1118 response = 'Please specify one or more <i>type=name</i> parameters' | |
| 1119 code = 400 | |
| 1120 except DataTypeIdNotRecognized, error: | |
| 1121 response = 'Could not interpret datatype name' | |
| 1122 code = 400 | |
| 1123 finally: | |
| 1124 self.account_lock.release() | |
| 1125 return (code, '<html><title>Migration: %d</title><H1>%d %s</H1></html>' % | |
| 1126 (code, code, response)) | |
| 1127 | |
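| # For example, a request like <base>/migrate?type=bookmarks&type=preferences | |
| # (path prefix illustrative) migrates BOOKMARK and PREFERENCE and reports | |
| # 'Migrated datatypes BOOKMARK and PREFERENCE'. | |
| | |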
| 1128 def HandleSetInducedError(self, path): | |
| 1129 query = urlparse.urlparse(path)[4] | |
| 1130 self.account_lock.acquire() | |
| 1131 code = 200 | |
| 1132 response = 'Success' | |
| 1133 error = sync_pb2.ClientToServerResponse.Error() | |
| 1134 try: | |
| 1135 error_type = urlparse.parse_qs(query)['error'] | |
| 1136 action = urlparse.parse_qs(query)['action'] | |
| 1137 error.error_type = int(error_type[0]) | |
| 1138 error.action = int(action[0]) | |
| 1139 try: | |
| 1140 error.url = (urlparse.parse_qs(query)['url'])[0] | |
| 1141 except KeyError: | |
| 1142 error.url = '' | |
| 1143 try: | |
| 1144 error.error_description = ( | |
| 1145 (urlparse.parse_qs(query)['error_description'])[0]) | |
| 1146 except KeyError: | |
| 1147 error.error_description = '' | |
| 1148 try: | |
| 1149 error_frequency = int((urlparse.parse_qs(query)['frequency'])[0]) | |
| 1150 except KeyError: | |
| 1151 error_frequency = ERROR_FREQUENCY_ALWAYS | |
| 1152 self.account.SetInducedError(error, error_frequency, self.sync_count) | |
| 1153 response = ('Error = %d, action = %d, url = %s, description = %s' % | |
| 1154 (error.error_type, error.action, | |
| 1155 error.url, | |
| 1156 error.error_description)) | |
| 1157 except (KeyError, ValueError): | |
| 1158 response = 'Could not parse url' | |
| 1159 code = 400 | |
| 1160 finally: | |
| 1161 self.account_lock.release() | |
| 1162 return (code, '<html><title>SetError: %d</title><H1>%d %s</H1></html>' % | |
| 1163 (code, code, response)) | |
| 1164 | |
| 1165 def HandleCreateBirthdayError(self): | |
| 1166 self.account.ResetStoreBirthday() | |
| 1167 return ( | |
| 1168 200, | |
| 1169 '<html><title>Birthday error</title><H1>Birthday error</H1></html>') | |
| 1170 | |
| 1171 def HandleSetTransientError(self): | |
| 1172 self.transient_error = True | |
| 1173 return ( | |
| 1174 200, | |
| 1175 '<html><title>Transient error</title><H1>Transient error</H1></html>') | |
| 1176 | |
| 1177 def HandleSetSyncTabFavicons(self): | |
| 1178 """Set 'sync_tab_favicons' field of the nigori node for this account.""" | |
| 1179 self.account.TriggerSyncTabFavicons() | |
| 1180 return ( | |
| 1181 200, | |
| 1182 '<html><title>Tab Favicons</title><H1>Tab Favicons</H1></html>') | |
| 1183 | |
| 1184 def HandleCreateSyncedBookmarks(self): | |
| 1185 """Create the Synced Bookmarks folder under Bookmarks.""" | |
| 1186 self.account.TriggerCreateSyncedBookmarks() | |
| 1187 return ( | |
| 1188 200, | |
| 1189 '<html><title>Synced Bookmarks</title><H1>Synced Bookmarks</H1></html>') | |
| 1190 | |
| 1191 def HandleEnableKeystoreEncryption(self): | |
| 1192 """Enables the keystore encryption experiment.""" | |
| 1193 self.account.TriggerEnableKeystoreEncryption() | |
| 1194 return ( | |
| 1195 200, | |
| 1196 '<html><title>Enable Keystore Encryption</title>' | |
| 1197 '<H1>Enable Keystore Encryption</H1></html>') | |
| 1198 | |
| 1199 def HandleRotateKeystoreKeys(self): | |
| 1200 """Rotate the keystore encryption keys.""" | |
| 1201 self.account.TriggerRotateKeystoreKeys() | |
| 1202 return ( | |
| 1203 200, | |
| 1204 '<html><title>Rotate Keystore Keys</title>' | |
| 1205 '<H1>Rotate Keystore Keys</H1></html>') | |
| 1206 | |
| 1207 def HandleCommand(self, query, raw_request): | |
| 1208 """Decode and handle a sync command from a raw input of bytes. | |
| 1209 | |
| 1210 This is the main entry point for this class. It is safe to call this | |
| 1211 method from multiple threads. | |
| 1212 | |
| 1213 Args: | |
| 1214 raw_request: An iterable byte sequence to be interpreted as a sync | |
| 1215 protocol command. | |
| 1216 Returns: | |
| 1217 A tuple (response_code, raw_response); the first value is an HTTP | |
| 1218 result code, while the second value is a string of bytes which is the | |
| 1219 serialized reply to the command. | |
| 1220 """ | |
| 1221 self.account_lock.acquire() | |
| 1222 self.sync_count += 1 | |
| 1223 def print_context(direction): | |
| 1224 print '[Client %s %s %s.py]' % (self.GetShortClientName(query), direction, | |
| 1225 __name__), | |
| 1226 | |
| 1227 try: | |
| 1228 request = sync_pb2.ClientToServerMessage() | |
| 1229 request.MergeFromString(raw_request) | |
| 1230 contents = request.message_contents | |
| 1231 | |
| 1232 response = sync_pb2.ClientToServerResponse() | |
| 1233 response.error_code = sync_enums_pb2.SyncEnums.SUCCESS | |
| 1234 self.CheckStoreBirthday(request) | |
| 1235 response.store_birthday = self.account.store_birthday | |
| 1236 self.CheckTransientError() | |
| 1237 self.CheckSendError() | |
| 1238 | |
| 1239 print_context('->') | |
| 1240 | |
| 1241 if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE: | |
| 1242 print 'Authenticate' | |
| 1243 # We accept any authentication token, and support only one account. | |
| 1244 # TODO(nick): Mock out the GAIA authentication as well; hook up here. | |
| 1245 response.authenticate.user.email = 'syncjuser@chromium' | |
| 1246 response.authenticate.user.display_name = 'Sync J User' | |
| 1247 elif contents == sync_pb2.ClientToServerMessage.COMMIT: | |
| 1248 print 'Commit %d item(s)' % len(request.commit.entries) | |
| 1249 self.HandleCommit(request.commit, response.commit) | |
| 1250 elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES: | |
| 1251 print 'GetUpdates', | |
| 1252 self.HandleGetUpdates(request.get_updates, response.get_updates) | |
| 1253 print_context('<-') | |
| 1254 print '%d update(s)' % len(response.get_updates.entries) | |
| 1255 else: | |
| 1256 print 'Unrecognizable sync request!' | |
| 1257 return (400, None) # Bad request. | |
| 1258 return (200, response.SerializeToString()) | |
| 1259 except MigrationDoneError, error: | |
| 1260 print_context('<-') | |
| 1261 print 'MIGRATION_DONE: <%s>' % (ShortDatatypeListSummary(error.datatypes)) | |
| 1262 response = sync_pb2.ClientToServerResponse() | |
| 1263 response.store_birthday = self.account.store_birthday | |
| 1264 response.error_code = sync_enums_pb2.SyncEnums.MIGRATION_DONE | |
| 1265 response.migrated_data_type_id[:] = [ | |
| 1266 SyncTypeToProtocolDataTypeId(x) for x in error.datatypes] | |
| 1267 return (200, response.SerializeToString()) | |
| 1268 except StoreBirthdayError, error: | |
| 1269 print_context('<-') | |
| 1270 print 'NOT_MY_BIRTHDAY' | |
| 1271 response = sync_pb2.ClientToServerResponse() | |
| 1272 response.store_birthday = self.account.store_birthday | |
| 1273 response.error_code = sync_enums_pb2.SyncEnums.NOT_MY_BIRTHDAY | |
| 1274 return (200, response.SerializeToString()) | |
| 1275 except TransientError, error: | |
| 1276 # TODO: This is deprecated; remove it once dependent test cases are removed. | |
| 1277 print_context('<-') | |
| 1278 print 'TRANSIENT_ERROR' | |
| 1279 response.store_birthday = self.account.store_birthday | |
| 1280 response.error_code = sync_enums_pb2.SyncEnums.TRANSIENT_ERROR | |
| 1281 return (200, response.SerializeToString()) | |
| 1282 except SyncInducedError, error: | |
| 1283 print_context('<-') | |
| 1284 print 'INDUCED_ERROR' | |
| 1285 response.store_birthday = self.account.store_birthday | |
| 1286 error = self.account.GetInducedError() | |
| 1287 response.error.error_type = error.error_type | |
| 1288 response.error.url = error.url | |
| 1289 response.error.error_description = error.error_description | |
| 1290 response.error.action = error.action | |
| 1291 return (200, response.SerializeToString()) | |
| 1292 finally: | |
| 1293 self.account_lock.release() | |
| 1294 | |
| 1295 def HandleCommit(self, commit_message, commit_response): | |
| 1296 """Respond to a Commit request by updating the user's account state. | |
| 1297 | |
| 1298 Commit attempts stop after the first error, returning a CONFLICT result | |
| 1299 for any unattempted entries. | |
| 1300 | |
| 1301 Args: | |
| 1302 commit_message: A sync_pb.CommitMessage protobuf holding the content | |
| 1303 of the client's request. | |
| 1304 commit_response: A sync_pb.CommitResponse protobuf into which a reply | |
| 1305 to the client request will be written. | |
| 1306 """ | |
| 1307 commit_response.SetInParent() | |
| 1308 batch_failure = False | |
| 1309 session = {} # Tracks ID renaming during the commit operation. | |
| 1310 guid = commit_message.cache_guid | |
| 1311 | |
| 1312 self.account.ValidateCommitEntries(commit_message.entries) | |
| 1313 | |
| 1314 for entry in commit_message.entries: | |
| 1315 server_entry = None | |
| 1316 if not batch_failure: | |
| 1317 # Try to commit the change to the account. | |
| 1318 server_entry = self.account.CommitEntry(entry, guid, session) | |
| 1319 | |
| 1320 # An entryresponse is returned in both success and failure cases. | |
| 1321 reply = commit_response.entryresponse.add() | |
| 1322 if not server_entry: | |
| 1323 reply.response_type = sync_pb2.CommitResponse.CONFLICT | |
| 1324 reply.error_message = 'Conflict.' | |
| 1325 batch_failure = True # One failure halts the batch. | |
| 1326 else: | |
| 1327 reply.response_type = sync_pb2.CommitResponse.SUCCESS | |
| 1328 # These are the properties that the server is allowed to override | |
| 1329 # during commit; the client wants to know their values at the end | |
| 1330 # of the operation. | |
| 1331 reply.id_string = server_entry.id_string | |
| 1332 if not server_entry.deleted: | |
| 1333 # Note: the production server doesn't actually send the | |
| 1334 # parent_id_string on commit responses, so we don't either. | |
| 1335 reply.position_in_parent = server_entry.position_in_parent | |
| 1336 reply.version = server_entry.version | |
| 1337 reply.name = server_entry.name | |
| 1338 reply.non_unique_name = server_entry.non_unique_name | |
| 1339 else: | |
| 1340 reply.version = entry.version + 1 | |
| 1341 | |
| 1342 def HandleGetUpdates(self, update_request, update_response): | |
| 1343 """Respond to a GetUpdates request by querying the user's account. | |
| 1344 | |
| 1345 Args: | |
| 1346 update_request: A sync_pb.GetUpdatesMessage protobuf holding the content | |
| 1347 of the client's request. | |
| 1348 update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply | |
| 1349 to the client request will be written. | |
| 1350 """ | |
| 1351 update_response.SetInParent() | |
| 1352 update_sieve = UpdateSieve(update_request, self.account.migration_history) | |
| 1353 | |
| 1354 print CallerInfoToString(update_request.caller_info.source), | |
| 1355 print update_sieve.SummarizeRequest() | |
| 1356 | |
| 1357 update_sieve.CheckMigrationState() | |
| 1358 | |
| 1359 new_timestamp, entries, remaining = self.account.GetChanges(update_sieve) | |
| 1360 | |
| 1361 update_response.changes_remaining = remaining | |
| 1362 sending_nigori_node = False | |
| 1363 for entry in entries: | |
| 1364 if entry.name == 'Nigori': | |
| 1365 sending_nigori_node = True | |
| 1366 reply = update_response.entries.add() | |
| 1367 reply.CopyFrom(entry) | |
| 1368 update_sieve.SaveProgress(new_timestamp, update_response) | |
| 1369 | |
| 1370 if update_request.need_encryption_key or sending_nigori_node: | |
| 1371 update_response.encryption_keys.extend(self.account.GetKeystoreKeys()) | |