OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/python2.4 |
| 2 # Copyright (c) 2010 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. |
| 5 |
| 6 """An implementation of the server side of the Chromium sync protocol. |
| 7 |
| 8 The details of the protocol are described mostly by comments in the protocol |
| 9 buffer definition at chrome/browser/sync/protocol/sync.proto. |
| 10 """ |
| 11 |
| 12 import operator |
| 13 import random |
| 14 import threading |
| 15 |
| 16 import autofill_specifics_pb2 |
| 17 import bookmark_specifics_pb2 |
| 18 import preference_specifics_pb2 |
| 19 import theme_specifics_pb2 |
| 20 import typed_url_specifics_pb2 |
| 21 import sync_pb2 |
| 22 |
# An enumeration of the various kinds of data that can be synced.
# Over the wire, this enumeration is not used: a sync object's type is
# inferred by which EntitySpecifics extension it has. But in the context
# of a program, it is useful to have an enumeration.
# NOTE: the chained tuple assignment below binds TOP_LEVEL..THEME to the
# integers 0..5 while simultaneously collecting them into ALL_TYPES.
ALL_TYPES = (
    TOP_LEVEL,  # The type of the 'Google Chrome' folder.
    BOOKMARK,
    AUTOFILL,
    TYPED_URL,
    PREFERENCE,
    # PASSWORD,  # Disabled since there's no specifics proto.
    # SESSION,
    THEME) = range(6)

# Given a sync type from ALL_TYPES, find the extension token corresponding
# to that datatype. Note that TOP_LEVEL has no such token.
SYNC_TYPE_TO_EXTENSION = {
    BOOKMARK: bookmark_specifics_pb2.bookmark,
    AUTOFILL: autofill_specifics_pb2.autofill,
    TYPED_URL: typed_url_specifics_pb2.typed_url,
    PREFERENCE: preference_specifics_pb2.preference,
    # PASSWORD: password_specifics_pb2.password,  # Disabled
    # SESSION: session_specifics_pb2.session,  # Disabled
    THEME: theme_specifics_pb2.theme,
    }

# The parent ID used to indicate a top-level node.
ROOT_ID = '0'
| 51 |
def GetEntryType(entry):
  """Extract the sync type from a SyncEntity.

  Args:
    entry: A SyncEntity protobuf object whose type to determine.
  Returns:
    A value from ALL_TYPES if the entry's type can be determined, or None
    if the type cannot be determined.
  Raises:
    ValueError: If the entry has more than one specifics extension set.
  """
  # The top-level folder is identified by its server tag, not a specifics
  # extension.
  if entry.server_defined_unique_tag == 'google_chrome':
    return TOP_LEVEL
  entry_types = GetEntryTypesFromSpecifics(entry.specifics)
  if not entry_types:
    return None
  # It is presupposed that the entry has at most one specifics extension
  # present. If there is more than one, either there's a bug, or else
  # the caller should use GetEntryTypes.
  if len(entry_types) > 1:
    # Raising a string ('raise "..."') is deprecated and does not produce a
    # catchable exception class; use a real exception type instead.
    raise ValueError('GetEntryType called with multiple extensions present.')
  return entry_types[0]
| 72 |
def GetEntryTypesFromSpecifics(specifics):
  """Determine the sync types indicated by an EntitySpecifics's extension(s).

  If the specifics have more than one recognized extension (as commonly
  happens with the requested_types field of GetUpdatesMessage), all of the
  corresponding types are returned. Callers must handle the possibility of
  the returned value having more than one item.

  Args:
    specifics: A EntitySpecifics protobuf message whose extensions to
      enumerate.
  Returns:
    A list of the sync types (values from ALL_TYPES) associated with each
    recognized extension of the specifics message.
  """
  return [data_type
          for data_type, extension in SYNC_TYPE_TO_EXTENSION.iteritems()
          if specifics.HasExtension(extension)]
| 93 |
def GetRequestedTypes(get_updates_message):
  """Determine the sync types requested by a client GetUpdates operation."""
  requested = GetEntryTypesFromSpecifics(get_updates_message.requested_types)
  # A client asking for any data type implicitly gets the top-level folder
  # type as well; an empty request stays empty.
  if requested:
    requested.append(TOP_LEVEL)
  return requested
| 101 |
def GetDefaultEntitySpecifics(data_type):
  """Get an EntitySpecifics having a sync type's default extension value.

  Args:
    data_type: A value from ALL_TYPES.
  Returns:
    An EntitySpecifics message; if data_type has a known extension token,
    that extension is present (with default contents), otherwise the
    message is empty.
  """
  specifics = sync_pb2.EntitySpecifics()
  extension_handle = SYNC_TYPE_TO_EXTENSION.get(data_type)
  if extension_handle is not None:
    # Touch the extension field so that it is marked present and gets
    # serialized with its default value.
    specifics.Extensions[extension_handle].SetInParent()
  return specifics
| 110 |
def DeepCopyOfProto(proto):
  """Return a deep copy of a protocol buffer.

  Args:
    proto: The protobuf message to duplicate.
  Returns:
    A freshly constructed message of the same type, populated from the
    original via MergeFrom.
  """
  duplicate = type(proto)()
  duplicate.MergeFrom(proto)
  return duplicate
| 116 |
| 117 |
class PermanentItem(object):
  """A specification of one server-created permanent item.

  Attributes:
    tag: A known-to-the-client value that uniquely identifies a
      server-created permanent item.
    name: The human-readable display name for this item.
    parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates
      a top-level item. Otherwise, this must be the tag value of some other
      server-created permanent item.
    sync_type: A value from ALL_TYPES, giving the datatype of this permanent
      item. This controls which types of client GetUpdates requests will
      cause the permanent item to be created and returned.
  """

  def __init__(self, tag, name, parent_tag, sync_type):
    self.tag = tag
    self.parent_tag = parent_tag
    self.name = name
    self.sync_type = sync_type
| 138 |
class SyncDataModel(object):
  """Models the account state of one sync user.

  Holds the server-side change log for a single account: every entity ever
  saved (deleted items remain as tombstones), a monotonically increasing
  version counter, and the account's store birthday identifier.
  """

  # Upper bound on the number of entries returned by one call to
  # GetChangesFromTimestamp; mirrors the batching behavior of the
  # production sync server's RPC layer.
  _BATCH_SIZE = 100

  # Specify all the permanent items that a model might need.
  _PERMANENT_ITEM_SPECS = [
      PermanentItem('google_chrome', name='Google Chrome',
                    parent_tag=ROOT_ID, sync_type=TOP_LEVEL),
      PermanentItem('google_chrome_bookmarks', name='Bookmarks',
                    parent_tag='google_chrome', sync_type=BOOKMARK),
      PermanentItem('bookmark_bar', name='Bookmark Bar',
                    parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
      PermanentItem('other_bookmarks', name='Other Bookmarks',
                    parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
      PermanentItem('google_chrome_preferences', name='Preferences',
                    parent_tag='google_chrome', sync_type=PREFERENCE),
      PermanentItem('google_chrome_autofill', name='Autofill',
                    parent_tag='google_chrome', sync_type=AUTOFILL),
      # TODO(nick): Disabled since the protocol does not support them yet.
      # PermanentItem('google_chrome_passwords', name='Passwords',
      #               parent_tag='google_chrome', sync_type=PASSWORD),
      # PermanentItem('google_chrome_sessions', name='Sessions',
      #               parent_tag='google_chrome', SESSION),
      PermanentItem('google_chrome_themes', name='Themes',
                    parent_tag='google_chrome', sync_type=THEME),
      PermanentItem('google_chrome_typed_urls', name='Typed URLs',
                    parent_tag='google_chrome', sync_type=TYPED_URL),
      ]

  def __init__(self):
    # Monotonically increasing version number. The next object change will
    # take on this value + 1.
    self._version = 0

    # The change log: maps server ID strings to the most recently saved
    # SyncEntity proto for that ID. Tombstones for deleted items stay here
    # forever.
    self._entries = {}

    # TODO(nick): uuid.uuid1() is better, but python 2.5 only.
    self.store_birthday = '%0.30f' % random.random()

  def _SaveEntry(self, entry):
    """Insert or update an entry in the change log, and give it a new version.

    The ID fields of this entry are assumed to be valid server IDs. This
    entry will be updated with a new version number and sync_timestamp.

    Args:
      entry: The entry to be added or updated.
    """
    self._version = self._version + 1
    entry.version = self._version
    # This server reports the version counter as the sync timestamp; the
    # two advance in lockstep.
    entry.sync_timestamp = self._version

    # Preserve the originator info, which the client is not required to send
    # when updating.
    base_entry = self._entries.get(entry.id_string)
    if base_entry:
      entry.originator_cache_guid = base_entry.originator_cache_guid
      entry.originator_client_item_id = base_entry.originator_client_item_id

    # Store a copy so later mutations of the caller's message cannot
    # corrupt the change log.
    self._entries[entry.id_string] = DeepCopyOfProto(entry)

  def _ServerTagToId(self, tag):
    """Determine the server ID from a server-unique tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      tag: The unique, known-to-the-client tag of a server-generated item.
    Returns:
      The server ID string. ROOT_ID (and empty/None tags) pass through
      unchanged.
    """
    if tag and tag != ROOT_ID:
      return '<server tag>%s' % tag
    else:
      return tag

  def _ClientTagToId(self, tag):
    """Determine the server ID from a client-unique tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      tag: The unique, opaque-to-the-server tag of a client-tagged item.
    Returns:
      The server ID string for the tagged item.
    """
    return '<client tag>%s' % tag

  def _ClientIdToId(self, client_guid, client_item_id):
    """Compute a unique server ID from a client-local ID tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      client_guid: A globally unique ID that identifies the client which
        created this item.
      client_item_id: An ID that uniquely identifies this item on the client
        which created it.
    Returns:
      The server ID string for the item.
    """
    # Using the client ID info is not required here (we could instead generate
    # a random ID), but it's useful for debugging.
    return '<server ID originally>%s/%s' % (client_guid, client_item_id)

  def _WritePosition(self, entry, parent_id, prev_id=None):
    """Convert from a relative position into an absolute, numeric position.

    Clients specify positions using the predecessor-based references; the
    server stores and reports item positions using sparse integer values.
    This method converts from the former to the latter.

    Args:
      entry: The entry for which to compute a position. Its ID field are
        assumed to be server IDs. This entry will have its parent_id_string
        and position_in_parent fields updated; its insert_after_item_id field
        will be cleared.
      parent_id: The ID of the entry intended as the new parent.
      prev_id: The ID of the entry intended as the new predecessor. If this
        is None, or an ID of an object which is not a child of the new parent,
        the entry will be positioned at the end (right) of the ordering. If
        the empty ID (''), this will be positioned at the front (left) of the
        ordering. Otherwise, the entry will be given a position_in_parent
        value placing it just after (to the right of) the new predecessor.
    """
    # Gap left around newly placed first/last items, so later inserts can
    # interpolate between existing positions without immediate collisions.
    PREFERRED_GAP = 2 ** 20
    # Compute values at the beginning or end.
    def ExtendRange(current_limit_entry, sign_multiplier):
      # If the current first/last item is the entry being positioned, keep
      # its position (step 0) rather than pushing it further outward.
      if current_limit_entry.id_string == entry.id_string:
        step = 0
      else:
        step = sign_multiplier * PREFERRED_GAP
      return current_limit_entry.position_in_parent + step

    # Live (non-tombstone) children of the target parent, ordered by their
    # current absolute position.
    siblings = [x for x in self._entries.values()
                if x.parent_id_string == parent_id and not x.deleted]
    siblings = sorted(siblings, key=operator.attrgetter('position_in_parent'))
    if prev_id == entry.id_string:
      # An item cannot be its own predecessor; treat as "insert first".
      prev_id = ''
    if not siblings:
      # First item in this container; start in the middle.
      entry.position_in_parent = 0
    elif prev_id == '':
      # A special value in the protocol. Insert at first position.
      entry.position_in_parent = ExtendRange(siblings[0], -1)
    else:
      # Consider items along with their successors.
      for a, b in zip(siblings, siblings[1:]):
        if a.id_string != prev_id:
          continue
        elif b.id_string == entry.id_string:
          # We're already in place; don't change anything.
          entry.position_in_parent = b.position_in_parent
        else:
          # Interpolate new position between two others.
          # Integer division (Python 2 '/') keeps positions integral; the
          # 7:1 weighting biases the new position toward the predecessor.
          entry.position_in_parent = (
              a.position_in_parent * 7 + b.position_in_parent) / 8
        break
      else:
        # Insert at end. Includes the case where prev_id is None.
        # (This for/else arm runs only when no 'break' fired, i.e. the
        # predecessor wasn't found among the pairs or is the last child.)
        entry.position_in_parent = ExtendRange(siblings[-1], +1)

    entry.parent_id_string = parent_id
    entry.ClearField('insert_after_item_id')

  def _ItemExists(self, id_string):
    """Determine whether an item exists in the changelog."""
    return id_string in self._entries

  def _CreatePermanentItem(self, spec):
    """Create one permanent item from its spec, if it doesn't exist.

    The resulting item is added to the changelog.

    Args:
      spec: A PermanentItem object holding the properties of the item to
        create.
    """
    id_string = self._ServerTagToId(spec.tag)
    if self._ItemExists(id_string):
      return
    print 'Creating permanent item: %s' % spec.name
    entry = sync_pb2.SyncEntity()
    entry.id_string = id_string
    entry.non_unique_name = spec.name
    entry.name = spec.name
    entry.server_defined_unique_tag = spec.tag
    entry.folder = True
    entry.deleted = False
    entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type))
    # No predecessor given: the permanent item is appended at the end of
    # its parent's ordering.
    self._WritePosition(entry, self._ServerTagToId(spec.parent_tag))
    self._SaveEntry(entry)

  def _CreatePermanentItems(self, requested_types):
    """Ensure creation of all permanent items for a given set of sync types.

    Args:
      requested_types: A list of sync data types from ALL_TYPES.
        Permanent items of only these types will be created.
    """
    for spec in self._PERMANENT_ITEM_SPECS:
      if spec.sync_type in requested_types:
        self._CreatePermanentItem(spec)

  def GetChangesFromTimestamp(self, requested_types, timestamp):
    """Get entries which have changed since a given timestamp, oldest first.

    The returned entries are limited to being _BATCH_SIZE many. The entries
    are returned in strict version order.

    Args:
      requested_types: A list of sync data types from ALL_TYPES.
        Only items of these types will be retrieved; others will be filtered
        out.
      timestamp: A timestamp / version number. Only items that have changed
        more recently than this value will be retrieved; older items will
        be filtered out.
    Returns:
      A tuple of (version, entries). Version is a new timestamp value, which
      should be used as the starting point for the next query. Entries is the
      batch of entries meeting the current timestamp query.
    """
    if timestamp == 0:
      # A zero timestamp marks a first sync: lazily create the permanent
      # folders the client will need for the requested types.
      self._CreatePermanentItems(requested_types)
    change_log = sorted(self._entries.values(),
                        key=operator.attrgetter('version'))
    new_changes = [x for x in change_log if x.version > timestamp]
    # Pick batch_size new changes, and then filter them. This matches
    # the RPC behavior of the production sync server.
    batch = new_changes[:self._BATCH_SIZE]
    if not batch:
      # Client is up to date.
      return (timestamp, [])

    # Restrict batch to requested types. Tombstones are untyped
    # and will always get included.
    filtered = []
    for x in batch:
      if (GetEntryType(x) in requested_types) or x.deleted:
        filtered.append(DeepCopyOfProto(x))
    # The new client timestamp is the timestamp of the last item in the
    # batch, even if that item was filtered out.
    return (batch[-1].version, filtered)

  def _CheckVersionForCommit(self, entry):
    """Perform an optimistic concurrency check on the version number.

    Clients are only allowed to commit if they report having seen the most
    recent version of an object.

    Args:
      entry: A sync entity from the client. It is assumed that ID fields
        have been converted to server IDs.
    Returns:
      A boolean value indicating whether the client's version matches the
      newest server version for the given entry.
    """
    if entry.id_string in self._entries:
      # Recommitting over a tombstone is allowed at any version; otherwise
      # the client's version must match the server's exactly.
      if (self._entries[entry.id_string].version != entry.version and
          not self._entries[entry.id_string].deleted):
        # Version mismatch that is not a tombstone recreation.
        return False
    else:
      if entry.version != 0:
        # Edit to an item that does not exist.
        return False
    return True

  def _CheckParentIdForCommit(self, entry):
    """Check that the parent ID referenced in a SyncEntity actually exists.

    Args:
      entry: A sync entity from the client. It is assumed that ID fields
        have been converted to server IDs.
    Returns:
      A boolean value indicating whether the entity's parent ID is an object
      that actually exists (and is not deleted) in the current account state.
    """
    if entry.parent_id_string == ROOT_ID:
      # This is generally allowed.
      return True
    if entry.parent_id_string not in self._entries:
      print 'Warning: Client sent unknown ID. Should never happen.'
      return False
    if entry.parent_id_string == entry.id_string:
      print 'Warning: Client sent circular reference. Should never happen.'
      return False
    if self._entries[entry.parent_id_string].deleted:
      # This can happen in a race condition between two clients.
      return False
    if not self._entries[entry.parent_id_string].folder:
      print 'Warning: Client sent non-folder parent. Should never happen.'
      return False
    return True

  def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session):
    """Convert ID fields in a client sync entry to server IDs.

    A commit batch sent by a client may contain new items for which the
    server has not generated IDs yet. And within a commit batch, later
    items are allowed to refer to earlier items. This method will
    generate server IDs for new items, as well as rewrite references
    to items whose server IDs were generated earlier in the batch.

    Args:
      entry: The client sync entry to modify.
      cache_guid: The globally unique ID of the client that sent this
        commit request.
      commit_session: A dictionary mapping the original IDs to the new server
        IDs, for any items committed earlier in the batch.
    """
    if entry.version == 0:
      # Version 0 marks a brand-new item; it needs a server-assigned ID.
      if entry.HasField('client_defined_unique_tag'):
        # When present, this should determine the item's ID.
        new_id = self._ClientTagToId(entry.client_defined_unique_tag)
      else:
        new_id = self._ClientIdToId(cache_guid, entry.id_string)
        entry.originator_cache_guid = cache_guid
        entry.originator_client_item_id = entry.id_string
      commit_session[entry.id_string] = new_id  # Remember the remapping.
      entry.id_string = new_id
    # Rewrite references to items renamed earlier in this same batch.
    if entry.parent_id_string in commit_session:
      entry.parent_id_string = commit_session[entry.parent_id_string]
    if entry.insert_after_item_id in commit_session:
      entry.insert_after_item_id = commit_session[entry.insert_after_item_id]

  def CommitEntry(self, entry, cache_guid, commit_session):
    """Attempt to commit one entry to the user's account.

    Args:
      entry: A SyncEntity protobuf representing desired object changes.
      cache_guid: A string value uniquely identifying the client; this
        is used for ID generation and will determine the originator_cache_guid
        if the entry is new.
      commit_session: A dictionary mapping client IDs to server IDs for any
        objects committed earlier this session. If the entry gets a new ID
        during commit, the change will be recorded here.
    Returns:
      A SyncEntity reflecting the post-commit value of the entry, or None
      if the entry was not committed due to an error.
    """
    # Work on a copy so a rejected commit doesn't leave the caller's
    # message half-rewritten.
    entry = DeepCopyOfProto(entry)

    # Generate server IDs for this entry, and write generated server IDs
    # from earlier entries into the message's fields, as appropriate. The
    # ID generation state is stored in 'commit_session'.
    self._RewriteIdsAsServerIds(entry, cache_guid, commit_session)

    # Perform the optimistic concurrency check on the entry's version number.
    # Clients are not allowed to commit unless they indicate that they've seen
    # the most recent version of an object.
    if not self._CheckVersionForCommit(entry):
      return None

    # Check the validity of the parent ID; it must exist at this point.
    # TODO(nick): Implement cycle detection and resolution.
    if not self._CheckParentIdForCommit(entry):
      return None

    # At this point, the commit is definitely going to happen.

    # Deletion works by storing a limited record for an entry, called a
    # tombstone. A sync server must track deleted IDs forever, since it does
    # not keep track of client knowledge (there's no deletion ACK event).
    if entry.deleted:
      # Only the ID, version and deletion state are preserved on a tombstone.
      # TODO(nick): Does the production server not preserve the type? Not
      # doing so means that tombstones cannot be filtered based on
      # requested_types at GetUpdates time.
      tombstone = sync_pb2.SyncEntity()
      tombstone.id_string = entry.id_string
      tombstone.deleted = True
      tombstone.name = '' # 'name' is a required field; we're stuck with it.
      entry = tombstone
    else:
      # Comments in sync.proto detail how the representation of positional
      # ordering works: the 'insert_after_item_id' field specifies a
      # predecessor during Commit operations, but the 'position_in_parent'
      # field provides an absolute ordering in GetUpdates contexts. Here
      # we convert from the former to the latter. Specifically, we'll
      # generate a numeric position placing the item just after the object
      # identified by 'insert_after_item_id', and then clear the
      # 'insert_after_item_id' field so that it's not sent back to the client
      # during later GetUpdates requests.
      if entry.HasField('insert_after_item_id'):
        self._WritePosition(entry, entry.parent_id_string,
                            entry.insert_after_item_id)
      else:
        self._WritePosition(entry, entry.parent_id_string)

    # Preserve the originator info, which the client is not required to send
    # when updating.
    base_entry = self._entries.get(entry.id_string)
    if base_entry and not entry.HasField("originator_cache_guid"):
      entry.originator_cache_guid = base_entry.originator_cache_guid
      entry.originator_client_item_id = base_entry.originator_client_item_id

    # Commit the change. This also updates the version number.
    self._SaveEntry(entry)
    # TODO(nick): Handle recursive deletion.
    return entry
| 537 |
class TestServer(object):
  """An object to handle requests for one (and only one) Chrome Sync account.

  TestServer consumes the sync command messages that are the outermost
  layers of the protocol, performs the corresponding actions on its
  SyncDataModel, and constructs an appropriate response message.
  """

  def __init__(self):
    # The implementation supports exactly one account; its state is here.
    self.account = SyncDataModel()
    # Serializes all command handling; HandleCommand may be invoked from
    # multiple threads.
    self.account_lock = threading.Lock()

  def HandleCommand(self, raw_request):
    """Decode and handle a sync command from a raw input of bytes.

    This is the main entry point for this class. It is safe to call this
    method from multiple threads.

    Args:
      raw_request: An iterable byte sequence to be interpreted as a sync
        protocol command.
    Returns:
      A tuple (response_code, raw_response); the first value is an HTTP
      result code, while the second value is a string of bytes which is the
      serialized reply to the command.
    """
    self.account_lock.acquire()
    try:
      request = sync_pb2.ClientToServerMessage()
      request.MergeFromString(raw_request)
      contents = request.message_contents

      # Every reply carries a SUCCESS code and the account's store birthday;
      # the command-specific payload is filled in below.
      response = sync_pb2.ClientToServerResponse()
      response.error_code = sync_pb2.ClientToServerResponse.SUCCESS
      response.store_birthday = self.account.store_birthday

      if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE:
        print 'Authenticate'
        # We accept any authentication token, and support only one account.
        # TODO(nick): Mock out the GAIA authentication as well; hook up here.
        response.authenticate.user.email = 'syncjuser@chromium'
        response.authenticate.user.display_name = 'Sync J User'
      elif contents == sync_pb2.ClientToServerMessage.COMMIT:
        print 'Commit'
        self.HandleCommit(request.commit, response.commit)
      elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES:
        print ('GetUpdates from timestamp %d' %
               request.get_updates.from_timestamp)
        self.HandleGetUpdates(request.get_updates, response.get_updates)
      # Unrecognized command types fall through and return a bare SUCCESS
      # response with no command-specific payload.
      return (200, response.SerializeToString())
    finally:
      # Release the lock even when request parsing or handling raises.
      self.account_lock.release()

  def HandleCommit(self, commit_message, commit_response):
    """Respond to a Commit request by updating the user's account state.

    Commit attempts stop after the first error, returning a CONFLICT result
    for any unattempted entries.

    Args:
      commit_message: A sync_pb.CommitMessage protobuf holding the content
        of the client's request.
      commit_response: A sync_pb.CommitResponse protobuf into which a reply
        to the client request will be written.
    """
    # Mark the response submessage present even when there are no entries.
    commit_response.SetInParent()
    batch_failure = False
    session = {}  # Tracks ID renaming during the commit operation.
    guid = commit_message.cache_guid
    for entry in commit_message.entries:
      server_entry = None
      if not batch_failure:
        # Try to commit the change to the account.
        server_entry = self.account.CommitEntry(entry, guid, session)

      # An entryresponse is returned in both success and failure cases.
      reply = commit_response.entryresponse.add()
      if not server_entry:
        reply.response_type = sync_pb2.CommitResponse.CONFLICT
        reply.error_message = 'Conflict.'
        batch_failure = True  # One failure halts the batch.
      else:
        reply.response_type = sync_pb2.CommitResponse.SUCCESS
        # These are the properties that the server is allowed to override
        # during commit; the client wants to know their values at the end
        # of the operation.
        reply.id_string = server_entry.id_string
        if not server_entry.deleted:
          reply.parent_id_string = server_entry.parent_id_string
          reply.position_in_parent = server_entry.position_in_parent
          reply.version = server_entry.version
          reply.name = server_entry.name
          reply.non_unique_name = server_entry.non_unique_name

  def HandleGetUpdates(self, update_request, update_response):
    """Respond to a GetUpdates request by querying the user's account.

    Args:
      update_request: A sync_pb.GetUpdatesMessage protobuf holding the content
        of the client's request.
      update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply
        to the client request will be written.
    """
    update_response.SetInParent()
    requested_types = GetRequestedTypes(update_request)
    new_timestamp, entries = self.account.GetChangesFromTimestamp(
        requested_types, update_request.from_timestamp)

    # If the client is up to date, we are careful not to set the
    # new_timestamp field.
    if new_timestamp != update_request.from_timestamp:
      update_response.new_timestamp = new_timestamp
    for e in entries:
      reply = update_response.entries.add()
      reply.CopyFrom(e)
OLD | NEW |