// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/engine/syncer_util.h"

#include <stdint.h>

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "base/base64.h"
#include "base/location.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"

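// Helpers used by the sync engine to reconcile downloaded updates with the
// local syncable::Directory: choosing which local entry an update should
// target, copying server fields onto that entry, applying decrypted updates
// to the local (user-visible) fields, and verifying update consistency.
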
namespace syncer {

using syncable::CHANGES_VERSION;
using syncable::Directory;
using syncable::Entry;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
using syncable::Id;

syncable::Id FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const sync_pb::SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has an entry for the tag already, ID is server style, matches.
    // 2) Client has an entry for the tag already, ID is server, doesn't match.
    // 3) Client has an entry for the tag already, ID is local (never matches).
    // 4) Client has no entry for the tag.

    // Case 1: We don't have to do anything since the update will work just
    // fine. The update will end up in the proper entry, via ID lookup.
    // Case 2: Happens very rarely due to lax enforcement of client tags on
    // the server, if two clients commit the same tag at the same time.
    // When this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3: We need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect conflict
    // resolution to occur.
    // Case 4: Perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.GetId().ServerKnows()) {
        if (local_entry.GetId() != update_id) {
          // Case 2.
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.GetId() < update_id) {
            // Signal an error; drop this update on the floor. Note that
            // we don't server delete the item, because we don't allow it to
            // exist locally at all. So the item will remain orphaned on
            // the server, and we won't pay attention to it.
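            // For example, if the local entry already has server ID "A" and
            // the update carries server ID "B" with A < B, the local item
            // (the lexically-least ID) wins and this update is ignored.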
            return syncable::Id();
          }
        }
        // Target this change to the existing local entry; later,
        // we'll change the ID of the local entry to update_id
        // if needed.
        return local_entry.GetId();
      } else {
        // Case 3: We have a local entry with the same client tag.
        // We should change the ID of the local entry to the server entry's ID.
        // This will result in a server ID with base version == 0, but that's
        // a legal state for an item with a client tag. By changing the ID,
        // the update will now be applied to local_entry.
        DCHECK(0 == local_entry.GetBaseVersion() ||
               CHANGES_VERSION == local_entry.GetBaseVersion());
        return local_entry.GetId();
      }
    }
  } else if (update.has_originator_cache_guid() &&
             update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough,
    // then the syncer might assume that it was never committed.
    // The server will track the client that sent up the original commit and
    // return this in a get updates response. When this matches a local
    // uncommitted item, we must mutate our local item and version to pick up
    // the committed version of the same item whose commit response was lost.
    // There is, however, still a race condition if the server has not
    // completed the commit by the time the syncer tries to get updates again.
    // To mitigate this, the server needs to time out in a reasonable span,
    // and our commit batches have to be small enough to process within our
    // HTTP response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server
    // id updated correctly. The server sends down a client ID
    // and a local (negative) id. If we have an entry by that
    // description, we should update the ID and version to the
    // server-side ones to avoid multiple copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response. Use
    // the local entry.
    if (local_entry.good() && !local_entry.GetIsDel()) {
      int64_t old_version = local_entry.GetBaseVersion();
      int64_t new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.GetIsUnsynced());

      // Just a quick sanity check.
      DCHECK(!local_entry.GetId().ServerKnows());

      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
               << update_id << " local id: " << local_entry.GetId()
               << " new version: " << new_version;

      return local_entry.GetId();
    }
  } else if (update.has_server_defined_unique_tag() &&
             !update.server_defined_unique_tag().empty()) {
    // The client creates type root folders with a local ID on demand when a
    // progress marker for the given type is initially set.
    // The server might also attempt to send a type root folder for the same
    // type (during the transition period until support for root folders is
    // removed for new client versions).
    // TODO(stanisc): crbug.com/438313: remove this once the transition to
    // implicit root folders on the server is done.
    syncable::Entry local_entry(trans, syncable::GET_BY_SERVER_TAG,
                                update.server_defined_unique_tag());
    if (local_entry.good() && !local_entry.GetId().ServerKnows()) {
      DCHECK(local_entry.GetId() != update_id);
      return local_entry.GetId();
    }
  }

  // Fallback: target an entry having the server ID, creating one if needed.
  return update_id;
}

UpdateAttemptResponse AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    Cryptographer* cryptographer) {
  CHECK(entry->good());
  if (!entry->GetIsUnappliedUpdate())
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->GetId();
  const sync_pb::EntitySpecifics& specifics = entry->GetServerSpecifics();
  ModelType type = GetModelTypeFromSpecifics(specifics);

  // Only apply updates that we can decrypt. If we can't decrypt the update, it
  // is likely because the passphrase has not arrived yet. Because the
  // passphrase may not arrive within this GetUpdates, we can't just return
  // conflict, or else we would try to perform normal conflict resolution
  // prematurely and the syncer might get stuck. As such, we return
  // CONFLICT_ENCRYPTION, which is treated as an unresolvable conflict. See the
  // description in syncer_types.h. This prevents any unsynced changes from
  // committing and postpones conflict resolution until all data can be
  // decrypted.
  if (specifics.has_encrypted() &&
      !cryptographer->CanDecrypt(specifics.encrypted())) {
    // We can't decrypt this node yet.
    DVLOG(1) << "Received an undecryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, returning conflict_encryption.";
    return CONFLICT_ENCRYPTION;
  } else if (specifics.has_password() &&
             entry->GetUniqueServerTag().empty()) {
    // Passwords use their own legacy encryption scheme.
    const sync_pb::PasswordSpecifics& password = specifics.password();
    if (!cryptographer->CanDecrypt(password.encrypted())) {
      DVLOG(1) << "Received an undecryptable password update, returning "
               << "conflict_encryption.";
      return CONFLICT_ENCRYPTION;
    }
  }

  if (!entry->GetServerIsDel()) {
    syncable::Id new_parent = entry->GetServerParentId();
    if (!new_parent.IsNull()) {
      // Perform this step only if the parent is specified.
      // An unset parent means that the implicit type root would be used.
      Entry parent(trans, GET_BY_ID, new_parent);
      // A note on non-directory parents:
      // We catch most unfixable tree invariant errors at update receipt time;
      // however, we deal with this case here because we may receive the child
      // first and then the illegal parent. Instead of dealing with it twice in
      // different ways, we deal with it once here to reduce the amount of code
      // and potential errors.
      if (!parent.good() || parent.GetIsDel() || !parent.GetIsDir()) {
        DVLOG(1) << "Entry has bad parent, returning conflict_hierarchy.";
        return CONFLICT_HIERARCHY;
      }
      if (entry->GetParentId() != new_parent) {
        if (!entry->GetIsDel() && !IsLegalNewParent(trans, id, new_parent)) {
          DVLOG(1) << "Not updating item " << id
                   << ", illegal new parent (would cause loop).";
          return CONFLICT_HIERARCHY;
        }
      }
    } else {
      // new_parent is unset.
      DCHECK(IsTypeWithClientGeneratedRoot(type));
    }
  } else if (entry->GetIsDir()) {
    Directory::Metahandles handles;
    trans->directory()->GetChildHandlesById(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with
      // them before we can process this change.
      DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT_HIERARCHY;
    }
  }

  if (entry->GetIsUnsynced()) {
    DVLOG(1) << "Skipping update, returning conflict for: " << id
             << " ; it's unsynced.";
    return CONFLICT_SIMPLE;
  }

  if (specifics.has_encrypted()) {
    DVLOG(2) << "Received a decryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  } else {
    DVLOG(2) << "Received an unencrypted "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  }

  UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}

std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
  if (!update.has_originator_cache_guid() ||
      !update.has_originator_client_item_id()) {
    LOG(ERROR) << "Update is missing requirements for bookmark position."
               << " This is a server bug.";
    return UniquePosition::RandomSuffix();
  }

  return syncable::GenerateSyncableBookmarkHash(
      update.originator_cache_guid(), update.originator_client_item_id());
}

UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
                                 const std::string& suffix) {
  DCHECK(UniquePosition::IsValidSuffix(suffix));
  if (!SyncerProtoUtil::ShouldMaintainPosition(update)) {
    return UniquePosition::CreateInvalid();
  } else if (update.has_unique_position()) {
    UniquePosition proto_position =
        UniquePosition::FromProto(update.unique_position());
    if (proto_position.IsValid()) {
      return proto_position;
    }
  }

  // There are two cases that can reach this point:
  // 1. The update did not include a unique_position.
  // 2. The update included a unique_position, but it is invalid (e.g. empty).
  // In both cases we synthesize a valid position below.
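  // For example, a legacy update that carries only position_in_parent == 42
  // (and no usable unique_position) ends up as
  // UniquePosition::FromInt64(42, suffix).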
  if (update.has_position_in_parent()) {
    return UniquePosition::FromInt64(update.position_in_parent(), suffix);
  } else {
    LOG(ERROR) << "No position information in update. This is a server bug.";
    return UniquePosition::FromInt64(0, suffix);
  }
}

namespace {

// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
                             const std::string& url,
                             const std::string& favicon_bytes,
                             syncable::ModelNeutralMutableEntry* local_entry) {
  // In the new-style protocol, the server no longer sends bookmark info for
  // the "google_chrome" folder. Mimic that here.
  if (singleton_tag == "google_chrome")
    return;
  sync_pb::EntitySpecifics pb;
  sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
  if (!url.empty())
    bookmark->set_url(url);
  if (!favicon_bytes.empty())
    bookmark->set_favicon(favicon_bytes);
  local_entry->PutServerSpecifics(pb);
}

void UpdateBookmarkPositioning(
    const sync_pb::SyncEntity& update,
    syncable::ModelNeutralMutableEntry* local_entry) {
  // Update our unique bookmark tag. In many cases this will be identical to
  // the tag we already have. However, clients that have recently upgraded to
  // versions that support unique positions will have incorrect tags. See the
  // v86 migration logic in directory_backing_store.cc for more information.
  //
  // Both the old and new values are unique to this element. Applying this
  // update will not risk the creation of conflicting unique tags.
  std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
  if (UniquePosition::IsValidSuffix(bookmark_tag)) {
    local_entry->PutUniqueBookmarkTag(bookmark_tag);
  }

  // Update our position.
  UniquePosition update_pos =
      GetUpdatePosition(update, local_entry->GetUniqueBookmarkTag());
  if (update_pos.IsValid()) {
    local_entry->PutServerUniquePosition(update_pos);
  }
}

}  // namespace

void UpdateServerFieldsFromUpdate(
    syncable::ModelNeutralMutableEntry* target,
    const sync_pb::SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->GetServerIsDel()) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions. For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // Mark the entry as an unapplied update first to ensure the deletion is
    // journaled.
    target->PutIsUnappliedUpdate(true);
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->PutServerIsDel(true);
    if (!target->GetUniqueClientTag().empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->PutServerVersion(0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
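      // For example, an item whose last known server version is 5 and whose
      // local base version is 7 gets server version max(5, 7) + 1 == 8.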
      target->PutServerVersion(
          std::max(target->GetServerVersion(), target->GetBaseVersion()) + 1);
    }
    return;
  }

  DCHECK_EQ(target->GetId(), SyncableIdFromProto(update.id_string()))
      << "ID Changing not supported here";
  if (SyncerProtoUtil::ShouldMaintainHierarchy(update)) {
    target->PutServerParentId(SyncableIdFromProto(update.parent_id_string()));
  } else {
    target->PutServerParentId(syncable::Id());
  }
  target->PutServerNonUniqueName(name);
  target->PutServerVersion(update.version());
  target->PutServerCtime(ProtoTimeToTime(update.ctime()));
  target->PutServerMtime(ProtoTimeToTime(update.mtime()));
  target->PutServerIsDir(IsFolder(update));
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->PutUniqueServerTag(tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->PutUniqueClientTag(tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK_NE(GetModelType(update), UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->PutServerSpecifics(update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  target->PutServerAttachmentMetadata(
      CreateAttachmentMetadata(update.attachment_id()));
  if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
    UpdateBookmarkPositioning(update, target);
  }

  // We only mark the entry as an unapplied update if its version is greater
  // than the local data. If we're processing the update that corresponds to
  // one of our own commits, we don't apply it, since only time differences
  // may remain.
  if (update.version() > target->GetBaseVersion()) {
    target->PutIsUnappliedUpdate(true);
  }
  DCHECK(!update.deleted());
  target->PutServerIsDel(false);
}

// Creates a new Entry iff no Entry exists with the given id.
void CreateNewEntry(syncable::ModelNeutralWriteTransaction* trans,
                    const syncable::Id& id) {
  syncable::Entry entry(trans, GET_BY_ID, id);
  if (!entry.good()) {
    syncable::ModelNeutralMutableEntry new_entry(
        trans, syncable::CREATE_NEW_UPDATE_ITEM, id);
  }
}

// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->GetIsUnsynced());
  DCHECK(entry->GetIsUnappliedUpdate());

  DVLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->PutSpecifics(entry->GetServerSpecifics());
  // Clear the previous server specifics now that we're applying successfully.
  entry->PutBaseServerSpecifics(sync_pb::EntitySpecifics());
  entry->PutIsDir(entry->GetServerIsDir());
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
  if (entry->GetServerIsDel()) {
    entry->PutIsDel(true);
  } else {
    entry->PutNonUniqueName(entry->GetServerNonUniqueName());
    entry->PutParentId(entry->GetServerParentId());
    entry->PutUniquePosition(entry->GetServerUniquePosition());
    entry->PutIsDel(false);
  }

  entry->PutCtime(entry->GetServerCtime());
  entry->PutMtime(entry->GetServerMtime());
  entry->PutBaseVersion(entry->GetServerVersion());
  entry->PutIsDel(entry->GetServerIsDel());
  entry->PutIsUnappliedUpdate(false);
  entry->PutAttachmentMetadata(entry->GetServerAttachmentMetadata());
}

VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
  syncable::Id id = entry->GetId();
  if (id == entry->GetParentId()) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
    // If the root becomes unsynced it can cause us problems.
    LOG(ERROR) << "Root item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->IsRoot()) {
    LOG(ERROR) << "Permanent item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->GetIsDel() && !entry->GetId().ServerKnows()) {
    // Drop deleted uncommitted entries.
    return VERIFY_UNSYNCABLE;
  }
  return VERIFY_OK;
}

void MarkDeletedChildrenSynced(
    syncable::Directory* dir,
    syncable::BaseWriteTransaction* trans,
    std::set<syncable::Id>* deleted_folders) {
  // There are two options here.
  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
  //    of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // #2 has a lower big-O cost, but writing code to limit the time spent inside
  // the transaction during each step is simpler with #1. Changing this
  // decision may be sensible if this code shows up in profiling.
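  // For example, if a synced deletion of folder F is recorded in
  // deleted_folders and we hold an unsynced, deleted item whose pre-delete
  // parent chain leads up to F, the walk below finds F and clears the item's
  // IS_UNSYNCED bit.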
  if (deleted_folders->empty())
    return;
  Directory::Metahandles handles;
  dir->GetUnsyncedMetaHandles(trans, &handles);
  if (handles.empty())
    return;
  Directory::Metahandles::iterator it;
  for (it = handles.begin(); it != handles.end(); ++it) {
    syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
    if (!entry.GetIsUnsynced() || !entry.GetIsDel())
      continue;
    syncable::Id id = entry.GetParentId();
    while (id != trans->root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.PutIsUnsynced(false);
        break;
      }
      Entry parent(trans, GET_BY_ID, id);
      if (!parent.good() || !parent.GetIsDel())
        break;
      id = parent.GetParentId();
    }
  }
}

VerifyResult VerifyNewEntry(
    const sync_pb::SyncEntity& update,
    syncable::Entry* target,
    const bool deleted) {
  if (target->good()) {
    // Not a new update.
    return VERIFY_UNDECIDED;
  }
  if (deleted) {
    // Deletion of an item we've never seen can be ignored.
    return VERIFY_SKIP;
  }

  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
    syncable::ModelNeutralWriteTransaction* trans,
    const sync_pb::SyncEntity& update,
    const bool deleted,
    const bool is_directory,
    ModelType model_type,
    syncable::ModelNeutralMutableEntry* target) {
  CHECK(target->good());
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us. Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->GetServerVersion() > 0) {
    // Then we've had an update for this entry before.
    if (is_directory != target->GetServerIsDir() ||
        model_type != target->GetServerModelType()) {
      if (target->GetIsDel()) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->GetId() == update_id) &&
        (target->GetServerIsDel() ||
         (!target->GetIsUnsynced() && target->GetIsDel() &&
          target->GetBaseVersion() > 0))) {
      // An undelete. The latter case in the condition above handles the
      // situation where the server did not send us an update after we
      // committed a delete, before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result = VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->GetBaseVersion() > 0) {
    // We've committed this update in the past.
    if (is_directory != target->GetIsDir() ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
      LOG(ERROR) << " Entry: " << *target;
      LOG(ERROR) << " Update: "
                 << SyncerProtoUtil::SyncEntityDebugString(update);
      return VERIFY_FAIL;
    }
    if (target->GetId() == update_id) {
      if (target->GetServerVersion() > update.version()) {
        LOG(WARNING) << "We've already seen a more recent version.";
        LOG(WARNING) << " Entry: " << *target;
        LOG(WARNING) << " Update: "
                     << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_SKIP;
      }
    }
  }
  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'.
VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
                            const sync_pb::SyncEntity& update,
                            syncable::ModelNeutralMutableEntry* target) {
  // TODO(nick): We hit this path for deleted items that the server tells us
  // to re-create; only deleted items with positive base versions will hit
  // this path. However, it's not clear how such an undeletion would actually
  // succeed on the server; in the protocol, a base version of 0 is required
  // to undelete an object. This codepath should be deprecated in favor of
  // client-tag style undeletion (where items go to version 0 when they're
  // deleted), or else removed entirely (if this type of undeletion is indeed
  // impossible).
  CHECK(target->good());
  DVLOG(1) << "Server update is attempting undelete. " << *target
           << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
  // Move the old one aside and start over. It's too tricky to get the old one
  // back into a state that would pass CheckTreeInvariants().
  if (target->GetIsDel()) {
    // The warning should fire only for client-tagged items, matching its
    // message; move-aside undeletion is the expected path for untagged items.
    if (!target->GetUniqueClientTag().empty())
      LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
    target->PutId(trans->directory()->NextId());
    target->PutUniqueClientTag(std::string());
    target->PutBaseVersion(CHANGES_VERSION);
    target->PutServerVersion(0);
    return VERIFY_SUCCESS;
  }
  if (update.version() < target->GetServerVersion()) {
    LOG(WARNING) << "Update older than current server version for "
                 << *target << " Update:"
                 << SyncerProtoUtil::SyncEntityDebugString(update);
    return VERIFY_SUCCESS;  // Expected in new sync protocol.
  }
  return VERIFY_UNDECIDED;
}

}  // namespace syncer