  1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
  2 // Use of this source code is governed by a BSD-style license that can be
  3 // found in the LICENSE file.
  4
  5 #include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
  6
  7 #include <string>
  8 #include <sstream>
  9 #include <vector>
 10
 11 #include "base/basictypes.h"
 12 #include "base/format_macros.h"
 13 #include "base/rand_util.h"
 14 #include "chrome/browser/sync/engine/conflict_resolution_view.h"
 15 #include "chrome/browser/sync/engine/syncer_util.h"
 16 #include "chrome/browser/sync/engine/update_applicator.h"
 17 #include "chrome/browser/sync/syncable/directory_manager.h"
 18
 19 namespace browser_sync {
 20
 21 using std::set;
 22 using std::string;
 23 using std::vector;
 24
 25 BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSetsCommand() {}
 26 BuildAndProcessConflictSetsCommand::~BuildAndProcessConflictSetsCommand() {}
 27
 28 void BuildAndProcessConflictSetsCommand::ModelChangingExecuteImpl(
 29     SyncerSession *session) {
idana  2009/09/10 05:44:37
nit: "SyncerSession *session" -> "SyncerSession* session"
 30   session->set_conflict_sets_built(BuildAndProcessConflictSets(session));
 31 }
 32
 33 bool BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSets(
 34     SyncerSession *session) {
idana  2009/09/10 05:44:37
nit: "SyncerSession *session" -> "SyncerSession* session"
 35   syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
 36   if (!dir.good())
 37     return false;
 38   bool had_single_direction_sets = false;
 39   {  // Scope for the transaction.
 40     syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
 41     ConflictResolutionView conflict_view(session);
 42     BuildConflictSets(&trans, &conflict_view);
 43     had_single_direction_sets =
 44         ProcessSingleDirectionConflictSets(&trans, session);
 45     // We applied some updates transactionally; let's try syncing again.
 46     if (had_single_direction_sets)
 47       return true;
 48   }
 49   return false;
 50 }
 51
 52 bool BuildAndProcessConflictSetsCommand::ProcessSingleDirectionConflictSets(
 53     syncable::WriteTransaction* trans, SyncerSession* const session) {
 54   bool rv = false;
 55   ConflictResolutionView conflict_view(session);
 56   set<ConflictSet*>::const_iterator all_sets_iterator;
 57   for (all_sets_iterator = conflict_view.ConflictSetsBegin();
 58        all_sets_iterator != conflict_view.ConflictSetsEnd(); ) {
 59     const ConflictSet* conflict_set = *all_sets_iterator;
 60     CHECK(conflict_set->size() >= 2);
 61     // We scan the set to see if it consists of changes of only one type.
 62     ConflictSet::const_iterator i;
 63     int unsynced_count = 0, unapplied_count = 0;
 64     for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
 65       syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
 66       CHECK(entry.good());
 67       if (entry.Get(syncable::IS_UNSYNCED))
 68         unsynced_count++;
 69       if (entry.Get(syncable::IS_UNAPPLIED_UPDATE))
 70         unapplied_count++;
 71     }
 72     if (conflict_set->size() == unsynced_count && 0 == unapplied_count) {
 73       LOG(INFO) << "Skipped transactional commit attempt.";
 74     } else if (conflict_set->size() == unapplied_count &&
 75                0 == unsynced_count &&
 76                ApplyUpdatesTransactionally(trans, conflict_set, session)) {
 77       rv = true;
 78     }
 79     ++all_sets_iterator;
 80   }
 81   return rv;
 82 }
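
The decision rule above is: a set consisting entirely of unsynced (local-only)
changes is skipped for now, a set consisting entirely of unapplied (server-only)
updates is a candidate for the transactional apply, and anything mixed is left
for ordinary conflict resolution. A minimal standalone sketch of that
classification, using a hypothetical ClassifyConflictSet helper and plain
counts instead of syncable entries (illustration only, not part of this CL):

    #include <cstddef>

    // Conflict sets always have size >= 2, so the counts below are never
    // all zero for a real set.
    enum SetKind { kMixed, kAllUnsynced, kAllUnapplied };

    SetKind ClassifyConflictSet(size_t set_size,
                                size_t unsynced_count,
                                size_t unapplied_count) {
      if (set_size == unsynced_count && 0 == unapplied_count)
        return kAllUnsynced;   // Local-only: skip the transactional commit.
      if (set_size == unapplied_count && 0 == unsynced_count)
        return kAllUnapplied;  // Server-only: try the transactional apply.
      return kMixed;
    }
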
 83
 84 namespace {
 85
 86 void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
 87                                      syncable::EntryKernel* backup) {
 88   CHECK(!entry->Get(syncable::IS_UNSYNCED)) << " Storing Rollback data for "
 89       "entry that's unsynced." << *entry;
 90   CHECK(entry->Get(syncable::IS_UNAPPLIED_UPDATE)) << " Storing Rollback data "
 91       "for entry that's not an unapplied update." << *entry;
 92   *backup = entry->GetKernelCopy();
 93 }
 94
 95 class UniqueNameGenerator {
 96  public:
 97   void Initialize() {
 98     // To avoid name collisions we prefix the names with hex data derived from
 99     // 64 bits of randomness.
100     int64 name_prefix = static_cast<int64>(base::RandUint64());
101     name_stem_ = StringPrintf("%0" PRId64 "x.", name_prefix);
102   }
103   string StringNameForEntry(const syncable::Entry& entry) {
104     CHECK(!name_stem_.empty());
105     std::stringstream rv;
106     rv << name_stem_ << entry.Get(syncable::ID);
107     return rv.str();
108   }
109   PathString PathStringNameForEntry(const syncable::Entry& entry) {
110     string name = StringNameForEntry(entry);
111     return PathString(name.begin(), name.end());
112   }
113
114  private:
115   string name_stem_;
116 };
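
The format string above expands to a decimal conversion followed by a literal
"x.", so the stem is actually random decimal digits plus "x." rather than the
hex prefix the comment describes; the generated names are still effectively
collision-free. For reference, a stem that really is zero-padded hex could look
like the sketch below, which assumes the PRIx64 macro from
base/format_macros.h and the same StringPrintf / base::RandUint64 helpers used
above (illustration only, not part of this CL):

    // Hypothetical variant: a 16-digit hex stem such as "00c0ffee00c0ffee.".
    std::string HexNameStem() {
      uint64 prefix = base::RandUint64();
      return StringPrintf("%016" PRIx64 ".", prefix);
    }
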
117
118 bool RollbackEntry(syncable::WriteTransaction* trans,
119                    syncable::EntryKernel* backup) {
120   syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE,
121                                backup->ref(syncable::META_HANDLE));
122   CHECK(entry.good());
123   bool was_del = entry.Get(syncable::IS_DEL);
124
125   if (!entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL)))
126     return false;
127   syncable::Name name = syncable::Name::FromEntryKernel(backup);
128   if (!entry.PutParentIdAndName(backup->ref(syncable::PARENT_ID), name))
129     return false;
130
131   if (!backup->ref(syncable::IS_DEL)) {
132     if (!entry.PutPredecessor(backup->ref(syncable::PREV_ID)))
133       return false;
134   }
135
136   if (backup->ref(syncable::PREV_ID) != entry.Get(syncable::PREV_ID))
137     return false;
138
139   entry.Put(syncable::CTIME, backup->ref(syncable::CTIME));
140   entry.Put(syncable::MTIME, backup->ref(syncable::MTIME));
141   entry.Put(syncable::BASE_VERSION, backup->ref(syncable::BASE_VERSION));
142   entry.Put(syncable::IS_DIR, backup->ref(syncable::IS_DIR));
143   entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL));
144   entry.Put(syncable::ID, backup->ref(syncable::ID));
145   entry.Put(syncable::IS_UNAPPLIED_UPDATE,
146             backup->ref(syncable::IS_UNAPPLIED_UPDATE));
147   return true;
148 }
149
150 class TransactionalUpdateEntryPreparer {
151  public:
152   TransactionalUpdateEntryPreparer() {
153     namegen_.Initialize();
154   }
155
156   void PrepareEntries(syncable::WriteTransaction* trans,
157                       const vector<syncable::Id>* ids) {
158     vector<syncable::Id>::const_iterator it;
159     for (it = ids->begin(); it != ids->end(); ++it) {
160       syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
161       syncable::Name random_name(namegen_.PathStringNameForEntry(entry));
162       CHECK(entry.PutParentIdAndName(trans->root_id(), random_name));
163     }
164   }
165
166  private:
167   UniqueNameGenerator namegen_;
168   DISALLOW_COPY_AND_ASSIGN(TransactionalUpdateEntryPreparer);
169 };
170
171 }  // namespace
172
173 bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
174     syncable::WriteTransaction* trans,
175     const vector<syncable::Id>* const update_set,
176     SyncerSession* const session) {
177   vector<int64> handles;  // The handles in the |update_set| order.
178   vector<syncable::Id> rollback_ids;  // Holds the same Ids as update_set, but
179                                       // sorted so that runs of adjacent nodes
180                                       // appear in order.
idana  2009/09/10 05:44:37
Can you please change this comment paragraph so th
181   rollback_ids.reserve(update_set->size());
182   syncable::MetahandleSet rollback_ids_inserted_items;  // Tracks what's added
183                                                         // to |rollback_ids|.
idana  2009/09/10 05:44:37
Same as above.
184
185   vector<syncable::Id>::const_iterator it;
186   // 1. Build |rollback_ids| in the order required for successful rollback.
187   //    Specifically, for positions to come out right, restoring an item
188   //    requires that its predecessor in the sibling order is properly
189   //    restored first.
190   // 2. Build |handles|, the list of handles for ApplyUpdates.
191   for (it = update_set->begin(); it != update_set->end(); ++it) {
192     syncable::Entry entry(trans, syncable::GET_BY_ID, *it);
193     SyncerUtil::AddPredecessorsThenItem(trans, &entry,
194         syncable::IS_UNAPPLIED_UPDATE, &rollback_ids_inserted_items,
195         &rollback_ids);
196     handles.push_back(entry.Get(syncable::META_HANDLE));
197   }
198   DCHECK_EQ(rollback_ids.size(), update_set->size());
199   DCHECK_EQ(rollback_ids_inserted_items.size(), update_set->size());
200
201   // 3. Store the information needed to roll back if the transaction fails.
202   //    Do this before modifying anything to keep the next/prev values intact.
203   vector<syncable::EntryKernel> rollback_data(rollback_ids.size());
204   for (size_t i = 0; i < rollback_ids.size(); ++i) {
205     syncable::Entry entry(trans, syncable::GET_BY_ID, rollback_ids[i]);
206     StoreLocalDataForUpdateRollback(&entry, &rollback_data[i]);
207   }
208
209   // 4. Use the preparer to move things to an initial starting state where no
210   //    names collide, and nothing in the set is a child of anything else. If
211   //    we've correctly calculated the set, the server tree is valid, and no
212   //    changes have occurred locally, we should be able to apply updates from
213   //    this state.
214   TransactionalUpdateEntryPreparer preparer;
215   preparer.PrepareEntries(trans, update_set);
216
217   // 5. Use the usual update-application logic, starting from the special
218   //    state we've just prepared.
219   UpdateApplicator applicator(session, handles.begin(), handles.end());
220   while (applicator.AttemptOneApplication(trans)) {
221     // Keep going till all updates are applied.
222   }
223   if (!applicator.AllUpdatesApplied()) {
224     LOG(ERROR) << "Transactional Apply Failed, Rolling back.";
225     // We have to move entries into the temp dir again. E.g. if a swap was in
226     // a set with other failing updates, the swap may have gone through,
227     // meaning the rollback needs to be transactional. But as we're going to a
228     // known good state, we should always succeed.
229     preparer.PrepareEntries(trans, update_set);
230
231     // Roll back all entries.
232     for (size_t i = 0; i < rollback_data.size(); ++i) {
233       CHECK(RollbackEntry(trans, &rollback_data[i]));
234     }
235     return false;  // Don't save progress -- we just undid it.
236   }
237   applicator.SaveProgressIntoSessionState();
238   return true;
239 }
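
The function above follows a snapshot / prepare / apply / rollback shape:
capture enough state to restore every touched entry before anything is
modified, move the set to a neutral starting point, attempt the whole batch,
and restore the snapshot if any part fails. A simplified, self-contained sketch
of that shape over a plain map -- it copies the whole state and bails on the
first failure, whereas the real code snapshots per-entry kernels and checks
AllUpdatesApplied at the end (illustration only, not part of this CL):

    #include <cstddef>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::map<std::string, std::string> State;
    typedef std::pair<std::string, std::string> Edit;

    // Applies every edit, or leaves |state| exactly as it was found.
    bool ApplyBatchTransactionally(State* state,
                                   const std::vector<Edit>& edits,
                                   bool (*apply_one)(State*, const Edit&)) {
      State backup = *state;  // 1. Snapshot before modifying anything.
      for (size_t i = 0; i < edits.size(); ++i) {
        if (!apply_one(state, edits[i])) {
          *state = backup;    // 2. On failure, restore the snapshot.
          return false;       //    Don't report progress -- we just undid it.
        }
      }
      return true;
    }
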
240
241 void BuildAndProcessConflictSetsCommand::BuildConflictSets(
242     syncable::BaseTransaction* trans,
243     ConflictResolutionView* view) {
244   view->CleanupSets();
245   set<syncable::Id>::iterator i = view->CommitConflictsBegin();
246   while (i != view->CommitConflictsEnd()) {
247     syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
248     CHECK(entry.good());
249     if (!entry.Get(syncable::IS_UNSYNCED) &&
250         !entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
251       // This can happen very rarely. It means we had a simply conflicting item
252       // that randomly committed. We drop the entry as it's no longer
253       // conflicting.
254       view->EraseCommitConflict(i++);
255       continue;
256     }
257     if (entry.ExistsOnClientBecauseDatabaseNameIsNonEmpty() &&
258         (entry.Get(syncable::IS_DEL) || entry.Get(syncable::SERVER_IS_DEL))) {
259       // If we're deleted on client or server we can't be in a complex set.
260       ++i;
261       continue;
262     }
263     bool new_parent =
264         entry.Get(syncable::PARENT_ID) != entry.Get(syncable::SERVER_PARENT_ID);
265     bool new_name = 0 != syncable::ComparePathNames(entry.GetSyncNameValue(),
266         entry.Get(syncable::SERVER_NAME));
267     if (new_parent || new_name)
268       MergeSetsForNameClash(trans, &entry, view);
269     if (new_parent)
270       MergeSetsForIntroducedLoops(trans, &entry, view);
271     MergeSetsForNonEmptyDirectories(trans, &entry, view);
272     ++i;
273   }
274 }
275
276 void BuildAndProcessConflictSetsCommand::MergeSetsForNameClash(
277     syncable::BaseTransaction* trans, syncable::Entry* entry,
278     ConflictResolutionView* view) {
279   PathString server_name = entry->Get(syncable::SERVER_NAME);
280   // Uncommitted entries have no server name. We trap this because the root
281   // item has a null name and a null parent id.
282   if (server_name.empty())
283     return;
284   syncable::Id conflicting_id =
285       SyncerUtil::GetNameConflictingItemId(
286           trans, entry->Get(syncable::SERVER_PARENT_ID), server_name);
287   if (syncable::kNullId != conflicting_id)
288     view->MergeSets(entry->Get(syncable::ID), conflicting_id);
289 }
290
291 void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
292     syncable::BaseTransaction* trans, syncable::Entry* entry,
293     ConflictResolutionView* view) {
294   // This code crawls up from the item in question until it gets to the root
295   // or itself. If it gets to the root it does nothing. If it finds a loop, all
296   // moved unsynced entries in the list of crawled entries have their sets
297   // merged with the entry.
298   // TODO(sync): Build test cases to cover this function when the argument
299   // list has settled.
300   syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
301   syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
302   if (!parent.good()) {
303     return;
304   }
305   // Don't check for a loop if the server parent is deleted.
306   if (parent.Get(syncable::IS_DEL))
307     return;
308   vector<syncable::Id> conflicting_entries;
309   while (!parent_id.IsRoot()) {
310     syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
311     if (!parent.good()) {
312       LOG(INFO) << "Bad parent in loop check, skipping. Bad parent id: "
313                 << parent_id << " entry: " << *entry;
314       return;
315     }
316     if (parent.Get(syncable::IS_UNSYNCED) &&
317         entry->Get(syncable::PARENT_ID) !=
318             entry->Get(syncable::SERVER_PARENT_ID))
319       conflicting_entries.push_back(parent_id);
320     parent_id = parent.Get(syncable::PARENT_ID);
321     if (parent_id == entry->Get(syncable::ID))
322       break;
323   }
324   if (parent_id.IsRoot())
325     return;
326   for (size_t i = 0; i < conflicting_entries.size(); i++) {
327     view->MergeSets(entry->Get(syncable::ID), conflicting_entries[i]);
328   }
329 }
330
331 namespace {
332
333 class ServerDeletedPathChecker {
334  public:
335   bool CausingConflict(const syncable::Entry& e,
336                        const syncable::Entry& log_entry) {
337     CHECK(e.good()) << "Missing parent in path of: " << log_entry;
338     if (e.Get(syncable::IS_UNAPPLIED_UPDATE) &&
339         e.Get(syncable::SERVER_IS_DEL)) {
340       CHECK(!e.Get(syncable::IS_DEL)) << " Inconsistency in local tree. "
341           "syncable::Entry: " << e << " Leaf: " << log_entry;
342       return true;
343     } else {
344       CHECK(!e.Get(syncable::IS_DEL)) << " Deleted entry has children. "
345           "syncable::Entry: " << e << " Leaf: " << log_entry;
346       return false;
347     }
348   }
349   // Returns the null ID if we should stop investigating the path.
350   syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
351                                    syncable::Id id,
352                                    syncable::Id check_id,
353                                    const syncable::Entry& log_entry) {
354     syncable::Entry parent(trans, syncable::GET_BY_ID, id);
355     CHECK(parent.good()) << "Tree inconsistency, missing id " << id << " "
356                          << log_entry;
357     syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
358     CHECK(parent_id != check_id) << "Loop in dir tree! "
359                                  << log_entry << " " << parent;
360     return parent_id;
361   }
362 };
363
364 class LocallyDeletedPathChecker {
365  public:
366   bool CausingConflict(const syncable::Entry& e,
367                        const syncable::Entry& log_entry) {
368     return e.good() && e.Get(syncable::IS_DEL) && e.Get(syncable::IS_UNSYNCED);
369   }
370   // Returns the null ID if we should stop investigating the path.
371   syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
372                                    syncable::Id id,
373                                    syncable::Id check_id,
374                                    const syncable::Entry& log_entry) {
375     syncable::Entry parent(trans, syncable::GET_BY_ID, id);
376     if (!parent.good())
377       return syncable::kNullId;
378     syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
379     if (parent_id == check_id)
380       return syncable::kNullId;
381     return parent_id;
382   }
383 };
384
385 template <typename Checker>
386 void CrawlDeletedTreeMergingSets(syncable::BaseTransaction* trans,
387                                  const syncable::Entry& entry,
388                                  ConflictResolutionView* view,
389                                  Checker checker) {
390   syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
391   syncable::Id double_step_parent_id = parent_id;
392   // This block builds sets where we've got an entry in a directory the
393   // server wants to delete.
394   //
395   // Here we're walking up the tree to find all entries that the checker
396   // considers deleted. We can be extremely strict here, as anything
397   // unexpected means invariants in the local hierarchy have been broken.
398   while (!parent_id.IsRoot()) {
399     if (!double_step_parent_id.IsRoot()) {
400       // Checks to ensure we don't loop.
401       double_step_parent_id = checker.GetAndExamineParent(
402           trans, double_step_parent_id, parent_id, entry);
403       double_step_parent_id = checker.GetAndExamineParent(
404           trans, double_step_parent_id, parent_id, entry);
405     }
406     syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
407     if (checker.CausingConflict(parent, entry))
408       view->MergeSets(entry.Get(syncable::ID), parent.Get(syncable::ID));
409     else
410       break;
411     parent_id = parent.Get(syncable::PARENT_ID);
412   }
413 }
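
The |double_step_parent_id| bookkeeping above is a two-pointer (Floyd-style)
cycle check: the extra id climbs two parent links for every single link the
main walk takes and is compared against the main walk's position, so a
corrupted, looping parent chain is caught by the checker rather than walked
indefinitely. A self-contained sketch of the same check over a plain parent
map, with a hypothetical NodeId/kRootId standing in for syncable::Id
(illustration only, not part of this CL):

    #include <map>

    typedef int NodeId;
    const NodeId kRootId = 0;  // Hypothetical sentinel for the root.

    // Returns true if following parent links from |id| reaches the root,
    // false if the chain loops. Assumes every non-root node has an entry
    // in |parent_of|.
    bool ParentChainReachesRoot(const std::map<NodeId, NodeId>& parent_of,
                                NodeId id) {
      NodeId slow = id;  // Mirrors |parent_id|.
      NodeId fast = id;  // Mirrors |double_step_parent_id|.
      while (slow != kRootId) {
        if (fast != kRootId) {
          fast = parent_of.find(fast)->second;    // First of two steps.
          if (fast == slow)
            return false;                         // Fast caught up: a loop.
          if (fast != kRootId) {
            fast = parent_of.find(fast)->second;  // Second step.
            if (fast == slow)
              return false;
          }
        }
        slow = parent_of.find(slow)->second;
      }
      return true;
    }
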
414
415 }  // namespace
416
417 void BuildAndProcessConflictSetsCommand::MergeSetsForNonEmptyDirectories(
418     syncable::BaseTransaction* trans, syncable::Entry* entry,
419     ConflictResolutionView* view) {
420   if (entry->Get(syncable::IS_UNSYNCED) && !entry->Get(syncable::IS_DEL)) {
421     ServerDeletedPathChecker checker;
422     CrawlDeletedTreeMergingSets(trans, *entry, view, checker);
423   }
424   if (entry->Get(syncable::IS_UNAPPLIED_UPDATE) &&
425       !entry->Get(syncable::SERVER_IS_DEL)) {
426     syncable::Entry parent(trans, syncable::GET_BY_ID,
427                            entry->Get(syncable::SERVER_PARENT_ID));
428     syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
429     if (!parent.good())
430       return;
431     LocallyDeletedPathChecker checker;
432     if (!checker.CausingConflict(parent, *entry))
433       return;
434     view->MergeSets(entry->Get(syncable::ID), parent.Get(syncable::ID));
435     CrawlDeletedTreeMergingSets(trans, parent, view, checker);
436   }
437 }
438
439 }  // namespace browser_sync