OLD | NEW |
1 // Copyright 2012 The Chromium Authors. All rights reserved. | 1 // Copyright 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/engine/get_commit_ids_command.h" | 5 #include "sync/engine/get_commit_ids.h" |
6 | 6 |
7 #include <set> | 7 #include <set> |
8 #include <utility> | |
9 #include <vector> | 8 #include <vector> |
10 | 9 |
| 10 #include "base/basictypes.h" |
11 #include "sync/engine/syncer_util.h" | 11 #include "sync/engine/syncer_util.h" |
12 #include "sync/sessions/nudge_tracker.h" | 12 #include "sync/syncable/directory.h" |
13 #include "sync/syncable/entry.h" | 13 #include "sync/syncable/entry.h" |
14 #include "sync/syncable/nigori_handler.h" | 14 #include "sync/syncable/nigori_handler.h" |
15 #include "sync/syncable/nigori_util.h" | 15 #include "sync/syncable/nigori_util.h" |
16 #include "sync/syncable/syncable_base_transaction.h" | 16 #include "sync/syncable/syncable_base_transaction.h" |
17 #include "sync/syncable/syncable_util.h" | 17 #include "sync/syncable/syncable_util.h" |
18 #include "sync/util/cryptographer.h" | 18 #include "sync/util/cryptographer.h" |
19 | 19 |
20 using std::set; | 20 using std::set; |
21 using std::vector; | 21 using std::vector; |
22 | 22 |
23 namespace syncer { | 23 namespace syncer { |
24 | 24 |
25 using sessions::OrderedCommitSet; | 25 namespace { |
26 using sessions::SyncSession; | |
27 using sessions::StatusController; | |
28 | 26 |
29 GetCommitIdsCommand::GetCommitIdsCommand( | 27 // Forward-declare some helper functions. This gives us more options for |
| 28 // ordering the function definitions within this file. |
| 29 |
| 30 // Filters |unsynced_handles| to remove all entries that do not belong to the |
| 31 // specified |requested_types|, or are not eligible for a commit at this time. |
| 32 void FilterUnreadyEntries( |
30 syncable::BaseTransaction* trans, | 33 syncable::BaseTransaction* trans, |
31 ModelTypeSet requested_types, | 34 ModelTypeSet requested_types, |
32 const size_t commit_batch_size, | 35 ModelTypeSet encrypted_types, |
33 sessions::OrderedCommitSet* commit_set) | 36 bool passphrase_missing, |
34 : trans_(trans), | 37 const syncable::Directory::Metahandles& unsynced_handles, |
35 requested_types_(requested_types), | 38 std::set<int64>* ready_unsynced_set); |
36 requested_commit_batch_size_(commit_batch_size), | |
37 commit_set_(commit_set) { | |
38 } | |
39 | 39 |
40 GetCommitIdsCommand::~GetCommitIdsCommand() {} | 40 // Given a set of commit metahandles that are ready for commit |
| 41 // (|ready_unsynced_set|), sorts these into commit order and places up to |
| 42 // |max_entries| of them in the output parameter |out|. |
| 43 // |
| 44 // See the header file for an explanation of commit ordering. |
| 45 void OrderCommitIds( |
| 46 syncable::BaseTransaction* trans, |
| 47 size_t max_entries, |
| 48 const std::set<int64>& ready_unsynced_set, |
| 49 std::vector<int64>* out); |
41 | 50 |
42 SyncerError GetCommitIdsCommand::ExecuteImpl(SyncSession* session) { | 51 } // namespace |
| 52 |
| 53 void GetCommitIdsForType( |
| 54 syncable::BaseTransaction* trans, |
| 55 ModelType type, |
| 56 size_t max_entries, |
| 57 syncable::Directory::Metahandles* out) { |
| 58 syncable::Directory* dir = trans->directory(); |
| 59 |
43 // Gather the full set of unsynced items and store it in the session. They | 60 // Gather the full set of unsynced items and store it in the session. They |
44 // are not in the correct order for commit. | 61 // are not in the correct order for commit. |
45 std::set<int64> ready_unsynced_set; | 62 std::set<int64> ready_unsynced_set; |
46 syncable::Directory::Metahandles all_unsynced_handles; | 63 syncable::Directory::Metahandles all_unsynced_handles; |
47 GetUnsyncedEntries(trans_, | 64 GetUnsyncedEntries(trans, &all_unsynced_handles); |
48 &all_unsynced_handles); | |
49 | 65 |
50 ModelTypeSet encrypted_types; | 66 ModelTypeSet encrypted_types; |
51 bool passphrase_missing = false; | 67 bool passphrase_missing = false; |
52 Cryptographer* cryptographer = | 68 Cryptographer* cryptographer = dir->GetCryptographer(trans); |
53 session->context()-> | |
54 directory()->GetCryptographer(trans_); | |
55 if (cryptographer) { | 69 if (cryptographer) { |
56 encrypted_types = session->context()->directory()->GetNigoriHandler()-> | 70 encrypted_types = dir->GetNigoriHandler()->GetEncryptedTypes(trans); |
57 GetEncryptedTypes(trans_); | |
58 passphrase_missing = cryptographer->has_pending_keys(); | 71 passphrase_missing = cryptographer->has_pending_keys(); |
59 } | 72 } |
60 | 73 |
61 // We filter out all unready entries from the set of unsynced handles. This | 74 // We filter out all unready entries from the set of unsynced handles. This |
62 // new set of ready and unsynced items is then what we use to determine what | 75 // new set of ready and unsynced items is then what we use to determine what |
63 // is a candidate for commit. The caller of this SyncerCommand is responsible | 76 // is a candidate for commit. The caller of this SyncerCommand is responsible |
64 // for ensuring that no throttled types are included among the | 77 // for ensuring that no throttled types are included among the |
65 // requested_types. | 78 // requested_types. |
66 FilterUnreadyEntries(trans_, | 79 FilterUnreadyEntries(trans, |
67 requested_types_, | 80 ModelTypeSet(type), |
68 encrypted_types, | 81 encrypted_types, |
69 passphrase_missing, | 82 passphrase_missing, |
70 all_unsynced_handles, | 83 all_unsynced_handles, |
71 &ready_unsynced_set); | 84 &ready_unsynced_set); |
72 | 85 |
73 BuildCommitIds(trans_, | 86 OrderCommitIds(trans, max_entries, ready_unsynced_set, out); |
74 session->context()->routing_info(), | |
75 ready_unsynced_set); | |
76 | 87 |
77 return SYNCER_OK; | 88 for (size_t i = 0; i < out->size(); i++) { |
| 89 DVLOG(1) << "Debug commit batch result:" << (*out)[i]; |
| 90 } |
78 } | 91 } |
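
To see the shape of this per-type pipeline outside the sync engine, here is a minimal standalone sketch in plain C++: gather the unsynced handles, filter them through a readiness predicate, then order and cap them at the batch size. GetCommitIdsForTypeSketch, Metahandles, and is_ready_for_type are illustrative stand-ins, not the real syncable API.

#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>

// Illustrative stand-in for syncable::Directory::Metahandles.
using Metahandles = std::vector<int64_t>;

Metahandles GetCommitIdsForTypeSketch(const Metahandles& all_unsynced,
                                      bool (*is_ready_for_type)(int64_t),
                                      size_t max_entries) {
  // 1. Keep only handles that are ready to commit for this type.
  std::set<int64_t> ready;
  for (int64_t handle : all_unsynced) {
    if (is_ready_for_type(handle))
      ready.insert(handle);
  }

  // 2. Emit them in commit order, capped at the batch size. The real code
  //    delegates the ordering to OrderCommitIds(); a plain copy stands in here.
  Metahandles out(ready.begin(), ready.end());
  if (out.size() > max_entries)
    out.resize(max_entries);
  return out;
}
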
79 | 92 |
80 namespace { | 93 namespace { |
81 | 94 |
82 bool IsEntryInConflict(const syncable::Entry& entry) { | 95 bool IsEntryInConflict(const syncable::Entry& entry) { |
83 if (entry.Get(syncable::IS_UNSYNCED) && | 96 if (entry.Get(syncable::IS_UNSYNCED) && |
84 entry.Get(syncable::SERVER_VERSION) > 0 && | 97 entry.Get(syncable::SERVER_VERSION) > 0 && |
85 (entry.Get(syncable::SERVER_VERSION) > | 98 (entry.Get(syncable::SERVER_VERSION) > |
86 entry.Get(syncable::BASE_VERSION))) { | 99 entry.Get(syncable::BASE_VERSION))) { |
87 // The local and server versions don't match. The item must be in | 100 // The local and server versions don't match. The item must be in |
(...skipping 59 matching lines...)
147 | 160 |
148 if (entry.IsRoot()) { | 161 if (entry.IsRoot()) { |
149 NOTREACHED() << "Permanent item became unsynced " << entry; | 162 NOTREACHED() << "Permanent item became unsynced " << entry; |
150 return false; | 163 return false; |
151 } | 164 } |
152 | 165 |
153 DVLOG(2) << "Entry is ready for commit: " << entry; | 166 DVLOG(2) << "Entry is ready for commit: " << entry; |
154 return true; | 167 return true; |
155 } | 168 } |
156 | 169 |
157 } // namespace | 170 // Filters |unsynced_handles| to remove all entries that do not belong to the |
158 | 171 // specified |requested_types|, or are not eligible for a commit at this time. |
159 void GetCommitIdsCommand::FilterUnreadyEntries( | 172 void FilterUnreadyEntries( |
160 syncable::BaseTransaction* trans, | 173 syncable::BaseTransaction* trans, |
161 ModelTypeSet requested_types, | 174 ModelTypeSet requested_types, |
162 ModelTypeSet encrypted_types, | 175 ModelTypeSet encrypted_types, |
163 bool passphrase_missing, | 176 bool passphrase_missing, |
164 const syncable::Directory::Metahandles& unsynced_handles, | 177 const syncable::Directory::Metahandles& unsynced_handles, |
165 std::set<int64>* ready_unsynced_set) { | 178 std::set<int64>* ready_unsynced_set) { |
166 for (syncable::Directory::Metahandles::const_iterator iter = | 179 for (syncable::Directory::Metahandles::const_iterator iter = |
167 unsynced_handles.begin(); iter != unsynced_handles.end(); ++iter) { | 180 unsynced_handles.begin(); iter != unsynced_handles.end(); ++iter) { |
168 syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter); | 181 syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter); |
169 if (IsEntryReadyForCommit(requested_types, | 182 if (IsEntryReadyForCommit(requested_types, |
170 encrypted_types, | 183 encrypted_types, |
171 passphrase_missing, | 184 passphrase_missing, |
172 entry)) { | 185 entry)) { |
173 ready_unsynced_set->insert(*iter); | 186 ready_unsynced_set->insert(*iter); |
174 } | 187 } |
175 } | 188 } |
176 } | 189 } |
177 | 190 |
178 bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors( | 191 // This class helps to implement OrderCommitIds(). Its members track the |
| 192 // progress of a traversal while its methods extend it. It can return early if |
| 193 // the traversal reaches the desired size before the full traversal is complete. |
| 194 class Traversal { |
| 195 public: |
| 196 Traversal( |
179 syncable::BaseTransaction* trans, | 197 syncable::BaseTransaction* trans, |
180 const ModelSafeRoutingInfo& routes, | 198 int64 max_entries, |
| 199 syncable::Directory::Metahandles* out); |
| 200 ~Traversal(); |
| 201 |
| 202 // First step of traversal building. Adds non-deleted items in order. |
| 203 void AddCreatesAndMoves(const std::set<int64>& ready_unsynced_set); |
| 204 |
| 205 // Second step of traversal building. Appends deleted items. |
| 206 void AddDeletes(const std::set<int64>& ready_unsynced_set); |
| 207 |
| 208 private: |
| 209 // The following functions do not modify the traversal directly. They return |
| 210 // their results in the |result| vector instead. |
| 211 bool AddUncommittedParentsAndTheirPredecessors( |
| 212 const std::set<int64>& ready_unsynced_set, |
| 213 const syncable::Entry& item, |
| 214 syncable::Directory::Metahandles* result) const; |
| 215 |
| 216 void TryAddItem(const std::set<int64>& ready_unsynced_set, |
| 217 const syncable::Entry& item, |
| 218 syncable::Directory::Metahandles* result) const; |
| 219 |
| 220 void AddItemThenPredecessors( |
| 221 const std::set<int64>& ready_unsynced_set, |
| 222 const syncable::Entry& item, |
| 223 syncable::Directory::Metahandles* result) const; |
| 224 |
| 225 void AddPredecessorsThenItem( |
| 226 const std::set<int64>& ready_unsynced_set, |
| 227 const syncable::Entry& item, |
| 228 syncable::Directory::Metahandles* result) const; |
| 229 |
| 230 // Returns true if we've collected enough items. |
| 231 bool IsFull() const; |
| 232 |
| 233 // Returns true if the specified handle is already in the traversal. |
| 234 bool HaveItem(int64 handle) const; |
| 235 |
| 236 // Adds the specified handles to the traversal. |
| 237 void AppendManyToTraversal(const syncable::Directory::Metahandles& handles); |
| 238 |
| 239 // Adds the specified handle to the traversal. |
| 240 void AppendToTraversal(int64 handle); |
| 241 |
| 242 syncable::Directory::Metahandles* out_; |
| 243 std::set<int64> added_handles_; |
| 244 const size_t max_entries_; |
| 245 syncable::BaseTransaction* trans_; |
| 246 |
| 247 DISALLOW_COPY_AND_ASSIGN(Traversal); |
| 248 }; |
| 249 |
| 250 Traversal::Traversal( |
| 251 syncable::BaseTransaction* trans, |
| 252 int64 max_entries, |
| 253 syncable::Directory::Metahandles* out) |
| 254 : out_(out), |
| 255 max_entries_(max_entries), |
| 256 trans_(trans) { } |
| 257 |
| 258 Traversal::~Traversal() {} |
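
The Traversal helper's bookkeeping boils down to an ordered output vector, a membership set for de-duplication, and a size cap. A simplified, self-contained sketch of that pattern follows; TraversalSketch is an illustrative stand-in, not the class above.

#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>

class TraversalSketch {
 public:
  TraversalSketch(size_t max_entries, std::vector<int64_t>* out)
      : max_entries_(max_entries), out_(out) {}

  // True once the batch limit has been reached.
  bool IsFull() const { return out_->size() >= max_entries_; }

  // True if the handle has already been appended.
  bool HaveItem(int64_t handle) const {
    return added_handles_.count(handle) != 0;
  }

  void Append(int64_t handle) {
    out_->push_back(handle);        // preserves commit order
    added_handles_.insert(handle);  // prevents duplicates later
  }

 private:
  const size_t max_entries_;
  std::vector<int64_t>* out_;
  std::set<int64_t> added_handles_;
};
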
| 259 |
| 260 bool Traversal::AddUncommittedParentsAndTheirPredecessors( |
181 const std::set<int64>& ready_unsynced_set, | 261 const std::set<int64>& ready_unsynced_set, |
182 const syncable::Entry& item, | 262 const syncable::Entry& item, |
183 sessions::OrderedCommitSet* result) const { | 263 syncable::Directory::Metahandles* result) const { |
184 OrderedCommitSet item_dependencies(routes); | 264 syncable::Directory::Metahandles dependencies; |
185 syncable::Id parent_id = item.Get(syncable::PARENT_ID); | 265 syncable::Id parent_id = item.Get(syncable::PARENT_ID); |
186 | 266 |
187 // Climb the tree adding entries leaf -> root. | 267 // Climb the tree adding entries leaf -> root. |
188 while (!parent_id.ServerKnows()) { | 268 while (!parent_id.ServerKnows()) { |
189 syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id); | 269 syncable::Entry parent(trans_, syncable::GET_BY_ID, parent_id); |
190 CHECK(parent.good()) << "Bad user-only parent in item path."; | 270 CHECK(parent.good()) << "Bad user-only parent in item path."; |
191 int64 handle = parent.Get(syncable::META_HANDLE); | 271 int64 handle = parent.Get(syncable::META_HANDLE); |
192 if (commit_set_->HaveCommitItem(handle)) { | 272 if (HaveItem(handle)) { |
193 // We've already added this parent (and therefore all of its parents). | 273 // We've already added this parent (and therefore all of its parents). |
194 // We can return early. | 274 // We can return early. |
195 break; | 275 break; |
196 } | 276 } |
197 if (IsEntryInConflict(parent)) { | 277 if (IsEntryInConflict(parent)) { |
198 // We ignore all entries that are children of a conflicting item. Return | 278 // We ignore all entries that are children of a conflicting item. Return |
199 // false immediately to forget the traversal we've built up so far. | 279 // false immediately to forget the traversal we've built up so far. |
200 DVLOG(1) << "Parent was in conflict, omitting " << item; | 280 DVLOG(1) << "Parent was in conflict, omitting " << item; |
201 return false; | 281 return false; |
202 } | 282 } |
203 AddItemThenPredecessors(trans, | 283 AddItemThenPredecessors(ready_unsynced_set, |
204 ready_unsynced_set, | |
205 parent, | 284 parent, |
206 &item_dependencies); | 285 &dependencies); |
207 parent_id = parent.Get(syncable::PARENT_ID); | 286 parent_id = parent.Get(syncable::PARENT_ID); |
208 } | 287 } |
209 | 288 |
210 // Reverse what we added to get the correct order. | 289 // Reverse what we added to get the correct order. |
211 result->AppendReverse(item_dependencies); | 290 result->insert(result->end(), dependencies.rbegin(), dependencies.rend()); |
212 return true; | 291 return true; |
213 } | 292 } |
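
The climb-then-reverse idiom above (collect uncommitted parents leaf to root, then append them reversed so ancestors precede their children) can be shown in isolation. A toy sketch over a plain child-to-parent map; parent_of and the sentinel value 0 are assumptions of the sketch, not sync concepts.

#include <cstdint>
#include <map>
#include <vector>

// parent_of maps child -> parent; 0 stands in for an already-committed root.
std::vector<int64_t> UncommittedAncestorsRootFirst(
    const std::map<int64_t, int64_t>& parent_of, int64_t item) {
  std::vector<int64_t> leaf_to_root;
  for (auto it = parent_of.find(item);
       it != parent_of.end() && it->second != 0;
       it = parent_of.find(it->second)) {
    leaf_to_root.push_back(it->second);  // climb: nearest parent first
  }
  // Reverse so ancestors appear before their children in the output.
  return std::vector<int64_t>(leaf_to_root.rbegin(), leaf_to_root.rend());
}
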
214 | 293 |
215 // Adds the given item to the list if it is unsynced and ready for commit. | 294 // Adds the given item to the list if it is unsynced and ready for commit. |
216 void GetCommitIdsCommand::TryAddItem(const std::set<int64>& ready_unsynced_set, | 295 void Traversal::TryAddItem(const std::set<int64>& ready_unsynced_set, |
217 const syncable::Entry& item, | 296 const syncable::Entry& item, |
218 OrderedCommitSet* result) const { | 297 syncable::Directory::Metahandles* result) const { |
219 DCHECK(item.Get(syncable::IS_UNSYNCED)); | 298 DCHECK(item.Get(syncable::IS_UNSYNCED)); |
220 int64 item_handle = item.Get(syncable::META_HANDLE); | 299 int64 item_handle = item.Get(syncable::META_HANDLE); |
221 if (ready_unsynced_set.count(item_handle) != 0) { | 300 if (ready_unsynced_set.count(item_handle) != 0) { |
222 result->AddCommitItem(item_handle, item.GetModelType()); | 301 result->push_back(item_handle); |
223 } | 302 } |
224 } | 303 } |
225 | 304 |
226 // Adds the given item, and all its unsynced predecessors. The traversal will | 305 // Adds the given item, and all its unsynced predecessors. The traversal will |
227 // be cut short if any item along the traversal is not IS_UNSYNCED, or if we | 306 // be cut short if any item along the traversal is not IS_UNSYNCED, or if we |
228 // detect that this area of the tree has already been traversed. Items that are | 307 // detect that this area of the tree has already been traversed. Items that are |
229 // not 'ready' for commit (see IsEntryReadyForCommit()) will not be added to the | 308 // not 'ready' for commit (see IsEntryReadyForCommit()) will not be added to the |
230 // list, though they will not stop the traversal. | 309 // list, though they will not stop the traversal. |
231 void GetCommitIdsCommand::AddItemThenPredecessors( | 310 void Traversal::AddItemThenPredecessors( |
232 syncable::BaseTransaction* trans, | |
233 const std::set<int64>& ready_unsynced_set, | 311 const std::set<int64>& ready_unsynced_set, |
234 const syncable::Entry& item, | 312 const syncable::Entry& item, |
235 OrderedCommitSet* result) const { | 313 syncable::Directory::Metahandles* result) const { |
236 int64 item_handle = item.Get(syncable::META_HANDLE); | 314 int64 item_handle = item.Get(syncable::META_HANDLE); |
237 if (commit_set_->HaveCommitItem(item_handle)) { | 315 if (HaveItem(item_handle)) { |
238 // We've already added this item to the commit set, and so must have | 316 // We've already added this item to the commit set, and so must have |
239 // already added the predecessors as well. | 317 // already added the predecessors as well. |
240 return; | 318 return; |
241 } | 319 } |
242 TryAddItem(ready_unsynced_set, item, result); | 320 TryAddItem(ready_unsynced_set, item, result); |
243 if (item.Get(syncable::IS_DEL)) | 321 if (item.Get(syncable::IS_DEL)) |
244 return; // Deleted items have no predecessors. | 322 return; // Deleted items have no predecessors. |
245 | 323 |
246 syncable::Id prev_id = item.GetPredecessorId(); | 324 syncable::Id prev_id = item.GetPredecessorId(); |
247 while (!prev_id.IsRoot()) { | 325 while (!prev_id.IsRoot()) { |
248 syncable::Entry prev(trans, syncable::GET_BY_ID, prev_id); | 326 syncable::Entry prev(trans_, syncable::GET_BY_ID, prev_id); |
249 CHECK(prev.good()) << "Bad id when walking predecessors."; | 327 CHECK(prev.good()) << "Bad id when walking predecessors."; |
250 if (!prev.Get(syncable::IS_UNSYNCED)) { | 328 if (!prev.Get(syncable::IS_UNSYNCED)) { |
251 // We're interested in "runs" of unsynced items. This item breaks | 329 // We're interested in "runs" of unsynced items. This item breaks |
252 // the streak, so we stop traversing. | 330 // the streak, so we stop traversing. |
253 return; | 331 return; |
254 } | 332 } |
255 int64 handle = prev.Get(syncable::META_HANDLE); | 333 int64 handle = prev.Get(syncable::META_HANDLE); |
256 if (commit_set_->HaveCommitItem(handle)) { | 334 if (HaveItem(handle)) { |
257 // We've already added this item to the commit set, and so must have | 335 // We've already added this item to the commit set, and so must have |
258 // already added the predecessors as well. | 336 // already added the predecessors as well. |
259 return; | 337 return; |
260 } | 338 } |
261 TryAddItem(ready_unsynced_set, prev, result); | 339 TryAddItem(ready_unsynced_set, prev, result); |
262 prev_id = prev.GetPredecessorId(); | 340 prev_id = prev.GetPredecessorId(); |
263 } | 341 } |
264 } | 342 } |
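
The "runs of unsynced items" cutoff can be illustrated without the syncable machinery: walk backwards through an item's predecessors and stop at the first one that is already synced. A sketch over a flat sibling list; SiblingSketch and its fields are illustrative, and the real code additionally consults the ready set before adding each handle.

#include <cstddef>
#include <cstdint>
#include <vector>

struct SiblingSketch {
  int64_t handle;
  bool is_unsynced;
};

// Visits |start|, start - 1, ..., 0 and stops at the first synced predecessor.
std::vector<int64_t> CollectUnsyncedRun(
    const std::vector<SiblingSketch>& siblings, size_t start) {
  std::vector<int64_t> run;
  if (start >= siblings.size())
    return run;
  for (size_t i = start + 1; i-- > 0; ) {
    if (!siblings[i].is_unsynced)
      break;  // a synced predecessor ends the run
    run.push_back(siblings[i].handle);
  }
  return run;
}
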
265 | 343 |
266 // Same as AddItemThenPredecessors, but the traversal order will be reversed. | 344 // Same as AddItemThenPredecessors, but the traversal order will be reversed. |
267 void GetCommitIdsCommand::AddPredecessorsThenItem( | 345 void Traversal::AddPredecessorsThenItem( |
268 syncable::BaseTransaction* trans, | |
269 const ModelSafeRoutingInfo& routes, | |
270 const std::set<int64>& ready_unsynced_set, | 346 const std::set<int64>& ready_unsynced_set, |
271 const syncable::Entry& item, | 347 const syncable::Entry& item, |
272 OrderedCommitSet* result) const { | 348 syncable::Directory::Metahandles* result) const { |
273 OrderedCommitSet item_dependencies(routes); | 349 syncable::Directory::Metahandles dependencies; |
274 AddItemThenPredecessors(trans, ready_unsynced_set, item, &item_dependencies); | 350 AddItemThenPredecessors(ready_unsynced_set, item, &dependencies); |
275 | 351 |
276 // Reverse what we added to get the correct order. | 352 // Reverse what we added to get the correct order. |
277 result->AppendReverse(item_dependencies); | 353 result->insert(result->end(), dependencies.rbegin(), dependencies.rend()); |
278 } | 354 } |
279 | 355 |
280 bool GetCommitIdsCommand::IsCommitBatchFull() const { | 356 bool Traversal::IsFull() const { |
281 return commit_set_->Size() >= requested_commit_batch_size_; | 357 return out_->size() >= max_entries_; |
282 } | 358 } |
283 | 359 |
284 void GetCommitIdsCommand::AddCreatesAndMoves( | 360 bool Traversal::HaveItem(int64 handle) const { |
285 syncable::BaseTransaction* trans, | 361 return added_handles_.find(handle) != added_handles_.end(); |
286 const ModelSafeRoutingInfo& routes, | 362 } |
| 363 |
| 364 void Traversal::AppendManyToTraversal( |
| 365 const syncable::Directory::Metahandles& handles) { |
| 366 out_->insert(out_->end(), handles.begin(), handles.end()); |
| 367 added_handles_.insert(handles.begin(), handles.end()); |
| 368 } |
| 369 |
| 370 void Traversal::AppendToTraversal(int64 metahandle) { |
| 371 out_->push_back(metahandle); |
| 372 added_handles_.insert(metahandle); |
| 373 } |
| 374 |
| 375 void Traversal::AddCreatesAndMoves( |
287 const std::set<int64>& ready_unsynced_set) { | 376 const std::set<int64>& ready_unsynced_set) { |
288 // Add moves and creates, and prepend their uncommitted parents. | 377 // Add moves and creates, and prepend their uncommitted parents. |
289 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); | 378 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); |
290 !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { | 379 !IsFull() && iter != ready_unsynced_set.end(); ++iter) { |
291 int64 metahandle = *iter; | 380 int64 metahandle = *iter; |
292 if (commit_set_->HaveCommitItem(metahandle)) | 381 if (HaveItem(metahandle)) |
293 continue; | 382 continue; |
294 | 383 |
295 syncable::Entry entry(trans, | 384 syncable::Entry entry(trans_, |
296 syncable::GET_BY_HANDLE, | 385 syncable::GET_BY_HANDLE, |
297 metahandle); | 386 metahandle); |
298 if (!entry.Get(syncable::IS_DEL)) { | 387 if (!entry.Get(syncable::IS_DEL)) { |
299 // We only commit an item + its dependencies if it and all its | 388 // We only commit an item + its dependencies if it and all its |
300 // dependencies are not in conflict. | 389 // dependencies are not in conflict. |
301 OrderedCommitSet item_dependencies(routes); | 390 syncable::Directory::Metahandles item_dependencies; |
302 if (AddUncommittedParentsAndTheirPredecessors( | 391 if (AddUncommittedParentsAndTheirPredecessors( |
303 trans, | |
304 routes, | |
305 ready_unsynced_set, | 392 ready_unsynced_set, |
306 entry, | 393 entry, |
307 &item_dependencies)) { | 394 &item_dependencies)) { |
308 AddPredecessorsThenItem(trans, | 395 AddPredecessorsThenItem(ready_unsynced_set, |
309 routes, | |
310 ready_unsynced_set, | |
311 entry, | 396 entry, |
312 &item_dependencies); | 397 &item_dependencies); |
313 commit_set_->Append(item_dependencies); | 398 AppendManyToTraversal(item_dependencies); |
314 } | 399 } |
315 } | 400 } |
316 } | 401 } |
317 | 402 |
318 // It's possible that we overcommitted while trying to expand dependent | 403 // It's possible that we overcommitted while trying to expand dependent |
319 // items. If so, truncate the set down to the allowed size. | 404 // items. If so, truncate the set down to the allowed size. |
320 commit_set_->Truncate(requested_commit_batch_size_); | 405 if (out_->size() > max_entries_) |
| 406 out_->resize(max_entries_); |
321 } | 407 } |
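
The append-then-truncate step above keeps a dependency group contiguous even if it briefly overshoots the batch limit. A minimal sketch of just that step; the names are illustrative.

#include <cstddef>
#include <cstdint>
#include <vector>

void AppendGroupThenTruncate(const std::vector<int64_t>& dependency_group,
                             size_t max_entries,
                             std::vector<int64_t>* out) {
  // Append the whole group so parents and predecessors stay adjacent...
  out->insert(out->end(), dependency_group.begin(), dependency_group.end());
  // ...then enforce the batch size, possibly dropping the group's tail.
  if (out->size() > max_entries)
    out->resize(max_entries);
}
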
322 | 408 |
323 void GetCommitIdsCommand::AddDeletes( | 409 void Traversal::AddDeletes( |
324 syncable::BaseTransaction* trans, | |
325 const std::set<int64>& ready_unsynced_set) { | 410 const std::set<int64>& ready_unsynced_set) { |
326 set<syncable::Id> legal_delete_parents; | 411 set<syncable::Id> legal_delete_parents; |
327 | 412 |
328 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); | 413 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); |
329 !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { | 414 !IsFull() && iter != ready_unsynced_set.end(); ++iter) { |
330 int64 metahandle = *iter; | 415 int64 metahandle = *iter; |
331 if (commit_set_->HaveCommitItem(metahandle)) | 416 if (HaveItem(metahandle)) |
332 continue; | 417 continue; |
333 | 418 |
334 syncable::Entry entry(trans, syncable::GET_BY_HANDLE, | 419 syncable::Entry entry(trans_, syncable::GET_BY_HANDLE, |
335 metahandle); | 420 metahandle); |
336 | 421 |
337 if (entry.Get(syncable::IS_DEL)) { | 422 if (entry.Get(syncable::IS_DEL)) { |
338 syncable::Entry parent(trans, syncable::GET_BY_ID, | 423 syncable::Entry parent(trans_, syncable::GET_BY_ID, |
339 entry.Get(syncable::PARENT_ID)); | 424 entry.Get(syncable::PARENT_ID)); |
340 // If the parent is deleted and unsynced, then any children of that | 425 // If the parent is deleted and unsynced, then any children of that |
341 // parent don't need to be added to the delete queue. | 426 // parent don't need to be added to the delete queue. |
342 // | 427 // |
343 // Note: the parent could be synced if there was an update deleting a | 428 // Note: the parent could be synced if there was an update deleting a |
344 // folder when we had deleted all items in it. | 429 // folder when we had deleted all items in it. |
345 // We may get more updates, or we may want to delete the entry. | 430 // We may get more updates, or we may want to delete the entry. |
346 if (parent.good() && | 431 if (parent.good() && |
347 parent.Get(syncable::IS_DEL) && | 432 parent.Get(syncable::IS_DEL) && |
348 parent.Get(syncable::IS_UNSYNCED)) { | 433 parent.Get(syncable::IS_UNSYNCED)) { |
349 // However, if an entry is moved, these rules can apply differently. | 434 // However, if an entry is moved, these rules can apply differently. |
350 // | 435 // |
351 // If the entry was moved and then the destination parent was deleted, | 436 // If the entry was moved and then the destination parent was deleted, |
352 // we'll miss it in the roll up. We have to add it in manually. | 437 // we'll miss it in the roll up. We have to add it in manually. |
353 // TODO(chron): Unit test for move / delete cases: | 438 // TODO(chron): Unit test for move / delete cases: |
354 // Case 1: Locally moved, then parent deleted | 439 // Case 1: Locally moved, then parent deleted |
355 // Case 2: Server moved, then locally issue recursive delete. | 440 // Case 2: Server moved, then locally issue recursive delete. |
356 if (entry.Get(syncable::ID).ServerKnows() && | 441 if (entry.Get(syncable::ID).ServerKnows() && |
357 entry.Get(syncable::PARENT_ID) != | 442 entry.Get(syncable::PARENT_ID) != |
358 entry.Get(syncable::SERVER_PARENT_ID)) { | 443 entry.Get(syncable::SERVER_PARENT_ID)) { |
359 DVLOG(1) << "Inserting moved and deleted entry, will be missed by " | 444 DVLOG(1) << "Inserting moved and deleted entry, will be missed by " |
360 << "delete roll." << entry.Get(syncable::ID); | 445 << "delete roll." << entry.Get(syncable::ID); |
361 | 446 |
362 commit_set_->AddCommitItem(metahandle, entry.GetModelType()); | 447 AppendToTraversal(metahandle); |
363 } | 448 } |
364 | 449 |
365 // Skip this entry since it's a child of a parent that will be | 450 // Skip this entry since it's a child of a parent that will be |
366 // deleted. The server will unroll the delete and delete the | 451 // deleted. The server will unroll the delete and delete the |
367 // child as well. | 452 // child as well. |
368 continue; | 453 continue; |
369 } | 454 } |
370 | 455 |
371 legal_delete_parents.insert(entry.Get(syncable::PARENT_ID)); | 456 legal_delete_parents.insert(entry.Get(syncable::PARENT_ID)); |
372 } | 457 } |
373 } | 458 } |
374 | 459 |
375 // We could store all the potential entries with a particular parent during | 460 // We could store all the potential entries with a particular parent during |
376 // the above scan, but instead we rescan here. This is less efficient, but | 461 // the above scan, but instead we rescan here. This is less efficient, but |
377 // we're dropping memory alloc/dealloc in favor of linear scans of recently | 462 // we're dropping memory alloc/dealloc in favor of linear scans of recently |
378 // examined entries. | 463 // examined entries. |
379 // | 464 // |
380 // Scan through the UnsyncedMetaHandles again. If we have a deleted | 465 // Scan through the UnsyncedMetaHandles again. If we have a deleted |
381 // entry, then check if the parent is in legal_delete_parents. | 466 // entry, then check if the parent is in legal_delete_parents. |
382 // | 467 // |
383 // Parent being in legal_delete_parents means for the child: | 468 // Parent being in legal_delete_parents means for the child: |
384 // a recursive delete is not currently happening (no recent deletes in same | 469 // a recursive delete is not currently happening (no recent deletes in same |
385 // folder) | 470 // folder) |
386 // parent did expect at least one old deleted child | 471 // parent did expect at least one old deleted child |
387 // parent was not deleted | 472 // parent was not deleted |
388 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); | 473 for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); |
389 !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { | 474 !IsFull() && iter != ready_unsynced_set.end(); ++iter) { |
390 int64 metahandle = *iter; | 475 int64 metahandle = *iter; |
391 if (commit_set_->HaveCommitItem(metahandle)) | 476 if (HaveItem(metahandle)) |
392 continue; | 477 continue; |
393 syncable::Entry entry(trans, syncable::GET_BY_HANDLE, | 478 syncable::Entry entry(trans_, syncable::GET_BY_HANDLE, |
394 metahandle); | 479 metahandle); |
395 if (entry.Get(syncable::IS_DEL)) { | 480 if (entry.Get(syncable::IS_DEL)) { |
396 syncable::Id parent_id = entry.Get(syncable::PARENT_ID); | 481 syncable::Id parent_id = entry.Get(syncable::PARENT_ID); |
397 if (legal_delete_parents.count(parent_id)) { | 482 if (legal_delete_parents.count(parent_id)) { |
398 commit_set_->AddCommitItem(metahandle, entry.GetModelType()); | 483 AppendToTraversal(metahandle); |
399 } | 484 } |
400 } | 485 } |
401 } | 486 } |
402 } | 487 } |
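
The delete handling amounts to two passes: first record which parents' deletions must be committed explicitly (legal_delete_parents), then commit only deleted children whose parent is in that set, leaving the rest for the server to collapse. A simplified sketch over flat records; DeleteRecordSketch and its fields approximate the entry lookups and omit the moved-entry special case.

#include <cstdint>
#include <set>
#include <vector>

struct DeleteRecordSketch {
  int64_t handle;
  int64_t parent_id;
  bool is_deleted;
  bool parent_deleted_and_unsynced;  // stands in for the parent lookup
};

std::vector<int64_t> CollapseDeletes(
    const std::vector<DeleteRecordSketch>& items) {
  // Pass 1: parents whose deleted children must be committed explicitly.
  std::set<int64_t> legal_delete_parents;
  for (const DeleteRecordSketch& item : items) {
    if (item.is_deleted && !item.parent_deleted_and_unsynced)
      legal_delete_parents.insert(item.parent_id);
  }

  // Pass 2: commit deletes only under parents recorded above; the server
  // collapses the rest of the deleted subtrees.
  std::vector<int64_t> to_commit;
  for (const DeleteRecordSketch& item : items) {
    if (item.is_deleted && legal_delete_parents.count(item.parent_id))
      to_commit.push_back(item.handle);
  }
  return to_commit;
}
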
403 | 488 |
404 void GetCommitIdsCommand::BuildCommitIds( | 489 void OrderCommitIds( |
405 syncable::BaseTransaction* trans, | 490 syncable::BaseTransaction* trans, |
406 const ModelSafeRoutingInfo& routes, | 491 size_t max_entries, |
407 const std::set<int64>& ready_unsynced_set) { | 492 const std::set<int64>& ready_unsynced_set, |
| 493 syncable::Directory::Metahandles* out) { |
408 // Commits follow these rules: | 494 // Commits follow these rules: |
409 // 1. Moves or creates are preceded by needed folder creates, from | 495 // 1. Moves or creates are preceded by needed folder creates, from |
410 // root to leaf. For folders whose contents are ordered, moves | 496 // root to leaf. For folders whose contents are ordered, moves |
411 // and creates appear in order. | 497 // and creates appear in order. |
412 // 2. Moves/Creates before deletes. | 498 // 2. Moves/Creates before deletes. |
413 // 3. Deletes, collapsed. | 499 // 3. Deletes, collapsed. |
414 // We commit deleted moves under deleted items as moves when collapsing | 500 // We commit deleted moves under deleted items as moves when collapsing |
415 // delete trees. | 501 // delete trees. |
416 | 502 |
| 503 Traversal traversal(trans, max_entries, out); |
| 504 |
417 // Add moves and creates, and prepend their uncommitted parents. | 505 // Add moves and creates, and prepend their uncommitted parents. |
418 AddCreatesAndMoves(trans, routes, ready_unsynced_set); | 506 traversal.AddCreatesAndMoves(ready_unsynced_set); |
419 | 507 |
420 // Add all deletes. | 508 // Add all deletes. |
421 AddDeletes(trans, ready_unsynced_set); | 509 traversal.AddDeletes(ready_unsynced_set); |
| 510 } |
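
As a concrete reading of the ordering rules above: if a locally created folder F contains a new item A and an item B moved into it, and some other item D was deleted locally, the resulting batch lists F first (the needed folder create, root to leaf), then A and B in positional order, and D only after all creates and moves; deleted children under a deleted, unsynced parent are omitted so the server can collapse that subtree.
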
| 511 |
| 512 } // namespace |
| 513 |
| 514 void GetCommitIds( |
| 515 syncable::BaseTransaction* trans, |
| 516 ModelTypeSet requested_types, |
| 517 size_t commit_batch_size, |
| 518 sessions::OrderedCommitSet* ordered_commit_set) { |
| 519 for (ModelTypeSet::Iterator it = requested_types.First(); |
| 520 it.Good(); it.Inc()) { |
| 521 DCHECK_LE(ordered_commit_set->Size(), commit_batch_size); |
| 522 if (ordered_commit_set->Size() >= commit_batch_size) |
| 523 break; |
| 524 size_t space_remaining = commit_batch_size - ordered_commit_set->Size(); |
| 525 syncable::Directory::Metahandles out; |
| 526 GetCommitIdsForType( |
| 527 trans, |
| 528 it.Get(), |
| 529 space_remaining, |
| 530 &out); |
| 531 ordered_commit_set->AddCommitItems(out, it.Get()); |
| 532 } |
422 } | 533 } |
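
GetCommitIds() spreads a single commit-batch budget across the requested types in iteration order, so earlier types can use up most of the space. A standalone sketch of that budget loop; BuildBatch and PerTypeFetcher are illustrative stand-ins for the ModelTypeSet iteration and GetCommitIdsForType().

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for the per-type fetch: returns up to |space_remaining| handles.
using PerTypeFetcher =
    std::vector<int64_t> (*)(int type, size_t space_remaining);

std::vector<int64_t> BuildBatch(const std::vector<int>& requested_types,
                                size_t commit_batch_size,
                                PerTypeFetcher fetch) {
  std::vector<int64_t> batch;
  for (int type : requested_types) {
    if (batch.size() >= commit_batch_size)
      break;  // budget exhausted by earlier types
    size_t space_remaining = commit_batch_size - batch.size();
    std::vector<int64_t> ids = fetch(type, space_remaining);
    batch.insert(batch.end(), ids.begin(), ids.end());
  }
  return batch;
}
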
423 | 534 |
424 } // namespace syncer | 535 } // namespace syncer |