OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <iterator> | 8 #include <iterator> |
9 | 9 |
10 #include "base/base64.h" | 10 #include "base/base64.h" |
(...skipping 144 matching lines...)
155 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 155 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { |
156 DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) == | 156 DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) == |
157 kernel_->client_tags_map.end()) | 157 kernel_->client_tags_map.end()) |
158 << "Unexpected duplicate use of client tag"; | 158 << "Unexpected duplicate use of client tag"; |
159 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry; | 159 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry; |
160 } | 160 } |
161 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) == | 161 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) == |
162 kernel_->ids_map.end()) << "Unexpected duplicate use of ID"; | 162 kernel_->ids_map.end()) << "Unexpected duplicate use of ID"; |
163 kernel_->ids_map[entry->ref(ID).value()] = entry; | 163 kernel_->ids_map[entry->ref(ID).value()] = entry; |
164 DCHECK(!entry->is_dirty()); | 164 DCHECK(!entry->is_dirty()); |
165 AddToAttachmentIndex(metahandle, entry->ref(ATTACHMENT_METADATA), lock); | 165 AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA)); |
166 } | 166 } |
167 } | 167 } |
168 | 168 |
169 DirOpenResult Directory::OpenImpl( | 169 DirOpenResult Directory::OpenImpl( |
170 const string& name, | 170 const string& name, |
171 DirectoryChangeDelegate* delegate, | 171 DirectoryChangeDelegate* delegate, |
172 const WeakHandle<TransactionObserver>& | 172 const WeakHandle<TransactionObserver>& |
173 transaction_observer) { | 173 transaction_observer) { |
174 KernelLoadInfo info; | 174 KernelLoadInfo info; |
175 // Temporary indices before kernel_ initialized in case Load fails. We O(1) | 175 // Temporary indices before kernel_ initialized in case Load fails. We O(1) |
(...skipping 42 matching lines...)
218 const tracked_objects::Location& location, | 218 const tracked_objects::Location& location, |
219 const std::string & message) { | 219 const std::string & message) { |
220 DCHECK(trans != NULL); | 220 DCHECK(trans != NULL); |
221 unrecoverable_error_set_ = true; | 221 unrecoverable_error_set_ = true; |
222 unrecoverable_error_handler_->OnUnrecoverableError(location, | 222 unrecoverable_error_handler_->OnUnrecoverableError(location, |
223 message); | 223 message); |
224 } | 224 } |
225 | 225 |
226 EntryKernel* Directory::GetEntryById(const Id& id) { | 226 EntryKernel* Directory::GetEntryById(const Id& id) { |
227 ScopedKernelLock lock(this); | 227 ScopedKernelLock lock(this); |
228 return GetEntryById(id, &lock); | 228 return GetEntryById(lock, id); |
229 } | 229 } |
230 | 230 |
231 EntryKernel* Directory::GetEntryById(const Id& id, | 231 EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock, |
232 ScopedKernelLock* const lock) { | 232 const Id& id) { |
233 DCHECK(kernel_); | 233 DCHECK(kernel_); |
234 // Find it in the in-memory ID index. | 234 // Find it in the in-memory ID index. |
235 IdsMap::iterator id_found = kernel_->ids_map.find(id.value()); | 235 IdsMap::iterator id_found = kernel_->ids_map.find(id.value()); |
236 if (id_found != kernel_->ids_map.end()) { | 236 if (id_found != kernel_->ids_map.end()) { |
237 return id_found->second; | 237 return id_found->second; |
238 } | 238 } |
239 return NULL; | 239 return NULL; |
240 } | 240 } |
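A note on the signature change above: this CL moves the ScopedKernelLock argument of the private overloads to the front and passes it by const reference instead of by pointer, so the overload can only be reached by a caller that actually holds the lock, and the old DCHECK(NULL != lock) style checks become unnecessary. A minimal sketch of the same idea outside the sync codebase, using a hypothetical Store class and std::mutex in place of Directory and ScopedKernelLock:

#include <map>
#include <mutex>
#include <string>

class Store {
 public:
  // Public entry point: takes the lock, then delegates to the locked
  // overload, mirroring Directory::GetEntryById(const Id&).
  const std::string* Get(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    return Get(lock, key);
  }

 private:
  // The guard is the first parameter and is taken by const reference:
  // there is no nullable lock pointer to validate, and this overload
  // cannot be called without a live lock in scope.
  const std::string* Get(const std::lock_guard<std::mutex>& lock,
                         const std::string& key) {
    std::map<std::string, std::string>::const_iterator it = values_.find(key);
    return it != values_.end() ? &it->second : nullptr;
  }

  std::mutex mutex_;
  std::map<std::string, std::string> values_;
};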
241 | 241 |
242 EntryKernel* Directory::GetEntryByClientTag(const string& tag) { | 242 EntryKernel* Directory::GetEntryByClientTag(const string& tag) { |
(...skipping 12 matching lines...)
255 DCHECK(kernel_); | 255 DCHECK(kernel_); |
256 TagsMap::iterator it = kernel_->server_tags_map.find(tag); | 256 TagsMap::iterator it = kernel_->server_tags_map.find(tag); |
257 if (it != kernel_->server_tags_map.end()) { | 257 if (it != kernel_->server_tags_map.end()) { |
258 return it->second; | 258 return it->second; |
259 } | 259 } |
260 return NULL; | 260 return NULL; |
261 } | 261 } |
262 | 262 |
263 EntryKernel* Directory::GetEntryByHandle(int64 metahandle) { | 263 EntryKernel* Directory::GetEntryByHandle(int64 metahandle) { |
264 ScopedKernelLock lock(this); | 264 ScopedKernelLock lock(this); |
265 return GetEntryByHandle(metahandle, &lock); | 265 return GetEntryByHandle(lock, metahandle); |
266 } | 266 } |
267 | 267 |
268 EntryKernel* Directory::GetEntryByHandle(int64 metahandle, | 268 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock, |
269 ScopedKernelLock* lock) { | 269 int64 metahandle) { |
270 // Look up in memory | 270 // Look up in memory |
271 MetahandlesMap::iterator found = | 271 MetahandlesMap::iterator found = |
272 kernel_->metahandles_map.find(metahandle); | 272 kernel_->metahandles_map.find(metahandle); |
273 if (found != kernel_->metahandles_map.end()) { | 273 if (found != kernel_->metahandles_map.end()) { |
274 // Found it in memory. Easy. | 274 // Found it in memory. Easy. |
275 return found->second; | 275 return found->second; |
276 } | 276 } |
277 return NULL; | 277 return NULL; |
278 } | 278 } |
279 | 279 |
(...skipping 55 matching lines...)
335 EntryKernel* kernel) const { | 335 EntryKernel* kernel) const { |
336 const OrderedChildSet* siblings = | 336 const OrderedChildSet* siblings = |
337 kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID)); | 337 kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID)); |
338 | 338 |
339 OrderedChildSet::const_iterator it = siblings->find(kernel); | 339 OrderedChildSet::const_iterator it = siblings->find(kernel); |
340 return std::distance(siblings->begin(), it); | 340 return std::distance(siblings->begin(), it); |
341 } | 341 } |
342 | 342 |
343 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) { | 343 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) { |
344 ScopedKernelLock lock(this); | 344 ScopedKernelLock lock(this); |
345 return InsertEntry(trans, entry, &lock); | 345 return InsertEntry(lock, trans, entry); |
346 } | 346 } |
347 | 347 |
348 bool Directory::InsertEntry(BaseWriteTransaction* trans, | 348 bool Directory::InsertEntry(const ScopedKernelLock& lock, |
349 EntryKernel* entry, | 349 BaseWriteTransaction* trans, |
350 ScopedKernelLock* lock) { | 350 EntryKernel* entry) { |
351 DCHECK(NULL != lock); | |
352 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) | 351 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) |
353 return false; | 352 return false; |
354 | 353 |
355 static const char error[] = "Entry already in memory index."; | 354 static const char error[] = "Entry already in memory index."; |
356 | 355 |
357 if (!SyncAssert( | 356 if (!SyncAssert( |
358 kernel_->metahandles_map.insert( | 357 kernel_->metahandles_map.insert( |
359 std::make_pair(entry->ref(META_HANDLE), entry)).second, | 358 std::make_pair(entry->ref(META_HANDLE), entry)).second, |
360 FROM_HERE, | 359 FROM_HERE, |
361 error, | 360 error, |
(...skipping 10 matching lines...)
372 } | 371 } |
373 if (ParentChildIndex::ShouldInclude(entry)) { | 372 if (ParentChildIndex::ShouldInclude(entry)) { |
374 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), | 373 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), |
375 FROM_HERE, | 374 FROM_HERE, |
376 error, | 375 error, |
377 trans)) { | 376 trans)) { |
378 return false; | 377 return false; |
379 } | 378 } |
380 } | 379 } |
381 AddToAttachmentIndex( | 380 AddToAttachmentIndex( |
382 entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), *lock); | 381 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA)); |
383 | 382 |
384 // Should NEVER be created with a client tag or server tag. | 383 // Should NEVER be created with a client tag or server tag. |
385 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE, | 384 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE, |
386 "Server tag should be empty", trans)) { | 385 "Server tag should be empty", trans)) { |
387 return false; | 386 return false; |
388 } | 387 } |
389 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE, | 388 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE, |
390 "Client tag should be empty", trans)) | 389 "Client tag should be empty", trans)) |
391 return false; | 390 return false; |
392 | 391 |
393 return true; | 392 return true; |
394 } | 393 } |
395 | 394 |
396 bool Directory::ReindexId(BaseWriteTransaction* trans, | 395 bool Directory::ReindexId(BaseWriteTransaction* trans, |
397 EntryKernel* const entry, | 396 EntryKernel* const entry, |
398 const Id& new_id) { | 397 const Id& new_id) { |
399 ScopedKernelLock lock(this); | 398 ScopedKernelLock lock(this); |
400 if (NULL != GetEntryById(new_id, &lock)) | 399 if (NULL != GetEntryById(lock, new_id)) |
401 return false; | 400 return false; |
402 | 401 |
403 { | 402 { |
404 // Update the indices that depend on the ID field. | 403 // Update the indices that depend on the ID field. |
405 ScopedParentChildIndexUpdater updater_b(lock, entry, | 404 ScopedParentChildIndexUpdater updater_b(lock, entry, |
406 &kernel_->parent_child_index); | 405 &kernel_->parent_child_index); |
407 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); | 406 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); |
408 DCHECK_EQ(1U, num_erased); | 407 DCHECK_EQ(1U, num_erased); |
409 entry->put(ID, new_id); | 408 entry->put(ID, new_id); |
410 kernel_->ids_map[entry->ref(ID).value()] = entry; | 409 kernel_->ids_map[entry->ref(ID).value()] = entry; |
411 } | 410 } |
412 return true; | 411 return true; |
413 } | 412 } |
414 | 413 |
415 bool Directory::ReindexParentId(BaseWriteTransaction* trans, | 414 bool Directory::ReindexParentId(BaseWriteTransaction* trans, |
416 EntryKernel* const entry, | 415 EntryKernel* const entry, |
417 const Id& new_parent_id) { | 416 const Id& new_parent_id) { |
418 ScopedKernelLock lock(this); | 417 ScopedKernelLock lock(this); |
419 | 418 |
420 { | 419 { |
421 // Update the indices that depend on the PARENT_ID field. | 420 // Update the indices that depend on the PARENT_ID field. |
422 ScopedParentChildIndexUpdater index_updater(lock, entry, | 421 ScopedParentChildIndexUpdater index_updater(lock, entry, |
423 &kernel_->parent_child_index); | 422 &kernel_->parent_child_index); |
424 entry->put(PARENT_ID, new_parent_id); | 423 entry->put(PARENT_ID, new_parent_id); |
425 } | 424 } |
426 return true; | 425 return true; |
427 } | 426 } |
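ReindexId and ReindexParentId above both rely on ScopedParentChildIndexUpdater: the entry is pulled out of the parent/child index when the updater is constructed and re-inserted, under its new key, when the updater goes out of scope, so the ID/PARENT_ID mutation in between can never leave the index keyed on a stale value. A simplified sketch of that RAII shape with invented types (the real index is ordered by position as well as parent):

#include <map>
#include <string>
#include <utility>

struct Node {
  std::string parent_id;
};

// Stand-in for the parent/child index: children keyed by parent id.
typedef std::multimap<std::string, Node*> ChildIndex;

class ScopedChildIndexUpdater {
 public:
  ScopedChildIndexUpdater(ChildIndex* index, Node* node)
      : index_(index), node_(node) {
    // Remove the (old key -> node) entry before the key field changes.
    std::pair<ChildIndex::iterator, ChildIndex::iterator> range =
        index_->equal_range(node_->parent_id);
    for (ChildIndex::iterator it = range.first; it != range.second; ++it) {
      if (it->second == node_) {
        index_->erase(it);
        break;
      }
    }
  }
  ~ScopedChildIndexUpdater() {
    // Re-insert under whatever key the node has now.
    index_->insert(std::make_pair(node_->parent_id, node_));
  }

 private:
  ChildIndex* index_;
  Node* node_;
};

void Reparent(ChildIndex* index, Node* node, const std::string& new_parent) {
  ScopedChildIndexUpdater updater(index, node);
  node->parent_id = new_parent;  // mutate while the node is out of the index
}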
428 | 427 |
429 void Directory::RemoveFromAttachmentIndex( | 428 void Directory::RemoveFromAttachmentIndex( |
| 429 const ScopedKernelLock& lock, |
430 const int64 metahandle, | 430 const int64 metahandle, |
431 const sync_pb::AttachmentMetadata& attachment_metadata, | 431 const sync_pb::AttachmentMetadata& attachment_metadata) { |
432 const ScopedKernelLock& lock) { | |
433 for (int i = 0; i < attachment_metadata.record_size(); ++i) { | 432 for (int i = 0; i < attachment_metadata.record_size(); ++i) { |
434 AttachmentIdUniqueId unique_id = | 433 AttachmentIdUniqueId unique_id = |
435 attachment_metadata.record(i).id().unique_id(); | 434 attachment_metadata.record(i).id().unique_id(); |
436 IndexByAttachmentId::iterator iter = | 435 IndexByAttachmentId::iterator iter = |
437 kernel_->index_by_attachment_id.find(unique_id); | 436 kernel_->index_by_attachment_id.find(unique_id); |
438 if (iter != kernel_->index_by_attachment_id.end()) { | 437 if (iter != kernel_->index_by_attachment_id.end()) { |
439 iter->second.erase(metahandle); | 438 iter->second.erase(metahandle); |
440 if (iter->second.empty()) { | 439 if (iter->second.empty()) { |
441 kernel_->index_by_attachment_id.erase(iter); | 440 kernel_->index_by_attachment_id.erase(iter); |
442 } | 441 } |
443 } | 442 } |
444 } | 443 } |
445 } | 444 } |
446 | 445 |
447 void Directory::AddToAttachmentIndex( | 446 void Directory::AddToAttachmentIndex( |
| 447 const ScopedKernelLock& lock, |
448 const int64 metahandle, | 448 const int64 metahandle, |
449 const sync_pb::AttachmentMetadata& attachment_metadata, | 449 const sync_pb::AttachmentMetadata& attachment_metadata) { |
450 const ScopedKernelLock& lock) { | |
451 for (int i = 0; i < attachment_metadata.record_size(); ++i) { | 450 for (int i = 0; i < attachment_metadata.record_size(); ++i) { |
452 AttachmentIdUniqueId unique_id = | 451 AttachmentIdUniqueId unique_id = |
453 attachment_metadata.record(i).id().unique_id(); | 452 attachment_metadata.record(i).id().unique_id(); |
454 IndexByAttachmentId::iterator iter = | 453 IndexByAttachmentId::iterator iter = |
455 kernel_->index_by_attachment_id.find(unique_id); | 454 kernel_->index_by_attachment_id.find(unique_id); |
456 if (iter == kernel_->index_by_attachment_id.end()) { | 455 if (iter == kernel_->index_by_attachment_id.end()) { |
457 iter = kernel_->index_by_attachment_id.insert(std::make_pair( | 456 iter = kernel_->index_by_attachment_id.insert(std::make_pair( |
458 unique_id, | 457 unique_id, |
459 MetahandleSet())).first; | 458 MetahandleSet())).first; |
460 } | 459 } |
461 iter->second.insert(metahandle); | 460 iter->second.insert(metahandle); |
462 } | 461 } |
463 } | 462 } |
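For context, kernel_->index_by_attachment_id is a reverse index from an attachment's unique id to the set of metahandles whose entries reference it; AddToAttachmentIndex and RemoveFromAttachmentIndex keep it in sync with each entry's ATTACHMENT_METADATA, dropping an id outright once no entry references it. A standalone sketch of that bookkeeping with simplified types:

#include <cstdint>
#include <map>
#include <set>
#include <string>

typedef std::map<std::string, std::set<int64_t> > IndexByAttachmentId;

void AddReference(IndexByAttachmentId* index,
                  const std::string& unique_id,
                  int64_t metahandle) {
  // Creates the per-attachment set on first use, then records the entry.
  (*index)[unique_id].insert(metahandle);
}

void RemoveReference(IndexByAttachmentId* index,
                     const std::string& unique_id,
                     int64_t metahandle) {
  IndexByAttachmentId::iterator it = index->find(unique_id);
  if (it == index->end())
    return;
  it->second.erase(metahandle);
  if (it->second.empty())
    index->erase(it);  // No entry references this attachment any more.
}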
464 | 463 |
465 void Directory::UpdateAttachmentIndex( | 464 void Directory::UpdateAttachmentIndex( |
466 const int64 metahandle, | 465 const int64 metahandle, |
467 const sync_pb::AttachmentMetadata& old_metadata, | 466 const sync_pb::AttachmentMetadata& old_metadata, |
468 const sync_pb::AttachmentMetadata& new_metadata) { | 467 const sync_pb::AttachmentMetadata& new_metadata) { |
469 ScopedKernelLock lock(this); | 468 ScopedKernelLock lock(this); |
470 RemoveFromAttachmentIndex(metahandle, old_metadata, lock); | 469 RemoveFromAttachmentIndex(lock, metahandle, old_metadata); |
471 AddToAttachmentIndex(metahandle, new_metadata, lock); | 470 AddToAttachmentIndex(lock, metahandle, new_metadata); |
472 } | 471 } |
473 | 472 |
474 void Directory::GetMetahandlesByAttachmentId( | 473 void Directory::GetMetahandlesByAttachmentId( |
475 BaseTransaction* trans, | 474 BaseTransaction* trans, |
476 const sync_pb::AttachmentIdProto& attachment_id_proto, | 475 const sync_pb::AttachmentIdProto& attachment_id_proto, |
477 Metahandles* result) { | 476 Metahandles* result) { |
478 DCHECK(result); | 477 DCHECK(result); |
479 result->clear(); | 478 result->clear(); |
480 ScopedKernelLock lock(this); | 479 ScopedKernelLock lock(this); |
481 IndexByAttachmentId::const_iterator index_iter = | 480 IndexByAttachmentId::const_iterator index_iter = |
482 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id()); | 481 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id()); |
483 if (index_iter == kernel_->index_by_attachment_id.end()) | 482 if (index_iter == kernel_->index_by_attachment_id.end()) |
484 return; | 483 return; |
485 const MetahandleSet& metahandle_set = index_iter->second; | 484 const MetahandleSet& metahandle_set = index_iter->second; |
486 std::copy( | 485 std::copy( |
487 metahandle_set.begin(), metahandle_set.end(), back_inserter(*result)); | 486 metahandle_set.begin(), metahandle_set.end(), back_inserter(*result)); |
488 } | 487 } |
489 | 488 |
490 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { | 489 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { |
491 DCHECK(trans != NULL); | 490 DCHECK(trans != NULL); |
492 return unrecoverable_error_set_; | 491 return unrecoverable_error_set_; |
493 } | 492 } |
494 | 493 |
495 void Directory::ClearDirtyMetahandles() { | 494 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) { |
496 kernel_->transaction_mutex.AssertAcquired(); | 495 kernel_->transaction_mutex.AssertAcquired(); |
497 kernel_->dirty_metahandles.clear(); | 496 kernel_->dirty_metahandles.clear(); |
498 } | 497 } |
499 | 498 |
500 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans, | 499 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans, |
501 const EntryKernel* const entry) const { | 500 const EntryKernel* const entry) const { |
502 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() && | 501 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() && |
503 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) && | 502 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) && |
504 !entry->ref(IS_UNSYNCED); | 503 !entry->ref(IS_UNSYNCED); |
505 | 504 |
(...skipping 25 matching lines...)
531 ScopedKernelLock lock(this); | 530 ScopedKernelLock lock(this); |
532 | 531 |
533 // If there is an unrecoverable error then just bail out. | 532 // If there is an unrecoverable error then just bail out. |
534 if (unrecoverable_error_set(&trans)) | 533 if (unrecoverable_error_set(&trans)) |
535 return; | 534 return; |
536 | 535 |
537 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and | 536 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and |
538 // clear dirty flags. | 537 // clear dirty flags. |
539 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin(); | 538 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin(); |
540 i != kernel_->dirty_metahandles.end(); ++i) { | 539 i != kernel_->dirty_metahandles.end(); ++i) { |
541 EntryKernel* entry = GetEntryByHandle(*i, &lock); | 540 EntryKernel* entry = GetEntryByHandle(lock, *i); |
542 if (!entry) | 541 if (!entry) |
543 continue; | 542 continue; |
544 // Skip over false positives; it happens relatively infrequently. | 543 // Skip over false positives; it happens relatively infrequently. |
545 if (!entry->is_dirty()) | 544 if (!entry->is_dirty()) |
546 continue; | 545 continue; |
547 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), | 546 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), |
548 new EntryKernel(*entry)); | 547 new EntryKernel(*entry)); |
549 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i)); | 548 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i)); |
550 // We don't bother removing from the index here as we blow the entire thing | 549 // We don't bother removing from the index here as we blow the entire thing |
551 // in a moment, and it unnecessarily complicates iteration. | 550 // in a moment, and it unnecessarily complicates iteration. |
552 entry->clear_dirty(NULL); | 551 entry->clear_dirty(NULL); |
553 } | 552 } |
554 ClearDirtyMetahandles(); | 553 ClearDirtyMetahandles(lock); |
555 | 554 |
556 // Set purged handles. | 555 // Set purged handles. |
557 DCHECK(snapshot->metahandles_to_purge.empty()); | 556 DCHECK(snapshot->metahandles_to_purge.empty()); |
558 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge); | 557 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge); |
559 | 558 |
560 // Fill kernel_info_status and kernel_info. | 559 // Fill kernel_info_status and kernel_info. |
561 snapshot->kernel_info = kernel_->persisted_info; | 560 snapshot->kernel_info = kernel_->persisted_info; |
562 // To avoid duplicates when the process crashes, we record the next_id to be | 561 // To avoid duplicates when the process crashes, we record the next_id to be |
563 // greater magnitude than could possibly be reached before the next save | 562 // greater magnitude than could possibly be reached before the next save |
564 // changes. In other words, it's effectively impossible for the user to | 563 // changes. In other words, it's effectively impossible for the user to |
(...skipping 56 matching lines...)
621 num_erased = | 620 num_erased = |
622 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); | 621 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); |
623 DCHECK_EQ(1u, num_erased); | 622 DCHECK_EQ(1u, num_erased); |
624 } | 623 } |
625 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), | 624 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), |
626 FROM_HERE, | 625 FROM_HERE, |
627 "Deleted entry still present", | 626 "Deleted entry still present", |
628 (&trans))) | 627 (&trans))) |
629 return false; | 628 return false; |
630 RemoveFromAttachmentIndex( | 629 RemoveFromAttachmentIndex( |
631 entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), lock); | 630 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA)); |
632 | 631 |
633 delete entry; | 632 delete entry; |
634 } | 633 } |
635 if (trans.unrecoverable_error_set()) | 634 if (trans.unrecoverable_error_set()) |
636 return false; | 635 return false; |
637 } | 636 } |
638 return true; | 637 return true; |
639 } | 638 } |
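The save-changes path visible above follows a copy-then-clear scheme: while the kernel lock is held, every dirty entry is deep-copied into the snapshot and its dirty bit cleared (with ClearDirtyMetahandles wiping the whole set afterwards), so the snapshot can be written out later without the lock. A condensed sketch of that scheme, with invented Entry/Snapshot types:

#include <cstdint>
#include <map>
#include <set>
#include <vector>

struct Entry {
  int64_t handle;
  bool dirty;
  // ... payload fields elided ...
};

struct Snapshot {
  std::vector<Entry> dirty_entries;  // deep copies, safe to use unlocked
};

// Called with the kernel lock held: copy every dirty entry into the
// snapshot, clear its dirty bit, then drop the whole dirty-handle set.
void TakeSnapshotLocked(const std::map<int64_t, Entry*>& entries,
                        std::set<int64_t>* dirty_handles,
                        Snapshot* snapshot) {
  for (std::set<int64_t>::const_iterator i = dirty_handles->begin();
       i != dirty_handles->end(); ++i) {
    std::map<int64_t, Entry*>::const_iterator found = entries.find(*i);
    if (found == entries.end() || !found->second->dirty)
      continue;  // Stale handle or false positive; skip it.
    snapshot->dirty_entries.push_back(*found->second);  // deep copy
    found->second->dirty = false;
  }
  dirty_handles->clear();
}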
640 | 639 |
641 void Directory::UnapplyEntry(EntryKernel* entry) { | 640 void Directory::UnapplyEntry(EntryKernel* entry) { |
(...skipping 38 matching lines...)
680 entry->put(BASE_VERSION, CHANGES_VERSION); | 679 entry->put(BASE_VERSION, CHANGES_VERSION); |
681 entry->mark_dirty(&kernel_->dirty_metahandles); | 680 entry->mark_dirty(&kernel_->dirty_metahandles); |
682 } | 681 } |
683 | 682 |
684 // At this point locally created items that aren't synced will become locally | 683 // At this point locally created items that aren't synced will become locally |
685 // deleted items, and purged on the next snapshot. All other items will match | 684 // deleted items, and purged on the next snapshot. All other items will match |
686 // the state they would have had if they were just created via a server | 685 // the state they would have had if they were just created via a server |
687 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..). | 686 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..). |
688 } | 687 } |
689 | 688 |
690 void Directory::DeleteEntry(bool save_to_journal, | 689 void Directory::DeleteEntry(const ScopedKernelLock& lock, |
| 690 bool save_to_journal, |
691 EntryKernel* entry, | 691 EntryKernel* entry, |
692 EntryKernelSet* entries_to_journal, | 692 EntryKernelSet* entries_to_journal) { |
693 const ScopedKernelLock& lock) { | |
694 int64 handle = entry->ref(META_HANDLE); | 693 int64 handle = entry->ref(META_HANDLE); |
695 ModelType server_type = GetModelTypeFromSpecifics( | 694 ModelType server_type = GetModelTypeFromSpecifics( |
696 entry->ref(SERVER_SPECIFICS)); | 695 entry->ref(SERVER_SPECIFICS)); |
697 | 696 |
698 kernel_->metahandles_to_purge.insert(handle); | 697 kernel_->metahandles_to_purge.insert(handle); |
699 | 698 |
700 size_t num_erased = 0; | 699 size_t num_erased = 0; |
701 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); | 700 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); |
702 DCHECK_EQ(1u, num_erased); | 701 DCHECK_EQ(1u, num_erased); |
703 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); | 702 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); |
704 DCHECK_EQ(1u, num_erased); | 703 DCHECK_EQ(1u, num_erased); |
705 num_erased = kernel_->unsynced_metahandles.erase(handle); | 704 num_erased = kernel_->unsynced_metahandles.erase(handle); |
706 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); | 705 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); |
707 num_erased = | 706 num_erased = |
708 kernel_->unapplied_update_metahandles[server_type].erase(handle); | 707 kernel_->unapplied_update_metahandles[server_type].erase(handle); |
709 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); | 708 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); |
710 if (kernel_->parent_child_index.Contains(entry)) | 709 if (kernel_->parent_child_index.Contains(entry)) |
711 kernel_->parent_child_index.Remove(entry); | 710 kernel_->parent_child_index.Remove(entry); |
712 | 711 |
713 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 712 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { |
714 num_erased = | 713 num_erased = |
715 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); | 714 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); |
716 DCHECK_EQ(1u, num_erased); | 715 DCHECK_EQ(1u, num_erased); |
717 } | 716 } |
718 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { | 717 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { |
719 num_erased = | 718 num_erased = |
720 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG)); | 719 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG)); |
721 DCHECK_EQ(1u, num_erased); | 720 DCHECK_EQ(1u, num_erased); |
722 } | 721 } |
723 RemoveFromAttachmentIndex(handle, entry->ref(ATTACHMENT_METADATA), lock); | 722 RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA)); |
724 | 723 |
725 if (save_to_journal) { | 724 if (save_to_journal) { |
726 entries_to_journal->insert(entry); | 725 entries_to_journal->insert(entry); |
727 } else { | 726 } else { |
728 delete entry; | 727 delete entry; |
729 } | 728 } |
730 } | 729 } |
731 | 730 |
732 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types, | 731 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types, |
733 ModelTypeSet types_to_journal, | 732 ModelTypeSet types_to_journal, |
(...skipping 59 matching lines...)
793 | 792 |
794 if (types_to_unapply.Has(local_type) || | 793 if (types_to_unapply.Has(local_type) || |
795 types_to_unapply.Has(server_type)) { | 794 types_to_unapply.Has(server_type)) { |
796 UnapplyEntry(entry); | 795 UnapplyEntry(entry); |
797 } else { | 796 } else { |
798 bool save_to_journal = | 797 bool save_to_journal = |
799 (types_to_journal.Has(local_type) || | 798 (types_to_journal.Has(local_type) || |
800 types_to_journal.Has(server_type)) && | 799 types_to_journal.Has(server_type)) && |
801 (delete_journal_->IsDeleteJournalEnabled(local_type) || | 800 (delete_journal_->IsDeleteJournalEnabled(local_type) || |
802 delete_journal_->IsDeleteJournalEnabled(server_type)); | 801 delete_journal_->IsDeleteJournalEnabled(server_type)); |
803 DeleteEntry(save_to_journal, entry, &entries_to_journal, lock); | 802 DeleteEntry(lock, save_to_journal, entry, &entries_to_journal); |
804 } | 803 } |
805 } | 804 } |
806 | 805 |
807 delete_journal_->AddJournalBatch(&trans, entries_to_journal); | 806 delete_journal_->AddJournalBatch(&trans, entries_to_journal); |
808 | 807 |
809 // Ensure meta tracking for these data types reflects the purged state. | 808 // Ensure meta tracking for these data types reflects the purged state. |
810 for (ModelTypeSet::Iterator it = disabled_types.First(); | 809 for (ModelTypeSet::Iterator it = disabled_types.First(); |
811 it.Good(); it.Inc()) { | 810 it.Good(); it.Inc()) { |
812 kernel_->persisted_info.transaction_version[it.Get()] = 0; | 811 kernel_->persisted_info.transaction_version[it.Get()] = 0; |
813 | 812 |
(...skipping 20 matching lines...) Expand all Loading... |
834 if (!type_root) | 833 if (!type_root) |
835 return false; | 834 return false; |
836 | 835 |
837 ScopedKernelLock lock(this); | 836 ScopedKernelLock lock(this); |
838 const Id& type_root_id = type_root->ref(ID); | 837 const Id& type_root_id = type_root->ref(ID); |
839 Directory::Metahandles children; | 838 Directory::Metahandles children; |
840 AppendChildHandles(lock, type_root_id, &children); | 839 AppendChildHandles(lock, type_root_id, &children); |
841 | 840 |
842 for (Metahandles::iterator it = children.begin(); it != children.end(); | 841 for (Metahandles::iterator it = children.begin(); it != children.end(); |
843 ++it) { | 842 ++it) { |
844 EntryKernel* entry = GetEntryByHandle(*it, &lock); | 843 EntryKernel* entry = GetEntryByHandle(lock, *it); |
845 if (!entry) | 844 if (!entry) |
846 continue; | 845 continue; |
847 if (entry->ref(BASE_VERSION) > 1) | 846 if (entry->ref(BASE_VERSION) > 1) |
848 entry->put(BASE_VERSION, 1); | 847 entry->put(BASE_VERSION, 1); |
849 if (entry->ref(SERVER_VERSION) > 1) | 848 if (entry->ref(SERVER_VERSION) > 1) |
850 entry->put(SERVER_VERSION, 1); | 849 entry->put(SERVER_VERSION, 1); |
851 | 850 |
852 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order | 851 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order |
853 // to ensure no in-transit data is lost. | 852 // to ensure no in-transit data is lost. |
854 | 853 |
(...skipping 635 matching lines...)
1490 AttachmentIdSet on_server_id_set; | 1489 AttachmentIdSet on_server_id_set; |
1491 AttachmentIdSet not_on_server_id_set; | 1490 AttachmentIdSet not_on_server_id_set; |
1492 std::vector<int64> metahandles; | 1491 std::vector<int64> metahandles; |
1493 { | 1492 { |
1494 ScopedKernelLock lock(this); | 1493 ScopedKernelLock lock(this); |
1495 GetMetaHandlesOfType(lock, trans, type, &metahandles); | 1494 GetMetaHandlesOfType(lock, trans, type, &metahandles); |
1496 std::vector<int64>::const_iterator iter = metahandles.begin(); | 1495 std::vector<int64>::const_iterator iter = metahandles.begin(); |
1497 const std::vector<int64>::const_iterator end = metahandles.end(); | 1496 const std::vector<int64>::const_iterator end = metahandles.end(); |
1498 // For all of this type's entries... | 1497 // For all of this type's entries... |
1499 for (; iter != end; ++iter) { | 1498 for (; iter != end; ++iter) { |
1500 EntryKernel* entry = GetEntryByHandle(*iter, &lock); | 1499 EntryKernel* entry = GetEntryByHandle(lock, *iter); |
1501 DCHECK(entry); | 1500 DCHECK(entry); |
1502 const sync_pb::AttachmentMetadata metadata = | 1501 const sync_pb::AttachmentMetadata metadata = |
1503 entry->ref(ATTACHMENT_METADATA); | 1502 entry->ref(ATTACHMENT_METADATA); |
1504 // for each of this entry's attachments... | 1503 // for each of this entry's attachments... |
1505 for (int i = 0; i < metadata.record_size(); ++i) { | 1504 for (int i = 0; i < metadata.record_size(); ++i) { |
1506 AttachmentId id = | 1505 AttachmentId id = |
1507 AttachmentId::CreateFromProto(metadata.record(i).id()); | 1506 AttachmentId::CreateFromProto(metadata.record(i).id()); |
1508 // if this attachment is known to be on the server, remember it for | 1507 // if this attachment is known to be on the server, remember it for |
1509 // later, | 1508 // later, |
1510 if (metadata.record(i).is_on_server()) { | 1509 if (metadata.record(i).is_on_server()) { |
(...skipping 16 matching lines...)
1527 // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203). | 1526 // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203). |
1528 std::set_difference(not_on_server_id_set.begin(), | 1527 std::set_difference(not_on_server_id_set.begin(), |
1529 not_on_server_id_set.end(), | 1528 not_on_server_id_set.end(), |
1530 on_server_id_set.begin(), | 1529 on_server_id_set.begin(), |
1531 on_server_id_set.end(), | 1530 on_server_id_set.end(), |
1532 std::inserter(*id_set, id_set->end())); | 1531 std::inserter(*id_set, id_set->end())); |
1533 } | 1532 } |
1534 | 1533 |
1535 } // namespace syncable | 1534 } // namespace syncable |
1536 } // namespace syncer | 1535 } // namespace syncer |
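A closing note on the attachment-id set algebra above: the loop collects two sets per type, ids seen with is_on_server() set and ids seen without it, and std::set_difference keeps only the ids that never appeared as on-server, i.e. the attachments that still need to be uploaded. A tiny self-contained example of the same computation:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>

int main() {
  // Ids referenced by some entry but not flagged as on the server.
  std::set<std::string> not_on_server = {"a1", "a2", "a3"};
  // Ids flagged as on the server by at least one entry.
  std::set<std::string> on_server = {"a2"};

  // not_on_server \ on_server == {"a1", "a3"}: "a2" is dropped because
  // some entry already records it as uploaded.
  std::set<std::string> needs_upload;
  std::set_difference(not_on_server.begin(), not_on_server.end(),
                      on_server.begin(), on_server.end(),
                      std::inserter(needs_upload, needs_upload.end()));

  for (const std::string& id : needs_upload)
    std::cout << id << "\n";
  return 0;
}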