Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
| 6 | 6 |
| 7 #include <iterator> | 7 #include <iterator> |
| 8 | 8 |
| 9 #include "base/base64.h" | 9 #include "base/base64.h" |
| 10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
| 11 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 12 #include "base/strings/string_number_conversions.h" | 12 #include "base/strings/string_number_conversions.h" |
| 13 #include "sync/api/attachments/attachment_id.h" | |
| 13 #include "sync/internal_api/public/base/unique_position.h" | 14 #include "sync/internal_api/public/base/unique_position.h" |
| 14 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" | 15 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" |
| 15 #include "sync/syncable/entry.h" | 16 #include "sync/syncable/entry.h" |
| 16 #include "sync/syncable/entry_kernel.h" | 17 #include "sync/syncable/entry_kernel.h" |
| 17 #include "sync/syncable/in_memory_directory_backing_store.h" | 18 #include "sync/syncable/in_memory_directory_backing_store.h" |
| 18 #include "sync/syncable/on_disk_directory_backing_store.h" | 19 #include "sync/syncable/on_disk_directory_backing_store.h" |
| 19 #include "sync/syncable/scoped_kernel_lock.h" | 20 #include "sync/syncable/scoped_kernel_lock.h" |
| 20 #include "sync/syncable/scoped_parent_child_index_updater.h" | 21 #include "sync/syncable/scoped_parent_child_index_updater.h" |
| 21 #include "sync/syncable/syncable-inl.h" | 22 #include "sync/syncable/syncable-inl.h" |
| 22 #include "sync/syncable/syncable_base_transaction.h" | 23 #include "sync/syncable/syncable_base_transaction.h" |
| (...skipping 93 matching lines...) | |
| 116 | 117 |
| 117 const DirOpenResult result = | 118 const DirOpenResult result = |
| 118 OpenImpl(name, delegate, transaction_observer); | 119 OpenImpl(name, delegate, transaction_observer); |
| 119 | 120 |
| 120 if (OPENED != result) | 121 if (OPENED != result) |
| 121 Close(); | 122 Close(); |
| 122 return result; | 123 return result; |
| 123 } | 124 } |
| 124 | 125 |
| 125 void Directory::InitializeIndices(MetahandlesMap* handles_map) { | 126 void Directory::InitializeIndices(MetahandlesMap* handles_map) { |
| 127 ScopedKernelLock lock(this); | |
| 126 kernel_->metahandles_map.swap(*handles_map); | 128 kernel_->metahandles_map.swap(*handles_map); |
| 127 for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin(); | 129 for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin(); |
| 128 it != kernel_->metahandles_map.end(); ++it) { | 130 it != kernel_->metahandles_map.end(); ++it) { |
| 129 EntryKernel* entry = it->second; | 131 EntryKernel* entry = it->second; |
| 130 if (ParentChildIndex::ShouldInclude(entry)) | 132 if (ParentChildIndex::ShouldInclude(entry)) |
| 131 kernel_->parent_child_index.Insert(entry); | 133 kernel_->parent_child_index.Insert(entry); |
| 132 const int64 metahandle = entry->ref(META_HANDLE); | 134 const int64 metahandle = entry->ref(META_HANDLE); |
| 133 if (entry->ref(IS_UNSYNCED)) | 135 if (entry->ref(IS_UNSYNCED)) |
| 134 kernel_->unsynced_metahandles.insert(metahandle); | 136 kernel_->unsynced_metahandles.insert(metahandle); |
| 135 if (entry->ref(IS_UNAPPLIED_UPDATE)) { | 137 if (entry->ref(IS_UNAPPLIED_UPDATE)) { |
| 136 const ModelType type = entry->GetServerModelType(); | 138 const ModelType type = entry->GetServerModelType(); |
| 137 kernel_->unapplied_update_metahandles[type].insert(metahandle); | 139 kernel_->unapplied_update_metahandles[type].insert(metahandle); |
| 138 } | 140 } |
| 139 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { | 141 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { |
| 140 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) == | 142 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) == |
| 141 kernel_->server_tags_map.end()) | 143 kernel_->server_tags_map.end()) |
| 142 << "Unexpected duplicate use of client tag"; | 144 << "Unexpected duplicate use of client tag"; |
| 143 kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry; | 145 kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry; |
| 144 } | 146 } |
| 145 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 147 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { |
| 146 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) == | 148 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) == |
| 147 kernel_->server_tags_map.end()) | 149 kernel_->server_tags_map.end()) |
| 148 << "Unexpected duplicate use of server tag"; | 150 << "Unexpected duplicate use of server tag"; |
| 149 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry; | 151 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry; |
| 150 } | 152 } |
| 151 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) == | 153 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) == |
| 152 kernel_->ids_map.end()) << "Unexpected duplicate use of ID"; | 154 kernel_->ids_map.end()) << "Unexpected duplicate use of ID"; |
| 153 kernel_->ids_map[entry->ref(ID).value()] = entry; | 155 kernel_->ids_map[entry->ref(ID).value()] = entry; |
| 154 DCHECK(!entry->is_dirty()); | 156 DCHECK(!entry->is_dirty()); |
| 157 AddToAttachmentIndex(metahandle, entry->ref(ATTACHMENT_METADATA), lock); | |
| 155 } | 158 } |
| 156 } | 159 } |
| 157 | 160 |
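
A note on the pattern this patch uses: InitializeIndices now takes the kernel lock up front (new line 127) and passes the ScopedKernelLock to AddToAttachmentIndex by const reference (new line 157), so the helper can require that its caller already holds the lock without re-acquiring it; the same convention appears at the other new call sites, including DeleteEntry's new const ScopedKernelLock& parameter (new line 664). The snippet below is only a minimal stand-alone sketch of that "lock witness" idiom; KernelLock and AttachmentIndex are simplified stand-ins, not the real ScopedKernelLock or Directory types.

```cpp
// Minimal sketch of the "pass the lock as a witness" idiom in plain C++;
// KernelLock and AttachmentIndex are stand-ins, not the syncable classes.
#include <map>
#include <mutex>
#include <set>
#include <string>

class KernelLock {
 public:
  explicit KernelLock(std::mutex* m) : guard_(*m) {}

 private:
  std::lock_guard<std::mutex> guard_;  // held for the lifetime of this object
};

class AttachmentIndex {
 public:
  // Requiring a const KernelLock& documents, and enforces at every call
  // site, that the caller already holds the kernel mutex; the helper never
  // locks itself, so there is no recursive-locking hazard.
  void Add(long long metahandle,
           const std::string& attachment_unique_id,
           const KernelLock& /* proof the lock is held */) {
    index_[attachment_unique_id].insert(metahandle);
  }

 private:
  std::map<std::string, std::set<long long> > index_;
};

int main() {
  std::mutex kernel_mutex;
  AttachmentIndex index;
  KernelLock lock(&kernel_mutex);         // taken once, up front, by the caller
  index.Add(42, "some-unique-id", lock);  // helper runs under that lock
  return 0;
}
```
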
| 158 DirOpenResult Directory::OpenImpl( | 161 DirOpenResult Directory::OpenImpl( |
| 159 const string& name, | 162 const string& name, |
| 160 DirectoryChangeDelegate* delegate, | 163 DirectoryChangeDelegate* delegate, |
| 161 const WeakHandle<TransactionObserver>& | 164 const WeakHandle<TransactionObserver>& |
| 162 transaction_observer) { | 165 transaction_observer) { |
| 163 KernelLoadInfo info; | 166 KernelLoadInfo info; |
| 164 // Temporary indices before kernel_ initialized in case Load fails. We 0(1) | 167 // Temporary indices before kernel_ initialized in case Load fails. We 0(1) |
| (...skipping 190 matching lines...) | |
| 355 return false; | 358 return false; |
| 356 } | 359 } |
| 357 if (ParentChildIndex::ShouldInclude(entry)) { | 360 if (ParentChildIndex::ShouldInclude(entry)) { |
| 358 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), | 361 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), |
| 359 FROM_HERE, | 362 FROM_HERE, |
| 360 error, | 363 error, |
| 361 trans)) { | 364 trans)) { |
| 362 return false; | 365 return false; |
| 363 } | 366 } |
| 364 } | 367 } |
| 368 AddToAttachmentIndex( | |
| 369 entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), *lock); | |
| 365 | 370 |
| 366 // Should NEVER be created with a client tag or server tag. | 371 // Should NEVER be created with a client tag or server tag. |
| 367 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE, | 372 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE, |
| 368 "Server tag should be empty", trans)) { | 373 "Server tag should be empty", trans)) { |
| 369 return false; | 374 return false; |
| 370 } | 375 } |
| 371 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE, | 376 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE, |
| 372 "Client tag should be empty", trans)) | 377 "Client tag should be empty", trans)) |
| 373 return false; | 378 return false; |
| 374 | 379 |
| (...skipping 26 matching lines...) | |
| 401 | 406 |
| 402 { | 407 { |
| 403 // Update the indices that depend on the PARENT_ID field. | 408 // Update the indices that depend on the PARENT_ID field. |
| 404 ScopedParentChildIndexUpdater index_updater(lock, entry, | 409 ScopedParentChildIndexUpdater index_updater(lock, entry, |
| 405 &kernel_->parent_child_index); | 410 &kernel_->parent_child_index); |
| 406 entry->put(PARENT_ID, new_parent_id); | 411 entry->put(PARENT_ID, new_parent_id); |
| 407 } | 412 } |
| 408 return true; | 413 return true; |
| 409 } | 414 } |
| 410 | 415 |
| 416 void Directory::RemoveFromAttachmentIndex( | |
| 417 const int64 metahandle, | |
| 418 const sync_pb::AttachmentMetadata& attachment_metadata, | |
| 419 const ScopedKernelLock& lock) { | |
| 420 for (int i = 0; i < attachment_metadata.record_size(); ++i) { | |
| 421 AttachmentIdUniqueId unique_id = | |
| 422 attachment_metadata.record(i).id().unique_id(); | |
| 423 IndexByAttachmenId::iterator iter = | |
| 424 kernel_->index_by_attachment_id.find(unique_id); | |
| 425 if (iter != kernel_->index_by_attachment_id.end()) { | |
| 426 iter->second.erase(metahandle); | |

> pavely, 2014/04/28 23:26:27: Can entity reference attachment multiple times? Se
>
> maniscalco, 2014/04/29 20:53:22: Good point. No, a single SyncData (or entry) shou
| 427 if (iter->second.empty()) { | |
| 428 kernel_->index_by_attachment_id.erase(iter); | |
| 429 } | |
| 430 } | |
| 431 } | |
| 432 } | |
| 433 | |
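
On the question raised in the inline comments above (whether an entry can reference the same attachment more than once): because the index value is a MetahandleSet, inserting the same (unique_id, metahandle) pair twice collapses to one element, and a single erase(metahandle) fully unlinks the entry, so no per-reference count is needed as long as an entry lists each attachment id at most once, which is what the reply asserts. A throwaway sketch of that set behaviour, using simplified stand-in types rather than the real sync_pb/syncable ones:

```cpp
// Illustration of the set semantics discussed in the review thread above;
// the typedefs are simplified stand-ins for the real syncable types.
#include <cassert>
#include <map>
#include <set>
#include <string>

typedef std::set<long long> MetahandleSet;
typedef std::map<std::string, MetahandleSet> IndexByAttachmentUniqueId;

int main() {
  IndexByAttachmentUniqueId index;
  const long long metahandle = 7;

  // Even if the same attachment id somehow appeared twice in an entry's
  // metadata, the set collapses the duplicate insert...
  index["unique-id-A"].insert(metahandle);
  index["unique-id-A"].insert(metahandle);
  assert(index["unique-id-A"].size() == 1u);

  // ...and one erase fully unlinks the entry, so a per-(attachment, entry)
  // reference count is unnecessary as long as each entry lists a given
  // attachment id at most once.
  index["unique-id-A"].erase(metahandle);
  assert(index["unique-id-A"].empty());
  return 0;
}
```
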
| 434 void Directory::AddToAttachmentIndex( | |
| 435 const int64 metahandle, | |
| 436 const sync_pb::AttachmentMetadata& attachment_metadata, | |
| 437 const ScopedKernelLock& lock) { | |
| 438 for (int i = 0; i < attachment_metadata.record_size(); ++i) { | |
| 439 AttachmentIdUniqueId unique_id = | |
| 440 attachment_metadata.record(i).id().unique_id(); | |
| 441 IndexByAttachmenId::iterator iter = | |
| 442 kernel_->index_by_attachment_id.find(unique_id); | |
| 443 if (iter == kernel_->index_by_attachment_id.end()) { | |
| 444 iter = kernel_->index_by_attachment_id.insert(std::make_pair( | |
| 445 unique_id, | |
| 446 MetahandleSet())).first; | |
| 447 } | |
| 448 iter->second.insert(metahandle); | |
| 449 } | |
| 450 } | |
| 451 | |
| 452 void Directory::UpdateAttachmentIndex( | |
| 453 const int64 metahandle, | |
| 454 const sync_pb::AttachmentMetadata& old_metadata, | |
| 455 const sync_pb::AttachmentMetadata& new_metadata) { | |
| 456 ScopedKernelLock lock(this); | |
| 457 RemoveFromAttachmentIndex(metahandle, old_metadata, lock); | |
| 458 AddToAttachmentIndex(metahandle, new_metadata, lock); | |
| 459 } | |
| 460 | |
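
UpdateAttachmentIndex is the only one of the three helpers that takes the lock itself: it acquires the kernel lock once and then performs remove-old followed by add-new, so other threads that take the same lock never observe the entry half-unlinked between the two steps. Below is a simplified, self-contained model of that composition; Metadata, the globals, and the free functions are illustrative stand-ins, not the real Directory API (which operates on sync_pb::AttachmentMetadata under a ScopedKernelLock).

```cpp
// Simplified model of remove-old-then-add-new under a single lock; all
// names here are stand-ins, not the real syncable API.
#include <map>
#include <mutex>
#include <set>
#include <string>
#include <vector>

typedef std::vector<std::string> Metadata;  // just the referenced unique ids
typedef std::map<std::string, std::set<long long> > Index;

std::mutex g_kernel_mutex;  // stands in for the kernel's mutex
Index g_index_by_attachment_id;

void RemoveLocked(long long handle, const Metadata& metadata) {
  for (size_t i = 0; i < metadata.size(); ++i) {
    Index::iterator it = g_index_by_attachment_id.find(metadata[i]);
    if (it == g_index_by_attachment_id.end())
      continue;
    it->second.erase(handle);
    if (it->second.empty())
      g_index_by_attachment_id.erase(it);  // drop empty buckets, as the CL does
  }
}

void AddLocked(long long handle, const Metadata& metadata) {
  for (size_t i = 0; i < metadata.size(); ++i)
    g_index_by_attachment_id[metadata[i]].insert(handle);
}

// Mirrors the shape of UpdateAttachmentIndex: one lock acquisition, then
// remove + add, so the index is never seen in a half-updated state.
void Update(long long handle, const Metadata& old_md, const Metadata& new_md) {
  std::lock_guard<std::mutex> lock(g_kernel_mutex);
  RemoveLocked(handle, old_md);
  AddLocked(handle, new_md);
}

int main() {
  Metadata none;
  Metadata old_ids;
  old_ids.push_back("id-A");
  old_ids.push_back("id-B");
  Metadata new_ids;
  new_ids.push_back("id-B");
  new_ids.push_back("id-C");
  Update(7, none, old_ids);     // entry 7 gains id-A, id-B
  Update(7, old_ids, new_ids);  // entry 7 now references id-B, id-C only
  return 0;
}
```
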
| 411 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { | 461 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { |
| 412 DCHECK(trans != NULL); | 462 DCHECK(trans != NULL); |
| 413 return unrecoverable_error_set_; | 463 return unrecoverable_error_set_; |
| 414 } | 464 } |
| 415 | 465 |
| 416 void Directory::ClearDirtyMetahandles() { | 466 void Directory::ClearDirtyMetahandles() { |
| 417 kernel_->transaction_mutex.AssertAcquired(); | 467 kernel_->transaction_mutex.AssertAcquired(); |
| 418 kernel_->dirty_metahandles.clear(); | 468 kernel_->dirty_metahandles.clear(); |
| 419 } | 469 } |
| 420 | 470 |
| (...skipping 120 matching lines...) | |
| 541 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 591 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { |
| 542 num_erased = | 592 num_erased = |
| 543 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); | 593 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); |
| 544 DCHECK_EQ(1u, num_erased); | 594 DCHECK_EQ(1u, num_erased); |
| 545 } | 595 } |
| 546 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), | 596 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), |
| 547 FROM_HERE, | 597 FROM_HERE, |
| 548 "Deleted entry still present", | 598 "Deleted entry still present", |
| 549 (&trans))) | 599 (&trans))) |
| 550 return false; | 600 return false; |
| 601 RemoveFromAttachmentIndex( | |
| 602 entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), lock); | |
| 603 | |
| 551 delete entry; | 604 delete entry; |
| 552 } | 605 } |
| 553 if (trans.unrecoverable_error_set()) | 606 if (trans.unrecoverable_error_set()) |
| 554 return false; | 607 return false; |
| 555 } | 608 } |
| 556 return true; | 609 return true; |
| 557 } | 610 } |
| 558 | 611 |
| 559 void Directory::UnapplyEntry(EntryKernel* entry) { | 612 void Directory::UnapplyEntry(EntryKernel* entry) { |
| 560 int64 handle = entry->ref(META_HANDLE); | 613 int64 handle = entry->ref(META_HANDLE); |
| (...skipping 39 matching lines...) | |
| 600 } | 653 } |
| 601 | 654 |
| 602 // At this point locally created items that aren't synced will become locally | 655 // At this point locally created items that aren't synced will become locally |
| 603 // deleted items, and purged on the next snapshot. All other items will match | 656 // deleted items, and purged on the next snapshot. All other items will match |
| 604 // the state they would have had if they were just created via a server | 657 // the state they would have had if they were just created via a server |
| 605 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..). | 658 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..). |
| 606 } | 659 } |
| 607 | 660 |
| 608 void Directory::DeleteEntry(bool save_to_journal, | 661 void Directory::DeleteEntry(bool save_to_journal, |
| 609 EntryKernel* entry, | 662 EntryKernel* entry, |
| 610 EntryKernelSet* entries_to_journal) { | 663 EntryKernelSet* entries_to_journal, |
| 664 const ScopedKernelLock& lock) { | |
| 611 int64 handle = entry->ref(META_HANDLE); | 665 int64 handle = entry->ref(META_HANDLE); |
| 612 ModelType server_type = GetModelTypeFromSpecifics( | 666 ModelType server_type = GetModelTypeFromSpecifics( |
| 613 entry->ref(SERVER_SPECIFICS)); | 667 entry->ref(SERVER_SPECIFICS)); |
| 614 | 668 |
| 615 kernel_->metahandles_to_purge.insert(handle); | 669 kernel_->metahandles_to_purge.insert(handle); |
| 616 | 670 |
| 617 size_t num_erased = 0; | 671 size_t num_erased = 0; |
| 618 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); | 672 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); |
| 619 DCHECK_EQ(1u, num_erased); | 673 DCHECK_EQ(1u, num_erased); |
| 620 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); | 674 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); |
| 621 DCHECK_EQ(1u, num_erased); | 675 DCHECK_EQ(1u, num_erased); |
| 622 num_erased = kernel_->unsynced_metahandles.erase(handle); | 676 num_erased = kernel_->unsynced_metahandles.erase(handle); |
| 623 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); | 677 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); |
| 624 num_erased = | 678 num_erased = |
| 625 kernel_->unapplied_update_metahandles[server_type].erase(handle); | 679 kernel_->unapplied_update_metahandles[server_type].erase(handle); |
| 626 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); | 680 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); |
| 627 if (kernel_->parent_child_index.Contains(entry)) | 681 if (kernel_->parent_child_index.Contains(entry)) |
| 628 kernel_->parent_child_index.Remove(entry); | 682 kernel_->parent_child_index.Remove(entry); |
| 629 | 683 |
| 630 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 684 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { |
| 631 num_erased = | 685 num_erased = |
| 632 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); | 686 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); |
| 633 DCHECK_EQ(1u, num_erased); | 687 DCHECK_EQ(1u, num_erased); |
| 634 } | 688 } |
| 635 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { | 689 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { |
| 636 num_erased = | 690 num_erased = |
| 637 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG)); | 691 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG)); |
| 638 DCHECK_EQ(1u, num_erased); | 692 DCHECK_EQ(1u, num_erased); |
| 639 } | 693 } |
| 694 RemoveFromAttachmentIndex(handle, entry->ref(ATTACHMENT_METADATA), lock); | |
| 640 | 695 |
| 641 if (save_to_journal) { | 696 if (save_to_journal) { |
| 642 entries_to_journal->insert(entry); | 697 entries_to_journal->insert(entry); |
| 643 } else { | 698 } else { |
| 644 delete entry; | 699 delete entry; |
| 645 } | 700 } |
| 646 } | 701 } |
| 647 | 702 |
| 648 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types, | 703 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types, |
| 649 ModelTypeSet types_to_journal, | 704 ModelTypeSet types_to_journal, |
| (...skipping 47 matching lines...) | |
| 697 | 752 |
| 698 if (types_to_unapply.Has(local_type) || | 753 if (types_to_unapply.Has(local_type) || |
| 699 types_to_unapply.Has(server_type)) { | 754 types_to_unapply.Has(server_type)) { |
| 700 UnapplyEntry(entry); | 755 UnapplyEntry(entry); |
| 701 } else { | 756 } else { |
| 702 bool save_to_journal = | 757 bool save_to_journal = |
| 703 (types_to_journal.Has(local_type) || | 758 (types_to_journal.Has(local_type) || |
| 704 types_to_journal.Has(server_type)) && | 759 types_to_journal.Has(server_type)) && |
| 705 (delete_journal_->IsDeleteJournalEnabled(local_type) || | 760 (delete_journal_->IsDeleteJournalEnabled(local_type) || |
| 706 delete_journal_->IsDeleteJournalEnabled(server_type)); | 761 delete_journal_->IsDeleteJournalEnabled(server_type)); |
| 707 DeleteEntry(save_to_journal, entry, &entries_to_journal); | 762 DeleteEntry(save_to_journal, entry, &entries_to_journal, lock); |
| 708 } | 763 } |
| 709 } | 764 } |
| 710 | 765 |
| 711 delete_journal_->AddJournalBatch(&trans, entries_to_journal); | 766 delete_journal_->AddJournalBatch(&trans, entries_to_journal); |
| 712 | 767 |
| 713 // Ensure meta tracking for these data types reflects the purged state. | 768 // Ensure meta tracking for these data types reflects the purged state. |
| 714 for (ModelTypeSet::Iterator it = disabled_types.First(); | 769 for (ModelTypeSet::Iterator it = disabled_types.First(); |
| 715 it.Good(); it.Inc()) { | 770 it.Good(); it.Inc()) { |
| 716 kernel_->persisted_info.transaction_version[it.Get()] = 0; | 771 kernel_->persisted_info.transaction_version[it.Get()] = 0; |
| 717 | 772 |
| (...skipping 37 matching lines...) | |
| 755 | 810 |
| 756 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order | 811 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order |
| 757 // to ensure no in-transit data is lost. | 812 // to ensure no in-transit data is lost. |
| 758 | 813 |
| 759 entry->mark_dirty(&kernel_->dirty_metahandles); | 814 entry->mark_dirty(&kernel_->dirty_metahandles); |
| 760 } | 815 } |
| 761 | 816 |
| 762 return true; | 817 return true; |
| 763 } | 818 } |
| 764 | 819 |
| 820 bool Directory::IsAttachmentLinked(const AttachmentId& attachment_id) const { | |
| 821 ScopedKernelLock lock(this); | |
| 822 IndexByAttachmenId::const_iterator iter = | |
| 823 kernel_->index_by_attachment_id.find( | |
| 824 attachment_id.GetProto().unique_id()); | |
| 825 if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) { | |
| 826 return true; | |
| 827 } | |
| 828 return false; | |
| 829 } | |
| 830 | |
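
IsAttachmentLinked answers "does any entry still reference this attachment?", presumably so attachments whose reference set is empty can be identified for cleanup; that intended use is an inference, not something stated in this file. A hypothetical pruning loop built on the same check, reusing the simplified index type from the sketches above:

```cpp
// Hypothetical pruning loop built on the same question IsAttachmentLinked
// answers; the index type and the stored-id list are illustrative only.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

typedef std::map<std::string, std::set<long long> > Index;

bool IsLinked(const Index& index, const std::string& unique_id) {
  Index::const_iterator it = index.find(unique_id);
  return it != index.end() && !it->second.empty();
}

int main() {
  Index index;
  index["id-A"].insert(7);  // id-A is still referenced by entry 7
  index["id-B"];            // id-B has an empty (stale) bucket

  std::vector<std::string> stored_ids;
  stored_ids.push_back("id-A");
  stored_ids.push_back("id-B");
  stored_ids.push_back("id-C");

  for (size_t i = 0; i < stored_ids.size(); ++i) {
    if (!IsLinked(index, stored_ids[i]))
      std::cout << "candidate for deletion: " << stored_ids[i] << "\n";
  }
  return 0;  // prints id-B and id-C
}
```
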
| 765 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { | 831 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { |
| 766 WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this); | 832 WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this); |
| 767 ScopedKernelLock lock(this); | 833 ScopedKernelLock lock(this); |
| 768 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 834 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
| 769 | 835 |
| 770 // Because we optimistically cleared the dirty bit on the real entries when | 836 // Because we optimistically cleared the dirty bit on the real entries when |
| 771 // taking the snapshot, we must restore it on failure. Not doing this could | 837 // taking the snapshot, we must restore it on failure. Not doing this could |
| 772 // cause lost data, if no other changes are made to the in-memory entries | 838 // cause lost data, if no other changes are made to the in-memory entries |
| 773 // that would cause the dirty bit to get set again. Setting the bit ensures | 839 // that would cause the dirty bit to get set again. Setting the bit ensures |
| 774 // that SaveChanges will at least try again later. | 840 // that SaveChanges will at least try again later. |
| (...skipping 581 matching lines...) | |
| 1356 | 1422 |
| 1357 for (OrderedChildSet::const_iterator i = children->begin(); | 1423 for (OrderedChildSet::const_iterator i = children->begin(); |
| 1358 i != children->end(); ++i) { | 1424 i != children->end(); ++i) { |
| 1359 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID)); | 1425 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID)); |
| 1360 result->push_back((*i)->ref(META_HANDLE)); | 1426 result->push_back((*i)->ref(META_HANDLE)); |
| 1361 } | 1427 } |
| 1362 } | 1428 } |
| 1363 | 1429 |
| 1364 } // namespace syncable | 1430 } // namespace syncable |
| 1365 } // namespace syncer | 1431 } // namespace syncer |