Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
| 6 | 6 |
| 7 #include "base/debug/trace_event.h" | 7 #include "base/debug/trace_event.h" |
| 8 #include "base/perftimer.h" | 8 #include "base/perftimer.h" |
| 9 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
| 10 #include "base/string_number_conversions.h" | 10 #include "base/string_number_conversions.h" |
| (...skipping 83 matching lines...) | |
| 94 download_progress[model_type].set_data_type_id( | 94 download_progress[model_type].set_data_type_id( |
| 95 GetSpecificsFieldNumberFromModelType(model_type)); | 95 GetSpecificsFieldNumberFromModelType(model_type)); |
| 96 // An empty-string token indicates no prior knowledge. | 96 // An empty-string token indicates no prior knowledge. |
| 97 download_progress[model_type].set_token(std::string()); | 97 download_progress[model_type].set_token(std::string()); |
| 98 } | 98 } |
| 99 | 99 |
| 100 Directory::SaveChangesSnapshot::SaveChangesSnapshot() | 100 Directory::SaveChangesSnapshot::SaveChangesSnapshot() |
| 101 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { | 101 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { |
| 102 } | 102 } |
| 103 | 103 |
| 104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {} | 104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() { |
| 105 STLDeleteElements(&dirty_metas); | |
| 106 STLDeleteElements(&delete_journals); | |
| 107 } | |
| 105 | 108 |
| 106 Directory::Kernel::Kernel( | 109 Directory::Kernel::Kernel( |
| 107 const std::string& name, | 110 const std::string& name, |
| 108 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, | 111 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, |
| 109 const WeakHandle<TransactionObserver>& transaction_observer) | 112 const WeakHandle<TransactionObserver>& transaction_observer) |
| 110 : next_write_transaction_id(0), | 113 : next_write_transaction_id(0), |
| 111 name(name), | 114 name(name), |
| 112 metahandles_index(new Directory::MetahandlesIndex), | 115 metahandles_index(new Directory::MetahandlesIndex), |
| 113 ids_index(new Directory::IdsIndex), | 116 ids_index(new Directory::IdsIndex), |
| 114 parent_id_child_index(new Directory::ParentIdChildIndex), | 117 parent_id_child_index(new Directory::ParentIdChildIndex), |
| 115 client_tag_index(new Directory::ClientTagIndex), | 118 client_tag_index(new Directory::ClientTagIndex), |
| 116 unsynced_metahandles(new MetahandleSet), | 119 unsynced_metahandles(new MetahandleSet), |
| 117 dirty_metahandles(new MetahandleSet), | 120 dirty_metahandles(new MetahandleSet), |
| 118 metahandles_to_purge(new MetahandleSet), | 121 metahandles_to_purge(new MetahandleSet), |
| 119 info_status(Directory::KERNEL_SHARE_INFO_VALID), | 122 info_status(Directory::KERNEL_SHARE_INFO_VALID), |
| 120 persisted_info(info.kernel_info), | 123 persisted_info(info.kernel_info), |
| 121 cache_guid(info.cache_guid), | 124 cache_guid(info.cache_guid), |
| 122 next_metahandle(info.max_metahandle + 1), | 125 next_metahandle(info.max_metahandle + 1), |
| 123 delegate(delegate), | 126 delegate(delegate), |
| 124 transaction_observer(transaction_observer) { | 127 transaction_observer(transaction_observer), |
| 128 delete_journals_(new Directory::IdsIndex), | |
| 129 delete_journals_to_purge_(new MetahandleSet) { | |
| 125 DCHECK(delegate); | 130 DCHECK(delegate); |
| 126 DCHECK(transaction_observer.IsInitialized()); | 131 DCHECK(transaction_observer.IsInitialized()); |
| 127 } | 132 } |
| 128 | 133 |
| 129 Directory::Kernel::~Kernel() { | 134 Directory::Kernel::~Kernel() { |
| 130 delete unsynced_metahandles; | 135 delete unsynced_metahandles; |
| 131 delete dirty_metahandles; | 136 delete dirty_metahandles; |
| 132 delete metahandles_to_purge; | 137 delete metahandles_to_purge; |
| 133 delete parent_id_child_index; | 138 delete parent_id_child_index; |
| 134 delete client_tag_index; | 139 delete client_tag_index; |
| 135 delete ids_index; | 140 delete ids_index; |
| 136 STLDeleteElements(metahandles_index); | 141 STLDeleteElements(metahandles_index); |
| 137 delete metahandles_index; | 142 delete metahandles_index; |
| 143 STLDeleteElements(delete_journals_); | |
| 144 delete delete_journals_; | |
| 145 delete delete_journals_to_purge_; | |
| 138 } | 146 } |
| 139 | 147 |
| 140 Directory::Directory( | 148 Directory::Directory( |
| 141 DirectoryBackingStore* store, | 149 DirectoryBackingStore* store, |
| 142 UnrecoverableErrorHandler* unrecoverable_error_handler, | 150 UnrecoverableErrorHandler* unrecoverable_error_handler, |
| 143 ReportUnrecoverableErrorFunction report_unrecoverable_error_function, | 151 ReportUnrecoverableErrorFunction report_unrecoverable_error_function, |
| 144 NigoriHandler* nigori_handler, | 152 NigoriHandler* nigori_handler, |
| 145 Cryptographer* cryptographer) | 153 Cryptographer* cryptographer) |
| 146 : kernel_(NULL), | 154 : kernel_(NULL), |
| 147 store_(store), | 155 store_(store), |
| (...skipping 41 matching lines...) | |
| 189 } | 197 } |
| 190 DCHECK(!entry->is_dirty()); | 198 DCHECK(!entry->is_dirty()); |
| 191 } | 199 } |
| 192 } | 200 } |
| 193 | 201 |
| 194 DirOpenResult Directory::OpenImpl( | 202 DirOpenResult Directory::OpenImpl( |
| 195 const string& name, | 203 const string& name, |
| 196 DirectoryChangeDelegate* delegate, | 204 DirectoryChangeDelegate* delegate, |
| 197 const WeakHandle<TransactionObserver>& | 205 const WeakHandle<TransactionObserver>& |
| 198 transaction_observer) { | 206 transaction_observer) { |
| 199 | |
| 200 KernelLoadInfo info; | 207 KernelLoadInfo info; |
| 201   // Temporary indices before kernel_ initialized in case Load fails. We O(1) | 208   // Temporary indices before kernel_ initialized in case Load fails. We O(1) |
| 202 // swap these later. | 209 // swap these later. |
| 203 MetahandlesIndex metas_bucket; | 210 MetahandlesIndex metas_bucket; |
| 204 DirOpenResult result = store_->Load(&metas_bucket, &info); | 211 IdsIndex delete_journals; |
| 212 | |
| 213 DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info); | |
| 205 if (OPENED != result) | 214 if (OPENED != result) |
| 206 return result; | 215 return result; |
| 207 | 216 |
| 208 kernel_ = new Kernel(name, info, delegate, transaction_observer); | 217 kernel_ = new Kernel(name, info, delegate, transaction_observer); |
| 209 kernel_->metahandles_index->swap(metas_bucket); | 218 kernel_->metahandles_index->swap(metas_bucket); |
| 219 kernel_->delete_journals_->swap(delete_journals); | |
| 210 InitializeIndices(); | 220 InitializeIndices(); |
| 211 | 221 |
| 212 // Write back the share info to reserve some space in 'next_id'. This will | 222 // Write back the share info to reserve some space in 'next_id'. This will |
| 213 // prevent local ID reuse in the case of an early crash. See the comments in | 223 // prevent local ID reuse in the case of an early crash. See the comments in |
| 214 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information. | 224 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information. |
| 215 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 225 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
| 216 if (!SaveChanges()) | 226 if (!SaveChanges()) |
| 217 return FAILED_INITIAL_WRITE; | 227 return FAILED_INITIAL_WRITE; |
| 218 | 228 |
| 219 return OPENED; | 229 return OPENED; |
| 220 } | 230 } |
| 221 | 231 |
| 222 void Directory::Close() { | 232 void Directory::Close() { |
| 223 store_.reset(); | 233 store_.reset(); |
| 224 if (kernel_) { | 234 if (kernel_) { |
| 225 delete kernel_; | 235 delete kernel_; |
| 226 kernel_ = NULL; | 236 kernel_ = NULL; |
| 227 } | 237 } |
| 228 } | 238 } |
| 229 | 239 |
| 230 void Directory::OnUnrecoverableError(const BaseTransaction* trans, | 240 void Directory::OnUnrecoverableError(const BaseTransaction* trans, |
| 231 const tracked_objects::Location& location, | 241 const tracked_objects::Location& location, |
| 232 const std::string & message) { | 242 const std::string & message) { |
| 233 DCHECK(trans != NULL); | 243 DCHECK(trans != NULL); |
| 234 unrecoverable_error_set_ = true; | 244 unrecoverable_error_set_ = true; |
| 235 unrecoverable_error_handler_->OnUnrecoverableError(location, | 245 unrecoverable_error_handler_->OnUnrecoverableError(location, |
| 236 message); | 246 message); |
| 237 } | 247 } |
| 238 | 248 |
| 249 void Directory::UpdateDeleteJournals(BaseTransaction* trans, | |
| 250 bool was_deleted, | |
| 251                                               const EntryKernel* entry) { | |

tim (not reviewing), 2012/12/13 02:48:10: How come this can't be a ref?
haitaol1, 2012/12/13 21:34:21: Done.
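The question above concerns the `const EntryKernel* entry` parameter of `UpdateDeleteJournals()`. A minimal sketch of the contrast the reviewer is pointing at; the `EntryKernel` stand-in and `IsServerDeleted()` helper below are hypothetical and not part of the sync code:

```cpp
// Hypothetical stand-ins, not the syncer::syncable types. The point is only
// the parameter-passing style discussed in the review comment above.
struct EntryKernel {
  bool server_is_del;
};

// Pointer form, as in this patch set: the caller could pass NULL, so the
// callee has to guard (or DCHECK).
bool IsServerDeleted(const EntryKernel* entry) {
  return entry && entry->server_is_del;
}

// Reference form, as the reviewer suggests: a null argument is impossible by
// construction, so no guard is needed.
bool IsServerDeleted(const EntryKernel& entry) {
  return entry.server_is_del;
}
```

With a reference parameter, the journal lookup would take the address explicitly, e.g. `const_cast<EntryKernel*>(&entry)`, instead of casting the pointer directly.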
| 252 if (!IsDeleteJournalEnabledForType(entry->GetServerModelType())) | |
| 253 return; | |
| 254 | |
| 255 ScopedKernelLock lock(this); | |
| 256 IdsIndex::const_iterator it = | |
| 257 kernel_->delete_journals_->find(const_cast<EntryKernel*>(entry)); | |
| 258 | |
| 259 if (entry->ref(SERVER_IS_DEL)) { | |
| 260 if (it == kernel_->delete_journals_->end()) { | |
| 261 // New delete. | |
| 262 EntryKernel* t = new EntryKernel(*entry); | |
| 263 kernel_->delete_journals_->insert(t); | |
| 264 kernel_->delete_journals_to_purge_->erase(t->ref(META_HANDLE)); | |
| 265 } | |
| 266 } else { | |
| 267 // Undelete. This could happen in two cases: | |
| 268 // * An entry was actually deleted and undeleted: was_deleted = true. | |
| 269 // * A data type was broken in last sync session and all its entries | |
| 270 // were duplicated in delete journals. On restart, entries are recreated | |
| 271 // from downloads and recreation calls UpdateDeleteJournals() to remove | |
| 272 // live entries from delete journals, thus only deleted entries remain in | |
| 273 // journals. | |
| 274 if (it != kernel_->delete_journals_->end()) { | |
| 275 kernel_->delete_journals_to_purge_->insert((*it)->ref(META_HANDLE)); | |
| 276 delete *it; | |
| 277 kernel_->delete_journals_->erase(it); | |
| 278 } else if (was_deleted) { | |
| 279       kernel_->delete_journals_to_purge_->insert(entry->ref(META_HANDLE)); | |
| 280 } | |
| 281 } | |
| 282 } | |
| 283 | |
| 284 void Directory::GetDeleteJournals(BaseTransaction* trans, | |
| 285 ModelType type, | |
| 286 EntryKernelSet* deleted_entries) { | |
| 287 ScopedKernelLock lock(this); | |
| 288 DCHECK(!passive_delete_journal_types_.Has(type)); | |
| 289 for (IdsIndex::const_iterator it = kernel_->delete_journals_->begin(); | |
| 290 it != kernel_->delete_journals_->end(); ++it) { | |
| 291 if ((*it)->GetServerModelType() == type) | |
| 292 deleted_entries->insert(*it); | |
| 293 } | |
| 294 passive_delete_journal_types_.Put(type); | |
| 295 } | |
| 296 | |
| 297 void Directory::PurgeDeleteJournals(BaseTransaction* trans, | |
| 298 const MetahandleSet& to_purge) { | |
| 299 ScopedKernelLock lock(this); | |
| 300 IdsIndex::const_iterator it = kernel_->delete_journals_->begin(); | |
| 301 while (it != kernel_->delete_journals_->end()) { | |
| 302 int64 handle = (*it)->ref(META_HANDLE); | |
| 303 if (to_purge.count(handle)) { | |
| 304 delete *it; | |
| 305 kernel_->delete_journals_->erase(it++); | |
| 306 } else { | |
| 307 ++it; | |
| 308 } | |
| 309 } | |
| 310 kernel_->delete_journals_to_purge_->insert(to_purge.begin(), to_purge.end()); | |
| 311 } | |
| 239 | 312 |
| 240 EntryKernel* Directory::GetEntryById(const Id& id) { | 313 EntryKernel* Directory::GetEntryById(const Id& id) { |
| 241 ScopedKernelLock lock(this); | 314 ScopedKernelLock lock(this); |
| 242 return GetEntryById(id, &lock); | 315 return GetEntryById(id, &lock); |
| 243 } | 316 } |
| 244 | 317 |
| 245 EntryKernel* Directory::GetEntryById(const Id& id, | 318 EntryKernel* Directory::GetEntryById(const Id& id, |
| 246 ScopedKernelLock* const lock) { | 319 ScopedKernelLock* const lock) { |
| 247 DCHECK(kernel_); | 320 DCHECK(kernel_); |
| 248 // Find it in the in memory ID index. | 321 // Find it in the in memory ID index. |
| (...skipping 210 matching lines...) | |
| 459 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and | 532 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and |
| 460 // clear dirty flags. | 533 // clear dirty flags. |
| 461 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin(); | 534 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin(); |
| 462 i != kernel_->dirty_metahandles->end(); ++i) { | 535 i != kernel_->dirty_metahandles->end(); ++i) { |
| 463 EntryKernel* entry = GetEntryByHandle(*i, &lock); | 536 EntryKernel* entry = GetEntryByHandle(*i, &lock); |
| 464 if (!entry) | 537 if (!entry) |
| 465 continue; | 538 continue; |
| 466 // Skip over false positives; it happens relatively infrequently. | 539 // Skip over false positives; it happens relatively infrequently. |
| 467 if (!entry->is_dirty()) | 540 if (!entry->is_dirty()) |
| 468 continue; | 541 continue; |
| 469 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry); | 542 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), |
| 543 new EntryKernel(*entry)); | |
| 470 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); | 544 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); |
| 471 // We don't bother removing from the index here as we blow the entire thing | 545 // We don't bother removing from the index here as we blow the entire thing |
| 472 // in a moment, and it unnecessarily complicates iteration. | 546 // in a moment, and it unnecessarily complicates iteration. |
| 473 entry->clear_dirty(NULL); | 547 entry->clear_dirty(NULL); |
| 474 } | 548 } |
| 475 ClearDirtyMetahandles(); | 549 ClearDirtyMetahandles(); |
| 476 | 550 |
| 477 // Set purged handles. | 551 // Set purged handles. |
| 478 DCHECK(snapshot->metahandles_to_purge.empty()); | 552 DCHECK(snapshot->metahandles_to_purge.empty()); |
| 479 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); | 553 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); |
| 480 | 554 |
| 481 // Fill kernel_info_status and kernel_info. | 555 // Fill kernel_info_status and kernel_info. |
| 482 snapshot->kernel_info = kernel_->persisted_info; | 556 snapshot->kernel_info = kernel_->persisted_info; |
| 483 // To avoid duplicates when the process crashes, we record the next_id to be | 557 // To avoid duplicates when the process crashes, we record the next_id to be |
| 484 // greater magnitude than could possibly be reached before the next save | 558 // greater magnitude than could possibly be reached before the next save |
| 485 // changes. In other words, it's effectively impossible for the user to | 559 // changes. In other words, it's effectively impossible for the user to |
| 486 // generate 65536 new bookmarks in 3 seconds. | 560 // generate 65536 new bookmarks in 3 seconds. |
| 487 snapshot->kernel_info.next_id -= 65536; | 561 snapshot->kernel_info.next_id -= 65536; |
| 488 snapshot->kernel_info_status = kernel_->info_status; | 562 snapshot->kernel_info_status = kernel_->info_status; |
| 489 // This one we reset on failure. | 563 // This one we reset on failure. |
| 490 kernel_->info_status = KERNEL_SHARE_INFO_VALID; | 564 kernel_->info_status = KERNEL_SHARE_INFO_VALID; |
| 565 | |
| 566 // Move passive delete journals to snapshot. Will copy back if snapshot fails | |
| 567 // to save. | |
| 568   IdsIndex::const_iterator it = kernel_->delete_journals_->begin(); | |
| 569 while (it != kernel_->delete_journals_->end()) { | |
| 570 if (passive_delete_journal_types_.Has((*it)->GetServerModelType())) { | |
| 571 snapshot->delete_journals.insert(*it); | |
| 572 kernel_->delete_journals_->erase(it++); | |
| 573 } else { | |
| 574 ++it; | |
| 575 } | |
| 576 } | |
| 577 snapshot->delete_journals_to_purge.swap( | |
| 578 *kernel_->delete_journals_to_purge_); | |
| 491 } | 579 } |
| 492 | 580 |
| 493 bool Directory::SaveChanges() { | 581 bool Directory::SaveChanges() { |
| 494 bool success = false; | 582 bool success = false; |
| 495 | 583 |
| 496 base::AutoLock scoped_lock(kernel_->save_changes_mutex); | 584 base::AutoLock scoped_lock(kernel_->save_changes_mutex); |
| 497 | 585 |
| 498 // Snapshot and save. | 586 // Snapshot and save. |
| 499 SaveChangesSnapshot snapshot; | 587 SaveChangesSnapshot snapshot; |
| 500 TakeSnapshotForSaveChanges(&snapshot); | 588 TakeSnapshotForSaveChanges(&snapshot); |
| (...skipping 10 matching lines...) | |
| 511 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) { | 599 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) { |
| 512 if (snapshot.dirty_metas.empty()) | 600 if (snapshot.dirty_metas.empty()) |
| 513 return true; | 601 return true; |
| 514 | 602 |
| 515 // Need a write transaction as we are about to permanently purge entries. | 603 // Need a write transaction as we are about to permanently purge entries. |
| 516 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); | 604 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); |
| 517 ScopedKernelLock lock(this); | 605 ScopedKernelLock lock(this); |
| 518 // Now drop everything we can out of memory. | 606 // Now drop everything we can out of memory. |
| 519 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); | 607 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); |
| 520 i != snapshot.dirty_metas.end(); ++i) { | 608 i != snapshot.dirty_metas.end(); ++i) { |
| 521 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); | 609 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE)); |
| 522 MetahandlesIndex::iterator found = | 610 MetahandlesIndex::iterator found = |
| 523 kernel_->metahandles_index->find(&kernel_->needle); | 611 kernel_->metahandles_index->find(&kernel_->needle); |
| 524 EntryKernel* entry = (found == kernel_->metahandles_index->end() ? | 612 EntryKernel* entry = (found == kernel_->metahandles_index->end() ? |
| 525 NULL : *found); | 613 NULL : *found); |
| 526 if (entry && SafeToPurgeFromMemory(&trans, entry)) { | 614 if (entry && SafeToPurgeFromMemory(&trans, entry)) { |
| 527 // We now drop deleted metahandles that are up to date on both the client | 615 // We now drop deleted metahandles that are up to date on both the client |
| 528 // and the server. | 616 // and the server. |
| 529 size_t num_erased = 0; | 617 size_t num_erased = 0; |
| 530 num_erased = kernel_->ids_index->erase(entry); | 618 num_erased = kernel_->ids_index->erase(entry); |
| 531 DCHECK_EQ(1u, num_erased); | 619 DCHECK_EQ(1u, num_erased); |
| (...skipping 77 matching lines...) | |
| 609 ScopedKernelLock lock(this); | 697 ScopedKernelLock lock(this); |
| 610 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 698 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
| 611 | 699 |
| 612 // Because we optimistically cleared the dirty bit on the real entries when | 700 // Because we optimistically cleared the dirty bit on the real entries when |
| 613 // taking the snapshot, we must restore it on failure. Not doing this could | 701 // taking the snapshot, we must restore it on failure. Not doing this could |
| 614 // cause lost data, if no other changes are made to the in-memory entries | 702 // cause lost data, if no other changes are made to the in-memory entries |
| 615 // that would cause the dirty bit to get set again. Setting the bit ensures | 703 // that would cause the dirty bit to get set again. Setting the bit ensures |
| 616 // that SaveChanges will at least try again later. | 704 // that SaveChanges will at least try again later. |
| 617 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); | 705 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); |
| 618 i != snapshot.dirty_metas.end(); ++i) { | 706 i != snapshot.dirty_metas.end(); ++i) { |
| 619 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); | 707 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE)); |
| 620 MetahandlesIndex::iterator found = | 708 MetahandlesIndex::iterator found = |
| 621 kernel_->metahandles_index->find(&kernel_->needle); | 709 kernel_->metahandles_index->find(&kernel_->needle); |
| 622 if (found != kernel_->metahandles_index->end()) { | 710 if (found != kernel_->metahandles_index->end()) { |
| 623 (*found)->mark_dirty(kernel_->dirty_metahandles); | 711 (*found)->mark_dirty(kernel_->dirty_metahandles); |
| 624 } | 712 } |
| 625 } | 713 } |
| 626 | 714 |
| 627 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), | 715 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), |
| 628 snapshot.metahandles_to_purge.end()); | 716 snapshot.metahandles_to_purge.end()); |
| 717 | |
| 718 // Restore delete journals. | |
| 719 for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin(); | |
| 720 i != snapshot.delete_journals.end(); ++i) { | |
| 721 kernel_->needle.put(ID, (*i)->ref(ID)); | |
| 722 if (kernel_->delete_journals_->find(&kernel_->needle) == | |
| 723 kernel_->delete_journals_->end()) { | |
| 724 kernel_->delete_journals_->insert(new EntryKernel(**i)); | |
| 725 } | |
| 726 } | |
| 727 kernel_->delete_journals_to_purge_->insert( | |
| 728 snapshot.delete_journals_to_purge.begin(), | |
| 729 snapshot.delete_journals_to_purge.end()); | |
| 730 | |
| 629 } | 731 } |
| 630 | 732 |
| 631 void Directory::GetDownloadProgress( | 733 void Directory::GetDownloadProgress( |
| 632 ModelType model_type, | 734 ModelType model_type, |
| 633 sync_pb::DataTypeProgressMarker* value_out) const { | 735 sync_pb::DataTypeProgressMarker* value_out) const { |
| 634 ScopedKernelLock lock(this); | 736 ScopedKernelLock lock(this); |
| 635 return value_out->CopyFrom( | 737 return value_out->CopyFrom( |
| 636 kernel_->persisted_info.download_progress[model_type]); | 738 kernel_->persisted_info.download_progress[model_type]); |
| 637 } | 739 } |
| 638 | 740 |
| (...skipping 622 matching lines...) | |
| 1261 // ordering. | 1363 // ordering. |
| 1262 if (entry->ref(PREV_ID).IsRoot() || | 1364 if (entry->ref(PREV_ID).IsRoot() || |
| 1263 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) { | 1365 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) { |
| 1264 return entry; | 1366 return entry; |
| 1265 } | 1367 } |
| 1266 } | 1368 } |
| 1267 // There were no children in the linked list. | 1369 // There were no children in the linked list. |
| 1268 return NULL; | 1370 return NULL; |
| 1269 } | 1371 } |
| 1270 | 1372 |
| 1373 /* static */ | |
| 1374 bool Directory::IsDeleteJournalEnabled(ModelType type) { | |
| 1375 switch (type) { | |
| 1376 case BOOKMARKS: | |
| 1377 return true; | |
| 1378 default: | |
| 1379 return false; | |
| 1380 } | |
| 1381 } | |
| 1382 | |
| 1271 ScopedKernelLock::ScopedKernelLock(const Directory* dir) | 1383 ScopedKernelLock::ScopedKernelLock(const Directory* dir) |
| 1272 : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) { | 1384 : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) { |
| 1273 } | 1385 } |
| 1274 | 1386 |
| 1275 } // namespace syncable | 1387 } // namespace syncable |
| 1276 } // namespace syncer | 1388 } // namespace syncer |
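Taken together, the delete-journal bookkeeping this change threads through `UpdateDeleteJournals()`, `GetDeleteJournals()`, `PurgeDeleteJournals()` and the save-changes snapshot amounts to two containers: a set of server-deleted entries, and a set of metahandles whose journal rows should be dropped from the database on the next save. A minimal, hypothetical sketch of that state machine; the `DeleteJournal` and `Entry` names below are simplifications, not the Chromium classes:

```cpp
#include <cstdint>
#include <map>
#include <set>

// Simplified stand-in for a syncable entry; only the fields the journal
// cares about are modeled here.
struct Entry {
  int64_t meta_handle;
  bool server_is_del;
};

class DeleteJournal {
 public:
  // Roughly mirrors Directory::UpdateDeleteJournals(): remember server-side
  // deletions, and drop journal entries for items that turn out to be live.
  void Update(bool was_deleted, const Entry& entry) {
    if (entry.server_is_del) {
      journal_[entry.meta_handle] = entry;    // New delete: remember it.
      to_purge_.erase(entry.meta_handle);     // Its journal row is wanted again.
    } else {
      journal_.erase(entry.meta_handle);      // Undelete or recreated entry.
      if (was_deleted)
        to_purge_.insert(entry.meta_handle);  // Stale row to drop on next save.
    }
  }

  // Roughly mirrors Directory::PurgeDeleteJournals(): forget entries whose
  // journal rows have been (or are about to be) removed from the database.
  void Purge(const std::set<int64_t>& handles) {
    for (std::set<int64_t>::const_iterator it = handles.begin();
         it != handles.end(); ++it)
      journal_.erase(*it);
    to_purge_.insert(handles.begin(), handles.end());
  }

 private:
  std::map<int64_t, Entry> journal_;  // Deleted entries, keyed by metahandle.
  std::set<int64_t> to_purge_;        // Rows to delete from the DB on save.
};
```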