Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(56)

Side by Side Diff: sync/syncable/directory.cc

Issue 11441026: [Sync] Add support for loading, updating and querying delete journals in (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « sync/syncable/directory.h ('k') | sync/syncable/directory_backing_store.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "sync/syncable/directory.h" 5 #include "sync/syncable/directory.h"
6 6
7 #include "base/debug/trace_event.h" 7 #include "base/debug/trace_event.h"
8 #include "base/perftimer.h" 8 #include "base/perftimer.h"
9 #include "base/stl_util.h" 9 #include "base/stl_util.h"
10 #include "base/string_number_conversions.h" 10 #include "base/string_number_conversions.h"
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
94 download_progress[model_type].set_data_type_id( 94 download_progress[model_type].set_data_type_id(
95 GetSpecificsFieldNumberFromModelType(model_type)); 95 GetSpecificsFieldNumberFromModelType(model_type));
96 // An empty-string token indicates no prior knowledge. 96 // An empty-string token indicates no prior knowledge.
97 download_progress[model_type].set_token(std::string()); 97 download_progress[model_type].set_token(std::string());
98 } 98 }
99 99
// A freshly constructed snapshot carries no valid kernel share info until
// TakeSnapshotForSaveChanges() populates it.
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
103 103
104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {} 104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
105 STLDeleteElements(&dirty_metas);
106 STLDeleteElements(&delete_journals);
107 }
105 108
// Builds the Directory's in-memory state from loaded share info: the entry
// indices, dirty/purge tracking sets, and the delete-journal containers.
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      metahandles_index(new Directory::MetahandlesIndex),
      ids_index(new Directory::IdsIndex),
      parent_id_child_index(new Directory::ParentIdChildIndex),
      client_tag_index(new Directory::ClientTagIndex),
      unsynced_metahandles(new MetahandleSet),
      dirty_metahandles(new MetahandleSet),
      metahandles_to_purge(new MetahandleSet),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      // Start handing out metahandles just past the highest one loaded.
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer),
      // Journals of locally known server deletions, plus the handles of
      // journal rows to remove from the database on the next save.
      delete_journals_(new Directory::IdsIndex),
      delete_journals_to_purge_(new MetahandleSet) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}
128 133
129 Directory::Kernel::~Kernel() { 134 Directory::Kernel::~Kernel() {
130 delete unsynced_metahandles; 135 delete unsynced_metahandles;
131 delete dirty_metahandles; 136 delete dirty_metahandles;
132 delete metahandles_to_purge; 137 delete metahandles_to_purge;
133 delete parent_id_child_index; 138 delete parent_id_child_index;
134 delete client_tag_index; 139 delete client_tag_index;
135 delete ids_index; 140 delete ids_index;
136 STLDeleteElements(metahandles_index); 141 STLDeleteElements(metahandles_index);
137 delete metahandles_index; 142 delete metahandles_index;
143 STLDeleteElements(delete_journals_);
144 delete delete_journals_;
145 delete delete_journals_to_purge_;
138 } 146 }
139 147
140 Directory::Directory( 148 Directory::Directory(
141 DirectoryBackingStore* store, 149 DirectoryBackingStore* store,
142 UnrecoverableErrorHandler* unrecoverable_error_handler, 150 UnrecoverableErrorHandler* unrecoverable_error_handler,
143 ReportUnrecoverableErrorFunction report_unrecoverable_error_function, 151 ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
144 NigoriHandler* nigori_handler, 152 NigoriHandler* nigori_handler,
145 Cryptographer* cryptographer) 153 Cryptographer* cryptographer)
146 : kernel_(NULL), 154 : kernel_(NULL),
147 store_(store), 155 store_(store),
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
189 } 197 }
190 DCHECK(!entry->is_dirty()); 198 DCHECK(!entry->is_dirty());
191 } 199 }
192 } 200 }
193 201
194 DirOpenResult Directory::OpenImpl( 202 DirOpenResult Directory::OpenImpl(
195 const string& name, 203 const string& name,
196 DirectoryChangeDelegate* delegate, 204 DirectoryChangeDelegate* delegate,
197 const WeakHandle<TransactionObserver>& 205 const WeakHandle<TransactionObserver>&
198 transaction_observer) { 206 transaction_observer) {
199
200 KernelLoadInfo info; 207 KernelLoadInfo info;
201 // Temporary indices before kernel_ initialized in case Load fails. We 0(1) 208 // Temporary indices before kernel_ initialized in case Load fails. We 0(1)
202 // swap these later. 209 // swap these later.
203 MetahandlesIndex metas_bucket; 210 MetahandlesIndex metas_bucket;
204 DirOpenResult result = store_->Load(&metas_bucket, &info); 211 IdsIndex delete_journals;
212
213 DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info);
205 if (OPENED != result) 214 if (OPENED != result)
206 return result; 215 return result;
207 216
208 kernel_ = new Kernel(name, info, delegate, transaction_observer); 217 kernel_ = new Kernel(name, info, delegate, transaction_observer);
209 kernel_->metahandles_index->swap(metas_bucket); 218 kernel_->metahandles_index->swap(metas_bucket);
219 kernel_->delete_journals_->swap(delete_journals);
210 InitializeIndices(); 220 InitializeIndices();
211 221
212 // Write back the share info to reserve some space in 'next_id'. This will 222 // Write back the share info to reserve some space in 'next_id'. This will
213 // prevent local ID reuse in the case of an early crash. See the comments in 223 // prevent local ID reuse in the case of an early crash. See the comments in
214 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information. 224 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
215 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 225 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
216 if (!SaveChanges()) 226 if (!SaveChanges())
217 return FAILED_INITIAL_WRITE; 227 return FAILED_INITIAL_WRITE;
218 228
219 return OPENED; 229 return OPENED;
220 } 230 }
221 231
222 void Directory::Close() { 232 void Directory::Close() {
223 store_.reset(); 233 store_.reset();
224 if (kernel_) { 234 if (kernel_) {
225 delete kernel_; 235 delete kernel_;
226 kernel_ = NULL; 236 kernel_ = NULL;
227 } 237 }
228 } 238 }
229 239
230 void Directory::OnUnrecoverableError(const BaseTransaction* trans, 240 void Directory::OnUnrecoverableError(const BaseTransaction* trans,
231 const tracked_objects::Location& location, 241 const tracked_objects::Location& location,
232 const std::string & message) { 242 const std::string & message) {
233 DCHECK(trans != NULL); 243 DCHECK(trans != NULL);
234 unrecoverable_error_set_ = true; 244 unrecoverable_error_set_ = true;
235 unrecoverable_error_handler_->OnUnrecoverableError(location, 245 unrecoverable_error_handler_->OnUnrecoverableError(location,
236 message); 246 message);
237 } 247 }
238 248
249 void Directory::UpdateDeleteJournalForServerDelete(BaseTransaction* trans,
250 bool was_deleted,
251 const EntryKernel& entry) {
252 if (!(IsDeleteJournalEnabled(entry.GetServerModelType()) ||
253 IsDeleteJournalEnabled(
254 GetModelTypeFromSpecifics(entry.ref(SPECIFICS))))) {
tim (not reviewing) 2012/12/14 20:12:38 Can you add the comment from your reply in here ex
haitaol1 2012/12/14 23:15:36 Done.
255 return;
256 }
257
258 ScopedKernelLock lock(this);
259 kernel_->needle.put(ID, entry.ref(ID));
260 IdsIndex::const_iterator it =
261 kernel_->delete_journals_->find(&kernel_->needle);
262
263 if (entry.ref(SERVER_IS_DEL)) {
264 if (it == kernel_->delete_journals_->end()) {
265 // New delete.
266 EntryKernel* t = new EntryKernel(entry);
267 kernel_->delete_journals_->insert(t);
268 kernel_->delete_journals_to_purge_->erase(t->ref(META_HANDLE));
269 }
270 } else {
271 // Undelete. This could happen in two cases:
272 // * An entry was actually deleted and undeleted: was_deleted = true.
273 // * A data type was broken, i.e. encountered unrecoverable error, in last
274 // sync session and all its entries were duplicated in delete journals.
275 // On restart, entries are recreated from downloads and recreation calls
276 // UpdateDeleteJournals() to remove live entries from delete journals,
277 // thus only deleted entries remain in journals.
278 if (it != kernel_->delete_journals_->end()) {
279 kernel_->delete_journals_to_purge_->insert((*it)->ref(META_HANDLE));
280 delete *it;
281 kernel_->delete_journals_->erase(it);
282 } else if (was_deleted) {
283 kernel_->delete_journals_to_purge_->insert((*it)->ref(META_HANDLE));
284 }
285 }
286 }
287
288 void Directory::GetDeleteJournals(BaseTransaction* trans,
289 ModelType type,
290 EntryKernelSet* deleted_entries) {
291 ScopedKernelLock lock(this);
292 DCHECK(!passive_delete_journal_types_.Has(type));
293 for (IdsIndex::const_iterator it = kernel_->delete_journals_->begin();
294 it != kernel_->delete_journals_->end(); ++it) {
295 if ((*it)->GetServerModelType() == type ||
296 GetModelTypeFromSpecifics((*it)->ref(SPECIFICS)) == type) {
297 deleted_entries->insert(*it);
298 }
299 }
300 passive_delete_journal_types_.Put(type);
301 }
302
// Deletes from the in-memory journal every entry whose metahandle appears in
// |to_purge|, and queues those handles so the next SaveChanges() removes the
// corresponding rows from the database as well.
void Directory::PurgeDeleteJournals(BaseTransaction* trans,
                                    const MetahandleSet& to_purge) {
  ScopedKernelLock lock(this);
  IdsIndex::const_iterator it = kernel_->delete_journals_->begin();
  while (it != kernel_->delete_journals_->end()) {
    int64 handle = (*it)->ref(META_HANDLE);
    if (to_purge.count(handle)) {
      delete *it;
      // Post-increment before erase keeps the iterator valid across removal.
      kernel_->delete_journals_->erase(it++);
    } else {
      ++it;
    }
  }
  kernel_->delete_journals_to_purge_->insert(to_purge.begin(), to_purge.end());
}
239 318
// Convenience overload: acquires the kernel lock, then delegates to the
// lock-holding variant.
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}
244 323
245 EntryKernel* Directory::GetEntryById(const Id& id, 324 EntryKernel* Directory::GetEntryById(const Id& id,
246 ScopedKernelLock* const lock) { 325 ScopedKernelLock* const lock) {
247 DCHECK(kernel_); 326 DCHECK(kernel_);
248 // Find it in the in memory ID index. 327 // Find it in the in memory ID index.
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after
459 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and 538 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
460 // clear dirty flags. 539 // clear dirty flags.
461 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin(); 540 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
462 i != kernel_->dirty_metahandles->end(); ++i) { 541 i != kernel_->dirty_metahandles->end(); ++i) {
463 EntryKernel* entry = GetEntryByHandle(*i, &lock); 542 EntryKernel* entry = GetEntryByHandle(*i, &lock);
464 if (!entry) 543 if (!entry)
465 continue; 544 continue;
466 // Skip over false positives; it happens relatively infrequently. 545 // Skip over false positives; it happens relatively infrequently.
467 if (!entry->is_dirty()) 546 if (!entry->is_dirty())
468 continue; 547 continue;
469 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry); 548 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
549 new EntryKernel(*entry));
470 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); 550 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
471 // We don't bother removing from the index here as we blow the entire thing 551 // We don't bother removing from the index here as we blow the entire thing
472 // in a moment, and it unnecessarily complicates iteration. 552 // in a moment, and it unnecessarily complicates iteration.
473 entry->clear_dirty(NULL); 553 entry->clear_dirty(NULL);
474 } 554 }
475 ClearDirtyMetahandles(); 555 ClearDirtyMetahandles();
476 556
477 // Set purged handles. 557 // Set purged handles.
478 DCHECK(snapshot->metahandles_to_purge.empty()); 558 DCHECK(snapshot->metahandles_to_purge.empty());
479 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); 559 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
480 560
481 // Fill kernel_info_status and kernel_info. 561 // Fill kernel_info_status and kernel_info.
482 snapshot->kernel_info = kernel_->persisted_info; 562 snapshot->kernel_info = kernel_->persisted_info;
483 // To avoid duplicates when the process crashes, we record the next_id to be 563 // To avoid duplicates when the process crashes, we record the next_id to be
484 // greater magnitude than could possibly be reached before the next save 564 // greater magnitude than could possibly be reached before the next save
485 // changes. In other words, it's effectively impossible for the user to 565 // changes. In other words, it's effectively impossible for the user to
486 // generate 65536 new bookmarks in 3 seconds. 566 // generate 65536 new bookmarks in 3 seconds.
487 snapshot->kernel_info.next_id -= 65536; 567 snapshot->kernel_info.next_id -= 65536;
488 snapshot->kernel_info_status = kernel_->info_status; 568 snapshot->kernel_info_status = kernel_->info_status;
489 // This one we reset on failure. 569 // This one we reset on failure.
490 kernel_->info_status = KERNEL_SHARE_INFO_VALID; 570 kernel_->info_status = KERNEL_SHARE_INFO_VALID;
571
572 // Move passive delete journals to snapshot. Will copy back if snapshot fails
573 // to save.
574 MetahandlesIndex::const_iterator it = kernel_->delete_journals_->begin();
575 while (it != kernel_->delete_journals_->end()) {
576 if (passive_delete_journal_types_.Has((*it)->GetServerModelType()) ||
577 passive_delete_journal_types_.Has(GetModelTypeFromSpecifics(
578 (*it)->ref(SPECIFICS)))) {
579 snapshot->delete_journals.insert(*it);
580 kernel_->delete_journals_->erase(it++);
581 } else {
582 ++it;
583 }
584 }
585 snapshot->delete_journals_to_purge.swap(
586 *kernel_->delete_journals_to_purge_);
491 } 587 }
492 588
493 bool Directory::SaveChanges() { 589 bool Directory::SaveChanges() {
494 bool success = false; 590 bool success = false;
495 591
496 base::AutoLock scoped_lock(kernel_->save_changes_mutex); 592 base::AutoLock scoped_lock(kernel_->save_changes_mutex);
497 593
498 // Snapshot and save. 594 // Snapshot and save.
499 SaveChangesSnapshot snapshot; 595 SaveChangesSnapshot snapshot;
500 TakeSnapshotForSaveChanges(&snapshot); 596 TakeSnapshotForSaveChanges(&snapshot);
(...skipping 10 matching lines...) Expand all
511 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) { 607 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
512 if (snapshot.dirty_metas.empty()) 608 if (snapshot.dirty_metas.empty())
513 return true; 609 return true;
514 610
515 // Need a write transaction as we are about to permanently purge entries. 611 // Need a write transaction as we are about to permanently purge entries.
516 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); 612 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
517 ScopedKernelLock lock(this); 613 ScopedKernelLock lock(this);
518 // Now drop everything we can out of memory. 614 // Now drop everything we can out of memory.
519 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); 615 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
520 i != snapshot.dirty_metas.end(); ++i) { 616 i != snapshot.dirty_metas.end(); ++i) {
521 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); 617 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
522 MetahandlesIndex::iterator found = 618 MetahandlesIndex::iterator found =
523 kernel_->metahandles_index->find(&kernel_->needle); 619 kernel_->metahandles_index->find(&kernel_->needle);
524 EntryKernel* entry = (found == kernel_->metahandles_index->end() ? 620 EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
525 NULL : *found); 621 NULL : *found);
526 if (entry && SafeToPurgeFromMemory(&trans, entry)) { 622 if (entry && SafeToPurgeFromMemory(&trans, entry)) {
527 // We now drop deleted metahandles that are up to date on both the client 623 // We now drop deleted metahandles that are up to date on both the client
528 // and the server. 624 // and the server.
529 size_t num_erased = 0; 625 size_t num_erased = 0;
530 num_erased = kernel_->ids_index->erase(entry); 626 num_erased = kernel_->ids_index->erase(entry);
531 DCHECK_EQ(1u, num_erased); 627 DCHECK_EQ(1u, num_erased);
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
609 ScopedKernelLock lock(this); 705 ScopedKernelLock lock(this);
610 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 706 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
611 707
612 // Because we optimistically cleared the dirty bit on the real entries when 708 // Because we optimistically cleared the dirty bit on the real entries when
613 // taking the snapshot, we must restore it on failure. Not doing this could 709 // taking the snapshot, we must restore it on failure. Not doing this could
614 // cause lost data, if no other changes are made to the in-memory entries 710 // cause lost data, if no other changes are made to the in-memory entries
615 // that would cause the dirty bit to get set again. Setting the bit ensures 711 // that would cause the dirty bit to get set again. Setting the bit ensures
616 // that SaveChanges will at least try again later. 712 // that SaveChanges will at least try again later.
617 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); 713 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
618 i != snapshot.dirty_metas.end(); ++i) { 714 i != snapshot.dirty_metas.end(); ++i) {
619 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); 715 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
620 MetahandlesIndex::iterator found = 716 MetahandlesIndex::iterator found =
621 kernel_->metahandles_index->find(&kernel_->needle); 717 kernel_->metahandles_index->find(&kernel_->needle);
622 if (found != kernel_->metahandles_index->end()) { 718 if (found != kernel_->metahandles_index->end()) {
623 (*found)->mark_dirty(kernel_->dirty_metahandles); 719 (*found)->mark_dirty(kernel_->dirty_metahandles);
624 } 720 }
625 } 721 }
626 722
627 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), 723 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
628 snapshot.metahandles_to_purge.end()); 724 snapshot.metahandles_to_purge.end());
725
726 // Restore delete journals.
727 for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
728 i != snapshot.delete_journals.end(); ++i) {
729 kernel_->needle.put(ID, (*i)->ref(ID));
730 if (kernel_->delete_journals_->find(&kernel_->needle) ==
731 kernel_->delete_journals_->end()) {
732 kernel_->delete_journals_->insert(new EntryKernel(**i));
733 }
734 }
735 kernel_->delete_journals_to_purge_->insert(
736 snapshot.delete_journals_to_purge.begin(),
737 snapshot.delete_journals_to_purge.end());
738
629 } 739 }
630 740
631 void Directory::GetDownloadProgress( 741 void Directory::GetDownloadProgress(
632 ModelType model_type, 742 ModelType model_type,
633 sync_pb::DataTypeProgressMarker* value_out) const { 743 sync_pb::DataTypeProgressMarker* value_out) const {
634 ScopedKernelLock lock(this); 744 ScopedKernelLock lock(this);
635 return value_out->CopyFrom( 745 return value_out->CopyFrom(
636 kernel_->persisted_info.download_progress[model_type]); 746 kernel_->persisted_info.download_progress[model_type]);
637 } 747 }
638 748
(...skipping 622 matching lines...) Expand 10 before | Expand all | Expand 10 after
1261 // ordering. 1371 // ordering.
1262 if (entry->ref(PREV_ID).IsRoot() || 1372 if (entry->ref(PREV_ID).IsRoot() ||
1263 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) { 1373 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
1264 return entry; 1374 return entry;
1265 } 1375 }
1266 } 1376 }
1267 // There were no children in the linked list. 1377 // There were no children in the linked list.
1268 return NULL; 1378 return NULL;
1269 } 1379 }
1270 1380
1381 /* static */
1382 bool Directory::IsDeleteJournalEnabled(ModelType type) {
1383 switch (type) {
1384 case BOOKMARKS:
1385 return true;
1386 default:
1387 return false;
1388 }
1389 }
1390
// Holds the directory kernel's mutex for this object's lifetime and keeps a
// non-const Directory pointer for use by lock-holding internal methods.
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
    : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}
1274 1394
1275 } // namespace syncable 1395 } // namespace syncable
1276 } // namespace syncer 1396 } // namespace syncer
OLDNEW
« no previous file with comments | « sync/syncable/directory.h ('k') | sync/syncable/directory_backing_store.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698