OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
6 | 6 |
| 7 #include "base/base64.h" |
7 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
8 #include "base/perftimer.h" | |
9 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
10 #include "base/string_number_conversions.h" | 10 #include "base/string_number_conversions.h" |
11 #include "sync/internal_api/public/base/node_ordinal.h" | 11 #include "sync/internal_api/public/base/unique_position.h" |
12 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" | 12 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" |
13 #include "sync/syncable/base_transaction.h" | 13 #include "sync/syncable/base_transaction.h" |
14 #include "sync/syncable/entry.h" | 14 #include "sync/syncable/entry.h" |
15 #include "sync/syncable/entry_kernel.h" | 15 #include "sync/syncable/entry_kernel.h" |
16 #include "sync/syncable/in_memory_directory_backing_store.h" | 16 #include "sync/syncable/in_memory_directory_backing_store.h" |
17 #include "sync/syncable/on_disk_directory_backing_store.h" | 17 #include "sync/syncable/on_disk_directory_backing_store.h" |
18 #include "sync/syncable/read_transaction.h" | 18 #include "sync/syncable/read_transaction.h" |
19 #include "sync/syncable/scoped_index_updater.h" | 19 #include "sync/syncable/scoped_index_updater.h" |
| 20 #include "sync/syncable/scoped_parent_child_index_updater.h" |
20 #include "sync/syncable/syncable-inl.h" | 21 #include "sync/syncable/syncable-inl.h" |
21 #include "sync/syncable/syncable_changes_version.h" | 22 #include "sync/syncable/syncable_changes_version.h" |
22 #include "sync/syncable/syncable_util.h" | 23 #include "sync/syncable/syncable_util.h" |
23 #include "sync/syncable/write_transaction.h" | 24 #include "sync/syncable/write_transaction.h" |
24 | 25 |
25 using std::string; | 26 using std::string; |
26 | 27 |
27 namespace syncer { | 28 namespace syncer { |
28 namespace syncable { | 29 namespace syncable { |
29 | 30 |
30 namespace { | 31 namespace { |
31 // Helper function to add an item to the index, if it ought to be added. | 32 // Helper function to add an item to the index, if it ought to be added. |
32 template<typename Indexer> | 33 template<typename Indexer> |
33 void InitializeIndexEntry(EntryKernel* entry, | 34 void InitializeIndexEntry(EntryKernel* entry, |
34 typename Index<Indexer>::Set* index) { | 35 typename Index<Indexer>::Set* index) { |
35 if (Indexer::ShouldInclude(entry)) { | 36 if (Indexer::ShouldInclude(entry)) { |
36 index->insert(entry); | 37 index->insert(entry); |
37 } | 38 } |
38 } | 39 } |
39 | |
40 } | 40 } |
41 | 41 |
42 // static | 42 // static |
43 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { | 43 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { |
44 return !a->ref(UNIQUE_CLIENT_TAG).empty(); | 44 return !a->ref(UNIQUE_CLIENT_TAG).empty(); |
45 } | 45 } |
46 | 46 |
47 bool ParentIdAndHandleIndexer::Comparator::operator() ( | |
48 const syncable::EntryKernel* a, | |
49 const syncable::EntryKernel* b) const { | |
50 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID)); | |
51 if (cmp != 0) | |
52 return cmp < 0; | |
53 | |
54 const NodeOrdinal& a_position = a->ref(SERVER_ORDINAL_IN_PARENT); | |
55 const NodeOrdinal& b_position = b->ref(SERVER_ORDINAL_IN_PARENT); | |
56 if (!a_position.Equals(b_position)) | |
57 return a_position.LessThan(b_position); | |
58 | |
59 cmp = a->ref(ID).compare(b->ref(ID)); | |
60 return cmp < 0; | |
61 } | |
62 | |
63 // static | |
64 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) { | |
65 // This index excludes deleted items and the root item. The root | |
66 // item is excluded so that it doesn't show up as a child of itself. | |
67 return !a->ref(IS_DEL) && !a->ref(ID).IsRoot(); | |
68 } | |
69 | |
70 // static | 47 // static |
71 const FilePath::CharType Directory::kSyncDatabaseFilename[] = | 48 const FilePath::CharType Directory::kSyncDatabaseFilename[] = |
72 FILE_PATH_LITERAL("SyncData.sqlite3"); | 49 FILE_PATH_LITERAL("SyncData.sqlite3"); |
73 | 50 |
74 void Directory::InitKernelForTest( | 51 void Directory::InitKernelForTest( |
75 const std::string& name, | 52 const std::string& name, |
76 DirectoryChangeDelegate* delegate, | 53 DirectoryChangeDelegate* delegate, |
77 const WeakHandle<TransactionObserver>& transaction_observer) { | 54 const WeakHandle<TransactionObserver>& transaction_observer) { |
78 DCHECK(!kernel_); | 55 DCHECK(!kernel_); |
79 kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer); | 56 kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer); |
(...skipping 24 matching lines...) Expand all Loading... |
104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {} | 81 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {} |
105 | 82 |
106 Directory::Kernel::Kernel( | 83 Directory::Kernel::Kernel( |
107 const std::string& name, | 84 const std::string& name, |
108 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, | 85 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, |
109 const WeakHandle<TransactionObserver>& transaction_observer) | 86 const WeakHandle<TransactionObserver>& transaction_observer) |
110 : next_write_transaction_id(0), | 87 : next_write_transaction_id(0), |
111 name(name), | 88 name(name), |
112 metahandles_index(new Directory::MetahandlesIndex), | 89 metahandles_index(new Directory::MetahandlesIndex), |
113 ids_index(new Directory::IdsIndex), | 90 ids_index(new Directory::IdsIndex), |
114 parent_id_child_index(new Directory::ParentIdChildIndex), | 91 parent_child_index(new ParentChildIndex), |
115 client_tag_index(new Directory::ClientTagIndex), | 92 client_tag_index(new Directory::ClientTagIndex), |
116 unsynced_metahandles(new MetahandleSet), | 93 unsynced_metahandles(new MetahandleSet), |
117 dirty_metahandles(new MetahandleSet), | 94 dirty_metahandles(new MetahandleSet), |
118 metahandles_to_purge(new MetahandleSet), | 95 metahandles_to_purge(new MetahandleSet), |
119 info_status(Directory::KERNEL_SHARE_INFO_VALID), | 96 info_status(Directory::KERNEL_SHARE_INFO_VALID), |
120 persisted_info(info.kernel_info), | 97 persisted_info(info.kernel_info), |
121 cache_guid(info.cache_guid), | 98 cache_guid(info.cache_guid), |
122 next_metahandle(info.max_metahandle + 1), | 99 next_metahandle(info.max_metahandle + 1), |
123 delegate(delegate), | 100 delegate(delegate), |
124 transaction_observer(transaction_observer) { | 101 transaction_observer(transaction_observer) { |
125 DCHECK(delegate); | 102 DCHECK(delegate); |
126 DCHECK(transaction_observer.IsInitialized()); | 103 DCHECK(transaction_observer.IsInitialized()); |
127 } | 104 } |
128 | 105 |
| 106 |
129 Directory::Kernel::~Kernel() { | 107 Directory::Kernel::~Kernel() { |
130 delete unsynced_metahandles; | 108 delete unsynced_metahandles; |
131 delete dirty_metahandles; | 109 delete dirty_metahandles; |
132 delete metahandles_to_purge; | 110 delete metahandles_to_purge; |
133 delete parent_id_child_index; | 111 delete parent_child_index; |
134 delete client_tag_index; | 112 delete client_tag_index; |
135 delete ids_index; | 113 delete ids_index; |
136 STLDeleteElements(metahandles_index); | 114 STLDeleteElements(metahandles_index); |
137 delete metahandles_index; | 115 delete metahandles_index; |
138 } | 116 } |
139 | 117 |
140 Directory::Directory( | 118 Directory::Directory( |
141 DirectoryBackingStore* store, | 119 DirectoryBackingStore* store, |
142 UnrecoverableErrorHandler* unrecoverable_error_handler, | 120 UnrecoverableErrorHandler* unrecoverable_error_handler, |
143 ReportUnrecoverableErrorFunction report_unrecoverable_error_function, | 121 ReportUnrecoverableErrorFunction report_unrecoverable_error_function, |
(...skipping 25 matching lines...) Expand all Loading... |
169 | 147 |
170 if (OPENED != result) | 148 if (OPENED != result) |
171 Close(); | 149 Close(); |
172 return result; | 150 return result; |
173 } | 151 } |
174 | 152 |
175 void Directory::InitializeIndices() { | 153 void Directory::InitializeIndices() { |
176 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); | 154 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); |
177 for (; it != kernel_->metahandles_index->end(); ++it) { | 155 for (; it != kernel_->metahandles_index->end(); ++it) { |
178 EntryKernel* entry = *it; | 156 EntryKernel* entry = *it; |
179 InitializeIndexEntry<ParentIdAndHandleIndexer>(entry, | 157 if (ParentChildIndex::ShouldInclude(entry)) |
180 kernel_->parent_id_child_index); | 158 kernel_->parent_child_index->Insert(entry); |
181 InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index); | 159 InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index); |
182 InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index); | 160 InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index); |
183 const int64 metahandle = entry->ref(META_HANDLE); | 161 const int64 metahandle = entry->ref(META_HANDLE); |
184 if (entry->ref(IS_UNSYNCED)) | 162 if (entry->ref(IS_UNSYNCED)) |
185 kernel_->unsynced_metahandles->insert(metahandle); | 163 kernel_->unsynced_metahandles->insert(metahandle); |
186 if (entry->ref(IS_UNAPPLIED_UPDATE)) { | 164 if (entry->ref(IS_UNAPPLIED_UPDATE)) { |
187 const ModelType type = entry->GetServerModelType(); | 165 const ModelType type = entry->GetServerModelType(); |
188 kernel_->unapplied_update_metahandles[type].insert(metahandle); | 166 kernel_->unapplied_update_metahandles[type].insert(metahandle); |
189 } | 167 } |
190 DCHECK(!entry->is_dirty()); | 168 DCHECK(!entry->is_dirty()); |
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
349 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) | 327 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) |
350 return false; | 328 return false; |
351 | 329 |
352 static const char error[] = "Entry already in memory index."; | 330 static const char error[] = "Entry already in memory index."; |
353 if (!SyncAssert(kernel_->metahandles_index->insert(entry).second, | 331 if (!SyncAssert(kernel_->metahandles_index->insert(entry).second, |
354 FROM_HERE, | 332 FROM_HERE, |
355 error, | 333 error, |
356 trans)) | 334 trans)) |
357 return false; | 335 return false; |
358 | 336 |
359 if (!entry->ref(IS_DEL)) { | 337 if (ParentChildIndex::ShouldInclude(entry)) { |
360 if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second, | 338 if (!SyncAssert(kernel_->parent_child_index->Insert(entry), |
361 FROM_HERE, | 339 FROM_HERE, |
362 error, | 340 error, |
363 trans)) { | 341 trans)) { |
364 return false; | 342 return false; |
365 } | 343 } |
366 } | 344 } |
367 if (!SyncAssert(kernel_->ids_index->insert(entry).second, | 345 if (!SyncAssert(kernel_->ids_index->insert(entry).second, |
368 FROM_HERE, | 346 FROM_HERE, |
369 error, | 347 error, |
370 trans)) | 348 trans)) |
(...skipping 10 matching lines...) Expand all Loading... |
381 bool Directory::ReindexId(WriteTransaction* trans, | 359 bool Directory::ReindexId(WriteTransaction* trans, |
382 EntryKernel* const entry, | 360 EntryKernel* const entry, |
383 const Id& new_id) { | 361 const Id& new_id) { |
384 ScopedKernelLock lock(this); | 362 ScopedKernelLock lock(this); |
385 if (NULL != GetEntryById(new_id, &lock)) | 363 if (NULL != GetEntryById(new_id, &lock)) |
386 return false; | 364 return false; |
387 | 365 |
388 { | 366 { |
389 // Update the indices that depend on the ID field. | 367 // Update the indices that depend on the ID field. |
390 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index); | 368 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index); |
391 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry, | 369 ScopedParentChildIndexUpdater updater_b(lock, entry, |
392 kernel_->parent_id_child_index); | 370 kernel_->parent_child_index); |
393 entry->put(ID, new_id); | 371 entry->put(ID, new_id); |
394 } | 372 } |
395 return true; | 373 return true; |
396 } | 374 } |
397 | 375 |
398 bool Directory::ReindexParentId(WriteTransaction* trans, | 376 bool Directory::ReindexParentId(WriteTransaction* trans, |
399 EntryKernel* const entry, | 377 EntryKernel* const entry, |
400 const Id& new_parent_id) { | 378 const Id& new_parent_id) { |
401 ScopedKernelLock lock(this); | 379 ScopedKernelLock lock(this); |
402 | 380 |
403 { | 381 { |
404 // Update the indices that depend on the PARENT_ID field. | 382 // Update the indices that depend on the PARENT_ID field. |
405 ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry, | 383 ScopedParentChildIndexUpdater index_updater(lock, entry, |
406 kernel_->parent_id_child_index); | 384 kernel_->parent_child_index); |
407 entry->put(PARENT_ID, new_parent_id); | 385 entry->put(PARENT_ID, new_parent_id); |
408 } | 386 } |
409 return true; | 387 return true; |
410 } | 388 } |
411 | 389 |
412 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { | 390 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { |
413 DCHECK(trans != NULL); | 391 DCHECK(trans != NULL); |
414 return unrecoverable_error_set_; | 392 return unrecoverable_error_set_; |
415 } | 393 } |
416 | 394 |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
528 // and the server. | 506 // and the server. |
529 size_t num_erased = 0; | 507 size_t num_erased = 0; |
530 num_erased = kernel_->ids_index->erase(entry); | 508 num_erased = kernel_->ids_index->erase(entry); |
531 DCHECK_EQ(1u, num_erased); | 509 DCHECK_EQ(1u, num_erased); |
532 num_erased = kernel_->metahandles_index->erase(entry); | 510 num_erased = kernel_->metahandles_index->erase(entry); |
533 DCHECK_EQ(1u, num_erased); | 511 DCHECK_EQ(1u, num_erased); |
534 | 512 |
535 // Might not be in it | 513 // Might not be in it |
536 num_erased = kernel_->client_tag_index->erase(entry); | 514 num_erased = kernel_->client_tag_index->erase(entry); |
537 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); | 515 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); |
538 if (!SyncAssert(!kernel_->parent_id_child_index->count(entry), | 516 if (!SyncAssert(!kernel_->parent_child_index->Contains(entry), |
539 FROM_HERE, | 517 FROM_HERE, |
540 "Deleted entry still present", | 518 "Deleted entry still present", |
541 (&trans))) | 519 (&trans))) |
542 return false; | 520 return false; |
543 delete entry; | 521 delete entry; |
544 } | 522 } |
545 if (trans.unrecoverable_error_set()) | 523 if (trans.unrecoverable_error_set()) |
546 return false; | 524 return false; |
547 } | 525 } |
548 return true; | 526 return true; |
(...skipping 11 matching lines...) Expand all Loading... |
560 while (it != kernel_->metahandles_index->end()) { | 538 while (it != kernel_->metahandles_index->end()) { |
561 const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS); | 539 const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS); |
562 const sync_pb::EntitySpecifics& server_specifics = | 540 const sync_pb::EntitySpecifics& server_specifics = |
563 (*it)->ref(SERVER_SPECIFICS); | 541 (*it)->ref(SERVER_SPECIFICS); |
564 ModelType local_type = GetModelTypeFromSpecifics(local_specifics); | 542 ModelType local_type = GetModelTypeFromSpecifics(local_specifics); |
565 ModelType server_type = GetModelTypeFromSpecifics(server_specifics); | 543 ModelType server_type = GetModelTypeFromSpecifics(server_specifics); |
566 | 544 |
567 // Note the dance around incrementing |it|, since we sometimes erase(). | 545 // Note the dance around incrementing |it|, since we sometimes erase(). |
568 if ((IsRealDataType(local_type) && types.Has(local_type)) || | 546 if ((IsRealDataType(local_type) && types.Has(local_type)) || |
569 (IsRealDataType(server_type) && types.Has(server_type))) { | 547 (IsRealDataType(server_type) && types.Has(server_type))) { |
570 if (!UnlinkEntryFromOrder(*it, &trans, &lock, DATA_TYPE_PURGE)) | |
571 return false; | |
572 | 548 |
573 int64 handle = (*it)->ref(META_HANDLE); | 549 int64 handle = (*it)->ref(META_HANDLE); |
574 kernel_->metahandles_to_purge->insert(handle); | 550 kernel_->metahandles_to_purge->insert(handle); |
575 | 551 |
576 size_t num_erased = 0; | 552 size_t num_erased = 0; |
577 EntryKernel* entry = *it; | 553 EntryKernel* entry = *it; |
578 num_erased = kernel_->ids_index->erase(entry); | 554 num_erased = kernel_->ids_index->erase(entry); |
579 DCHECK_EQ(1u, num_erased); | 555 DCHECK_EQ(1u, num_erased); |
580 num_erased = kernel_->client_tag_index->erase(entry); | 556 num_erased = kernel_->client_tag_index->erase(entry); |
581 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); | 557 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); |
582 num_erased = kernel_->unsynced_metahandles->erase(handle); | 558 num_erased = kernel_->unsynced_metahandles->erase(handle); |
583 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); | 559 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); |
584 num_erased = | 560 num_erased = |
585 kernel_->unapplied_update_metahandles[server_type].erase(handle); | 561 kernel_->unapplied_update_metahandles[server_type].erase(handle); |
586 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); | 562 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); |
587 num_erased = kernel_->parent_id_child_index->erase(entry); | 563 if (ParentChildIndex::ShouldInclude(entry)) |
588 DCHECK_EQ(entry->ref(IS_DEL), !num_erased); | 564 kernel_->parent_child_index->Remove(entry); |
589 kernel_->metahandles_index->erase(it++); | 565 kernel_->metahandles_index->erase(it++); |
590 delete entry; | 566 delete entry; |
591 } else { | 567 } else { |
592 ++it; | 568 ++it; |
593 } | 569 } |
594 } | 570 } |
595 | 571 |
596 // Ensure meta tracking for these data types reflects the deleted state. | 572 // Ensure meta tracking for these data types reflects the deleted state. |
597 for (ModelTypeSet::Iterator it = types.First(); | 573 for (ModelTypeSet::Iterator it = types.First(); |
598 it.Good(); it.Inc()) { | 574 it.Good(); it.Inc()) { |
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1003 return false; | 979 return false; |
1004 } | 980 } |
1005 } | 981 } |
1006 return true; | 982 return true; |
1007 } | 983 } |
1008 | 984 |
1009 void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) { | 985 void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) { |
1010 invariant_check_level_ = check_level; | 986 invariant_check_level_ = check_level; |
1011 } | 987 } |
1012 | 988 |
1013 bool Directory::UnlinkEntryFromOrder(EntryKernel* entry, | |
1014 WriteTransaction* trans, | |
1015 ScopedKernelLock* lock, | |
1016 UnlinkReason unlink_reason) { | |
1017 if (!SyncAssert(!trans || this == trans->directory(), | |
1018 FROM_HERE, | |
1019 "Transaction not pointing to the right directory", | |
1020 trans)) | |
1021 return false; | |
1022 Id old_previous = entry->ref(PREV_ID); | |
1023 Id old_next = entry->ref(NEXT_ID); | |
1024 | |
1025 entry->put(NEXT_ID, entry->ref(ID)); | |
1026 entry->put(PREV_ID, entry->ref(ID)); | |
1027 entry->mark_dirty(kernel_->dirty_metahandles); | |
1028 | |
1029 if (!old_previous.IsRoot()) { | |
1030 if (old_previous == old_next) { | |
1031 // Note previous == next doesn't imply previous == next == Get(ID). We | |
1032 // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added | |
1033 // and deleted before receiving the server ID in the commit response. | |
1034 if (!SyncAssert( | |
1035 (old_next == entry->ref(ID)) || !old_next.ServerKnows(), | |
1036 FROM_HERE, | |
1037 "Encounteered inconsistent entry while deleting", | |
1038 trans)) { | |
1039 return false; | |
1040 } | |
1041 return true; // Done if we were already self-looped (hence unlinked). | |
1042 } | |
1043 EntryKernel* previous_entry = GetEntryById(old_previous, lock); | |
1044 ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS)); | |
1045 // TODO(tim): Multiple asserts here for bug 101039 investigation. | |
1046 if (type == AUTOFILL) { | |
1047 if (!SyncAssert(previous_entry != NULL, | |
1048 FROM_HERE, | |
1049 "Could not find previous autofill entry", | |
1050 trans)) { | |
1051 return false; | |
1052 } | |
1053 } else { | |
1054 if (!SyncAssert(previous_entry != NULL, | |
1055 FROM_HERE, | |
1056 "Could not find previous entry", | |
1057 trans)) { | |
1058 return false; | |
1059 } | |
1060 } | |
1061 if (unlink_reason == NODE_MANIPULATION) | |
1062 trans->SaveOriginal(previous_entry); | |
1063 previous_entry->put(NEXT_ID, old_next); | |
1064 previous_entry->mark_dirty(kernel_->dirty_metahandles); | |
1065 } | |
1066 | |
1067 if (!old_next.IsRoot()) { | |
1068 EntryKernel* next_entry = GetEntryById(old_next, lock); | |
1069 if (!SyncAssert(next_entry != NULL, | |
1070 FROM_HERE, | |
1071 "Could not find next entry", | |
1072 trans)) { | |
1073 return false; | |
1074 } | |
1075 if (unlink_reason == NODE_MANIPULATION) | |
1076 trans->SaveOriginal(next_entry); | |
1077 next_entry->put(PREV_ID, old_previous); | |
1078 next_entry->mark_dirty(kernel_->dirty_metahandles); | |
1079 } | |
1080 return true; | |
1081 } | |
1082 | |
1083 int64 Directory::NextMetahandle() { | 989 int64 Directory::NextMetahandle() { |
1084 ScopedKernelLock lock(this); | 990 ScopedKernelLock lock(this); |
1085 int64 metahandle = (kernel_->next_metahandle)++; | 991 int64 metahandle = (kernel_->next_metahandle)++; |
1086 return metahandle; | 992 return metahandle; |
1087 } | 993 } |
1088 | 994 |
1089 // Always returns a client ID that is the string representation of a negative | 995 // Always returns a client ID that is a string representation of a negative |
1090 // number. | 996 // number. |
1091 Id Directory::NextId() { | 997 Id Directory::NextId() { |
1092 int64 result; | 998 int64 result; |
1093 { | 999 { |
1094 ScopedKernelLock lock(this); | 1000 ScopedKernelLock lock(this); |
1095 result = (kernel_->persisted_info.next_id)--; | 1001 result = (kernel_->persisted_info.next_id)--; |
1096 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 1002 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
1097 } | 1003 } |
1098 DCHECK_LT(result, 0); | 1004 DCHECK_LT(result, 0); |
1099 return Id::CreateFromClientString(base::Int64ToString(result)); | 1005 return Id::CreateFromClientString(base::Int64ToString(result)); |
1100 } | 1006 } |
1101 | 1007 |
1102 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) { | 1008 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) { |
1103 ScopedKernelLock lock(this); | 1009 ScopedKernelLock lock(this); |
1104 return (GetPossibleFirstChild(lock, id) != NULL); | 1010 return kernel_->parent_child_index->GetChildren(id) != NULL; |
1105 } | 1011 } |
1106 | 1012 |
1107 bool Directory::GetFirstChildId(BaseTransaction* trans, | 1013 Id Directory::GetFirstChildId(BaseTransaction* trans, |
1108 const Id& parent_id, | 1014 const EntryKernel* parent) { |
1109 Id* first_child_id) { | 1015 DCHECK(parent); |
| 1016 DCHECK(parent->ref(IS_DIR)); |
| 1017 |
1110 ScopedKernelLock lock(this); | 1018 ScopedKernelLock lock(this); |
1111 EntryKernel* entry = GetPossibleFirstChild(lock, parent_id); | 1019 const ChildSet* children = |
1112 if (!entry) { | 1020 kernel_->parent_child_index->GetChildren(parent->ref(ID)); |
1113 *first_child_id = Id(); | 1021 |
1114 return true; | 1022 // We're expected to return root if there are no children. |
| 1023 if (!children) |
| 1024 return Id(); |
| 1025 |
| 1026 return (*children->begin())->ref(ID); |
| 1027 } |
| 1028 |
| 1029 syncable::Id Directory::GetPredecessorId(EntryKernel* e) { |
| 1030 ScopedKernelLock lock(this); |
| 1031 |
| 1032 DCHECK(ParentChildIndex::ShouldInclude(e)); |
| 1033 const ChildSet* children = |
| 1034 kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID)); |
| 1035 DCHECK(children && !children->empty()); |
| 1036 ChildSet::const_iterator i = children->find(e); |
| 1037 DCHECK(i != children->end()); |
| 1038 |
| 1039 if (i == children->begin()) { |
| 1040 return Id(); |
| 1041 } else { |
| 1042 i--; |
| 1043 return (*i)->ref(ID); |
| 1044 } |
| 1045 } |
| 1046 |
| 1047 syncable::Id Directory::GetSuccessorId(EntryKernel* e) { |
| 1048 ScopedKernelLock lock(this); |
| 1049 |
| 1050 DCHECK(ParentChildIndex::ShouldInclude(e)); |
| 1051 const ChildSet* children = |
| 1052 kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID)); |
| 1053 DCHECK(children && !children->empty()); |
| 1054 ChildSet::const_iterator i = children->find(e); |
| 1055 DCHECK(i != children->end()); |
| 1056 |
| 1057 i++; |
| 1058 if (i == children->end()) { |
| 1059 return Id(); |
| 1060 } else { |
| 1061 return (*i)->ref(ID); |
| 1062 } |
| 1063 } |
| 1064 |
| 1065 // TODO(rlarocque): Remove all support for placing ShouldMaintainPosition() |
 | 1066 // items as siblings of items that do not maintain positions. It is required |
| 1067 // only for tests. |
| 1068 void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) { |
| 1069 DCHECK(!e->ref(IS_DEL)); |
| 1070 if (!ShouldMaintainPosition(GetModelTypeFromSpecifics(e->ref(SPECIFICS)))) { |
| 1071 DCHECK(!e->ref(UNIQUE_POSITION).IsValid()); |
| 1072 return; |
| 1073 } |
| 1074 std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG); |
| 1075 DCHECK(!suffix.empty()); |
| 1076 |
| 1077 // Remove our item from the list and remember to re-add it later. |
| 1078 ScopedKernelLock lock(this); |
| 1079 ScopedParentChildIndexUpdater updater(lock, e, kernel_->parent_child_index); |
| 1080 |
| 1081 // Note: The ScopedParentChildIndexUpdater will update this set for us as we |
| 1082 // leave this function. |
| 1083 const ChildSet* children = |
| 1084 kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID)); |
| 1085 |
| 1086 if (!children) { |
| 1087 // This parent currently has no other children. |
| 1088 DCHECK(predecessor->ref(ID).IsRoot()); |
| 1089 UniquePosition pos = UniquePosition::InitialPosition(suffix); |
| 1090 e->put(UNIQUE_POSITION, pos); |
| 1091 return; |
1115 } | 1092 } |
1116 | 1093 |
1117 // Walk to the front of the list; the server position ordering | 1094 if (predecessor->ref(ID).IsRoot()) { |
1118 // is commonly identical to the linked-list ordering, but pending | 1095 // We have at least one sibling, and we're inserting to the left of them. |
1119 // unsynced or unapplied items may diverge. | 1096 UniquePosition successor_pos = (*children->begin())->ref(UNIQUE_POSITION); |
1120 while (!entry->ref(PREV_ID).IsRoot()) { | 1097 |
1121 entry = GetEntryById(entry->ref(PREV_ID), &lock); | 1098 if (!successor_pos.IsValid()) { |
1122 if (!entry) { | 1099 // If all our successors are of non-positionable types, just create an |
1123 *first_child_id = Id(); | 1100 // initial position. We arbitrarily choose to sort invalid positions to |
1124 return false; | 1101 // the right of the valid positions. |
| 1102 // |
 | 1103 // We really shouldn't need to support this. See TODO above. |
| 1104 UniquePosition pos = UniquePosition::InitialPosition(suffix); |
| 1105 e->put(UNIQUE_POSITION, pos); |
| 1106 return; |
1125 } | 1107 } |
| 1108 |
| 1109 DCHECK(!children->empty()); |
| 1110 UniquePosition pos = UniquePosition::Before(successor_pos, suffix); |
| 1111 e->put(UNIQUE_POSITION, pos); |
| 1112 return; |
1126 } | 1113 } |
1127 *first_child_id = entry->ref(ID); | 1114 |
1128 return true; | 1115 // We can't support placing an item after an invalid position. Fortunately, |
| 1116 // the tests don't exercise this particular case. We should not support |
| 1117 // siblings with invalid positions at all. See TODO above. |
| 1118 DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid()); |
| 1119 |
| 1120 ChildSet::iterator i = children->find(predecessor); |
| 1121 DCHECK(i != children->end()); |
| 1122 |
| 1123 ++i; |
| 1124 if (i == children->end()) { |
| 1125 // Inserting at the end of the list. |
| 1126 UniquePosition pos = UniquePosition::After( |
| 1127 predecessor->ref(UNIQUE_POSITION), |
| 1128 suffix); |
| 1129 e->put(UNIQUE_POSITION, pos); |
| 1130 return; |
| 1131 } |
| 1132 |
| 1133 EntryKernel* successor = *i; |
| 1134 |
| 1135 // Another mixed valid and invalid position case. This one could be supported |
| 1136 // in theory, but we're trying to deprecate support for siblings with and |
| 1137 // without valid positions. See TODO above. |
| 1138 DCHECK(successor->ref(UNIQUE_POSITION).IsValid()); |
| 1139 |
| 1140 // Finally, the normal case: inserting between two elements. |
| 1141 UniquePosition pos = UniquePosition::Between( |
| 1142 predecessor->ref(UNIQUE_POSITION), |
| 1143 successor->ref(UNIQUE_POSITION), |
| 1144 suffix); |
| 1145 e->put(UNIQUE_POSITION, pos); |
| 1146 return; |
1129 } | 1147 } |
1130 | 1148 |
1131 bool Directory::GetLastChildIdForTest( | 1149 // TODO(rlarocque): Avoid this indirection. Just return the set. |
1132 BaseTransaction* trans, const Id& parent_id, Id* last_child_id) { | |
1133 ScopedKernelLock lock(this); | |
1134 EntryKernel* entry = GetPossibleFirstChild(lock, parent_id); | |
1135 if (!entry) { | |
1136 *last_child_id = Id(); | |
1137 return true; | |
1138 } | |
1139 | |
1140 // Walk to the back of the list; the server position ordering | |
1141 // is commonly identical to the linked-list ordering, but pending | |
1142 // unsynced or unapplied items may diverge. | |
1143 while (!entry->ref(NEXT_ID).IsRoot()) { | |
1144 entry = GetEntryById(entry->ref(NEXT_ID), &lock); | |
1145 if (!entry) { | |
1146 *last_child_id = Id(); | |
1147 return false; | |
1148 } | |
1149 } | |
1150 | |
1151 *last_child_id = entry->ref(ID); | |
1152 return true; | |
1153 } | |
1154 | |
1155 Id Directory::ComputePrevIdFromServerPosition( | |
1156 const EntryKernel* entry, | |
1157 const syncable::Id& parent_id) { | |
1158 ScopedKernelLock lock(this); | |
1159 | |
1160 // Find the natural insertion point in the parent_id_child_index, and | |
1161 // work back from there, filtering out ineligible candidates. | |
1162 ParentIdChildIndex::iterator sibling = LocateInParentChildIndex( | |
1163 lock, | |
1164 parent_id, | |
1165 NodeOrdinalToInt64(entry->ref(SERVER_ORDINAL_IN_PARENT)), | |
1166 entry->ref(ID)); | |
1167 ParentIdChildIndex::iterator first_sibling = | |
1168 GetParentChildIndexLowerBound(lock, parent_id); | |
1169 | |
1170 while (sibling != first_sibling) { | |
1171 --sibling; | |
1172 EntryKernel* candidate = *sibling; | |
1173 | |
1174 // The item itself should never be in the range under consideration. | |
1175 DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE)); | |
1176 | |
1177 // Ignore unapplied updates -- they might not even be server-siblings. | |
1178 if (candidate->ref(IS_UNAPPLIED_UPDATE)) | |
1179 continue; | |
1180 | |
1181 // We can't trust the SERVER_ fields of unsynced items, but they are | |
1182 // potentially legitimate local predecessors. In the case where | |
1183 // |update_item| and an unsynced item wind up in the same insertion | |
1184 // position, we need to choose how to order them. The following check puts | |
1185 // the unapplied update first; removing it would put the unsynced item(s) | |
1186 // first. | |
1187 if (candidate->ref(IS_UNSYNCED)) | |
1188 continue; | |
1189 | |
1190 // Skip over self-looped items, which are not valid predecessors. This | |
1191 // shouldn't happen in practice, but is worth defending against. | |
1192 if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) && | |
1193 !candidate->ref(PREV_ID).IsRoot()) { | |
1194 NOTREACHED(); | |
1195 continue; | |
1196 } | |
1197 return candidate->ref(ID); | |
1198 } | |
1199 // This item will be the first in the sibling order. | |
1200 return Id(); | |
1201 } | |
1202 | |
1203 Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex( | |
1204 const ScopedKernelLock& lock, | |
1205 const Id& parent_id, | |
1206 int64 position_in_parent, | |
1207 const Id& item_id_for_tiebreaking) { | |
1208 kernel_->needle.put(PARENT_ID, parent_id); | |
1209 kernel_->needle.put(SERVER_ORDINAL_IN_PARENT, | |
1210 Int64ToNodeOrdinal(position_in_parent)); | |
1211 kernel_->needle.put(ID, item_id_for_tiebreaking); | |
1212 return kernel_->parent_id_child_index->lower_bound(&kernel_->needle); | |
1213 } | |
1214 | |
1215 Directory::ParentIdChildIndex::iterator | |
1216 Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock, | |
1217 const Id& parent_id) { | |
1218 // Peg the parent ID, and use the least values for the remaining | |
1219 // index variables. | |
1220 return LocateInParentChildIndex(lock, parent_id, | |
1221 std::numeric_limits<int64>::min(), | |
1222 Id::GetLeastIdForLexicographicComparison()); | |
1223 } | |
1224 | |
1225 Directory::ParentIdChildIndex::iterator | |
1226 Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock, | |
1227 const Id& parent_id) { | |
1228 // The upper bound of |parent_id|'s range is the lower | |
1229 // bound of |++parent_id|'s range. | |
1230 return GetParentChildIndexLowerBound(lock, | |
1231 parent_id.GetLexicographicSuccessor()); | |
1232 } | |
1233 | |
1234 void Directory::AppendChildHandles(const ScopedKernelLock& lock, | 1150 void Directory::AppendChildHandles(const ScopedKernelLock& lock, |
1235 const Id& parent_id, | 1151 const Id& parent_id, |
1236 Directory::ChildHandles* result) { | 1152 Directory::ChildHandles* result) { |
1237 typedef ParentIdChildIndex::iterator iterator; | 1153 const ChildSet* children = |
1238 CHECK(result); | 1154 kernel_->parent_child_index->GetChildren(parent_id); |
1239 for (iterator i = GetParentChildIndexLowerBound(lock, parent_id), | 1155 if (!children) |
1240 end = GetParentChildIndexUpperBound(lock, parent_id); | 1156 return; |
1241 i != end; ++i) { | 1157 |
| 1158 for (ChildSet::iterator i = children->begin(); i != children->end(); ++i) { |
1242 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID)); | 1159 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID)); |
1243 result->push_back((*i)->ref(META_HANDLE)); | 1160 result->push_back((*i)->ref(META_HANDLE)); |
1244 } | 1161 } |
1245 } | 1162 } |
1246 | 1163 |
1247 EntryKernel* Directory::GetPossibleFirstChild( | |
1248 const ScopedKernelLock& lock, const Id& parent_id) { | |
1249 // We can use the server positional ordering as a hint because it's generally | |
1250 // in sync with the local (linked-list) positional ordering, and we have an | |
1251 // index on it. | |
1252 ParentIdChildIndex::iterator candidate = | |
1253 GetParentChildIndexLowerBound(lock, parent_id); | |
1254 ParentIdChildIndex::iterator end_range = | |
1255 GetParentChildIndexUpperBound(lock, parent_id); | |
1256 for (; candidate != end_range; ++candidate) { | |
1257 EntryKernel* entry = *candidate; | |
1258 // Filter out self-looped items, which are temporarily not in the child | |
1259 // ordering. | |
1260 if (entry->ref(PREV_ID).IsRoot() || | |
1261 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) { | |
1262 return entry; | |
1263 } | |
1264 } | |
1265 // There were no children in the linked list. | |
1266 return NULL; | |
1267 } | |
1268 | |
// Acquires |dir|'s kernel mutex for the lifetime of this object.  The
// const_cast allows the lock to be taken through a const Directory pointer;
// |dir_| retains the non-const view for the members that need it.
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
    : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}
1272 | 1167 |
1273 } // namespace syncable | 1168 } // namespace syncable |
1274 } // namespace syncer | 1169 } // namespace syncer |
OLD | NEW |