OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/browser/sync/syncable/syncable.h" | 5 #include "chrome/browser/sync/syncable/syncable.h" |
6 | 6 |
7 #include "build/build_config.h" | 7 #include "build/build_config.h" |
8 | 8 |
9 #include <sys/stat.h> | 9 #include <sys/stat.h> |
10 #if defined(OS_POSIX) | 10 #if defined(OS_POSIX) |
(...skipping 108 matching lines...)
119 CHECK(index_->insert(entry_).second); | 119 CHECK(index_->insert(entry_).second); |
120 } | 120 } |
121 } | 121 } |
122 private: | 122 private: |
123 // The entry that was temporarily removed from the index. | 123 // The entry that was temporarily removed from the index. |
124 EntryKernel* entry_; | 124 EntryKernel* entry_; |
125 // The index which we are updating. | 125 // The index which we are updating. |
126 typename Index<Indexer>::Set* const index_; | 126 typename Index<Indexer>::Set* const index_; |
127 }; | 127 }; |
128 | 128 |
| 129 // Helper function to add an item to the index, if it ought to be added. |
| 130 template<typename Indexer> |
| 131 void InitializeIndexEntry(EntryKernel* entry, |
| 132 typename Index<Indexer>::Set* index) { |
| 133 if (Indexer::ShouldInclude(entry)) { |
| 134 index->insert(entry); |
| 135 } |
| 136 } |
| 137 |
129 } // namespace | 138 } // namespace |
130 | 139 |
131 /////////////////////////////////////////////////////////////////////////// | 140 /////////////////////////////////////////////////////////////////////////// |
132 // Comparator and filter functions for the indices. | 141 // Comparator and filter functions for the indices. |
133 | 142 |
134 // static | 143 // static |
135 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { | 144 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { |
136 return !a->ref(UNIQUE_CLIENT_TAG).empty(); | 145 return !a->ref(UNIQUE_CLIENT_TAG).empty(); |
137 } | 146 } |
138 | 147 |
139 class ParentIdAndHandleIndexer::Comparator { | 148 bool ParentIdAndHandleIndexer::Comparator::operator() ( |
140 public: | 149 const syncable::EntryKernel* a, |
141 bool operator() (const syncable::EntryKernel* a, | 150 const syncable::EntryKernel* b) const { |
142 const syncable::EntryKernel* b) const { | 151 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID)); |
143 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID)); | 152 if (cmp != 0) |
144 if (cmp != 0) | 153 return cmp < 0; |
145 return cmp < 0; | |
146 | 154 |
147 // Meta handles are immutable per entry so this is ideal. | 155 int64 a_position = a->ref(SERVER_POSITION_IN_PARENT); |
148 return a->ref(META_HANDLE) < b->ref(META_HANDLE); | 156 int64 b_position = b->ref(SERVER_POSITION_IN_PARENT); |
149 } | 157 if (a_position != b_position) |
150 }; | 158 return a_position < b_position; |
| 159 |
| 160 cmp = a->ref(ID).compare(b->ref(ID)); |
| 161 return cmp < 0; |
| 162 } |
151 | 163 |
152 // static | 164 // static |
153 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) { | 165 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) { |
154 return !a->ref(IS_DEL); | 166 // This index excludes deleted items and the root item. The root |
| 167 // item is excluded so that it doesn't show up as a child of itself. |
| 168 return !a->ref(IS_DEL) && !a->ref(ID).IsRoot(); |
155 } | 169 } |
156 | 170 |
157 /////////////////////////////////////////////////////////////////////////// | 171 /////////////////////////////////////////////////////////////////////////// |
158 // EntryKernel | 172 // EntryKernel |
159 | 173 |
160 EntryKernel::EntryKernel() : dirty_(false) { | 174 EntryKernel::EntryKernel() : dirty_(false) { |
161 memset(int64_fields, 0, sizeof(int64_fields)); | 175 memset(int64_fields, 0, sizeof(int64_fields)); |
162 } | 176 } |
163 | 177 |
164 EntryKernel::~EntryKernel() {} | 178 EntryKernel::~EntryKernel() {} |
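
Only the tail of ScopedIndexUpdater is visible in the hunk above; the pattern it implements matters when reading the ReindexId and Put changes further down. The sketch below is a minimal, simplified version of that remove-mutate-reinsert idiom using stand-in types (a plain class instead of the real Indexer template, a toy Entry instead of EntryKernel); the constructor-side erase is an assumption inferred from the re-insertion CHECK in the visible destructor.

#include <cassert>
#include <set>
#include <string>

struct Entry {
  std::string id;  // Key field; changing it while indexed would corrupt the set.
};

struct ById {
  bool operator()(const Entry* a, const Entry* b) const {
    return a->id < b->id;
  }
};

typedef std::set<Entry*, ById> IdIndex;

// Removes |entry| from |index| for the lifetime of the updater and puts it
// back on destruction, so the key field can be mutated safely in between.
class ScopedIndexUpdaterSketch {
 public:
  ScopedIndexUpdaterSketch(Entry* entry, IdIndex* index)
      : entry_(entry), index_(index) {
    index_->erase(entry_);
  }
  ~ScopedIndexUpdaterSketch() {
    bool inserted = index_->insert(entry_).second;
    assert(inserted);
    (void)inserted;
  }
 private:
  Entry* entry_;
  IdIndex* index_;
};

// Usage: mutate the indexed field only while the updater is in scope.
void Rename(Entry* entry, IdIndex* index, const std::string& new_id) {
  ScopedIndexUpdaterSketch updater(entry, index);
  entry->id = new_id;
}  // Destructor runs here and restores the index invariant.
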
(...skipping 91 matching lines...)
256 const DirOpenResult result = OpenImpl(file_path, name); | 270 const DirOpenResult result = OpenImpl(file_path, name); |
257 if (OPENED != result) | 271 if (OPENED != result) |
258 Close(); | 272 Close(); |
259 return result; | 273 return result; |
260 } | 274 } |
261 | 275 |
262 void Directory::InitializeIndices() { | 276 void Directory::InitializeIndices() { |
263 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); | 277 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); |
264 for (; it != kernel_->metahandles_index->end(); ++it) { | 278 for (; it != kernel_->metahandles_index->end(); ++it) { |
265 EntryKernel* entry = *it; | 279 EntryKernel* entry = *it; |
266 if (!entry->ref(IS_DEL)) | 280 InitializeIndexEntry<ParentIdAndHandleIndexer>(entry, |
267 kernel_->parent_id_child_index->insert(entry); | 281 kernel_->parent_id_child_index); |
268 kernel_->ids_index->insert(entry); | 282 InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index); |
269 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { | 283 InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index); |
270 kernel_->client_tag_index->insert(entry); | |
271 } | |
272 if (entry->ref(IS_UNSYNCED)) | 284 if (entry->ref(IS_UNSYNCED)) |
273 kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE)); | 285 kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE)); |
274 if (entry->ref(IS_UNAPPLIED_UPDATE)) | 286 if (entry->ref(IS_UNAPPLIED_UPDATE)) |
275 kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE)); | 287 kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE)); |
276 DCHECK(!entry->is_dirty()); | 288 DCHECK(!entry->is_dirty()); |
277 } | 289 } |
278 } | 290 } |
279 | 291 |
280 DirectoryBackingStore* Directory::CreateBackingStore( | 292 DirectoryBackingStore* Directory::CreateBackingStore( |
281 const string& dir_name, const FilePath& backing_filepath) { | 293 const string& dir_name, const FilePath& backing_filepath) { |
(...skipping 92 matching lines...)
374 kernel_->needle.put(META_HANDLE, metahandle); | 386 kernel_->needle.put(META_HANDLE, metahandle); |
375 MetahandlesIndex::iterator found = | 387 MetahandlesIndex::iterator found = |
376 kernel_->metahandles_index->find(&kernel_->needle); | 388 kernel_->metahandles_index->find(&kernel_->needle); |
377 if (found != kernel_->metahandles_index->end()) { | 389 if (found != kernel_->metahandles_index->end()) { |
378 // Found it in memory. Easy. | 390 // Found it in memory. Easy. |
379 return *found; | 391 return *found; |
380 } | 392 } |
381 return NULL; | 393 return NULL; |
382 } | 394 } |
383 | 395 |
384 // An interface to specify the details of which children | |
385 // GetChildHandles() is looking for. | |
386 // TODO(chron): Clean this up into one function to get child handles | |
387 struct PathMatcher { | |
388 explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { } | |
389 virtual ~PathMatcher() { } | |
390 | |
391 typedef Directory::ParentIdChildIndex Index; | |
392 virtual Index::iterator lower_bound(Index* index) = 0; | |
393 virtual Index::iterator upper_bound(Index* index) = 0; | |
394 const Id parent_id_; | |
395 EntryKernel needle_; | |
396 }; | |
397 | |
398 // Matches all children. | |
399 // TODO(chron): Unit test this by itself | |
400 struct AllPathsMatcher : public PathMatcher { | |
401 explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) { | |
402 } | |
403 | |
404 virtual Index::iterator lower_bound(Index* index) { | |
405 needle_.put(PARENT_ID, parent_id_); | |
406 needle_.put(META_HANDLE, std::numeric_limits<int64>::min()); | |
407 return index->lower_bound(&needle_); | |
408 } | |
409 | |
410 virtual Index::iterator upper_bound(Index* index) { | |
411 needle_.put(PARENT_ID, parent_id_); | |
412 needle_.put(META_HANDLE, std::numeric_limits<int64>::max()); | |
413 return index->upper_bound(&needle_); | |
414 } | |
415 }; | |
416 | |
417 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id, | 396 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id, |
418 Directory::ChildHandles* result) { | 397 Directory::ChildHandles* result) { |
419 AllPathsMatcher matcher(parent_id); | |
420 return GetChildHandlesImpl(trans, parent_id, &matcher, result); | |
421 } | |
422 | |
423 void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id, | |
424 PathMatcher* matcher, | |
425 Directory::ChildHandles* result) { | |
426 CHECK(this == trans->directory()); | 398 CHECK(this == trans->directory()); |
427 result->clear(); | 399 result->clear(); |
428 { | 400 { |
429 ScopedKernelLock lock(this); | 401 ScopedKernelLock lock(this); |
430 | 402 |
431 // This index is sorted by parent id and metahandle. | |
432 ParentIdChildIndex* const index = kernel_->parent_id_child_index; | |
433 typedef ParentIdChildIndex::iterator iterator; | 403 typedef ParentIdChildIndex::iterator iterator; |
434 for (iterator i = matcher->lower_bound(index), | 404 for (iterator i = GetParentChildIndexLowerBound(lock, parent_id), |
435 end = matcher->upper_bound(index); i != end; ++i) { | 405 end = GetParentChildIndexUpperBound(lock, parent_id); |
436 // root's parent_id is NULL in the db but 0 in memory, so | 406 i != end; ++i) { |
437 // have avoid listing the root as its own child. | 407 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID)); |
438 if ((*i)->ref(ID) == (*i)->ref(PARENT_ID)) | |
439 continue; | |
440 result->push_back((*i)->ref(META_HANDLE)); | 408 result->push_back((*i)->ref(META_HANDLE)); |
441 } | 409 } |
442 } | 410 } |
443 } | 411 } |
444 | 412 |
445 EntryKernel* Directory::GetRootEntry() { | 413 EntryKernel* Directory::GetRootEntry() { |
446 return GetEntryById(Id()); | 414 return GetEntryById(Id()); |
447 } | 415 } |
448 | 416 |
449 void ZeroFields(EntryKernel* entry, int first_field) { | 417 void ZeroFields(EntryKernel* entry, int first_field) { |
(...skipping 32 matching lines...)
482 } | 450 } |
483 | 451 |
484 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) { | 452 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) { |
485 ScopedKernelLock lock(this); | 453 ScopedKernelLock lock(this); |
486 if (NULL != GetEntryById(new_id, &lock)) | 454 if (NULL != GetEntryById(new_id, &lock)) |
487 return false; | 455 return false; |
488 | 456 |
489 { | 457 { |
490 // Update the indices that depend on the ID field. | 458 // Update the indices that depend on the ID field. |
491 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index); | 459 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index); |
| 460 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry, |
| 461 kernel_->parent_id_child_index); |
492 entry->put(ID, new_id); | 462 entry->put(ID, new_id); |
493 } | 463 } |
494 return true; | 464 return true; |
495 } | 465 } |
496 | 466 |
497 void Directory::ReindexParentId(EntryKernel* const entry, | 467 void Directory::ReindexParentId(EntryKernel* const entry, |
498 const Id& new_parent_id) { | 468 const Id& new_parent_id) { |
499 ScopedKernelLock lock(this); | 469 ScopedKernelLock lock(this); |
500 | 470 |
501 { | 471 { |
(...skipping 725 matching lines...)
1227 | 1197 |
1228 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle) | 1198 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle) |
1229 : basetrans_(trans) { | 1199 : basetrans_(trans) { |
1230 kernel_ = trans->directory()->GetEntryByHandle(metahandle); | 1200 kernel_ = trans->directory()->GetEntryByHandle(metahandle); |
1231 } | 1201 } |
1232 | 1202 |
1233 Directory* Entry::dir() const { | 1203 Directory* Entry::dir() const { |
1234 return basetrans_->directory(); | 1204 return basetrans_->directory(); |
1235 } | 1205 } |
1236 | 1206 |
| 1207 Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const { |
| 1208 return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id); |
| 1209 } |
| 1210 |
1237 const string& Entry::Get(StringField field) const { | 1211 const string& Entry::Get(StringField field) const { |
1238 DCHECK(kernel_); | 1212 DCHECK(kernel_); |
1239 return kernel_->ref(field); | 1213 return kernel_->ref(field); |
1240 } | 1214 } |
1241 | 1215 |
1242 syncable::ModelType Entry::GetServerModelType() const { | 1216 syncable::ModelType Entry::GetServerModelType() const { |
1243 ModelType specifics_type = GetModelTypeFromSpecifics(Get(SERVER_SPECIFICS)); | 1217 ModelType specifics_type = GetModelTypeFromSpecifics(Get(SERVER_SPECIFICS)); |
1244 if (specifics_type != UNSPECIFIED) | 1218 if (specifics_type != UNSPECIFIED) |
1245 return specifics_type; | 1219 return specifics_type; |
1246 if (IsRoot()) | 1220 if (IsRoot()) |
(...skipping 118 matching lines...)
1365 // upon a value change. | 1339 // upon a value change. |
1366 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, | 1340 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, |
1367 dir()->kernel_->parent_id_child_index); | 1341 dir()->kernel_->parent_id_child_index); |
1368 | 1342 |
1369 kernel_->put(IS_DEL, is_del); | 1343 kernel_->put(IS_DEL, is_del); |
1370 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | 1344 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); |
1371 } | 1345 } |
1372 | 1346 |
1373 if (!is_del) | 1347 if (!is_del) |
1374 PutPredecessor(Id()); // Restores position to the 0th index. | 1348 PutPredecessor(Id()); // Restores position to the 0th index. |
| 1349 |
1375 return true; | 1350 return true; |
1376 } | 1351 } |
1377 | 1352 |
1378 bool MutableEntry::Put(Int64Field field, const int64& value) { | 1353 bool MutableEntry::Put(Int64Field field, const int64& value) { |
1379 DCHECK(kernel_); | 1354 DCHECK(kernel_); |
1380 if (kernel_->ref(field) != value) { | 1355 if (kernel_->ref(field) != value) { |
1381 kernel_->put(field, value); | 1356 ScopedKernelLock lock(dir()); |
| 1357 if (SERVER_POSITION_IN_PARENT == field) { |
| 1358 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, |
| 1359 dir()->kernel_->parent_id_child_index); |
| 1360 kernel_->put(field, value); |
| 1361 } else { |
| 1362 kernel_->put(field, value); |
| 1363 } |
1382 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | 1364 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); |
1383 } | 1365 } |
1384 return true; | 1366 return true; |
1385 } | 1367 } |
1386 | 1368 |
1387 bool MutableEntry::Put(IdField field, const Id& value) { | 1369 bool MutableEntry::Put(IdField field, const Id& value) { |
1388 DCHECK(kernel_); | 1370 DCHECK(kernel_); |
1389 if (kernel_->ref(field) != value) { | 1371 if (kernel_->ref(field) != value) { |
1390 if (ID == field) { | 1372 if (ID == field) { |
1391 if (!dir()->ReindexId(kernel_, value)) | 1373 if (!dir()->ReindexId(kernel_, value)) |
(...skipping 207 matching lines...)
1599 int64 result; | 1581 int64 result; |
1600 { | 1582 { |
1601 ScopedKernelLock lock(this); | 1583 ScopedKernelLock lock(this); |
1602 result = (kernel_->persisted_info.next_id)--; | 1584 result = (kernel_->persisted_info.next_id)--; |
1603 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 1585 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
1604 } | 1586 } |
1605 DCHECK_LT(result, 0); | 1587 DCHECK_LT(result, 0); |
1606 return Id::CreateFromClientString(base::Int64ToString(result)); | 1588 return Id::CreateFromClientString(base::Int64ToString(result)); |
1607 } | 1589 } |
1608 | 1590 |
1609 Id Directory::GetChildWithNullIdField(IdField field, | |
1610 BaseTransaction* trans, | |
1611 const Id& parent_id) { | |
1612 // This query is O(number of children), which should be acceptable | |
1613 // when this method is used as the first step in enumerating the children of | |
1614 // a node. But careless use otherwise could potentially result in | |
1615 // O((number of children)^2) performance. | |
1616 ChildHandles child_handles; | |
1617 GetChildHandles(trans, parent_id, &child_handles); | |
1618 ChildHandles::const_iterator it; | |
1619 for (it = child_handles.begin(); it != child_handles.end(); ++it) { | |
1620 Entry e(trans, GET_BY_HANDLE, *it); | |
1621 CHECK(e.good()); | |
1622 if (e.Get(field).IsRoot()) | |
1623 return e.Get(ID); | |
1624 } | |
1625 | |
1626 return Id(); | |
1627 } | |
1628 | |
1629 Id Directory::GetFirstChildId(BaseTransaction* trans, | 1591 Id Directory::GetFirstChildId(BaseTransaction* trans, |
1630 const Id& parent_id) { | 1592 const Id& parent_id) { |
1631 return GetChildWithNullIdField(PREV_ID, trans, parent_id); | 1593 ScopedKernelLock lock(this); |
| 1594 // We can use the server positional ordering as a hint because it's generally |
| 1595 // in sync with the local (linked-list) positional ordering, and we have an |
| 1596 // index on it. |
| 1597 ParentIdChildIndex::iterator candidate = |
| 1598 GetParentChildIndexLowerBound(lock, parent_id); |
| 1599 ParentIdChildIndex::iterator end_range = |
| 1600 GetParentChildIndexUpperBound(lock, parent_id); |
| 1601 for (; candidate != end_range; ++candidate) { |
| 1602 EntryKernel* entry = *candidate; |
| 1603 // Filter out self-looped items, which are temporarily not in the child |
| 1604 // ordering. |
| 1605 if (entry->ref(PREV_ID).IsRoot() || |
| 1606 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) { |
| 1607 // Walk to the front of the list; the server position ordering |
| 1608 // is commonly identical to the linked-list ordering, but pending |
| 1609 // unsynced or unapplied items may diverge. |
| 1610 while (!entry->ref(PREV_ID).IsRoot()) { |
| 1611 entry = GetEntryById(entry->ref(PREV_ID), &lock); |
| 1612 } |
| 1613 return entry->ref(ID); |
| 1614 } |
| 1615 } |
| 1616 // There were no children in the linked list. |
| 1617 return Id(); |
1632 } | 1618 } |
1633 | 1619 |
1634 Id Directory::GetLastChildId(BaseTransaction* trans, | 1620 Id Directory::GetLastChildId(BaseTransaction* trans, |
1635 const Id& parent_id) { | 1621 const Id& parent_id) { |
1636 return GetChildWithNullIdField(NEXT_ID, trans, parent_id); | 1622 ScopedKernelLock lock(this); |
| 1623 // We can use the server positional ordering as a hint because it's generally |
| 1624 // in sync with the local (linked-list) positional ordering, and we have an |
| 1625 // index on it. |
| 1626 ParentIdChildIndex::iterator begin_range = |
| 1627 GetParentChildIndexLowerBound(lock, parent_id); |
| 1628 ParentIdChildIndex::iterator candidate = |
| 1629 GetParentChildIndexUpperBound(lock, parent_id); |
| 1630 |
| 1631 while (begin_range != candidate) { |
| 1632 --candidate; |
| 1633 EntryKernel* entry = *candidate; |
| 1634 |
| 1635 // Filter out self-looped items, which are temporarily not in the child |
| 1636 // ordering. |
| 1637 if (entry->ref(NEXT_ID).IsRoot() || |
| 1638 entry->ref(NEXT_ID) != entry->ref(PREV_ID)) { |
| 1639 // Walk to the back of the list; the server position ordering |
| 1640 // is commonly identical to the linked-list ordering, but pending |
| 1641 // unsynced or unapplied items may diverge. |
| 1642 while (!entry->ref(NEXT_ID).IsRoot()) |
| 1643 entry = GetEntryById(entry->ref(NEXT_ID), &lock); |
| 1644 return entry->ref(ID); |
| 1645 } |
| 1646 } |
| 1647 // There were no children in the linked list. |
| 1648 return Id(); |
| 1649 } |
| 1650 |
| 1651 Id Directory::ComputePrevIdFromServerPosition( |
| 1652 const EntryKernel* entry, |
| 1653 const syncable::Id& parent_id) { |
| 1654 ScopedKernelLock lock(this); |
| 1655 |
| 1656 // Find the natural insertion point in the parent_id_child_index, and |
| 1657 // work back from there, filtering out ineligible candidates. |
| 1658 ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock, |
| 1659 parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID)); |
| 1660 ParentIdChildIndex::iterator first_sibling = |
| 1661 GetParentChildIndexLowerBound(lock, parent_id); |
| 1662 |
| 1663 while (sibling != first_sibling) { |
| 1664 --sibling; |
| 1665 EntryKernel* candidate = *sibling; |
| 1666 |
| 1667 // The item itself should never be in the range under consideration. |
| 1668 DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE)); |
| 1669 |
| 1670 // Ignore unapplied updates -- they might not even be server-siblings. |
| 1671 if (candidate->ref(IS_UNAPPLIED_UPDATE)) |
| 1672 continue; |
| 1673 |
| 1674 // We can't trust the SERVER_ fields of unsynced items, but they are |
| 1675 // potentially legitimate local predecessors. In the case where |
| 1676 // |update_item| and an unsynced item wind up in the same insertion |
| 1677 // position, we need to choose how to order them. The following check puts |
| 1678 // the unapplied update first; removing it would put the unsynced item(s) |
| 1679 // first. |
| 1680 if (candidate->ref(IS_UNSYNCED)) |
| 1681 continue; |
| 1682 |
| 1683 // Skip over self-looped items, which are not valid predecessors. This |
| 1684 // shouldn't happen in practice, but is worth defending against. |
| 1685 if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) && |
| 1686 !candidate->ref(PREV_ID).IsRoot()) { |
| 1687 NOTREACHED(); |
| 1688 continue; |
| 1689 } |
| 1690 return candidate->ref(ID); |
| 1691 } |
| 1692 // This item will be the first in the sibling order. |
| 1693 return Id(); |
1637 } | 1694 } |
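
The loop bodies of the new GetFirstChildId() and GetLastChildId() above combine two ideas: skip candidates whose PREV_ID and NEXT_ID point back at themselves (self-looped, temporarily outside the sibling ordering), then walk the doubly linked sibling list to its front or back. A stripped-down sketch of the front walk, using a toy in-memory map in place of the real Directory/Id machinery (names here are illustrative only):

#include <map>
#include <string>

struct Node {
  std::string id;
  std::string prev_id;  // "" stands in for the root id.
  std::string next_id;
};

typedef std::map<std::string, Node*> NodesById;

// Returns the id of the first sibling reachable from |start|, or "" if
// |start| is self-looped and therefore not currently in the ordering.
std::string WalkToFront(const NodesById& nodes, Node* start) {
  if (!start->prev_id.empty() && start->prev_id == start->next_id)
    return std::string();  // Self-looped: skip, as the real loop does.
  Node* entry = start;
  while (!entry->prev_id.empty())
    entry = nodes.find(entry->prev_id)->second;  // Assumes the map is complete.
  return entry->id;
}
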
1638 | 1695 |
1639 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id, | 1696 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id, |
1640 const Id& new_parent_id) { | 1697 const Id& new_parent_id) { |
1641 if (entry_id.IsRoot()) | 1698 if (entry_id.IsRoot()) |
1642 return false; | 1699 return false; |
1643 // we have to ensure that the entry is not an ancestor of the new parent. | 1700 // we have to ensure that the entry is not an ancestor of the new parent. |
1644 Id ancestor_id = new_parent_id; | 1701 Id ancestor_id = new_parent_id; |
1645 while (!ancestor_id.IsRoot()) { | 1702 while (!ancestor_id.IsRoot()) { |
1646 if (entry_id == ancestor_id) | 1703 if (entry_id == ancestor_id) |
(...skipping 47 matching lines...)
1694 return os; | 1751 return os; |
1695 } | 1752 } |
1696 | 1753 |
1697 std::ostream& operator<<(std::ostream& s, const Blob& blob) { | 1754 std::ostream& operator<<(std::ostream& s, const Blob& blob) { |
1698 for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i) | 1755 for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i) |
1699 s << std::hex << std::setw(2) | 1756 s << std::hex << std::setw(2) |
1700 << std::setfill('0') << static_cast<unsigned int>(*i); | 1757 << std::setfill('0') << static_cast<unsigned int>(*i); |
1701 return s << std::dec; | 1758 return s << std::dec; |
1702 } | 1759 } |
1703 | 1760 |
| 1761 Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex( |
| 1762 const ScopedKernelLock& lock, |
| 1763 const Id& parent_id, |
| 1764 int64 position_in_parent, |
| 1765 const Id& item_id_for_tiebreaking) { |
| 1766 kernel_->needle.put(PARENT_ID, parent_id); |
| 1767 kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent); |
| 1768 kernel_->needle.put(ID, item_id_for_tiebreaking); |
| 1769 return kernel_->parent_id_child_index->lower_bound(&kernel_->needle); |
| 1770 } |
| 1771 |
| 1772 Directory::ParentIdChildIndex::iterator |
| 1773 Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock, |
| 1774 const Id& parent_id) { |
| 1775 // Peg the parent ID, and use the least values for the remaining |
| 1776 // index variables. |
| 1777 return LocateInParentChildIndex(lock, parent_id, |
| 1778 std::numeric_limits<int64>::min(), |
| 1779 Id::GetLeastIdForLexicographicComparison()); |
| 1780 } |
| 1781 |
| 1782 |
| 1783 Directory::ParentIdChildIndex::iterator |
| 1784 Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock, |
| 1785 const Id& parent_id) { |
| 1786 // The upper bound of |parent_id|'s range is the lower |
| 1787 // bound of |++parent_id|'s range. |
| 1788 return GetParentChildIndexLowerBound(lock, |
| 1789 parent_id.GetLexicographicSuccessor()); |
| 1790 } |
| 1791 |
1704 } // namespace syncable | 1792 } // namespace syncable |
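
The three helpers just above (LocateInParentChildIndex and the two bound functions) replace the old PathMatcher classes with plain range queries on the composite-keyed index. Below is a self-contained sketch of the same idiom with simplified stand-in types (std::string ids and a toy Item instead of syncable::Id and EntryKernel): peg the parent id, use extreme values for the remaining key fields, and iterate [lower, upper) to visit one parent's children in server position order.

#include <cstdint>
#include <iostream>
#include <limits>
#include <set>
#include <string>

struct Item {
  std::string parent_id;
  int64_t server_position;
  std::string id;
};

// Same composite ordering as ParentIdAndHandleIndexer::Comparator:
// parent id, then server position within the parent, then id as tiebreaker.
struct ByParentPositionId {
  bool operator()(const Item* a, const Item* b) const {
    if (int cmp = a->parent_id.compare(b->parent_id))
      return cmp < 0;
    if (a->server_position != b->server_position)
      return a->server_position < b->server_position;
    return a->id < b->id;
  }
};

typedef std::set<const Item*, ByParentPositionId> ParentChildIndex;

ParentChildIndex::const_iterator LowerBoundFor(const ParentChildIndex& index,
                                               const std::string& parent) {
  // Least possible values for the non-parent key fields.
  Item needle = {parent, std::numeric_limits<int64_t>::min(), std::string()};
  return index.lower_bound(&needle);
}

ParentChildIndex::const_iterator UpperBoundFor(const ParentChildIndex& index,
                                               const std::string& parent) {
  // The upper bound of |parent|'s range is the lower bound of its
  // lexicographic successor's range (the real code uses
  // Id::GetLexicographicSuccessor() for this).
  return LowerBoundFor(index, parent + '\0');
}

int main() {
  Item a = {"p1", 10, "a"};
  Item b = {"p1", 20, "b"};
  Item c = {"p2", 5, "c"};
  ParentChildIndex index;
  index.insert(&a);
  index.insert(&b);
  index.insert(&c);
  // Enumerate the children of "p1": prints "a" then "b", skipping "c".
  for (ParentChildIndex::const_iterator it = LowerBoundFor(index, "p1"),
           end = UpperBoundFor(index, "p1");
       it != end; ++it) {
    std::cout << (*it)->id << "\n";
  }
  return 0;
}
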