Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(204)

Side by Side Diff: chrome/browser/sync/syncable/syncable.cc

Issue 6588119: First-time sync: asymptotic running time improvement (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/chrome/Release
Patch Set: Fix unit_tests Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « chrome/browser/sync/syncable/syncable.h ('k') | chrome/browser/sync/syncable/syncable_id.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "chrome/browser/sync/syncable/syncable.h" 5 #include "chrome/browser/sync/syncable/syncable.h"
6 6
7 #include "build/build_config.h" 7 #include "build/build_config.h"
8 8
9 #include <sys/stat.h> 9 #include <sys/stat.h>
10 #if defined(OS_POSIX) 10 #if defined(OS_POSIX)
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after
119 CHECK(index_->insert(entry_).second); 119 CHECK(index_->insert(entry_).second);
120 } 120 }
121 } 121 }
122 private: 122 private:
123 // The entry that was temporarily removed from the index. 123 // The entry that was temporarily removed from the index.
124 EntryKernel* entry_; 124 EntryKernel* entry_;
125 // The index which we are updating. 125 // The index which we are updating.
126 typename Index<Indexer>::Set* const index_; 126 typename Index<Indexer>::Set* const index_;
127 }; 127 };
128 128
129 // Helper function to add an item to the index, if it ought to be added.
130 template<typename Indexer>
131 void InitializeIndexEntry(EntryKernel* entry,
132 typename Index<Indexer>::Set* index) {
133 if (Indexer::ShouldInclude(entry)) {
134 index->insert(entry);
135 }
136 }
137
129 } // namespace 138 } // namespace
130 139
131 /////////////////////////////////////////////////////////////////////////// 140 ///////////////////////////////////////////////////////////////////////////
132 // Comparator and filter functions for the indices. 141 // Comparator and filter functions for the indices.
133 142
134 // static 143 // static
135 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { 144 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
136 return !a->ref(UNIQUE_CLIENT_TAG).empty(); 145 return !a->ref(UNIQUE_CLIENT_TAG).empty();
137 } 146 }
138 147
139 class ParentIdAndHandleIndexer::Comparator { 148 bool ParentIdAndHandleIndexer::Comparator::operator() (
140 public: 149 const syncable::EntryKernel* a,
141 bool operator() (const syncable::EntryKernel* a, 150 const syncable::EntryKernel* b) const {
142 const syncable::EntryKernel* b) const { 151 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
143 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID)); 152 if (cmp != 0)
144 if (cmp != 0) 153 return cmp < 0;
145 return cmp < 0;
146 154
147 // Meta handles are immutable per entry so this is ideal. 155 int64 a_position = a->ref(SERVER_POSITION_IN_PARENT);
148 return a->ref(META_HANDLE) < b->ref(META_HANDLE); 156 int64 b_position = b->ref(SERVER_POSITION_IN_PARENT);
149 } 157 if (a_position != b_position)
150 }; 158 return a_position < b_position;
159
160 cmp = a->ref(ID).compare(b->ref(ID));
161 return cmp < 0;
162 }
151 163
152 // static 164 // static
153 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) { 165 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
154 return !a->ref(IS_DEL); 166 // This index excludes deleted items and the root item. The root
167 // item is excluded so that it doesn't show up as a child of itself.
168 return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
155 } 169 }
156 170
157 /////////////////////////////////////////////////////////////////////////// 171 ///////////////////////////////////////////////////////////////////////////
158 // EntryKernel 172 // EntryKernel
159 173
160 EntryKernel::EntryKernel() : dirty_(false) { 174 EntryKernel::EntryKernel() : dirty_(false) {
161 memset(int64_fields, 0, sizeof(int64_fields)); 175 memset(int64_fields, 0, sizeof(int64_fields));
162 } 176 }
163 177
164 EntryKernel::~EntryKernel() {} 178 EntryKernel::~EntryKernel() {}
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after
256 const DirOpenResult result = OpenImpl(file_path, name); 270 const DirOpenResult result = OpenImpl(file_path, name);
257 if (OPENED != result) 271 if (OPENED != result)
258 Close(); 272 Close();
259 return result; 273 return result;
260 } 274 }
261 275
262 void Directory::InitializeIndices() { 276 void Directory::InitializeIndices() {
263 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); 277 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
264 for (; it != kernel_->metahandles_index->end(); ++it) { 278 for (; it != kernel_->metahandles_index->end(); ++it) {
265 EntryKernel* entry = *it; 279 EntryKernel* entry = *it;
266 if (!entry->ref(IS_DEL)) 280 InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
267 kernel_->parent_id_child_index->insert(entry); 281 kernel_->parent_id_child_index);
268 kernel_->ids_index->insert(entry); 282 InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
269 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { 283 InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
270 kernel_->client_tag_index->insert(entry);
271 }
272 if (entry->ref(IS_UNSYNCED)) 284 if (entry->ref(IS_UNSYNCED))
273 kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE)); 285 kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
274 if (entry->ref(IS_UNAPPLIED_UPDATE)) 286 if (entry->ref(IS_UNAPPLIED_UPDATE))
275 kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE)); 287 kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE));
276 DCHECK(!entry->is_dirty()); 288 DCHECK(!entry->is_dirty());
277 } 289 }
278 } 290 }
279 291
280 DirectoryBackingStore* Directory::CreateBackingStore( 292 DirectoryBackingStore* Directory::CreateBackingStore(
281 const string& dir_name, const FilePath& backing_filepath) { 293 const string& dir_name, const FilePath& backing_filepath) {
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
374 kernel_->needle.put(META_HANDLE, metahandle); 386 kernel_->needle.put(META_HANDLE, metahandle);
375 MetahandlesIndex::iterator found = 387 MetahandlesIndex::iterator found =
376 kernel_->metahandles_index->find(&kernel_->needle); 388 kernel_->metahandles_index->find(&kernel_->needle);
377 if (found != kernel_->metahandles_index->end()) { 389 if (found != kernel_->metahandles_index->end()) {
378 // Found it in memory. Easy. 390 // Found it in memory. Easy.
379 return *found; 391 return *found;
380 } 392 }
381 return NULL; 393 return NULL;
382 } 394 }
383 395
384 // An interface to specify the details of which children
385 // GetChildHandles() is looking for.
386 // TODO(chron): Clean this up into one function to get child handles
387 struct PathMatcher {
388 explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { }
389 virtual ~PathMatcher() { }
390
391 typedef Directory::ParentIdChildIndex Index;
392 virtual Index::iterator lower_bound(Index* index) = 0;
393 virtual Index::iterator upper_bound(Index* index) = 0;
394 const Id parent_id_;
395 EntryKernel needle_;
396 };
397
398 // Matches all children.
399 // TODO(chron): Unit test this by itself
400 struct AllPathsMatcher : public PathMatcher {
401 explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
402 }
403
404 virtual Index::iterator lower_bound(Index* index) {
405 needle_.put(PARENT_ID, parent_id_);
406 needle_.put(META_HANDLE, std::numeric_limits<int64>::min());
407 return index->lower_bound(&needle_);
408 }
409
410 virtual Index::iterator upper_bound(Index* index) {
411 needle_.put(PARENT_ID, parent_id_);
412 needle_.put(META_HANDLE, std::numeric_limits<int64>::max());
413 return index->upper_bound(&needle_);
414 }
415 };
416
417 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id, 396 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
418 Directory::ChildHandles* result) { 397 Directory::ChildHandles* result) {
419 AllPathsMatcher matcher(parent_id);
420 return GetChildHandlesImpl(trans, parent_id, &matcher, result);
421 }
422
423 void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
424 PathMatcher* matcher,
425 Directory::ChildHandles* result) {
426 CHECK(this == trans->directory()); 398 CHECK(this == trans->directory());
427 result->clear(); 399 result->clear();
428 { 400 {
429 ScopedKernelLock lock(this); 401 ScopedKernelLock lock(this);
430 402
431 // This index is sorted by parent id and metahandle.
432 ParentIdChildIndex* const index = kernel_->parent_id_child_index;
433 typedef ParentIdChildIndex::iterator iterator; 403 typedef ParentIdChildIndex::iterator iterator;
434 for (iterator i = matcher->lower_bound(index), 404 for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
435 end = matcher->upper_bound(index); i != end; ++i) { 405 end = GetParentChildIndexUpperBound(lock, parent_id);
436 // root's parent_id is NULL in the db but 0 in memory, so 406 i != end; ++i) {
437 // have to avoid listing the root as its own child.
438 if ((*i)->ref(ID) == (*i)->ref(PARENT_ID))
439 continue;
440 result->push_back((*i)->ref(META_HANDLE)); 407 result->push_back((*i)->ref(META_HANDLE));
441 } 408 }
442 } 409 }
443 } 410 }
444 411
445 EntryKernel* Directory::GetRootEntry() { 412 EntryKernel* Directory::GetRootEntry() {
446 return GetEntryById(Id()); 413 return GetEntryById(Id());
447 } 414 }
448 415
449 void ZeroFields(EntryKernel* entry, int first_field) { 416 void ZeroFields(EntryKernel* entry, int first_field) {
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
482 } 449 }
483 450
484 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) { 451 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
485 ScopedKernelLock lock(this); 452 ScopedKernelLock lock(this);
486 if (NULL != GetEntryById(new_id, &lock)) 453 if (NULL != GetEntryById(new_id, &lock))
487 return false; 454 return false;
488 455
489 { 456 {
490 // Update the indices that depend on the ID field. 457 // Update the indices that depend on the ID field.
491 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index); 458 ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
459 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
460 kernel_->parent_id_child_index);
492 entry->put(ID, new_id); 461 entry->put(ID, new_id);
493 } 462 }
494 return true; 463 return true;
495 } 464 }
496 465
497 void Directory::ReindexParentId(EntryKernel* const entry, 466 void Directory::ReindexParentId(EntryKernel* const entry,
498 const Id& new_parent_id) { 467 const Id& new_parent_id) {
499 ScopedKernelLock lock(this); 468 ScopedKernelLock lock(this);
500 469
501 { 470 {
(...skipping 725 matching lines...) Expand 10 before | Expand all | Expand 10 after
1227 1196
1228 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle) 1197 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
1229 : basetrans_(trans) { 1198 : basetrans_(trans) {
1230 kernel_ = trans->directory()->GetEntryByHandle(metahandle); 1199 kernel_ = trans->directory()->GetEntryByHandle(metahandle);
1231 } 1200 }
1232 1201
1233 Directory* Entry::dir() const { 1202 Directory* Entry::dir() const {
1234 return basetrans_->directory(); 1203 return basetrans_->directory();
1235 } 1204 }
1236 1205
1206 Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
1207 return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
1208 }
1209
1237 const string& Entry::Get(StringField field) const { 1210 const string& Entry::Get(StringField field) const {
1238 DCHECK(kernel_); 1211 DCHECK(kernel_);
1239 return kernel_->ref(field); 1212 return kernel_->ref(field);
1240 } 1213 }
1241 1214
1242 syncable::ModelType Entry::GetServerModelType() const { 1215 syncable::ModelType Entry::GetServerModelType() const {
1243 ModelType specifics_type = GetModelTypeFromSpecifics(Get(SERVER_SPECIFICS)); 1216 ModelType specifics_type = GetModelTypeFromSpecifics(Get(SERVER_SPECIFICS));
1244 if (specifics_type != UNSPECIFIED) 1217 if (specifics_type != UNSPECIFIED)
1245 return specifics_type; 1218 return specifics_type;
1246 if (IsRoot()) 1219 if (IsRoot())
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after
1365 // upon a value change. 1338 // upon a value change.
1366 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, 1339 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
1367 dir()->kernel_->parent_id_child_index); 1340 dir()->kernel_->parent_id_child_index);
1368 1341
1369 kernel_->put(IS_DEL, is_del); 1342 kernel_->put(IS_DEL, is_del);
1370 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); 1343 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
1371 } 1344 }
1372 1345
1373 if (!is_del) 1346 if (!is_del)
1374 PutPredecessor(Id()); // Restores position to the 0th index. 1347 PutPredecessor(Id()); // Restores position to the 0th index.
1348
1375 return true; 1349 return true;
1376 } 1350 }
1377 1351
1378 bool MutableEntry::Put(Int64Field field, const int64& value) { 1352 bool MutableEntry::Put(Int64Field field, const int64& value) {
1379 DCHECK(kernel_); 1353 DCHECK(kernel_);
1380 if (kernel_->ref(field) != value) { 1354 if (kernel_->ref(field) != value) {
1381 kernel_->put(field, value); 1355 ScopedKernelLock lock(dir());
1356 if (SERVER_POSITION_IN_PARENT == field) {
1357 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
1358 dir()->kernel_->parent_id_child_index);
1359 kernel_->put(field, value);
1360 } else {
1361 kernel_->put(field, value);
1362 }
1382 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); 1363 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
1383 } 1364 }
1384 return true; 1365 return true;
1385 } 1366 }
1386 1367
1387 bool MutableEntry::Put(IdField field, const Id& value) { 1368 bool MutableEntry::Put(IdField field, const Id& value) {
1388 DCHECK(kernel_); 1369 DCHECK(kernel_);
1389 if (kernel_->ref(field) != value) { 1370 if (kernel_->ref(field) != value) {
1390 if (ID == field) { 1371 if (ID == field) {
1391 if (!dir()->ReindexId(kernel_, value)) 1372 if (!dir()->ReindexId(kernel_, value))
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after
1599 int64 result; 1580 int64 result;
1600 { 1581 {
1601 ScopedKernelLock lock(this); 1582 ScopedKernelLock lock(this);
1602 result = (kernel_->persisted_info.next_id)--; 1583 result = (kernel_->persisted_info.next_id)--;
1603 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 1584 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1604 } 1585 }
1605 DCHECK_LT(result, 0); 1586 DCHECK_LT(result, 0);
1606 return Id::CreateFromClientString(base::Int64ToString(result)); 1587 return Id::CreateFromClientString(base::Int64ToString(result));
1607 } 1588 }
1608 1589
1609 Id Directory::GetChildWithNullIdField(IdField field,
1610 BaseTransaction* trans,
1611 const Id& parent_id) {
1612 // This query is O(number of children), which should be acceptable
1613 // when this method is used as the first step in enumerating the children of
1614 // a node. But careless use otherwise could potentially result in
1615 // O((number of children)^2) performance.
1616 ChildHandles child_handles;
1617 GetChildHandles(trans, parent_id, &child_handles);
1618 ChildHandles::const_iterator it;
1619 for (it = child_handles.begin(); it != child_handles.end(); ++it) {
1620 Entry e(trans, GET_BY_HANDLE, *it);
1621 CHECK(e.good());
1622 if (e.Get(field).IsRoot())
1623 return e.Get(ID);
1624 }
1625
1626 return Id();
1627 }
1628
1629 Id Directory::GetFirstChildId(BaseTransaction* trans, 1590 Id Directory::GetFirstChildId(BaseTransaction* trans,
1630 const Id& parent_id) { 1591 const Id& parent_id) {
1631 return GetChildWithNullIdField(PREV_ID, trans, parent_id); 1592 ScopedKernelLock lock(this);
1593 // We can use the server positional ordering as a hint because it's generally
1594 // in sync with the local (linked-list) positional ordering, and we have an
1595 // index on it.
1596 ParentIdChildIndex::iterator candidate =
1597 GetParentChildIndexLowerBound(lock, parent_id);
1598 ParentIdChildIndex::iterator end_range =
1599 GetParentChildIndexUpperBound(lock, parent_id);
1600 for (; candidate != end_range; ++candidate) {
1601 EntryKernel* entry = *candidate;
1602 // Filter out self-looped items, which are temporarily not in the child
1603 // ordering.
1604 if (entry->ref(PREV_ID).IsRoot() ||
1605 entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
1606 // Walk to the front of the list; the server position ordering
1607 // is commonly identical to the linked-list ordering, but pending
1608 // unsynced or unapplied items may diverge.
1609 while (!entry->ref(PREV_ID).IsRoot()) {
1610 entry = GetEntryById(entry->ref(PREV_ID), &lock);
1611 }
1612 return entry->ref(ID);
1613 }
1614 }
1615 // There were no children in the linked list.
1616 return Id();
1632 } 1617 }
1633 1618
1634 Id Directory::GetLastChildId(BaseTransaction* trans, 1619 Id Directory::GetLastChildId(BaseTransaction* trans,
1635 const Id& parent_id) { 1620 const Id& parent_id) {
1636 return GetChildWithNullIdField(NEXT_ID, trans, parent_id); 1621 ScopedKernelLock lock(this);
1622 // We can use the server positional ordering as a hint because it's generally
1623 // in sync with the local (linked-list) positional ordering, and we have an
1624 // index on it.
1625 ParentIdChildIndex::iterator begin_range =
1626 GetParentChildIndexLowerBound(lock, parent_id);
1627 ParentIdChildIndex::iterator candidate =
1628 GetParentChildIndexUpperBound(lock, parent_id);
Nicolas Zea 2011/03/02 22:58:02 The first candidate will actually have parent_id+1
ncarter (slow) 2011/03/02 23:58:16 It's subtle, but I think it's okay. The pattern u
1629
1630 while (begin_range != candidate) {
1631 --candidate;
1632 EntryKernel* entry = *candidate;
1633
1634 // Filter out self-looped items, which are temporarily not in the child
1635 // ordering.
1636 if (entry->ref(NEXT_ID).IsRoot() ||
1637 entry->ref(NEXT_ID) != entry->ref(PREV_ID)) {
1638 // Walk to the back of the list; the server position ordering
1639 // is commonly identical to the linked-list ordering, but pending
1640 // unsynced or unapplied items may diverge.
1641 while (!entry->ref(NEXT_ID).IsRoot())
1642 entry = GetEntryById(entry->ref(NEXT_ID), &lock);
1643 return entry->ref(ID);
1644 }
1645 }
1646 // There were no children in the linked list.
1647 return Id();
1648 }
1649
1650 Id Directory::ComputePrevIdFromServerPosition(
1651 const EntryKernel* entry,
1652 const syncable::Id& parent_id) {
1653 ScopedKernelLock lock(this);
1654
1655 // Find the natural insertion point in the parent_id_child_index, and
1656 // work back from there, filtering out ineligible candidates.
1657 ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock,
1658 parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID));
1659 ParentIdChildIndex::iterator first_sibling =
1660 GetParentChildIndexLowerBound(lock, parent_id);
1661
1662 while (sibling != first_sibling) {
Nicolas Zea 2011/03/02 22:58:02 I think this prevents the possibility of having th
ncarter (slow) 2011/03/02 23:58:16 I think it works okay for the same reason as the p
1663 --sibling;
1664 EntryKernel* candidate = *sibling;
1665
1666 // The item itself should never be in the range under consideration.
1667 DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
1668
1669 // Ignore unapplied updates -- they might not even be server-siblings.
1670 if (candidate->ref(IS_UNAPPLIED_UPDATE))
1671 continue;
1672
1673 // We can't trust the SERVER_ fields of unsynced items, but they are
1674 // potentially legitimate local predecessors. In the case where
1675 // |update_item| and an unsynced item wind up in the same insertion
1676 // position, we need to choose how to order them. The following check puts
1677 // the unapplied update first; removing it would put the unsynced item(s)
1678 // first.
1679 if (candidate->ref(IS_UNSYNCED))
1680 continue;
1681
1682 // Skip over self-looped items, which are not valid predecessors. This
1683 // shouldn't happen in practice, but is worth defending against.
1684 if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
1685 !candidate->ref(PREV_ID).IsRoot()) {
1686 NOTREACHED();
1687 continue;
1688 }
1689 return candidate->ref(ID);
1690 }
1691 // This item will be the first in the sibling order.
1692 return Id();
1637 } 1693 }
1638 1694
1639 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id, 1695 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
1640 const Id& new_parent_id) { 1696 const Id& new_parent_id) {
1641 if (entry_id.IsRoot()) 1697 if (entry_id.IsRoot())
1642 return false; 1698 return false;
1643 // we have to ensure that the entry is not an ancestor of the new parent. 1699 // we have to ensure that the entry is not an ancestor of the new parent.
1644 Id ancestor_id = new_parent_id; 1700 Id ancestor_id = new_parent_id;
1645 while (!ancestor_id.IsRoot()) { 1701 while (!ancestor_id.IsRoot()) {
1646 if (entry_id == ancestor_id) 1702 if (entry_id == ancestor_id)
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
1694 return os; 1750 return os;
1695 } 1751 }
1696 1752
1697 std::ostream& operator<<(std::ostream& s, const Blob& blob) { 1753 std::ostream& operator<<(std::ostream& s, const Blob& blob) {
1698 for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i) 1754 for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
1699 s << std::hex << std::setw(2) 1755 s << std::hex << std::setw(2)
1700 << std::setfill('0') << static_cast<unsigned int>(*i); 1756 << std::setfill('0') << static_cast<unsigned int>(*i);
1701 return s << std::dec; 1757 return s << std::dec;
1702 } 1758 }
1703 1759
1760 Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
1761 const ScopedKernelLock& lock,
1762 const Id& parent_id,
1763 int64 position_in_parent,
1764 const Id& item_id_for_tiebreaking) {
1765 kernel_->needle.put(PARENT_ID, parent_id);
1766 kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent);
1767 kernel_->needle.put(ID, item_id_for_tiebreaking);
1768 return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
1769 }
1770
1771 Directory::ParentIdChildIndex::iterator
1772 Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
1773 const Id& parent_id) {
1774 // Peg the parent ID, and use the least values for the remaining
1775 // index variables.
1776 return LocateInParentChildIndex(lock, parent_id,
1777 std::numeric_limits<int64>::min(),
1778 Id::GetLeastIdForLexicographicComparison());
1779 }
1780
1781
1782 Directory::ParentIdChildIndex::iterator
1783 Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
1784 const Id& parent_id) {
1785 // The upper bound of |parent_id|'s range is the lower
1786 // bound of |++parent_id|'s range.
1787 return GetParentChildIndexLowerBound(lock,
1788 parent_id.GetLexicographicSuccessor());
1789 }
1790
1704 } // namespace syncable 1791 } // namespace syncable
OLDNEW
« no previous file with comments | « chrome/browser/sync/syncable/syncable.h ('k') | chrome/browser/sync/syncable/syncable_id.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698