| OLD | NEW |
| 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/browser/sync/syncable/syncable.h" | 5 #include "chrome/browser/sync/syncable/syncable.h" |
| 6 | 6 |
| 7 #include "build/build_config.h" | 7 #include "build/build_config.h" |
| 8 | 8 |
| 9 #include <sys/stat.h> | 9 #include <sys/stat.h> |
| 10 #if defined(OS_POSIX) | 10 #if defined(OS_POSIX) |
| (...skipping 139 matching lines...) |
| 150 bool LessPathNames::operator() (const string& a, const string& b) const { | 150 bool LessPathNames::operator() (const string& a, const string& b) const { |
| 151 return ComparePathNames(a, b) < 0; | 151 return ComparePathNames(a, b) < 0; |
| 152 } | 152 } |
| 153 | 153 |
| 154 /////////////////////////////////////////////////////////////////////////// | 154 /////////////////////////////////////////////////////////////////////////// |
| 155 // Directory | 155 // Directory |
| 156 | 156 |
| 157 static const DirectoryChangeEvent kShutdownChangesEvent = | 157 static const DirectoryChangeEvent kShutdownChangesEvent = |
| 158 { DirectoryChangeEvent::SHUTDOWN, 0, 0 }; | 158 { DirectoryChangeEvent::SHUTDOWN, 0, 0 }; |
| 159 | 159 |
| 160 void Directory::init_kernel(const std::string& name) { |
| 161 DCHECK(kernel_ == NULL); |
| 162 kernel_ = new Kernel(FilePath(), name, KernelLoadInfo()); |
| 163 } |
| 164 |
| 160 Directory::Kernel::Kernel(const FilePath& db_path, | 165 Directory::Kernel::Kernel(const FilePath& db_path, |
| 161 const string& name, | 166 const string& name, |
| 162 const KernelLoadInfo& info) | 167 const KernelLoadInfo& info) |
| 163 : db_path(db_path), | 168 : db_path(db_path), |
| 164 refcount(1), | 169 refcount(1), |
| 165 name(name), | 170 name(name), |
| 166 metahandles_index(new Directory::MetahandlesIndex), | 171 metahandles_index(new Directory::MetahandlesIndex), |
| 167 ids_index(new Directory::IdsIndex), | 172 ids_index(new Directory::IdsIndex), |
| 168 parent_id_child_index(new Directory::ParentIdChildIndex), | 173 parent_id_child_index(new Directory::ParentIdChildIndex), |
| 169 client_tag_index(new Directory::ClientTagIndex), | 174 client_tag_index(new Directory::ClientTagIndex), |
| 170 unapplied_update_metahandles(new MetahandleSet), | 175 unapplied_update_metahandles(new MetahandleSet), |
| 171 unsynced_metahandles(new MetahandleSet), | 176 unsynced_metahandles(new MetahandleSet), |
| 172 dirty_metahandles(new MetahandleSet), | 177 dirty_metahandles(new MetahandleSet), |
| 178 metahandles_to_purge(new MetahandleSet), |
| 173 channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)), | 179 channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)), |
| 174 info_status(Directory::KERNEL_SHARE_INFO_VALID), | 180 info_status(Directory::KERNEL_SHARE_INFO_VALID), |
| 175 persisted_info(info.kernel_info), | 181 persisted_info(info.kernel_info), |
| 176 cache_guid(info.cache_guid), | 182 cache_guid(info.cache_guid), |
| 177 next_metahandle(info.max_metahandle + 1) { | 183 next_metahandle(info.max_metahandle + 1) { |
| 178 } | 184 } |
| 179 | 185 |
| 180 inline void DeleteEntry(EntryKernel* kernel) { | 186 inline void DeleteEntry(EntryKernel* kernel) { |
| 181 delete kernel; | 187 delete kernel; |
| 182 } | 188 } |
| 183 | 189 |
| 184 void Directory::Kernel::AddRef() { | 190 void Directory::Kernel::AddRef() { |
| 185 base::subtle::NoBarrier_AtomicIncrement(&refcount, 1); | 191 base::subtle::NoBarrier_AtomicIncrement(&refcount, 1); |
| 186 } | 192 } |
| 187 | 193 |
| 188 void Directory::Kernel::Release() { | 194 void Directory::Kernel::Release() { |
| 189 if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1)) | 195 if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1)) |
| 190 delete this; | 196 delete this; |
| 191 } | 197 } |
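The refcounting above is the classic intrusive pattern: Kernel deletes itself when the count returns to zero. NoBarrier_AtomicIncrement returns the updated value, so a result of 0 in Release() means the last reference was just dropped. A minimal self-contained sketch of the same idea, with std::atomic standing in for Chromium's base::subtle helpers:

```cpp
#include <atomic>

// Sketch of the intrusive refcount pattern used by Directory::Kernel.
// std::atomic stands in for base::subtle; the logic is the same.
class RefCounted {
 public:
  RefCounted() : refcount_(1) {}  // Creator holds the first reference.
  void AddRef() { refcount_.fetch_add(1, std::memory_order_relaxed); }
  void Release() {
    // fetch_sub returns the value *before* the decrement, so a previous
    // value of 1 means the count just hit zero: time to self-delete.
    if (refcount_.fetch_sub(1, std::memory_order_relaxed) == 1)
      delete this;
  }

 protected:
  virtual ~RefCounted() {}  // Heap-only: destroyed via Release().

 private:
  std::atomic<int> refcount_;
};
```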
| 192 | 198 |
| 193 Directory::Kernel::~Kernel() { | 199 Directory::Kernel::~Kernel() { |
| 194 CHECK(0 == refcount); | 200 CHECK(0 == refcount); |
| 195 delete channel; | 201 delete channel; |
| 196 changes_channel.Notify(kShutdownChangesEvent); | 202 changes_channel.Notify(kShutdownChangesEvent); |
| 197 delete unsynced_metahandles; | 203 delete unsynced_metahandles; |
| 198 delete unapplied_update_metahandles; | 204 delete unapplied_update_metahandles; |
| 199 delete dirty_metahandles; | 205 delete dirty_metahandles; |
| 206 delete metahandles_to_purge; |
| 200 delete parent_id_child_index; | 207 delete parent_id_child_index; |
| 201 delete client_tag_index; | 208 delete client_tag_index; |
| 202 delete ids_index; | 209 delete ids_index; |
| 203 for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry); | 210 for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry); |
| 204 delete metahandles_index; | 211 delete metahandles_index; |
| 205 } | 212 } |
| 206 | 213 |
| 207 Directory::Directory() : kernel_(NULL), store_(NULL) { | 214 Directory::Directory() : kernel_(NULL), store_(NULL) { |
| 208 } | 215 } |
| 209 | 216 |
| (...skipping 305 matching lines...) |
| 515 if (!entry->is_dirty()) | 522 if (!entry->is_dirty()) |
| 516 continue; | 523 continue; |
| 517 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry); | 524 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry); |
| 518 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); | 525 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); |
| 519 // We don't bother removing from the index here, as we blow the entire | 526 // We don't bother removing from the index here, as we blow the entire |
| 520 // thing away in a moment, and it unnecessarily complicates iteration. | 527 // thing away in a moment, and it unnecessarily complicates iteration. |
| 521 entry->clear_dirty(NULL); | 528 entry->clear_dirty(NULL); |
| 522 } | 529 } |
| 523 ClearDirtyMetahandles(); | 530 ClearDirtyMetahandles(); |
| 524 | 531 |
| 532 // Set purged handles. |
| 533 DCHECK(snapshot->metahandles_to_purge.empty()); |
| 534 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); |
| 535 |
| 525 // Fill kernel_info_status and kernel_info. | 536 // Fill kernel_info_status and kernel_info. |
| 526 snapshot->kernel_info = kernel_->persisted_info; | 537 snapshot->kernel_info = kernel_->persisted_info; |
| 527 // To avoid duplicates when the process crashes, we record next_id at a | 538 // To avoid duplicates when the process crashes, we record next_id at a |
| 528 // greater magnitude than it could possibly reach before the next | 539 // greater magnitude than it could possibly reach before the next |
| 529 // SaveChanges. In other words, it's effectively impossible for the user | 540 // SaveChanges. In other words, it's effectively impossible for the user |
| 530 // to generate 65536 new bookmarks in 3 seconds. | 541 // to generate 65536 new bookmarks in 3 seconds. |
| 531 snapshot->kernel_info.next_id -= 65536; | 542 snapshot->kernel_info.next_id -= 65536; |
| 532 snapshot->kernel_info_status = kernel_->info_status; | 543 snapshot->kernel_info_status = kernel_->info_status; |
| 533 // This one we reset on failure. | 544 // This one we reset on failure. |
| 534 kernel_->info_status = KERNEL_SHARE_INFO_VALID; | 545 kernel_->info_status = KERNEL_SHARE_INFO_VALID; |
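The 65536 reservation above is easier to see with numbers. Client-side IDs are minted by decrementing this counter (hence the subtraction), so persisting a value 65536 below the in-memory one guarantees that, after a crash, allocation resumes past anything the dead process could have handed out between saves. A worked example with made-up values:

```cpp
#include <cstdint>

// Made-up values, purely to illustrate the reservation arithmetic
// (real client IDs are opaque strings derived from this counter).
int64_t in_memory_next_id = -1000;                      // Used by the live process.
int64_t persisted_next_id = in_memory_next_id - 65536;  // Value written to disk.
// If the process crashes after minting up to 65536 more IDs, a reload
// resumes at -66536, beyond anything the crashed process could have
// issued, so no ID is ever handed out twice.
```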
| (...skipping 37 matching lines...) |
| 572 size_t num_erased = 0; | 583 size_t num_erased = 0; |
| 573 kernel_->flushed_metahandles.Push(entry->ref(META_HANDLE)); | 584 kernel_->flushed_metahandles.Push(entry->ref(META_HANDLE)); |
| 574 num_erased = kernel_->ids_index->erase(entry); | 585 num_erased = kernel_->ids_index->erase(entry); |
| 575 DCHECK_EQ(1u, num_erased); | 586 DCHECK_EQ(1u, num_erased); |
| 576 num_erased = kernel_->metahandles_index->erase(entry); | 587 num_erased = kernel_->metahandles_index->erase(entry); |
| 577 DCHECK_EQ(1u, num_erased); | 588 DCHECK_EQ(1u, num_erased); |
| 578 | 589 |
| 579 // The entry might not be in the client tag index. | 590 // The entry might not be in the client tag index. |
| 580 num_erased = kernel_->client_tag_index->erase(entry); | 591 num_erased = kernel_->client_tag_index->erase(entry); |
| 581 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); | 592 DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); |
| 593 DCHECK(!kernel_->parent_id_child_index->count(entry)); |
| 582 delete entry; | 594 delete entry; |
| 583 } | 595 } |
| 584 } | 596 } |
| 585 } | 597 } |
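A note on the num_erased pattern here and throughout the file: std::set::erase(key) returns the number of elements removed, which for a set is 0 or 1, so the return value doubles as a cheap membership assertion in the DCHECKs. In isolation:

```cpp
#include <cassert>
#include <set>

// erase(key) on a std::set returns how many elements it removed (0 or
// 1), which the DCHECK_EQ(1u, num_erased) checks above rely on.
void EraseReturnValueDemo() {
  std::set<int> index;
  index.insert(7);
  assert(index.erase(7) == 1);  // Key was present: one element removed.
  assert(index.erase(7) == 0);  // Key now absent: erase is a no-op.
}
```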
| 586 | 598 |
| 587 void Directory::PurgeEntriesWithTypeIn(const std::set<ModelType>& types) { | 599 void Directory::PurgeEntriesWithTypeIn(const std::set<ModelType>& types) { |
| 588 if (types.count(UNSPECIFIED) != 0U || types.count(TOP_LEVEL_FOLDER) != 0U) { | 600 if (types.count(UNSPECIFIED) != 0U || types.count(TOP_LEVEL_FOLDER) != 0U) { |
| 589 NOTREACHED() << "Don't support purging unspecified or top level entries."; | 601 NOTREACHED() << "Don't support purging unspecified or top level entries."; |
| 590 return; | 602 return; |
| 591 } | 603 } |
| 592 | 604 |
| 593 if (types.empty()) | 605 if (types.empty()) |
| 594 return; | 606 return; |
| 595 | 607 |
| 596 { | 608 { |
| 597 ScopedKernelLock lock(this); | 609 WriteTransaction trans(this, PURGE_ENTRIES, __FILE__, __LINE__); |
| 598 for (MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); | 610 { |
| 599 it != kernel_->metahandles_index->end(); ++it) { | 611 ScopedKernelLock lock(this); |
| 600 const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS); | 612 MetahandlesIndex::iterator it = kernel_->metahandles_index->begin(); |
| 601 const sync_pb::EntitySpecifics& server_specifics = | 613 while (it != kernel_->metahandles_index->end()) { |
| 602 (*it)->ref(SERVER_SPECIFICS); | 614 const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS); |
| 603 ModelType local_type = GetModelTypeFromSpecifics(local_specifics); | 615 const sync_pb::EntitySpecifics& server_specifics = |
| 604 ModelType server_type = GetModelTypeFromSpecifics(server_specifics); | 616 (*it)->ref(SERVER_SPECIFICS); |
| 617 ModelType local_type = GetModelTypeFromSpecifics(local_specifics); |
| 618 ModelType server_type = GetModelTypeFromSpecifics(server_specifics); |
| 605 | 619 |
| 606 if (types.count(local_type) > 0 || types.count(server_type) > 0) { | 620 // Note the dance around incrementing |it|, since we sometimes erase(). |
| 607 // Set conditions for permanent deletion. | 621 if (types.count(local_type) > 0 || types.count(server_type) > 0) { |
| 608 (*it)->put(IS_DEL, true); | 622 UnlinkEntryFromOrder(*it, NULL, &lock); |
| 609 (*it)->put(IS_UNSYNCED, false); | 623 |
| 610 (*it)->put(IS_UNAPPLIED_UPDATE, false); | 624 kernel_->metahandles_to_purge->insert((*it)->ref(META_HANDLE)); |
| 611 (*it)->mark_dirty(kernel_->dirty_metahandles); | 625 |
| 612 DCHECK(!SafeToPurgeFromMemory(*it)); | 626 size_t num_erased = 0; |
| 627 num_erased = kernel_->ids_index->erase(*it); |
| 628 DCHECK_EQ(1u, num_erased); |
| 629 num_erased = kernel_->client_tag_index->erase(*it); |
| 630 DCHECK_EQ((*it)->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased); |
| 631 num_erased = kernel_->parent_id_child_index->erase(*it); |
| 632 DCHECK_EQ((*it)->ref(IS_DEL), !num_erased); |
| 633 kernel_->metahandles_index->erase(it++); |
| 634 } else { |
| 635 ++it; |
| 636 } |
| 613 } | 637 } |
| 614 } | |
| 615 | 638 |
| 616 // Ensure meta tracking for these data types reflects the deleted state. | 639 // Ensure meta tracking for these data types reflects the deleted state. |
| 617 for (std::set<ModelType>::const_iterator it = types.begin(); | 640 for (std::set<ModelType>::const_iterator it = types.begin(); |
| 618 it != types.end(); ++it) { | 641 it != types.end(); ++it) { |
| 619 set_initial_sync_ended_for_type_unsafe(*it, false); | 642 set_initial_sync_ended_for_type_unsafe(*it, false); |
| 620 set_last_download_timestamp_unsafe(*it, 0); | 643 set_last_download_timestamp_unsafe(*it, 0); |
| 644 } |
| 621 } | 645 } |
| 622 } | 646 } |
| 623 } | 647 } |
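The erase(it++) dance called out in the comment above is the standard pre-C++11 idiom for pruning an associative container during traversal: the post-increment hands erase() the doomed iterator while the loop variable has already moved on to a still-valid successor. In generic form (a sketch, not code from this change):

```cpp
#include <set>

// Generic form of the erase-while-iterating idiom used in
// PurgeEntriesWithTypeIn: it++ passes the current position to erase()
// after advancing |it|, so the loop never touches an invalid iterator.
template <typename T, typename Pred>
void EraseIf(std::set<T>* items, Pred should_erase) {
  typename std::set<T>::iterator it = items->begin();
  while (it != items->end()) {
    if (should_erase(*it))
      items->erase(it++);  // Erase the old position; |it| already moved on.
    else
      ++it;
  }
}
```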
| 624 | 648 |
| 625 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { | 649 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { |
| 626 ScopedKernelLock lock(this); | 650 ScopedKernelLock lock(this); |
| 627 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 651 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
| 628 | 652 |
| 629 // Because we optimistically cleared the dirty bit on the real entries when | 653 // Because we optimistically cleared the dirty bit on the real entries when |
| 630 // taking the snapshot, we must restore it on failure. Not doing this could | 654 // taking the snapshot, we must restore it on failure. Not doing this could |
| 631 // cause lost data, if no other changes are made to the in-memory entries | 655 // cause lost data, if no other changes are made to the in-memory entries |
| 632 // that would cause the dirty bit to get set again. Setting the bit ensures | 656 // that would cause the dirty bit to get set again. Setting the bit ensures |
| 633 // that SaveChanges will at least try again later. | 657 // that SaveChanges will at least try again later. |
| 634 for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin(); | 658 for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin(); |
| 635 i != snapshot.dirty_metas.end(); ++i) { | 659 i != snapshot.dirty_metas.end(); ++i) { |
| 636 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); | 660 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); |
| 637 MetahandlesIndex::iterator found = | 661 MetahandlesIndex::iterator found = |
| 638 kernel_->metahandles_index->find(&kernel_->needle); | 662 kernel_->metahandles_index->find(&kernel_->needle); |
| 639 if (found != kernel_->metahandles_index->end()) { | 663 if (found != kernel_->metahandles_index->end()) { |
| 640 (*found)->mark_dirty(kernel_->dirty_metahandles); | 664 (*found)->mark_dirty(kernel_->dirty_metahandles); |
| 641 } | 665 } |
| 642 } | 666 } |
| 667 |
| 668 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), |
| 669 snapshot.metahandles_to_purge.end()); |
| 643 } | 670 } |
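Taken together with TakeSnapshotForSaveChanges above, this is an optimistic-clear scheme: dirty bits (and now purged handles) are cleared when the snapshot is taken and restored only if the database write fails, so a successful save leaves nothing to redo. A self-contained sketch of the shape of that contract, with illustrative names rather than the real syncable types:

```cpp
#include <set>

// Sketch of the optimistic clear/restore contract; Store and Snapshot
// are stand-ins for Directory and SaveChangesSnapshot.
struct Snapshot { std::set<long> dirty, purged; };

struct Store {
  std::set<long> dirty, purged;
  void TakeSnapshot(Snapshot* s) {
    s->dirty.swap(dirty);    // Optimistically clear the dirty set.
    s->purged.swap(purged);  // Steal the purge queue as well.
  }
  void HandleFailure(const Snapshot& s) {
    // Restore both sets so the next save attempt retries this work.
    dirty.insert(s.dirty.begin(), s.dirty.end());
    purged.insert(s.purged.begin(), s.purged.end());
  }
};

bool Save(Store* store, bool (*write_to_db)(const Snapshot&)) {
  Snapshot snap;
  store->TakeSnapshot(&snap);
  if (write_to_db(snap))
    return true;               // Clean save: nothing left to redo.
  store->HandleFailure(snap);  // Failed: put the work back.
  return false;
}
```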
| 644 | 671 |
| 645 int64 Directory::last_download_timestamp(ModelType model_type) const { | 672 int64 Directory::last_download_timestamp(ModelType model_type) const { |
| 646 ScopedKernelLock lock(this); | 673 ScopedKernelLock lock(this); |
| 647 return kernel_->persisted_info.last_download_timestamp[model_type]; | 674 return kernel_->persisted_info.last_download_timestamp[model_type]; |
| 648 } | 675 } |
| 649 | 676 |
| 650 void Directory::set_last_download_timestamp(ModelType model_type, | 677 void Directory::set_last_download_timestamp(ModelType model_type, |
| 651 int64 timestamp) { | 678 int64 timestamp) { |
| 652 ScopedKernelLock lock(this); | 679 ScopedKernelLock lock(this); |
| (...skipping 616 matching lines...) |
| 1269 CHECK(index->insert(kernel_->ref(META_HANDLE)).second); | 1296 CHECK(index->insert(kernel_->ref(META_HANDLE)).second); |
| 1270 else | 1297 else |
| 1271 CHECK(1 == index->erase(kernel_->ref(META_HANDLE))); | 1298 CHECK(1 == index->erase(kernel_->ref(META_HANDLE))); |
| 1272 kernel_->put(field, value); | 1299 kernel_->put(field, value); |
| 1273 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | 1300 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); |
| 1274 } | 1301 } |
| 1275 return true; | 1302 return true; |
| 1276 } | 1303 } |
| 1277 | 1304 |
| 1278 void MutableEntry::UnlinkFromOrder() { | 1305 void MutableEntry::UnlinkFromOrder() { |
| 1279 Id old_previous = Get(PREV_ID); | 1306 ScopedKernelLock lock(dir()); |
| 1280 Id old_next = Get(NEXT_ID); | 1307 dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock); |
| 1308 } |
| 1281 | 1309 |
| 1282 // Self-looping signifies that this item is not in the order. If we were to | 1310 void Directory::UnlinkEntryFromOrder(EntryKernel* entry, |
| 1283 // set these to 0, we could get into trouble because this node might look | 1311 WriteTransaction* trans, |
| 1284 // like the first node in the ordering. | 1312 ScopedKernelLock* lock) { |
| 1285 Put(NEXT_ID, Get(ID)); | 1313 CHECK(!trans || this == trans->directory()); |
| 1286 Put(PREV_ID, Get(ID)); | 1314 Id old_previous = entry->ref(PREV_ID); |
| 1315 Id old_next = entry->ref(NEXT_ID); |
| 1316 |
| 1317 entry->put(NEXT_ID, entry->ref(ID)); |
| 1318 entry->put(PREV_ID, entry->ref(ID)); |
| 1319 entry->mark_dirty(kernel_->dirty_metahandles); |
| 1287 | 1320 |
| 1288 if (!old_previous.IsRoot()) { | 1321 if (!old_previous.IsRoot()) { |
| 1289 if (old_previous == old_next) { | 1322 if (old_previous == old_next) { |
| 1290 // Note previous == next doesn't imply previous == next == Get(ID). We | 1323 // Note previous == next doesn't imply previous == next == Get(ID). We |
| 1291 // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added | 1324 // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added |
| 1292 // and deleted before receiving the server ID in the commit response. | 1325 // and deleted before receiving the server ID in the commit response. |
| 1293 CHECK((old_next == Get(ID)) || !old_next.ServerKnows()); | 1326 CHECK((old_next == entry->ref(ID)) || !old_next.ServerKnows()); |
| 1294 return; // Done if we were already self-looped (hence unlinked). | 1327 return; // Done if we were already self-looped (hence unlinked). |
| 1295 } | 1328 } |
| 1296 MutableEntry previous_entry(write_transaction(), GET_BY_ID, old_previous); | 1329 EntryKernel* previous_entry = GetEntryById(old_previous, lock); |
| 1297 CHECK(previous_entry.good()); | 1330 CHECK(previous_entry); |
| 1298 previous_entry.Put(NEXT_ID, old_next); | 1331 if (trans) |
| 1332 trans->SaveOriginal(previous_entry); |
| 1333 previous_entry->put(NEXT_ID, old_next); |
| 1334 previous_entry->mark_dirty(kernel_->dirty_metahandles); |
| 1299 } | 1335 } |
| 1300 | 1336 |
| 1301 if (!old_next.IsRoot()) { | 1337 if (!old_next.IsRoot()) { |
| 1302 MutableEntry next_entry(write_transaction(), GET_BY_ID, old_next); | 1338 EntryKernel* next_entry = GetEntryById(old_next, lock); |
| 1303 CHECK(next_entry.good()); | 1339 CHECK(next_entry); |
| 1304 next_entry.Put(PREV_ID, old_previous); | 1340 if (trans) |
| 1341 trans->SaveOriginal(next_entry); |
| 1342 next_entry->put(PREV_ID, old_previous); |
| 1343 next_entry->mark_dirty(kernel_->dirty_metahandles); |
| 1305 } | 1344 } |
| 1306 } | 1345 } |
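For readers new to this file: PREV_ID/NEXT_ID thread a doubly linked list through each folder's children, and a node whose links point at itself is the sentinel for "not in any order" (as the old comment noted, a null link would be ambiguous with being first in the ordering). A toy model of the unlink step, with raw pointers standing in for syncable Ids:

```cpp
// Toy model of UnlinkEntryFromOrder: a self-loop marks a node as "not
// in any order", which is unambiguous where a null link would not be.
struct Node {
  Node* prev;
  Node* next;
  Node() : prev(this), next(this) {}  // Nodes start out unlinked.
};

void Unlink(Node* n) {
  if (n->prev == n && n->next == n)
    return;                 // Already self-looped, i.e. already unlinked.
  n->prev->next = n->next;  // Splice the neighbors around |n|.
  n->next->prev = n->prev;
  n->prev = n;              // Restore the self-loop sentinel so |n| can't
  n->next = n;              // be mistaken for a member of some list.
}
```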
| 1307 | 1346 |
| 1308 bool MutableEntry::PutPredecessor(const Id& predecessor_id) { | 1347 bool MutableEntry::PutPredecessor(const Id& predecessor_id) { |
| 1309 UnlinkFromOrder(); | 1348 UnlinkFromOrder(); |
| 1310 | 1349 |
| 1311 if (Get(IS_DEL)) { | 1350 if (Get(IS_DEL)) { |
| 1312 DCHECK(predecessor_id.IsNull()); | 1351 DCHECK(predecessor_id.IsNull()); |
| 1313 return true; | 1352 return true; |
| 1314 } | 1353 } |
| (...skipping 192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1507 return s << std::dec; | 1546 return s << std::dec; |
| 1508 } | 1547 } |
| 1509 | 1548 |
| 1510 FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) { | 1549 FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) { |
| 1511 if (blob.empty()) | 1550 if (blob.empty()) |
| 1512 return dump; | 1551 return dump; |
| 1513 string buffer(HexEncode(&blob[0], blob.size())); | 1552 string buffer(HexEncode(&blob[0], blob.size())); |
| 1514 dump.out_->sputn(buffer.c_str(), buffer.size()); | 1553 dump.out_->sputn(buffer.c_str(), buffer.size()); |
| 1515 return dump; | 1554 return dump; |
| 1516 } | 1555 } |