Chromium Code Reviews

Unified diff: components/sync/syncable/directory.cc

Issue 2844333003: [Sync] Address use-after-free in Directory::InsertEntry (Closed)
Patch Set: Created 3 years, 7 months ago
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "components/sync/syncable/directory.h"

 #include <inttypes.h>

 #include <algorithm>
 #include <iterator>
(...skipping 336 matching lines...)

 int Directory::GetPositionIndex(BaseTransaction* trans,
                                 EntryKernel* kernel) const {
   const OrderedChildSet* siblings =
       kernel_->parent_child_index.GetSiblings(kernel);

   OrderedChildSet::const_iterator it = siblings->find(kernel);
   return std::distance(siblings->begin(), it);
 }

-bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
+bool Directory::InsertEntry(BaseWriteTransaction* trans,
+                            std::unique_ptr<EntryKernel> entry) {
   ScopedKernelLock lock(this);
-  return InsertEntry(lock, trans, entry);
+  return InsertEntry(lock, trans, std::move(entry));
 }

 bool Directory::InsertEntry(const ScopedKernelLock& lock,
                             BaseWriteTransaction* trans,
-                            EntryKernel* entry) {
+                            std::unique_ptr<EntryKernel> entry) {
   if (!SyncAssert(nullptr != entry, FROM_HERE, "Entry is null", trans))
     return false;
+  EntryKernel* entry_ptr = entry.get();

   static const char error[] = "Entry already in memory index.";

   if (!SyncAssert(kernel_->metahandles_map
-                      .insert(std::make_pair(entry->ref(META_HANDLE),
-                                             base::WrapUnique(entry)))
+                      .insert(std::make_pair(entry_ptr->ref(META_HANDLE),
+                                             std::move(entry)))
                       .second,
                   FROM_HERE, error, trans)) {
     return false;
   }
   if (!SyncAssert(
-          kernel_->ids_map.insert(std::make_pair(entry->ref(ID).value(), entry))
+          kernel_->ids_map
+              .insert(std::make_pair(entry_ptr->ref(ID).value(), entry_ptr))
               .second,
           FROM_HERE, error, trans)) {
     return false;
   }
-  if (ParentChildIndex::ShouldInclude(entry)) {
-    if (!SyncAssert(kernel_->parent_child_index.Insert(entry), FROM_HERE, error,
-                    trans)) {
+  if (ParentChildIndex::ShouldInclude(entry_ptr)) {
+    if (!SyncAssert(kernel_->parent_child_index.Insert(entry_ptr), FROM_HERE,
+                    error, trans)) {
       return false;
     }
   }
-  AddToAttachmentIndex(lock, entry->ref(META_HANDLE),
-                       entry->ref(ATTACHMENT_METADATA));
+  AddToAttachmentIndex(lock, entry_ptr->ref(META_HANDLE),
+                       entry_ptr->ref(ATTACHMENT_METADATA));

   // Should NEVER be created with a client tag or server tag.
-  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
+  if (!SyncAssert(entry_ptr->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                   "Server tag should be empty", trans)) {
     return false;
   }
-  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
+  if (!SyncAssert(entry_ptr->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                   "Client tag should be empty", trans))
     return false;

   return true;
 }

 bool Directory::ReindexId(BaseWriteTransaction* trans,
                           EntryKernel* const entry,
                           const Id& new_id) {
   ScopedKernelLock lock(this);
(...skipping 202 matching lines...)
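
Note on the InsertEntry change above: the old overloads took a raw EntryKernel* and only wrapped it with base::WrapUnique() inside the metahandles_map insert, so if that insert was rejected (duplicate metahandle) the temporary owner destroyed the entry while the caller and the rest of the function still held the raw pointer. Taking std::unique_ptr<EntryKernel> makes the ownership hand-off explicit, and entry_ptr survives only as a non-owning alias for reads after the move. The snippet below is a minimal standalone sketch of that pattern; Entry and Index are hypothetical stand-ins, not the real syncable types.

// Minimal sketch (not Chromium code) of the ownership hand-off adopted above:
// the container takes the std::unique_ptr; a raw alias is kept only for reads.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Entry {
  int64_t meta_handle;
  std::string id;
};

class Index {
 public:
  // Takes ownership up front. On a duplicate key the map rejects the
  // insertion and the entry is destroyed here; the caller gave up its
  // pointer at the call site, so nothing dangles outside this function.
  bool Insert(std::unique_ptr<Entry> entry) {
    Entry* entry_ptr = entry.get();  // non-owning alias for reads after the move
    auto result = by_handle_.insert(
        std::make_pair(entry_ptr->meta_handle, std::move(entry)));
    if (!result.second)
      return false;  // duplicate handle; do not touch entry_ptr past this point
    // Secondary index stores a non-owning pointer, mirroring ids_map.
    by_id_[entry_ptr->id] = entry_ptr;
    return true;
  }

 private:
  std::map<int64_t, std::unique_ptr<Entry>> by_handle_;
  std::map<std::string, Entry*> by_id_;
};

int main() {
  Index index;
  std::cout << index.Insert(std::make_unique<Entry>(Entry{1, "id-1"})) << "\n";    // 1
  std::cout << index.Insert(std::make_unique<Entry>(Entry{1, "id-dup"})) << "\n";  // 0
}

After std::move(entry) the map is the sole owner, so a failed insert can only destroy an object nobody else still claims to own; that is the property the raw-pointer version could not guarantee.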
     return true;

   // Need a write transaction as we are about to permanently purge entries.
   WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
   ScopedKernelLock lock(this);
   // Now drop everything we can out of memory.
   for (auto i = snapshot.dirty_metas.begin(); i != snapshot.dirty_metas.end();
        ++i) {
     MetahandlesMap::iterator found =
         kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
-    EntryKernel* entry =
-        (found == kernel_->metahandles_map.end() ? nullptr
-                                                 : found->second.get());
-    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
+    if (found != kernel_->metahandles_map.end() &&
+        SafeToPurgeFromMemory(&trans, found->second.get())) {
       // We now drop deleted metahandles that are up to date on both the client
       // and the server.
-      std::unique_ptr<EntryKernel> unique_entry = std::move(found->second);
+      std::unique_ptr<EntryKernel> entry = std::move(found->second);

       size_t num_erased = 0;
-      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
-      DCHECK_EQ(1u, num_erased);
+      kernel_->metahandles_map.erase(found);
       num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
       DCHECK_EQ(1u, num_erased);
       if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
         num_erased =
             kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
         DCHECK_EQ(1u, num_erased);
       }
       if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
         num_erased =
             kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
         DCHECK_EQ(1u, num_erased);
       }
-      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), FROM_HERE,
-                      "Deleted entry still present", (&trans)))
+      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry.get()),
+                      FROM_HERE, "Deleted entry still present", (&trans)))
         return false;
       RemoveFromAttachmentIndex(lock, entry->ref(META_HANDLE),
                                 entry->ref(ATTACHMENT_METADATA));
     }
     if (trans.unrecoverable_error_set())
       return false;
   }
   return true;
 }

(...skipping 48 matching lines...)
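
Note on the VacuumAfterSaveChanges hunk above: once found->second has been moved into a local std::unique_ptr, the map slot holds null, so the new code erases by the iterator it already has instead of looking the element up again by META_HANDLE (the old erase-by-key plus DCHECK becomes unnecessary). The snippet below shows the same move-then-erase-by-iterator idiom with a hypothetical Record type; it is an illustration of the idiom, not the Chromium code.

// Minimal sketch (not Chromium code): take ownership of a map-owned object,
// then erase the now-empty slot by iterator rather than by key.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <utility>

struct Record {
  int64_t handle;
  bool purgeable;
};

int main() {
  std::map<int64_t, std::unique_ptr<Record>> records;
  records[1] = std::make_unique<Record>(Record{1, true});
  records[2] = std::make_unique<Record>(Record{2, false});

  auto found = records.find(1);
  if (found != records.end() && found->second->purgeable) {
    // Transfer ownership out of the map; found->second is now null,
    // but the iterator and its key stay valid.
    std::unique_ptr<Record> record = std::move(found->second);

    // Erase by iterator: no second lookup, and it is guaranteed to remove
    // exactly the element that was just inspected.
    records.erase(found);

    // The object is still alive here because this scope owns it.
    std::cout << "purged handle " << record->handle << "\n";
  }
  std::cout << "remaining: " << records.size() << "\n";  // prints 1
}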
   }

   // At this point locally created items that aren't synced will become locally
   // deleted items, and purged on the next snapshot. All other items will match
   // the state they would have had if they were just created via a server
   // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
 }

 void Directory::DeleteEntry(const ScopedKernelLock& lock,
                             bool save_to_journal,
-                            EntryKernel* entry,
+                            EntryKernel* entry_ptr,
                             OwnedEntryKernelSet* entries_to_journal) {
-  int64_t handle = entry->ref(META_HANDLE);
-  ModelType server_type =
-      GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
+  int64_t handle = entry_ptr->ref(META_HANDLE);

   kernel_->metahandles_to_purge.insert(handle);

-  std::unique_ptr<EntryKernel> entry_ptr =
+  std::unique_ptr<EntryKernel> entry =
       std::move(kernel_->metahandles_map[handle]);

+  ModelType server_type =
+      GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
+
   size_t num_erased = 0;
   num_erased = kernel_->metahandles_map.erase(handle);
   DCHECK_EQ(1u, num_erased);
   num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
   DCHECK_EQ(1u, num_erased);
   num_erased = kernel_->unsynced_metahandles.erase(handle);
   DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
   num_erased = kernel_->unapplied_update_metahandles[server_type].erase(handle);
   DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
-  if (kernel_->parent_child_index.Contains(entry))
-    kernel_->parent_child_index.Remove(entry);
+  if (kernel_->parent_child_index.Contains(entry.get()))
+    kernel_->parent_child_index.Remove(entry.get());

   if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
     num_erased = kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
     DCHECK_EQ(1u, num_erased);
   }
   if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
     num_erased = kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
     DCHECK_EQ(1u, num_erased);
   }
   RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

   if (save_to_journal) {
-    entries_to_journal->insert(std::move(entry_ptr));
+    entries_to_journal->insert(std::move(entry));
   }
 }

 void Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                        ModelTypeSet types_to_journal,
                                        ModelTypeSet types_to_unapply) {
   disabled_types.RemoveAll(ProxyTypes());
   if (disabled_types.Empty())
     return;

(...skipping 875 matching lines...)
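
Note on the DeleteEntry hunk above: the raw parameter is renamed entry_ptr and is used only to derive the metahandle; the function then takes the owning std::unique_ptr out of metahandles_map (now named entry), and every later read, including the SERVER_SPECIFICS lookup that used to happen first, goes through the pointer this function owns. The entry is then either moved into the delete journal or destroyed when it goes out of scope. Below is a minimal sketch of that shape; Record, DeleteRecord, and the journal set are hypothetical names.

// Minimal sketch (not Chromium code): delete-with-optional-journaling, taking
// ownership from the owning map before doing any further work with the object.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <utility>

struct Record {
  int64_t handle;
};

using OwnedRecordSet = std::set<std::unique_ptr<Record>>;

void DeleteRecord(std::map<int64_t, std::unique_ptr<Record>>* records,
                  bool save_to_journal,
                  Record* record_ptr,  // non-owning: only identifies the element
                  OwnedRecordSet* journal) {
  const int64_t handle = record_ptr->handle;

  // Take ownership first; all reads below go through the owned pointer.
  std::unique_ptr<Record> record = std::move((*records)[handle]);
  records->erase(handle);

  if (save_to_journal) {
    journal->insert(std::move(record));  // keep the object alive in the journal
  }
  // Otherwise `record` is destroyed here, after all bookkeeping is done.
}

int main() {
  std::map<int64_t, std::unique_ptr<Record>> records;
  records[7] = std::make_unique<Record>(Record{7});
  OwnedRecordSet journal;

  DeleteRecord(&records, /*save_to_journal=*/true, records[7].get(), &journal);
  std::cout << records.size() << " " << journal.size() << "\n";  // prints "0 1"
}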
 Directory::Kernel* Directory::kernel() {
   return kernel_;
 }

 const Directory::Kernel* Directory::kernel() const {
   return kernel_;
 }

 }  // namespace syncable
 }  // namespace syncer