Chromium Code Reviews

Unified Diff: sync/syncable/directory.cc

Issue 11441026: [Sync] Add support for loading, updating and querying delete journals in (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years ago
Index: sync/syncable/directory.cc
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index 21e05ebe23e7f06309a43d1c0ce0496422793ad4..71217efc4b9e75eee802c9a993a5b3dd2ba5c785 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -101,7 +101,10 @@ Directory::SaveChangesSnapshot::SaveChangesSnapshot()
: kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
-Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
+Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
+ STLDeleteElements(&dirty_metas);
+ STLDeleteElements(&delete_journals);
+}
Directory::Kernel::Kernel(
const std::string& name,
@@ -121,7 +124,9 @@ Directory::Kernel::Kernel(
cache_guid(info.cache_guid),
next_metahandle(info.max_metahandle + 1),
delegate(delegate),
- transaction_observer(transaction_observer) {
+ transaction_observer(transaction_observer),
+ delete_journals_(new Directory::IdsIndex),
+ delete_journals_to_purge_(new MetahandleSet) {
DCHECK(delegate);
DCHECK(transaction_observer.IsInitialized());
}
@@ -135,6 +140,9 @@ Directory::Kernel::~Kernel() {
delete ids_index;
STLDeleteElements(metahandles_index);
delete metahandles_index;
+ STLDeleteElements(delete_journals_);
+ delete delete_journals_;
+ delete delete_journals_to_purge_;
}
Directory::Directory(
@@ -196,17 +204,19 @@ DirOpenResult Directory::OpenImpl(
DirectoryChangeDelegate* delegate,
const WeakHandle<TransactionObserver>&
transaction_observer) {
-
KernelLoadInfo info;
// Temporary indices before kernel_ is initialized in case Load fails. We O(1)
// swap these later.
MetahandlesIndex metas_bucket;
- DirOpenResult result = store_->Load(&metas_bucket, &info);
+ IdsIndex delete_journals;
+
+ DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info);
if (OPENED != result)
return result;
kernel_ = new Kernel(name, info, delegate, transaction_observer);
kernel_->metahandles_index->swap(metas_bucket);
+ kernel_->delete_journals_->swap(delete_journals);
InitializeIndices();
// Write back the share info to reserve some space in 'next_id'. This will
@@ -236,6 +246,75 @@ void Directory::OnUnrecoverableError(const BaseTransaction* trans,
message);
}
+void Directory::UpdateDeleteJournalForServerDelete(BaseTransaction* trans,
+ bool was_deleted,
+ const EntryKernel& entry) {
+ if (!(IsDeleteJournalEnabled(entry.GetServerModelType()) ||
+ IsDeleteJournalEnabled(
+ GetModelTypeFromSpecifics(entry.ref(SPECIFICS))))) {
tim (not reviewing) 2012/12/14 20:12:38 Can you add the comment from your reply in here ex
haitaol1 2012/12/14 23:15:36 Done.
+ return;
+ }
+
+ ScopedKernelLock lock(this);
+ kernel_->needle.put(ID, entry.ref(ID));
+ IdsIndex::const_iterator it =
+ kernel_->delete_journals_->find(&kernel_->needle);
+
+ if (entry.ref(SERVER_IS_DEL)) {
+ if (it == kernel_->delete_journals_->end()) {
+ // New delete.
+ EntryKernel* t = new EntryKernel(entry);
+ kernel_->delete_journals_->insert(t);
+ kernel_->delete_journals_to_purge_->erase(t->ref(META_HANDLE));
+ }
+ } else {
+    // Undelete. This could happen in two cases:
+    // * An entry was actually deleted and then undeleted: was_deleted = true.
+    // * A data type was broken, i.e. it hit an unrecoverable error, in the
+    //   last sync session and all of its entries were duplicated in the
+    //   delete journals. On restart, entries are recreated from downloads and
+    //   recreation calls UpdateDeleteJournals() to remove live entries from
+    //   the delete journals, so that only deleted entries remain in them.
+ if (it != kernel_->delete_journals_->end()) {
+ kernel_->delete_journals_to_purge_->insert((*it)->ref(META_HANDLE));
+ delete *it;
+ kernel_->delete_journals_->erase(it);
+ } else if (was_deleted) {
+      // Note: |it| equals delete_journals_->end() in this branch, so refer to
+      // |entry| directly instead of dereferencing |it|.
+      kernel_->delete_journals_to_purge_->insert(entry.ref(META_HANDLE));
+ }
+ }
+}
+
+void Directory::GetDeleteJournals(BaseTransaction* trans,
+ ModelType type,
+ EntryKernelSet* deleted_entries) {
+ ScopedKernelLock lock(this);
+ DCHECK(!passive_delete_journal_types_.Has(type));
+ for (IdsIndex::const_iterator it = kernel_->delete_journals_->begin();
+ it != kernel_->delete_journals_->end(); ++it) {
+ if ((*it)->GetServerModelType() == type ||
+ GetModelTypeFromSpecifics((*it)->ref(SPECIFICS)) == type) {
+ deleted_entries->insert(*it);
+ }
+ }
+ passive_delete_journal_types_.Put(type);
+}
+
+void Directory::PurgeDeleteJournals(BaseTransaction* trans,
+ const MetahandleSet& to_purge) {
+ ScopedKernelLock lock(this);
+ IdsIndex::const_iterator it = kernel_->delete_journals_->begin();
+ while (it != kernel_->delete_journals_->end()) {
+ int64 handle = (*it)->ref(META_HANDLE);
+ if (to_purge.count(handle)) {
+ delete *it;
+ kernel_->delete_journals_->erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ kernel_->delete_journals_to_purge_->insert(to_purge.begin(), to_purge.end());
+}
EntryKernel* Directory::GetEntryById(const Id& id) {
ScopedKernelLock lock(this);
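
For context, a minimal hypothetical consumer of the methods added above might look like the sketch below. ProcessJournaledBookmarkDeletes is an illustrative name, not part of this CL; the sketch assumes it sits in namespace syncer::syncable and uses only the signatures introduced in this hunk: GetDeleteJournals() returns the journaled deletions for a type under an open transaction, and PurgeDeleteJournals() drops the handled entries so they are also purged from the persisted journal on a later SaveChanges().

// Hypothetical usage sketch -- not part of this patch.
void ProcessJournaledBookmarkDeletes(Directory* dir, BaseTransaction* trans) {
  EntryKernelSet deleted_entries;
  // Collect journaled deletions for BOOKMARKS, the only type for which
  // IsDeleteJournalEnabled() returns true in this patch. The EntryKernel
  // pointers remain owned by the directory kernel.
  dir->GetDeleteJournals(trans, BOOKMARKS, &deleted_entries);

  MetahandleSet handled;
  for (EntryKernelSet::const_iterator it = deleted_entries.begin();
       it != deleted_entries.end(); ++it) {
    // ... apply the journaled deletion to the local model here ...
    handled.insert((*it)->ref(META_HANDLE));
  }

  // Drop the handled entries from the in-memory journal; their handles are
  // queued in delete_journals_to_purge_ and removed from the database on the
  // next SaveChanges().
  dir->PurgeDeleteJournals(trans, handled);
}
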
@@ -466,7 +545,8 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
// Skip over false positives; it happens relatively infrequently.
if (!entry->is_dirty())
continue;
- snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
+ snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
+ new EntryKernel(*entry));
DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
// We don't bother removing from the index here as we blow the entire thing
// in a moment, and it unnecessarily complicates iteration.
@@ -488,6 +568,22 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
snapshot->kernel_info_status = kernel_->info_status;
// This one we reset on failure.
kernel_->info_status = KERNEL_SHARE_INFO_VALID;
+
+ // Move passive delete journals to snapshot. Will copy back if snapshot fails
+ // to save.
+  IdsIndex::const_iterator it = kernel_->delete_journals_->begin();
+ while (it != kernel_->delete_journals_->end()) {
+ if (passive_delete_journal_types_.Has((*it)->GetServerModelType()) ||
+ passive_delete_journal_types_.Has(GetModelTypeFromSpecifics(
+ (*it)->ref(SPECIFICS)))) {
+ snapshot->delete_journals.insert(*it);
+ kernel_->delete_journals_->erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ snapshot->delete_journals_to_purge.swap(
+ *kernel_->delete_journals_to_purge_);
}
bool Directory::SaveChanges() {
@@ -518,7 +614,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
// Now drop everything we can out of memory.
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+ kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
MetahandlesIndex::iterator found =
kernel_->metahandles_index->find(&kernel_->needle);
EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
@@ -616,7 +712,7 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
// that SaveChanges will at least try again later.
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+ kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
MetahandlesIndex::iterator found =
kernel_->metahandles_index->find(&kernel_->needle);
if (found != kernel_->metahandles_index->end()) {
@@ -626,6 +722,20 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
snapshot.metahandles_to_purge.end());
+
+ // Restore delete journals.
+ for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
+ i != snapshot.delete_journals.end(); ++i) {
+ kernel_->needle.put(ID, (*i)->ref(ID));
+ if (kernel_->delete_journals_->find(&kernel_->needle) ==
+ kernel_->delete_journals_->end()) {
+ kernel_->delete_journals_->insert(new EntryKernel(**i));
+ }
+ }
+ kernel_->delete_journals_to_purge_->insert(
+ snapshot.delete_journals_to_purge.begin(),
+ snapshot.delete_journals_to_purge.end());
+
}
void Directory::GetDownloadProgress(
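
The snapshot changes above follow a move-then-restore-on-failure pattern: TakeSnapshotForSaveChanges() moves the passive-type delete journals (and the purge set) into the SaveChangesSnapshot, which owns them while the save is in flight, and HandleSaveChangesFailure() re-inserts deep copies because the snapshot destructor still frees its own elements (see the STLDeleteElements calls in ~SaveChangesSnapshot). The sketch below is a stripped-down, hypothetical illustration of that pattern, not code from this CL, and it omits the passive-type filtering:

// Illustrative only; Entry, Snapshot and Kernel here are stand-ins, not the
// real sync types.
#include <set>

struct Entry { long handle; };

struct Snapshot {
  std::set<Entry*> journals;  // owns its elements once they are moved in
  ~Snapshot() {
    for (std::set<Entry*>::iterator it = journals.begin();
         it != journals.end(); ++it)
      delete *it;
  }
};

struct Kernel {
  std::set<Entry*> live_journals;  // owned by the kernel

  ~Kernel() {
    for (std::set<Entry*>::iterator it = live_journals.begin();
         it != live_journals.end(); ++it)
      delete *it;
  }

  void TakeSnapshot(Snapshot* snap) {
    // Move ownership of the live journal into the snapshot; the live set is
    // left empty while the save is in flight.
    snap->journals.swap(live_journals);
  }

  void HandleSaveFailure(const Snapshot& snap) {
    // The snapshot still deletes its elements in its destructor, so restore
    // deep copies instead of re-inserting the same pointers.
    for (std::set<Entry*>::const_iterator it = snap.journals.begin();
         it != snap.journals.end(); ++it)
      live_journals.insert(new Entry(**it));
  }
};
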
@@ -1268,6 +1378,16 @@ EntryKernel* Directory::GetPossibleFirstChild(
return NULL;
}
+/* static */
+bool Directory::IsDeleteJournalEnabled(ModelType type) {
+ switch (type) {
+ case BOOKMARKS:
+ return true;
+ default:
+ return false;
+ }
+}
+
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
: scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}