| Index: sync/syncable/directory_backing_store.cc
|
| diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
|
| index e9ba0acc57c0bd945b1010f3f230a288f64999cb..6918936ba930524ae2a5a255d8f2741729c6fa08 100644
|
| --- a/sync/syncable/directory_backing_store.cc
|
| +++ b/sync/syncable/directory_backing_store.cc
|
| @@ -40,7 +40,7 @@ static const string::size_type kUpdateStatementBufferSize = 2048;
|
|
|
| // Increment this version whenever updating DB tables.
|
| extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
|
| -const int32 kCurrentDBVersion = 83;
|
| +const int32 kCurrentDBVersion = 84;
|
|
|
| // Iterate over the fields of |entry| and bind each to |statement| for
|
| // updating. Returns the number of args bound.
|
| @@ -172,12 +172,21 @@ DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
|
| DirectoryBackingStore::~DirectoryBackingStore() {
|
| }
|
|
|
| -bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
|
| +bool DirectoryBackingStore::DeleteEntries(bool meta_delete,
|
| + const MetahandleSet& handles) {
|
| if (handles.empty())
|
| return true;
|
|
|
| - sql::Statement statement(db_->GetCachedStatement(
|
| - SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
|
| + sql::Statement statement;
|
| +  // Call GetCachedStatement() at two separate call sites so that each
|
| +  // table gets its own cached statement (the cache is keyed by SQL_FROM_HERE).
|
| + if (meta_delete) {
|
| + statement.Assign(db_->GetCachedStatement(
|
| + SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
|
| + } else {
|
| + statement.Assign(db_->GetCachedStatement(
|
| + SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
|
| + }
|
|
|
| for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
|
| ++i) {
|
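The rewritten DeleteEntries() above keeps one cached DELETE per table and reuses it for every handle in the set. For readers less familiar with that reuse pattern, here is a minimal standalone sketch of the same loop written against the raw SQLite C API instead of Chromium's sql::Statement wrapper (the function name is made up for the example):

    #include <set>
    #include <string>

    #include <sqlite3.h>

    // Deletes every handle in |handles| from |table| ("metas" or
    // "deleted_metas" in the patch above), reusing one prepared statement
    // for all of the rows.
    static bool DeleteHandles(sqlite3* db, const std::string& table,
                              const std::set<sqlite3_int64>& handles) {
      std::string sql = "DELETE FROM " + table + " WHERE metahandle = ?";
      sqlite3_stmt* stmt = NULL;
      if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, NULL) != SQLITE_OK)
        return false;
      bool ok = true;
      for (std::set<sqlite3_int64>::const_iterator i = handles.begin();
           i != handles.end() && ok; ++i) {
        sqlite3_bind_int64(stmt, 1, *i);           // Parameter indices are 1-based.
        ok = (sqlite3_step(stmt) == SQLITE_DONE);  // DELETE produces no rows.
        sqlite3_reset(stmt);                       // Ready for the next handle.
      }
      sqlite3_finalize(stmt);
      return ok;
    }
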
| @@ -197,21 +206,34 @@ bool DirectoryBackingStore::SaveChanges(
|
| // Back out early if there is nothing to write.
|
| bool save_info =
|
| (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
|
| - if (snapshot.dirty_metas.size() < 1 && !save_info)
|
| + if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
|
| + snapshot.delete_journals.empty() &&
|
| + snapshot.delete_journals_to_purge.empty() && !save_info)
|
| return true;
|
|
|
| sql::Transaction transaction(db_.get());
|
| if (!transaction.Begin())
|
| return false;
|
|
|
| + PrepareSaveEntryStatement("metas", &save_meta_statment_);
|
| for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
|
| i != snapshot.dirty_metas.end(); ++i) {
|
| - DCHECK(i->is_dirty());
|
| - if (!SaveEntryToDB(*i))
|
| + DCHECK((*i)->is_dirty());
|
| + if (!SaveEntryToDB(&save_meta_statment_, **i))
|
| + return false;
|
| + }
|
| +
|
| + if (!DeleteEntries(true, snapshot.metahandles_to_purge))
|
| + return false;
|
| +
|
| + PrepareSaveEntryStatement("deleted_metas", &save_delete_journal_statment_);
|
| + for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
|
| + i != snapshot.delete_journals.end(); ++i) {
|
| + if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
|
| return false;
|
| }
|
|
|
| - if (!DeleteEntries(snapshot.metahandles_to_purge))
|
| + if (!DeleteEntries(false, snapshot.delete_journals_to_purge))
|
| return false;
|
|
|
| if (save_info) {
|
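SaveChanges() wraps all of the writes above (dirty metas, purged handles, delete journals, purged journals) in a single sql::Transaction, so either every write lands or none of them do. A hedged sketch of that begin/commit-or-rollback shape using the raw SQLite C API; the demo table and the two-statement batch are illustrative only:

    #include <cstdio>

    #include <sqlite3.h>

    // Mirrors the SaveChanges() shape: begin a transaction, run every write,
    // and commit only if all of them succeeded; otherwise roll the batch back.
    static bool RunBatch(sqlite3* db, const char* const* statements, int count) {
      if (sqlite3_exec(db, "BEGIN", NULL, NULL, NULL) != SQLITE_OK)
        return false;
      bool ok = true;
      for (int i = 0; i < count && ok; ++i)
        ok = sqlite3_exec(db, statements[i], NULL, NULL, NULL) == SQLITE_OK;
      sqlite3_exec(db, ok ? "COMMIT" : "ROLLBACK", NULL, NULL, NULL);
      return ok;
    }

    int main() {
      sqlite3* db = NULL;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE metas_demo (metahandle INTEGER PRIMARY KEY)",
                   NULL, NULL, NULL);
      const char* batch[] = {
        "INSERT OR REPLACE INTO metas_demo VALUES (1)",  // the dirty_metas pass
        "DELETE FROM metas_demo WHERE metahandle = 2",   // metahandles_to_purge
      };
      std::printf("saved: %s\n", RunBatch(db, batch, 2) ? "yes" : "no");
      sqlite3_close(db);
      return 0;
    }
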
| @@ -364,6 +386,12 @@ bool DirectoryBackingStore::InitializeTables() {
|
| version_on_disk = 83;
|
| }
|
|
|
| +  // Version 84 migration added the deleted_metas table.
|
| + if (version_on_disk == 83) {
|
| + if (MigrateVersion83To84())
|
| + version_on_disk = 84;
|
| + }
|
| +
|
| // If one of the migrations requested it, drop columns that aren't current.
|
| // It's only safe to do this after migrating all the way to the current
|
| // version.
|
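The migration block added above follows the file's stepwise pattern: each block upgrades exactly one version, and the chain keeps falling through until the database is current. A compressed sketch of that dispatch, with the MigrateVersionNToM() declarations standing in for the real member functions:

    // Hypothetical stand-ins for the real migration members; each performs a
    // single schema change and records the new version on success.
    bool MigrateVersion82To83();
    bool MigrateVersion83To84();

    bool UpgradeToCurrent(int version_on_disk) {
      if (version_on_disk == 82 && MigrateVersion82To83())
        version_on_disk = 83;
      // Version 84 migration added the deleted_metas table.
      if (version_on_disk == 83 && MigrateVersion83To84())
        version_on_disk = 84;
      // Anything other than the current version at this point means the chain
      // could not bring the database up to date (for example, the file was
      // written by a newer client).
      return version_on_disk == 84;  // 84 == kCurrentDBVersion after this patch.
    }
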
| @@ -460,22 +488,12 @@ bool DirectoryBackingStore::RefreshColumns() {
|
| }
|
|
|
| bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
|
| - string select;
|
| - select.reserve(kUpdateStatementBufferSize);
|
| - select.append("SELECT ");
|
| - AppendColumnList(&select);
|
| - select.append(" FROM metas ");
|
| -
|
| - sql::Statement s(db_->GetUniqueStatement(select.c_str()));
|
| + return LoadEntriesInternal("metas", entry_bucket);
|
| +}
|
|
|
| - while (s.Step()) {
|
| - scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
|
| - // A null kernel is evidence of external data corruption.
|
| - if (!kernel.get())
|
| - return false;
|
| - entry_bucket->insert(kernel.release());
|
| - }
|
| - return s.Succeeded();
|
| +bool DirectoryBackingStore::LoadDeleteJournals(
|
| + IdsIndex* delete_journals) {
|
| + return LoadEntriesInternal("deleted_metas", delete_journals);
|
| }
|
|
|
| bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
|
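LoadEntries() and the new LoadDeleteJournals() now share the templated LoadEntriesInternal() shown at the end of this patch, which reads every row of a named table into a caller-supplied index. A standalone sketch of that shape against the raw SQLite C API, with a hypothetical one-column Row and UnpackRow() standing in for EntryKernel and UnpackEntry():

    #include <string>

    #include <sqlite3.h>

    struct Row { sqlite3_int64 metahandle; };

    // Hypothetical stand-in for UnpackEntry(): returns NULL on a corrupt row.
    static Row* UnpackRow(sqlite3_stmt* stmt) {
      Row* row = new Row;
      row->metahandle = sqlite3_column_int64(stmt, 0);
      return row;
    }

    // Reads every row of |table| into |bucket|; works for any container with
    // an insert(Row*) member, which is how one loader can serve both metas
    // and deleted_metas.
    template <class Container>
    bool LoadTable(sqlite3* db, const std::string& table, Container* bucket) {
      std::string select = "SELECT metahandle FROM " + table;
      sqlite3_stmt* stmt = NULL;
      if (sqlite3_prepare_v2(db, select.c_str(), -1, &stmt, NULL) != SQLITE_OK)
        return false;
      int rc;
      while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
        Row* row = UnpackRow(stmt);
        if (!row) {                 // A null row means external data corruption.
          sqlite3_finalize(stmt);
          return false;
        }
        bucket->insert(row);
      }
      sqlite3_finalize(stmt);
      return rc == SQLITE_DONE;     // False if stepping stopped on an error.
    }
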
| @@ -535,38 +553,12 @@ bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
|
| return true;
|
| }
|
|
|
| -bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
|
| - // This statement is constructed at runtime, so we can't use
|
| - // GetCachedStatement() to let the Connection cache it. We will construct
|
| - // and cache it ourselves the first time this function is called.
|
| - if (!save_entry_statement_.is_valid()) {
|
| - string query;
|
| - query.reserve(kUpdateStatementBufferSize);
|
| - query.append("INSERT OR REPLACE INTO metas ");
|
| - string values;
|
| - values.reserve(kUpdateStatementBufferSize);
|
| - values.append("VALUES ");
|
| - const char* separator = "( ";
|
| - int i = 0;
|
| - for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
|
| - query.append(separator);
|
| - values.append(separator);
|
| - separator = ", ";
|
| - query.append(ColumnName(i));
|
| - values.append("?");
|
| - }
|
| - query.append(" ) ");
|
| - values.append(" )");
|
| - query.append(values);
|
| -
|
| - save_entry_statement_.Assign(
|
| - db_->GetUniqueStatement(query.c_str()));
|
| - } else {
|
| - save_entry_statement_.Reset(true);
|
| - }
|
| -
|
| - BindFields(entry, &save_entry_statement_);
|
| - return save_entry_statement_.Run();
|
| +/* static */
|
| +bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
|
| + const EntryKernel& entry) {
|
| + save_statement->Reset(true);
|
| + BindFields(entry, save_statement);
|
| + return save_statement->Run();
|
| }
|
|
|
| bool DirectoryBackingStore::DropDeletedEntries() {
|
| @@ -1091,6 +1083,17 @@ bool DirectoryBackingStore::MigrateVersion82To83() {
|
| return true;
|
| }
|
|
|
| +bool DirectoryBackingStore::MigrateVersion83To84() {
|
| +  // Version 84 added the deleted_metas table, which stores deleted entries
|
| +  // until we know for sure that the deletions are persisted in native models.
|
| + string query = "CREATE TABLE deleted_metas ";
|
| + query.append(ComposeCreateTableColumnSpecs());
|
| + if (!db_->Execute(query.c_str()))
|
| + return false;
|
| + SetVersion(84);
|
| + return true;
|
| +}
|
| +
|
| bool DirectoryBackingStore::CreateTables() {
|
| DVLOG(1) << "First run, creating tables";
|
| // Create two little tables share_version and share_info
|
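MigrateVersion83To84() only needs to create the new table with the same column list as metas and record version 84; no rows are copied. As a quick sanity check that the migration (or a fresh CreateMetasTable()) has produced the table, one can query SQLite's catalog. This helper is illustrative and not part of the patch:

    #include <sqlite3.h>

    // Returns true if |db| already has a deleted_metas table, i.e. the
    // version 84 migration (or first-run table creation) has run.
    static bool HasDeleteJournalTable(sqlite3* db) {
      const char* sql =
          "SELECT COUNT(*) FROM sqlite_master "
          "WHERE type = 'table' AND name = 'deleted_metas'";
      sqlite3_stmt* stmt = NULL;
      if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK)
        return false;
      bool present = sqlite3_step(stmt) == SQLITE_ROW &&
                     sqlite3_column_int(stmt, 0) > 0;
      sqlite3_finalize(stmt);
      return present;
    }
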
| @@ -1169,9 +1172,17 @@ bool DirectoryBackingStore::CreateTables() {
|
| }
|
|
|
| bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
|
| - const char* name = is_temporary ? "temp_metas" : "metas";
|
| string query = "CREATE TABLE ";
|
| - query.append(name);
|
| + query.append(is_temporary ? "temp_metas" : "metas");
|
| + query.append(ComposeCreateTableColumnSpecs());
|
| + if (!db_->Execute(query.c_str()))
|
| + return false;
|
| +
|
| +  // Also (re)create the deleted_metas table, which saves copies of deleted
|
| +  // entries until the deletions are persisted. For simplicity, any existing
|
| +  // contents are dropped rather than migrated; the data is rarely needed.
|
| + SafeDropTable("deleted_metas");
|
| + query = "CREATE TABLE deleted_metas ";
|
| query.append(ComposeCreateTableColumnSpecs());
|
| return db_->Execute(query.c_str());
|
| }
|
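CreateMetasTable() now builds metas (or temp_metas) and deleted_metas from the same ComposeCreateTableColumnSpecs() output, which is what keeps the journal table's schema identical to metas. A minimal sketch of that one-spec-many-tables idea; the two-column spec below is purely illustrative, not the real sync schema:

    #include <string>

    #include <sqlite3.h>

    // Stand-in for ComposeCreateTableColumnSpecs(): the real spec lists every
    // metas column; two columns are enough to show the pattern.
    static std::string ColumnSpecs() {
      return "(metahandle bigint primary key ON CONFLICT FAIL, "
             "base_version bigint)";
    }

    static bool CreateTableLike(sqlite3* db, const std::string& name) {
      std::string query = "CREATE TABLE " + name + " " + ColumnSpecs();
      return sqlite3_exec(db, query.c_str(), NULL, NULL, NULL) == SQLITE_OK;
    }

    int main() {
      sqlite3* db = NULL;
      sqlite3_open(":memory:", &db);
      // metas, temp_metas and deleted_metas all share one column layout.
      bool ok = CreateTableLike(db, "metas") &&
                CreateTableLike(db, "deleted_metas");
      sqlite3_close(db);
      return ok ? 0 : 1;
    }
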
| @@ -1271,5 +1282,53 @@ bool DirectoryBackingStore::VerifyReferenceIntegrity(
|
| return is_ok;
|
| }
|
|
|
| +template<class T>
|
| +bool DirectoryBackingStore::LoadEntriesInternal(const std::string& table,
|
| + T* bucket) {
|
| + string select;
|
| + select.reserve(kUpdateStatementBufferSize);
|
| + select.append("SELECT ");
|
| + AppendColumnList(&select);
|
| + select.append(" FROM " + table);
|
| +
|
| + sql::Statement s(db_->GetUniqueStatement(select.c_str()));
|
| +
|
| + while (s.Step()) {
|
| + scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
|
| + // A null kernel is evidence of external data corruption.
|
| + if (!kernel.get())
|
| + return false;
|
| + bucket->insert(kernel.release());
|
| + }
|
| + return s.Succeeded();
|
| +}
|
| +
|
| +void DirectoryBackingStore::PrepareSaveEntryStatement(
|
| + const std::string& table, sql::Statement* save_statement) {
|
| + if (save_statement->is_valid())
|
| + return;
|
| +
|
| + string query;
|
| + query.reserve(kUpdateStatementBufferSize);
|
| + query.append("INSERT OR REPLACE INTO " + table);
|
| + string values;
|
| + values.reserve(kUpdateStatementBufferSize);
|
| + values.append(" VALUES ");
|
| + const char* separator = "( ";
|
| + int i = 0;
|
| + for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
|
| + query.append(separator);
|
| + values.append(separator);
|
| + separator = ", ";
|
| + query.append(ColumnName(i));
|
| + values.append("?");
|
| + }
|
| + query.append(" ) ");
|
| + values.append(" )");
|
| + query.append(values);
|
| +  save_statement->Assign(
|
| +      db_->GetUniqueStatement(query.c_str()));
|
| +}
|
| +
|
| } // namespace syncable
|
| } // namespace syncer
|
|
|
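For reference, the string building in PrepareSaveEntryStatement() produces an INSERT OR REPLACE with one placeholder per column. The standalone helper below reproduces that construction over an explicit column list (the three column names are illustrative) and prints the resulting SQL:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Builds "INSERT OR REPLACE INTO <table>( c1, c2, ... ) VALUES ( ?, ?, ... )"
    // the same way PrepareSaveEntryStatement() walks BEGIN_FIELDS..FIELD_COUNT.
    static std::string BuildSaveQuery(const std::string& table,
                                      const std::vector<std::string>& columns) {
      std::string query = "INSERT OR REPLACE INTO " + table;
      std::string values = " VALUES ";
      const char* separator = "( ";
      for (std::size_t i = 0; i < columns.size(); ++i) {
        query.append(separator);
        values.append(separator);
        separator = ", ";
        query.append(columns[i]);
        values.append("?");
      }
      query.append(" ) ");
      values.append(" )");
      query.append(values);
      return query;
    }

    int main() {
      std::vector<std::string> columns;
      columns.push_back("metahandle");
      columns.push_back("base_version");
      columns.push_back("server_version");
      // Prints roughly:
      //   INSERT OR REPLACE INTO metas( metahandle, ... ) VALUES ( ?, ?, ? )
      std::printf("%s\n", BuildSaveQuery("metas", columns).c_str());
      return 0;
    }
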