Chromium Code Reviews

Side by Side Diff: sync/syncable/directory_backing_store.cc

Issue 11441026: [Sync] Add support for loading, updating and querying delete journals in (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "sync/syncable/directory_backing_store.h" 5 #include "sync/syncable/directory_backing_store.h"
6 6
7 #include "build/build_config.h" 7 #include "build/build_config.h"
8 8
9 #include <limits> 9 #include <limits>
10 10
(...skipping 22 matching lines...)
33 33
34 namespace syncer { 34 namespace syncer {
35 namespace syncable { 35 namespace syncable {
36 36
37 // This just has to be big enough to hold an UPDATE or INSERT statement that 37 // This just has to be big enough to hold an UPDATE or INSERT statement that
38 // modifies all the columns in the entry table. 38 // modifies all the columns in the entry table.
39 static const string::size_type kUpdateStatementBufferSize = 2048; 39 static const string::size_type kUpdateStatementBufferSize = 2048;
40 40
41 // Increment this version whenever updating DB tables. 41 // Increment this version whenever updating DB tables.
42 extern const int32 kCurrentDBVersion; // Global visibility for our unittest. 42 extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
43 const int32 kCurrentDBVersion = 83; 43 const int32 kCurrentDBVersion = 84;
44 44
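For context, the version constant above is persisted in the single-row share_version table that CreateTables() sets up further down (id VARCHAR(128) primary key, data INT). A minimal sketch of the helpers this relies on: SetVersion() is called by the migrations below, while GetVersion() and both queries shown here are assumptions, not part of this diff:

    // Sketch only: how the schema version is written by the migrations and
    // read back on load. The UPDATE targets the one row inserted at
    // table-creation time; the SELECT used by GetVersion() is an assumption.
    void DirectoryBackingStore::SetVersion(int version) {
      sql::Statement s(db_->GetCachedStatement(
          SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
      s.BindInt(0, version);
      CHECK(s.Run());
    }

    int DirectoryBackingStore::GetVersion() {
      if (!db_->DoesTableExist("share_version"))
        return 0;
      sql::Statement s(db_->GetUniqueStatement("SELECT data FROM share_version"));
      return s.Step() ? s.ColumnInt(0) : 0;
    }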
45 // Iterate over the fields of |entry| and bind each to |statement| for 45 // Iterate over the fields of |entry| and bind each to |statement| for
 46 // updating. 46 // updating.
47 void BindFields(const EntryKernel& entry, 47 void BindFields(const EntryKernel& entry,
48 sql::Statement* statement) { 48 sql::Statement* statement) {
49 int index = 0; 49 int index = 0;
50 int i = 0; 50 int i = 0;
51 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) { 51 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
52 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i))); 52 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
53 } 53 }
(...skipping 111 matching lines...)
165 DirectoryBackingStore::DirectoryBackingStore(const string& dir_name, 165 DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
166 sql::Connection* db) 166 sql::Connection* db)
167 : db_(db), 167 : db_(db),
168 dir_name_(dir_name), 168 dir_name_(dir_name),
169 needs_column_refresh_(false) { 169 needs_column_refresh_(false) {
170 } 170 }
171 171
172 DirectoryBackingStore::~DirectoryBackingStore() { 172 DirectoryBackingStore::~DirectoryBackingStore() {
173 } 173 }
174 174
175 bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) { 175 bool DirectoryBackingStore::DeleteEntries(DeleteFrom from,
176 const MetahandleSet& handles) {
176 if (handles.empty()) 177 if (handles.empty())
177 return true; 178 return true;
178 179
179 sql::Statement statement(db_->GetCachedStatement( 180 sql::Statement statement;
181 // Call GetCachedStatement() separately to get different statements for
182 // different tables.
183 switch (from) {
184 case DELETE_FROM_METAS:
185 statement.Assign(db_->GetCachedStatement(
180 SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?")); 186 SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
187 break;
188 case DELETE_FROM_JOURNAL:
189 statement.Assign(db_->GetCachedStatement(
190 SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
191 break;
192 }
181 193
182 for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end(); 194 for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
183 ++i) { 195 ++i) {
184 statement.BindInt64(0, *i); 196 statement.BindInt64(0, *i);
185 if (!statement.Run()) 197 if (!statement.Run())
186 return false; 198 return false;
187 statement.Reset(true); 199 statement.Reset(true);
188 } 200 }
189 return true; 201 return true;
190 } 202 }
191 203
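The new DeleteFrom parameter selects which table the purge runs against. Its enum is declared in directory_backing_store.h, which is not part of this file's diff, so the declaration below is an assumed sketch that simply mirrors the two call sites in SaveChanges() further down:

    // Assumed declaration (the header is not shown in this diff):
    enum DeleteFrom {
      DELETE_FROM_METAS,    // purge rows from the metas table
      DELETE_FROM_JOURNAL,  // purge rows from the deleted_metas table
    };

    // The two call sites, as they appear in SaveChanges() below:
    if (!DeleteEntries(DELETE_FROM_METAS, snapshot.metahandles_to_purge))
      return false;
    if (!DeleteEntries(DELETE_FROM_JOURNAL, snapshot.delete_journals_to_purge))
      return false;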
192 bool DirectoryBackingStore::SaveChanges( 204 bool DirectoryBackingStore::SaveChanges(
193 const Directory::SaveChangesSnapshot& snapshot) { 205 const Directory::SaveChangesSnapshot& snapshot) {
194 DCHECK(CalledOnValidThread()); 206 DCHECK(CalledOnValidThread());
195 DCHECK(db_->is_open()); 207 DCHECK(db_->is_open());
196 208
197 // Back out early if there is nothing to write. 209 // Back out early if there is nothing to write.
198 bool save_info = 210 bool save_info =
199 (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status); 211 (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
200 if (snapshot.dirty_metas.size() < 1 && !save_info) 212 if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
213 snapshot.delete_journals.empty() &&
214 snapshot.delete_journals_to_purge.empty() && !save_info) {
201 return true; 215 return true;
216 }
202 217
203 sql::Transaction transaction(db_.get()); 218 sql::Transaction transaction(db_.get());
204 if (!transaction.Begin()) 219 if (!transaction.Begin())
205 return false; 220 return false;
206 221
222 PrepareSaveEntryStatement("metas", &save_meta_statment_);
207 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); 223 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
208 i != snapshot.dirty_metas.end(); ++i) { 224 i != snapshot.dirty_metas.end(); ++i) {
209 DCHECK(i->is_dirty()); 225 DCHECK((*i)->is_dirty());
210 if (!SaveEntryToDB(*i)) 226 if (!SaveEntryToDB(&save_meta_statment_, **i))
211 return false; 227 return false;
212 } 228 }
213 229
214 if (!DeleteEntries(snapshot.metahandles_to_purge)) 230 if (!DeleteEntries(DELETE_FROM_METAS, snapshot.metahandles_to_purge))
231 return false;
232
233 PrepareSaveEntryStatement("deleted_metas", &save_delete_journal_statment_);
234 for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
235 i != snapshot.delete_journals.end(); ++i) {
236 if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
237 return false;
238 }
239
240 if (!DeleteEntries(DELETE_FROM_JOURNAL, snapshot.delete_journals_to_purge))
215 return false; 241 return false;
216 242
217 if (save_info) { 243 if (save_info) {
218 const Directory::PersistedKernelInfo& info = snapshot.kernel_info; 244 const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
219 sql::Statement s1(db_->GetCachedStatement( 245 sql::Statement s1(db_->GetCachedStatement(
220 SQL_FROM_HERE, 246 SQL_FROM_HERE,
221 "UPDATE share_info " 247 "UPDATE share_info "
222 "SET store_birthday = ?, " 248 "SET store_birthday = ?, "
223 "next_id = ?, " 249 "next_id = ?, "
224 "notification_state = ?, " 250 "notification_state = ?, "
(...skipping 132 matching lines...)
357 if (MigrateVersion81To82()) 383 if (MigrateVersion81To82())
358 version_on_disk = 82; 384 version_on_disk = 82;
359 } 385 }
360 386
361 // Version 83 migration added transaction_version column per sync entry. 387 // Version 83 migration added transaction_version column per sync entry.
362 if (version_on_disk == 82) { 388 if (version_on_disk == 82) {
363 if (MigrateVersion82To83()) 389 if (MigrateVersion82To83())
364 version_on_disk = 83; 390 version_on_disk = 83;
365 } 391 }
366 392
393 // Version 84 migration added deleted_metas table.
394 if (version_on_disk == 83) {
395 if (MigrateVersion83To84())
396 version_on_disk = 84;
397 }
398
367 // If one of the migrations requested it, drop columns that aren't current. 399 // If one of the migrations requested it, drop columns that aren't current.
368 // It's only safe to do this after migrating all the way to the current 400 // It's only safe to do this after migrating all the way to the current
369 // version. 401 // version.
370 if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) { 402 if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
371 if (!RefreshColumns()) 403 if (!RefreshColumns())
372 version_on_disk = 0; 404 version_on_disk = 0;
373 } 405 }
374 406
375 // A final, alternative catch-all migration to simply re-sync everything. 407 // A final, alternative catch-all migration to simply re-sync everything.
376 // 408 //
(...skipping 76 matching lines...)
453 485
454 SafeDropTable("share_info"); 486 SafeDropTable("share_info");
455 if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info")) 487 if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
456 return false; 488 return false;
457 489
458 needs_column_refresh_ = false; 490 needs_column_refresh_ = false;
459 return true; 491 return true;
460 } 492 }
461 493
462 bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) { 494 bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
463 string select; 495 return LoadEntriesInternal("metas", entry_bucket);
464 select.reserve(kUpdateStatementBufferSize); 496 }
465 select.append("SELECT ");
466 AppendColumnList(&select);
467 select.append(" FROM metas ");
468 497
469 sql::Statement s(db_->GetUniqueStatement(select.c_str())); 498 bool DirectoryBackingStore::LoadDeleteJournals(
470 499 IdsIndex* delete_journals) {
471 while (s.Step()) { 500 return LoadEntriesInternal("deleted_metas", delete_journals);
472 scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
473 // A null kernel is evidence of external data corruption.
474 if (!kernel.get())
475 return false;
476 entry_bucket->insert(kernel.release());
477 }
478 return s.Succeeded();
479 } 501 }
480 502
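LoadDeleteJournals() reads the same column layout as LoadEntries(), just from the deleted_metas table, so both feed the common UnpackEntry() path. A hypothetical caller on the Directory load side (directory.cc is not in this diff; the backing_store pointer and the error code are assumptions):

    // Hypothetical usage on load: read live entries and the delete journal
    // into separate indices with the same code path.
    MetahandlesIndex metas;
    IdsIndex delete_journals;
    if (!backing_store->LoadEntries(&metas) ||
        !backing_store->LoadDeleteJournals(&delete_journals)) {
      return FAILED_DATABASE_CORRUPT;  // assumed error handling
    }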
481 bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) { 503 bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
482 { 504 {
483 sql::Statement s( 505 sql::Statement s(
484 db_->GetUniqueStatement( 506 db_->GetUniqueStatement(
485 "SELECT store_birthday, next_id, cache_guid, notification_state, " 507 "SELECT store_birthday, next_id, cache_guid, notification_state, "
486 "bag_of_chips " 508 "bag_of_chips "
487 "FROM share_info")); 509 "FROM share_info"));
488 if (!s.Step()) 510 if (!s.Step())
(...skipping 39 matching lines...)
528 550
529 info->max_metahandle = s.ColumnInt64(0); 551 info->max_metahandle = s.ColumnInt64(0);
530 552
531 // Verify only one row was returned. 553 // Verify only one row was returned.
532 DCHECK(!s.Step()); 554 DCHECK(!s.Step());
533 DCHECK(s.Succeeded()); 555 DCHECK(s.Succeeded());
534 } 556 }
535 return true; 557 return true;
536 } 558 }
537 559
538 bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) { 560 /* static */
539 // This statement is constructed at runtime, so we can't use 561 bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
540 // GetCachedStatement() to let the Connection cache it. We will construct 562 const EntryKernel& entry) {
541 // and cache it ourselves the first time this function is called. 563 save_statement->Reset(true);
542 if (!save_entry_statement_.is_valid()) { 564 BindFields(entry, save_statement);
543 string query; 565 return save_statement->Run();
544 query.reserve(kUpdateStatementBufferSize);
545 query.append("INSERT OR REPLACE INTO metas ");
546 string values;
547 values.reserve(kUpdateStatementBufferSize);
548 values.append("VALUES ");
549 const char* separator = "( ";
550 int i = 0;
551 for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
552 query.append(separator);
553 values.append(separator);
554 separator = ", ";
555 query.append(ColumnName(i));
556 values.append("?");
557 }
558 query.append(" ) ");
559 values.append(" )");
560 query.append(values);
561
562 save_entry_statement_.Assign(
563 db_->GetUniqueStatement(query.c_str()));
564 } else {
565 save_entry_statement_.Reset(true);
566 }
567
568 BindFields(entry, &save_entry_statement_);
569 return save_entry_statement_.Run();
570 } 566 }
571 567
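Because the statement is now passed in explicitly, the helper no longer touches member state, which is what allows the /* static */ annotation above. A usage sketch under that reading; dirty_entry and journal stand in for EntryKernel pointers taken from the snapshot:

    // Sketch: one prepared statement per destination table, reused for every
    // row written during a SaveChanges() pass.
    PrepareSaveEntryStatement("metas", &save_meta_statment_);
    if (!SaveEntryToDB(&save_meta_statment_, *dirty_entry))
      return false;
    PrepareSaveEntryStatement("deleted_metas", &save_delete_journal_statment_);
    if (!SaveEntryToDB(&save_delete_journal_statment_, *journal))
      return false;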
572 bool DirectoryBackingStore::DropDeletedEntries() { 568 bool DirectoryBackingStore::DropDeletedEntries() {
573 if (!db_->Execute("DELETE FROM metas " 569 if (!db_->Execute("DELETE FROM metas "
574 "WHERE is_del > 0 " 570 "WHERE is_del > 0 "
575 "AND is_unsynced < 1 " 571 "AND is_unsynced < 1 "
576 "AND is_unapplied_update < 1")) { 572 "AND is_unapplied_update < 1")) {
577 return false; 573 return false;
578 } 574 }
579 if (!db_->Execute("DELETE FROM metas " 575 if (!db_->Execute("DELETE FROM metas "
(...skipping 504 matching lines...)
1084 "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0")) 1080 "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
1085 return false; 1081 return false;
1086 sql::Statement update(db_->GetUniqueStatement( 1082 sql::Statement update(db_->GetUniqueStatement(
1087 "UPDATE metas SET transaction_version = 0")); 1083 "UPDATE metas SET transaction_version = 0"));
1088 if (!update.Run()) 1084 if (!update.Run())
1089 return false; 1085 return false;
1090 SetVersion(83); 1086 SetVersion(83);
1091 return true; 1087 return true;
1092 } 1088 }
1093 1089
1090 bool DirectoryBackingStore::MigrateVersion83To84() {
1091 // Version 84 added deleted_metas table to store deleted metas until we know
1092 // for sure that the deletions are persisted in native models.
1093 string query = "CREATE TABLE deleted_metas ";
1094 query.append(ComposeCreateTableColumnSpecs());
1095 if (!db_->Execute(query.c_str()))
1096 return false;
1097 SetVersion(84);
1098 return true;
1099 }
1100
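A quick way to sanity-check this migration (and the fresh-install path in CreateMetasTable() below) is to assert the presence of the new table after opening the database. This is a sketch only, not code from the patch; GetVersion() is an assumed helper:

    DCHECK_EQ(84, GetVersion());                   // assumed helper
    DCHECK(db_->DoesTableExist("metas"));
    DCHECK(db_->DoesTableExist("deleted_metas"));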
1094 bool DirectoryBackingStore::CreateTables() { 1101 bool DirectoryBackingStore::CreateTables() {
1095 DVLOG(1) << "First run, creating tables"; 1102 DVLOG(1) << "First run, creating tables";
1096 // Create two little tables share_version and share_info 1103 // Create two little tables share_version and share_info
1097 if (!db_->Execute( 1104 if (!db_->Execute(
1098 "CREATE TABLE share_version (" 1105 "CREATE TABLE share_version ("
1099 "id VARCHAR(128) primary key, data INT)")) { 1106 "id VARCHAR(128) primary key, data INT)")) {
1100 return false; 1107 return false;
1101 } 1108 }
1102 1109
1103 { 1110 {
(...skipping 58 matching lines...)
1162 s.BindBlob(2, ord.data(), ord.length()); 1169 s.BindBlob(2, ord.data(), ord.length());
1163 1170
1164 if (!s.Run()) 1171 if (!s.Run())
1165 return false; 1172 return false;
1166 } 1173 }
1167 1174
1168 return true; 1175 return true;
1169 } 1176 }
1170 1177
1171 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) { 1178 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1172 const char* name = is_temporary ? "temp_metas" : "metas";
1173 string query = "CREATE TABLE "; 1179 string query = "CREATE TABLE ";
1174 query.append(name); 1180 query.append(is_temporary ? "temp_metas" : "metas");
1181 query.append(ComposeCreateTableColumnSpecs());
1182 if (!db_->Execute(query.c_str()))
1183 return false;
1184
1185 // Create a deleted_metas table to save copies of deleted metas until the
1186 // deletions are persisted. For simplicity, don't try to migrate existing
1187 // data because it's rarely used.
1188 SafeDropTable("deleted_metas");
1189 query = "CREATE TABLE deleted_metas ";
1175 query.append(ComposeCreateTableColumnSpecs()); 1190 query.append(ComposeCreateTableColumnSpecs());
1176 return db_->Execute(query.c_str()); 1191 return db_->Execute(query.c_str());
1177 } 1192 }
1178 1193
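SafeDropTable() is defined elsewhere in this file; the sketch below shows the shape it is assumed to have, which is what makes the unconditional drop-and-recreate of deleted_metas above safe whether or not the table already exists:

    // Assumed shape (not part of this diff): a drop that tolerates a
    // missing table, so CreateMetasTable() stays idempotent.
    bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
      string query = "DROP TABLE IF EXISTS ";
      query.append(table_name);
      return db_->Execute(query.c_str());
    }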
1179 bool DirectoryBackingStore::CreateV71ModelsTable() { 1194 bool DirectoryBackingStore::CreateV71ModelsTable() {
1180 // This is an old schema for the Models table, used from versions 71 to 74. 1195 // This is an old schema for the Models table, used from versions 71 to 74.
1181 return db_->Execute( 1196 return db_->Execute(
1182 "CREATE TABLE models (" 1197 "CREATE TABLE models ("
1183 "model_id BLOB primary key, " 1198 "model_id BLOB primary key, "
1184 "last_download_timestamp INT, " 1199 "last_download_timestamp INT, "
(...skipping 79 matching lines...)
1264 it != index.end(); ++it) { 1279 it != index.end(); ++it) {
1265 EntryKernel* entry = *it; 1280 EntryKernel* entry = *it;
1266 bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end); 1281 bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
1267 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end); 1282 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
1268 bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end); 1283 bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
1269 is_ok = is_ok && prev_exists && parent_exists && next_exists; 1284 is_ok = is_ok && prev_exists && parent_exists && next_exists;
1270 } 1285 }
1271 return is_ok; 1286 return is_ok;
1272 } 1287 }
1273 1288
1289 template<class T>
1290 bool DirectoryBackingStore::LoadEntriesInternal(const std::string& table,
1291 T* bucket) {
1292 string select;
1293 select.reserve(kUpdateStatementBufferSize);
1294 select.append("SELECT ");
1295 AppendColumnList(&select);
1296 select.append(" FROM " + table);
1297
1298 sql::Statement s(db_->GetUniqueStatement(select.c_str()));
1299
1300 while (s.Step()) {
1301 scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
1302 // A null kernel is evidence of external data corruption.
1303 if (!kernel.get())
1304 return false;
1305 bucket->insert(kernel.release());
1306 }
1307 return s.Succeeded();
1308 }
1309
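LoadEntriesInternal() is templated only on the container type; both containers it is instantiated with are sets of EntryKernel*, differing just in their comparator, so one implementation covers metas and deleted_metas. The typedefs below are an assumed sketch of what the syncable headers declare; the comparator names are placeholders:

    // Assumed index shapes (declared in the syncable headers, not here).
    typedef std::set<EntryKernel*, CompareByMetahandle> MetahandlesIndex;  // keyed by metahandle
    typedef std::set<EntryKernel*, CompareById> IdsIndex;                  // keyed by sync ID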
1310 void DirectoryBackingStore::PrepareSaveEntryStatement(
1311 const std::string& table, sql::Statement* save_statement) {
1312 if (save_statement->is_valid())
1313 return;
1314
1315 string query;
1316 query.reserve(kUpdateStatementBufferSize);
1317 query.append("INSERT OR REPLACE INTO " + table);
1318 string values;
1319 values.reserve(kUpdateStatementBufferSize);
1320 values.append(" VALUES ");
1321 const char* separator = "( ";
1322 int i = 0;
1323 for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
1324 query.append(separator);
1325 values.append(separator);
1326 separator = ", ";
1327 query.append(ColumnName(i));
1328 values.append("?");
1329 }
1330 query.append(" ) ");
1331 values.append(" )");
1332 query.append(values);
 1333 save_statement->Assign(
 1334 db_->GetUniqueStatement(query.c_str()));
1335 }
1336
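For reference, the statement assembled above ends up with the following general shape; the column names are illustrative, since the real list is generated by ColumnName() over BEGIN_FIELDS..FIELD_COUNT:

    // Illustrative result of PrepareSaveEntryStatement("deleted_metas", ...):
    //   INSERT OR REPLACE INTO deleted_metas ( metahandle, base_version, ... )
    //   VALUES ( ?, ?, ... )
    // One '?' is emitted per column so that BindFields() in SaveEntryToDB()
    // can bind the entry's fields positionally before Run(). The early return
    // on is_valid() means the statement is compiled only once per table.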
1274 } // namespace syncable 1337 } // namespace syncable
1275 } // namespace syncer 1338 } // namespace syncer