Chromium Code Reviews

Unified Diff: chrome/browser/sync/syncable/directory_backing_store.cc

Issue 6104003: sync: use progress markers instead of timestamps during GetUpdates (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: For review Created 9 years, 11 months ago
Index: chrome/browser/sync/syncable/directory_backing_store.cc
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index feefd91b7e6a3dc9deb189d46f19bd9247124b1d..d6e74c77f39d7ef05bd635feb250fe7fbfb00721 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -44,7 +44,7 @@ static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
-const int32 kCurrentDBVersion = 74;
+const int32 kCurrentDBVersion = 75;
namespace {
@@ -411,11 +411,13 @@ bool DirectoryBackingStore::SaveChanges(
for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
SQLStatement op;
op.prepare(dbhandle, "INSERT OR REPLACE INTO models (model_id, "
- "last_download_timestamp, initial_sync_ended) VALUES ( ?, ?, ?)");
+ "progress_marker, initial_sync_ended) VALUES ( ?, ?, ?)");
// We persist not ModelType but rather a protobuf-derived ID.
string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
+ string progress_marker;
+ info.download_progress[i].SerializeToString(&progress_marker);
op.bind_blob(0, model_id.data(), model_id.length());
- op.bind_int64(1, info.last_download_timestamp[i]);
+ op.bind_blob(1, progress_marker.data(), progress_marker.length());
op.bind_bool(2, info.initial_sync_ended[i]);
if (!(SQLITE_DONE == op.step() &&
@@ -473,11 +475,18 @@ DirOpenResult DirectoryBackingStore::InitializeTables() {
version_on_disk = 73;
}
+ // Version 74 added state for the autofill migration.
if (version_on_disk == 73) {
if (MigrateVersion73To74())
version_on_disk = 74;
}
+ // Version 75 migrated from int64-based timestamps to per-datatype tokens.
+ if (version_on_disk == 74) {
+ if (MigrateVersion74To75())
+ version_on_disk = 75;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -606,14 +615,14 @@ bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
{
SQLStatement query;
query.prepare(load_dbhandle_,
- "SELECT model_id, last_download_timestamp, initial_sync_ended "
+ "SELECT model_id, progress_marker, initial_sync_ended "
"FROM models");
while (SQLITE_ROW == query.step()) {
- string model_id;
- query.column_blob_as_string(0, &model_id);
- ModelType type = ModelIdToModelTypeEnum(model_id);
+ ModelType type = ModelIdToModelTypeEnum(query.column_blob(0),
+ query.column_bytes(0));
if (type != UNSPECIFIED) {
- info->kernel_info.last_download_timestamp[type] = query.column_int64(1);
+ info->kernel_info.download_progress[type].ParseFromArray(
+ query.column_blob(1), query.column_bytes(1));
info->kernel_info.initial_sync_ended[type] = query.column_bool(2);
}
}
@@ -709,14 +718,15 @@ void DirectoryBackingStore::DropAllTables() {
SafeDropTable("share_version");
SafeDropTable("extended_attributes");
SafeDropTable("models");
+ SafeDropTable("temp_models");
needs_column_refresh_ = false;
}
// static
ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
- const string& model_id) {
+ const void* data, int size) {
sync_pb::EntitySpecifics specifics;
- if (!specifics.ParseFromString(model_id))
+ if (!specifics.ParseFromArray(data, size))
return syncable::UNSPECIFIED;
return syncable::GetModelTypeFromSpecifics(specifics);
}
@@ -900,7 +910,7 @@ bool DirectoryBackingStore::MigrateVersion68To69() {
// were removed from the share_info table. They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
- if (SQLITE_DONE != CreateModelsTable())
+ if (SQLITE_DONE != CreateV71ModelsTable())
return false;
// Move data from the old share_info columns to the new models table.
@@ -950,12 +960,15 @@ bool DirectoryBackingStore::MigrateVersion70To71() {
}
bool DirectoryBackingStore::MigrateVersion71To72() {
+ // Version 72 removed a table 'extended_attributes', whose
+ // contents didn't matter.
SafeDropTable("extended_attributes");
SetVersion(72);
return true;
}
bool DirectoryBackingStore::MigrateVersion72To73() {
+ // Version 73 added one column to the table 'share_info': notification_state
int result =
ExecQuery(load_dbhandle_,
"ALTER TABLE share_info ADD COLUMN notification_state BLOB");
@@ -966,6 +979,13 @@ bool DirectoryBackingStore::MigrateVersion72To73() {
}
bool DirectoryBackingStore::MigrateVersion73To74() {
+ // Version 74 added the following columns to the table 'share_info':
+ // autofill_migration_state
+ // bookmarks_added_during_autofill_migration
+ // autofill_migration_time
+ // autofill_entries_added_during_migration
+ // autofill_profiles_added_during_migration
+
int result =
ExecQuery(load_dbhandle_,
"ALTER TABLE share_info ADD COLUMN autofill_migration_state "
@@ -1008,6 +1028,60 @@ bool DirectoryBackingStore::MigrateVersion73To74() {
return true;
}
+bool DirectoryBackingStore::MigrateVersion74To75() {
+ // In version 74, there was a table 'models':
+ // blob model_id (entity specifics, primary key)
+ // int last_download_timestamp
+ // boolean initial_sync_ended
+ // In version 75, we deprecated the integer-valued last_download_timestamp,
+ // using instead a protobuf-valued progress_marker field:
+ // blob progress_marker
+ // The progress_marker values are initialized from the value of
+ // last_download_timestamp, thereby preserving the download state.
+
+ // Move aside the old table and create a new empty one at the current schema.
+ if (SQLITE_DONE != ExecQuery(load_dbhandle_,
+ "ALTER TABLE models RENAME TO temp_models")) {
+ return false;
+ }
+ if (!CreateModelsTable())
+ return false;
+
+ SQLStatement query;
+ query.prepare(load_dbhandle_,
+ "SELECT model_id, last_download_timestamp, initial_sync_ended "
+ "FROM temp_models");
+ while (SQLITE_ROW == query.step()) {
+ ModelType type = ModelIdToModelTypeEnum(query.column_blob(0),
+ query.column_bytes(0));
+ if (type != UNSPECIFIED) {
tim (not reviewing) 2011/01/11 19:14:23 TOP_LEVEL_FOLDER?
ncarter (slow) 2011/01/13 00:06:13 Added, thanks. The implementation of the called
+ // Set the |timestamp_token_for_migration| on a new
+ // DataTypeProgressMarker, using the old value of last_download_timestamp.
+ // The server will turn this into a real token on our behalf the next
+ // time we check for updates.
+ sync_pb::DataTypeProgressMarker progress_marker;
+ progress_marker.set_data_type_id(
+ GetExtensionFieldNumberFromModelType(type));
+ progress_marker.set_timestamp_token_for_migration(query.column_int64(1));
+ std::string progress_blob;
+ progress_marker.SerializeToString(&progress_blob);
+
+ SQLStatement update;
+ update.prepare(load_dbhandle_, "INSERT INTO models (model_id, "
+ "progress_marker, initial_sync_ended) VALUES (?, ?, ?)");
+ update.bind_blob(0, query.column_blob(0), query.column_bytes(0));
+ update.bind_blob(1, progress_blob.data(), progress_blob.length());
+ update.bind_bool(2, query.column_bool(2));
+ if (SQLITE_DONE != update.step())
+ return false;
+ }
+ }
+ // Drop the old table.
+ SafeDropTable("temp_models");
+
+ SetVersion(75);
+ return true;
+}
int DirectoryBackingStore::CreateTables() {
VLOG(1) << "First run, creating tables";
@@ -1106,14 +1180,26 @@ int DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
return ExecQuery(load_dbhandle_, query.c_str());
}
+int DirectoryBackingStore::CreateV71ModelsTable() {
+ // This is an old schema for the Models table, used from versions 71 to 74.
+ return ExecQuery(load_dbhandle_,
+ "CREATE TABLE models ("
+ "model_id BLOB primary key, "
+ "last_download_timestamp INT, "
+ // Gets set if the syncer ever gets updates from the
+ // server and the server returns 0. Lets us detect the
+ // end of the initial sync.
+ "initial_sync_ended BOOLEAN default 0)");
+}
+
int DirectoryBackingStore::CreateModelsTable() {
- // This is the current schema for the Models table, from version 71
+ // This is the current schema for the Models table, from version 75
// onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v70-v71 migration.
+ // the use of this function in the v74-v75 migration.
return ExecQuery(load_dbhandle_,
"CREATE TABLE models ("
"model_id BLOB primary key, "
- "last_download_timestamp INT, "
+ "progress_marker BLOB, "
// Gets set if the syncer ever gets updates from the
// server and the server returns 0. Lets us detect the
// end of the initial sync.
@@ -1124,9 +1210,8 @@ int DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
const char* name = is_temporary ? "temp_share_info" : "share_info";
string query = "CREATE TABLE ";
query.append(name);
- // This is the current schema for the ShareInfo table, from version 71
- // onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v70-v71 migration.
+ // This is the current schema for the ShareInfo table, from version 74
+ // onward.
query.append(" ("
"id TEXT primary key, "
"name TEXT, "
@@ -1151,9 +1236,7 @@ int DirectoryBackingStore::CreateShareInfoTableVersion71(
const char* name = is_temporary ? "temp_share_info" : "share_info";
string query = "CREATE TABLE ";
query.append(name);
- // This is the current schema for the ShareInfo table, from version 71
- // onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v70-v71 migration.
+ // This is the schema for the ShareInfo table used from versions 71 to 72.
query.append(" ("
"id TEXT primary key, "
"name TEXT, "
