| Index: sync/syncable/directory_backing_store.cc
|
| diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
|
| index a6095279c86a37dd3aa0ccaee513f458c5d410bf..70a68206f9e8b4eb396e2ae8214d7e1cf145d4dc 100644
|
| --- a/sync/syncable/directory_backing_store.cc
|
| +++ b/sync/syncable/directory_backing_store.cc
|
| @@ -15,6 +15,7 @@
|
| #include "base/logging.h"
|
| #include "base/metrics/histogram.h"
|
| #include "base/rand_util.h"
|
| +#include "base/sha1.h"
|
| #include "base/stl_util.h"
|
| #include "base/string_number_conversions.h"
|
| #include "base/stringprintf.h"
|
| @@ -40,7 +41,7 @@ static const string::size_type kUpdateStatementBufferSize = 2048;
|
|
|
| // Increment this version whenever updating DB tables.
|
| extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
|
| -const int32 kCurrentDBVersion = 85;
|
| +const int32 kCurrentDBVersion = 86;
|
|
|
| // Iterate over the fields of |entry| and bind each to |statement| for
|
| // updating. Returns the number of args bound.
|
| @@ -65,13 +66,18 @@ void BindFields(const EntryKernel& entry,
|
| for ( ; i < STRING_FIELDS_END; ++i) {
|
| statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
|
| }
|
| - std::string temp;
|
| for ( ; i < PROTO_FIELDS_END; ++i) {
|
| + std::string temp;
|
| entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
|
| statement->BindBlob(index++, temp.data(), temp.length());
|
| }
|
| - for( ; i < ORDINAL_FIELDS_END; ++i) {
|
| - temp = entry.ref(static_cast<OrdinalField>(i)).ToInternalValue();
|
| + for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
|
| + const std::string& temp =
|
| + entry.ref(static_cast<UniquePositionField>(i)).ToInternalValue();
|
| + statement->BindBlob(index++, temp.data(), temp.length());
|
| + }
|
| + for ( ; i < BYTES_FIELDS_END; ++i) {
|
| + const std::string& temp = entry.ref(static_cast<BytesField>(i));
|
| statement->BindBlob(index++, temp.data(), temp.length());
|
| }
|
| }
|
| @@ -105,19 +111,25 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
|
| kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
|
| statement->ColumnBlob(i), statement->ColumnByteLength(i));
|
| }
|
| - for( ; i < ORDINAL_FIELDS_END; ++i) {
|
| + for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
|
| std::string temp;
|
| statement->ColumnBlobAsString(i, &temp);
|
| - NodeOrdinal unpacked_ord(temp);
|
| -
|
| - // Its safe to assume that an invalid ordinal is a sign that
|
| - // some external corruption has occurred. Return NULL to force
|
| - // a re-download of the sync data.
|
| - if(!unpacked_ord.IsValid()) {
|
| - DVLOG(1) << "Unpacked invalid ordinal. Signaling that the DB is corrupt";
|
| - return scoped_ptr<EntryKernel>(NULL);
|
| +
|
| + // An empty value indicates an intentionally invalid position.
|
| + // If the string is non-empty, though, the position should be valid.
|
| + if (!temp.empty()) {
|
| + UniquePosition unpacked_pos = UniquePosition::FromBytes(temp);
|
| + if(!unpacked_pos.IsValid()) {
|
| + DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
|
| + return scoped_ptr<EntryKernel>(NULL);
|
| + }
|
| + kernel->mutable_ref(static_cast<UniquePositionField>(i)) = unpacked_pos;
|
| }
|
| - kernel->mutable_ref(static_cast<OrdinalField>(i)) = unpacked_ord;
|
| + }
|
| + for ( ; i < BYTES_FIELDS_END; ++i) {
|
| + std::string temp;
|
| + statement->ColumnBlobAsString(i, &temp);
|
| + kernel->put(static_cast<BytesField>(i), temp);
|
| }
|
| return kernel.Pass();
|
| }
|
| @@ -377,6 +389,13 @@ bool DirectoryBackingStore::InitializeTables() {
|
| version_on_disk = 85;
|
| }
|
|
|
| + // Version 86 migration converts bookmarks to the unique positioning system.
|
| + // It also introduces a new field to store a unique ID for each bookmark.
|
| + if (version_on_disk == 85) {
|
| + if (MigrateVersion85To86())
|
| + version_on_disk = 86;
|
| + }
|
| +
|
| // If one of the migrations requested it, drop columns that aren't current.
|
| // It's only safe to do this after migrating all the way to the current
|
| // version.
|
| @@ -975,7 +994,8 @@ bool DirectoryBackingStore::MigrateVersion76To77() {
|
| #if defined(OS_WIN)
|
| // On Windows, we used to store timestamps in FILETIME format (100s of
|
| // ns since Jan 1, 1601). Magic numbers taken from
|
| -// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
|
| +// http://stackoverflow.com/questions/5398557/
|
| +// java-library-for-dealing-with-win32-filetime
|
| // .
|
| #define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
|
| #else
|
| @@ -1100,7 +1120,7 @@ bool DirectoryBackingStore::MigrateVersion83To84() {
|
| }
|
|
|
| bool DirectoryBackingStore::MigrateVersion84To85() {
|
| - // Version 84 removes the initial_sync_ended flag.
|
| + // Version 85 removes the initial_sync_ended flag.
|
| if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
|
| return false;
|
| if (!CreateModelsTable())
|
| @@ -1116,6 +1136,98 @@ bool DirectoryBackingStore::MigrateVersion84To85() {
|
| return true;
|
| }
|
|
|
| +bool DirectoryBackingStore::MigrateVersion85To86() {
|
| + // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
|
| + // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
|
| + // and SERVER_UNIQUE_POSITION.
|
| + if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
|
| + "server_unique_position BLOB")) {
|
| + return false;
|
| + }
|
| + if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
|
| + "unique_position BLOB")) {
|
| + return false;
|
| + }
|
| + if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
|
| + "unique_bookmark_tag BLOB")) {
|
| + return false;
|
| + }
|
| +
|
| + sql::Statement get(db_->GetUniqueStatement(
|
| + "SELECT metahandle, specifics, id, server_ordinal_in_parent FROM metas"));
|
| +
|
| + // Note that we set both the local and server position based on the server
|
| + // position. We will lose any unsynced local position changes. Unfortunately,
|
| + // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
|
| + // can't be translated into a UNIQUE_POSITION in a reliable way.
|
| + sql::Statement put(db_->GetCachedStatement(
|
| + SQL_FROM_HERE,
|
| + "UPDATE metas SET"
|
| + " server_unique_position = ?,"
|
| + " unique_position = ?,"
|
| + " unique_bookmark_tag = ?"
|
| + " WHERE metahandle = ?"));
|
| +
|
| + while (get.Step()) {
|
| + int64 metahandle = get.ColumnInt64(0);
|
| +
|
| + sync_pb::EntitySpecifics specifics;
|
| + specifics.ParseFromArray(
|
| + get.ColumnBlob(1), get.ColumnByteLength(1));
|
| +
|
| + std::string id_string;
|
| + get.ColumnBlobAsString(2, &id_string);
|
| +
|
| + std::string ordinal_string;
|
| + get.ColumnBlobAsString(3, &ordinal_string);
|
| + NodeOrdinal ordinal(ordinal_string);
|
| +
|
| + std::string unique_bookmark_tag;
|
| +
|
| + UniquePosition position;
|
| + if (ShouldMaintainPosition(GetModelTypeFromSpecifics(specifics))) {
|
| + // Ideally, the tag would be based on the originator_cache_guid and
|
| + // the originator_item_id. That's something that all clients can
|
| + // agree on.
|
| + //
|
| + // Unfortunately, that information isn't available here. We will base our
|
| + // tag on the ID instead, which is slightly less consistent. For fully
|
| + // synced items, the tags will match, but there could be some mismatches
|
| + // if there happen to be unsynced items with local client IDs during the migration.
|
| + //
|
| + // To get everyone back into a synced state, we will update the bookmark
|
| + // tag according to the originator_cache_guid and originator_item_id when
|
| + // we see updates for this item. That should ensure that commonly
|
| + // modified items will end up with the proper tag values eventually.
|
| + // (|unique_bookmark_tag| is declared above; do not re-declare/shadow it here.)
|
| +
|
| + // FIXME: Include some sort of tag prefix here to avoid "collisions"?
|
| + std::string hash = base::SHA1HashString(id_string);
|
| +
|
| + CHECK(base::Base64Encode(hash, &unique_bookmark_tag));
|
| +
|
| + int64 int_position = NodeOrdinalToInt64(ordinal);
|
| + position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
|
| + } else {
|
| + // Leave bookmark_tag and position at their default (invalid) values.
|
| + }
|
| +
|
| + const std::string position_blob = position.ToInternalValue();
|
| + put.BindBlob(0, position_blob.data(), position_blob.length());
|
| + put.BindBlob(1, position_blob.data(), position_blob.length());
|
| + put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
|
| + put.BindInt64(3, metahandle);
|
| +
|
| + if (!put.Run())
|
| + return false;
|
| + put.Reset(true);
|
| + }
|
| +
|
| + SetVersion(86);
|
| + needs_column_refresh_ = true;
|
| + return true;
|
| +}
|
| +
|
| bool DirectoryBackingStore::CreateTables() {
|
| DVLOG(1) << "First run, creating tables";
|
| // Create two little tables share_version and share_info
|
| @@ -1178,13 +1290,10 @@ bool DirectoryBackingStore::CreateTables() {
|
| const int64 now = TimeToProtoTime(base::Time::Now());
|
| sql::Statement s(db_->GetUniqueStatement(
|
| "INSERT INTO metas "
|
| - "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) "
|
| - "VALUES ( \"r\", 1, 1, ?, ?, ?)"));
|
| + "( id, metahandle, is_dir, ctime, mtime ) "
|
| + "VALUES ( \"r\", 1, 1, ?, ? )"));
|
| s.BindInt64(0, now);
|
| s.BindInt64(1, now);
|
| - const std::string ord =
|
| - NodeOrdinal::CreateInitialOrdinal().ToInternalValue();
|
| - s.BindBlob(2, ord.data(), ord.length());
|
|
|
| if (!s.Run())
|
| return false;
|
| @@ -1307,10 +1416,10 @@ bool DirectoryBackingStore::VerifyReferenceIntegrity(
|
| for (MetahandlesIndex::const_iterator it = index.begin();
|
| it != index.end(); ++it) {
|
| EntryKernel* entry = *it;
|
| - bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
|
| bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
|
| - bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
|
| - is_ok = is_ok && prev_exists && parent_exists && next_exists;
|
| + if (!parent_exists) {
|
| + return false;
|
| + }
|
| }
|
| return is_ok;
|
| }
|
|
|