OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/directory_backing_store.h" | 5 #include "sync/syncable/directory_backing_store.h" |
6 | 6 |
7 #include "build/build_config.h" | 7 #include "build/build_config.h" |
8 | 8 |
9 #include <limits> | 9 #include <limits> |
10 | 10 |
11 #include "base/base64.h" | 11 #include "base/base64.h" |
12 #include "base/debug/trace_event.h" | 12 #include "base/debug/trace_event.h" |
13 #include "base/file_util.h" | 13 #include "base/file_util.h" |
14 #include "base/hash_tables.h" | 14 #include "base/hash_tables.h" |
15 #include "base/logging.h" | 15 #include "base/logging.h" |
16 #include "base/metrics/histogram.h" | 16 #include "base/metrics/histogram.h" |
17 #include "base/rand_util.h" | 17 #include "base/rand_util.h" |
18 #include "base/stl_util.h" | 18 #include "base/stl_util.h" |
19 #include "base/string_number_conversions.h" | 19 #include "base/string_number_conversions.h" |
20 #include "base/stringprintf.h" | 20 #include "base/stringprintf.h" |
21 #include "base/time.h" | 21 #include "base/time.h" |
22 #include "sql/connection.h" | 22 #include "sql/connection.h" |
23 #include "sql/statement.h" | 23 #include "sql/statement.h" |
24 #include "sql/transaction.h" | 24 #include "sql/transaction.h" |
25 #include "sync/internal_api/public/base/node_ordinal.h" | |
25 #include "sync/protocol/bookmark_specifics.pb.h" | 26 #include "sync/protocol/bookmark_specifics.pb.h" |
26 #include "sync/protocol/sync.pb.h" | 27 #include "sync/protocol/sync.pb.h" |
27 #include "sync/syncable/syncable-inl.h" | 28 #include "sync/syncable/syncable-inl.h" |
28 #include "sync/syncable/syncable_columns.h" | 29 #include "sync/syncable/syncable_columns.h" |
29 #include "sync/util/time.h" | 30 #include "sync/util/time.h" |
30 | 31 |
31 using std::string; | 32 using std::string; |
32 | 33 |
33 namespace syncer { | 34 namespace syncer { |
34 namespace syncable { | 35 namespace syncable { |
35 | 36 |
36 // This just has to be big enough to hold an UPDATE or INSERT statement that | 37 // This just has to be big enough to hold an UPDATE or INSERT statement that |
37 // modifies all the columns in the entry table. | 38 // modifies all the columns in the entry table. |
38 static const string::size_type kUpdateStatementBufferSize = 2048; | 39 static const string::size_type kUpdateStatementBufferSize = 2048; |
39 | 40 |
40 // Increment this version whenever updating DB tables. | 41 // Increment this version whenever updating DB tables. |
41 extern const int32 kCurrentDBVersion; // Global visibility for our unittest. | 42 extern const int32 kCurrentDBVersion; // Global visibility for our unittest. |
42 const int32 kCurrentDBVersion = 80; | 43 const int32 kCurrentDBVersion = 81; |
43 | 44 |
44 // Iterate over the fields of |entry| and bind each to |statement| for | 45 // Iterate over the fields of |entry| and bind each to |statement| for |
45 // updating. Returns the number of args bound. | 46 // updating. Returns the number of args bound. |
46 void BindFields(const EntryKernel& entry, | 47 void BindFields(const EntryKernel& entry, |
47 sql::Statement* statement) { | 48 sql::Statement* statement) { |
48 int index = 0; | 49 int index = 0; |
49 int i = 0; | 50 int i = 0; |
50 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) { | 51 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) { |
51 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i))); | 52 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i))); |
52 } | 53 } |
53 for ( ; i < TIME_FIELDS_END; ++i) { | 54 for ( ; i < TIME_FIELDS_END; ++i) { |
54 statement->BindInt64(index++, | 55 statement->BindInt64(index++, |
55 TimeToProtoTime( | 56 TimeToProtoTime( |
56 entry.ref(static_cast<TimeField>(i)))); | 57 entry.ref(static_cast<TimeField>(i)))); |
57 } | 58 } |
58 for ( ; i < ID_FIELDS_END; ++i) { | 59 for ( ; i < ID_FIELDS_END; ++i) { |
59 statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_); | 60 statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_); |
60 } | 61 } |
61 for ( ; i < BIT_FIELDS_END; ++i) { | 62 for ( ; i < BIT_FIELDS_END; ++i) { |
62 statement->BindInt(index++, entry.ref(static_cast<BitField>(i))); | 63 statement->BindInt(index++, entry.ref(static_cast<BitField>(i))); |
63 } | 64 } |
64 for ( ; i < STRING_FIELDS_END; ++i) { | 65 for ( ; i < STRING_FIELDS_END; ++i) { |
65 statement->BindString(index++, entry.ref(static_cast<StringField>(i))); | 66 statement->BindString(index++, entry.ref(static_cast<StringField>(i))); |
66 } | 67 } |
67 std::string temp; | 68 std::string temp; |
68 for ( ; i < PROTO_FIELDS_END; ++i) { | 69 for ( ; i < PROTO_FIELDS_END; ++i) { |
69 entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp); | 70 entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp); |
70 statement->BindBlob(index++, temp.data(), temp.length()); | 71 statement->BindBlob(index++, temp.data(), temp.length()); |
71 } | 72 } |
73 for( ; i < ORDINAL_FIELDS_END; ++i) { | |
74 temp = entry.ref(static_cast<OrdinalField>(i)).ToInternalValue(); | |
75 statement->BindBlob(index++, temp.data(), temp.length()); | |
76 } | |
72 } | 77 } |
73 | 78 |
74 // The caller owns the returned EntryKernel*. Assumes the statement currently | 79 // The caller owns the returned EntryKernel*. Assumes the statement currently |
75 // points to a valid row in the metas table. | 80 // points to a valid row in the metas table. Returns null to indicate that |
81 // it detected a corruption in the data on unpacking. | |
76 EntryKernel* UnpackEntry(sql::Statement* statement) { | 82 EntryKernel* UnpackEntry(sql::Statement* statement) { |
77 EntryKernel* kernel = new EntryKernel(); | 83 EntryKernel* kernel = new EntryKernel(); |
78 DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT)); | 84 DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT)); |
79 int i = 0; | 85 int i = 0; |
80 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) { | 86 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) { |
81 kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i)); | 87 kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i)); |
82 } | 88 } |
83 for ( ; i < TIME_FIELDS_END; ++i) { | 89 for ( ; i < TIME_FIELDS_END; ++i) { |
84 kernel->put(static_cast<TimeField>(i), | 90 kernel->put(static_cast<TimeField>(i), |
85 ProtoTimeToTime(statement->ColumnInt64(i))); | 91 ProtoTimeToTime(statement->ColumnInt64(i))); |
86 } | 92 } |
87 for ( ; i < ID_FIELDS_END; ++i) { | 93 for ( ; i < ID_FIELDS_END; ++i) { |
88 kernel->mutable_ref(static_cast<IdField>(i)).s_ = | 94 kernel->mutable_ref(static_cast<IdField>(i)).s_ = |
89 statement->ColumnString(i); | 95 statement->ColumnString(i); |
90 } | 96 } |
91 for ( ; i < BIT_FIELDS_END; ++i) { | 97 for ( ; i < BIT_FIELDS_END; ++i) { |
92 kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i))); | 98 kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i))); |
93 } | 99 } |
94 for ( ; i < STRING_FIELDS_END; ++i) { | 100 for ( ; i < STRING_FIELDS_END; ++i) { |
95 kernel->put(static_cast<StringField>(i), | 101 kernel->put(static_cast<StringField>(i), |
96 statement->ColumnString(i)); | 102 statement->ColumnString(i)); |
97 } | 103 } |
98 for ( ; i < PROTO_FIELDS_END; ++i) { | 104 for ( ; i < PROTO_FIELDS_END; ++i) { |
99 kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray( | 105 kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray( |
100 statement->ColumnBlob(i), statement->ColumnByteLength(i)); | 106 statement->ColumnBlob(i), statement->ColumnByteLength(i)); |
101 } | 107 } |
108 for( ; i < ORDINAL_FIELDS_END; ++i) { | |
109 std::string temp; | |
110 statement->ColumnBlobAsString(i, &temp); | |
111 NodeOrdinal unpacked_ord(temp); | |
112 | |
113 // It's safe to assume that an invalid ordinal is a sign that | |
114 // some external corruption has occurred. Return null to force | |
115 // a re-download of the sync data. | |
116 if(!unpacked_ord.IsValid()) { | |
117 DVLOG(1) <<"Unpacked invalid ordinal. Signaling that the DB is corrupt"; | |
akalin 2012/10/05 22:40:27: space after <<
118 return NULL; | |
akalin 2012/10/05 22:40:27: you're leaking kernel here! Looks like a good tim
vishwath 2012/10/08 20:17:49: Done.
119 } | |
120 kernel->mutable_ref(static_cast<OrdinalField>(i)) = unpacked_ord; | |
121 } | |
102 return kernel; | 122 return kernel; |
103 } | 123 } |
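
akalin's comment above flags a real leak: when UnpackEntry() hits a corrupt ordinal it returns NULL without freeing the kernel it just allocated. Below is a minimal sketch of the scoped_ptr approach the comment hints at — an illustration only, assuming base/memory/scoped_ptr.h, not necessarily the patch that was eventually landed:

```cpp
// Sketch: hold the kernel in a scoped_ptr so every early exit is leak-free.
EntryKernel* UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  // ... the int64/time/id/bit/string/proto loops run unchanged here,
  //     leaving |i| at PROTO_FIELDS_END, exactly as in the diff above ...
  for ( ; i < ORDINAL_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);
    NodeOrdinal unpacked_ord(temp);
    if (!unpacked_ord.IsValid()) {
      DVLOG(1) << "Unpacked invalid ordinal. Signaling that the DB is corrupt";
      return NULL;  // scoped_ptr deletes the partially unpacked kernel.
    }
    kernel->mutable_ref(static_cast<OrdinalField>(i)) = unpacked_ord;
  }
  return kernel.release();  // On success the caller takes ownership, as before.
}
```

This keeps the documented contract ("the caller owns the returned EntryKernel*") while making the corruption path safe.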
104 | 124 |
105 namespace { | 125 namespace { |
106 | 126 |
107 string ComposeCreateTableColumnSpecs() { | 127 string ComposeCreateTableColumnSpecs() { |
108 const ColumnSpec* begin = g_metas_columns; | 128 const ColumnSpec* begin = g_metas_columns; |
109 const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns); | 129 const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns); |
110 string query; | 130 string query; |
111 query.reserve(kUpdateStatementBufferSize); | 131 query.reserve(kUpdateStatementBufferSize); |
(...skipping 204 matching lines...)
316 if (MigrateVersion78To79()) | 336 if (MigrateVersion78To79()) |
317 version_on_disk = 79; | 337 version_on_disk = 79; |
318 } | 338 } |
319 | 339 |
320 // Version 80 migration is adding the bag_of_chips column. | 340 // Version 80 migration is adding the bag_of_chips column. |
321 if (version_on_disk == 79) { | 341 if (version_on_disk == 79) { |
322 if (MigrateVersion79To80()) | 342 if (MigrateVersion79To80()) |
323 version_on_disk = 80; | 343 version_on_disk = 80; |
324 } | 344 } |
325 | 345 |
346 // Version 81 changes the server_position_in_parent field from an int64 | |
347 // to a blob. | |
348 if (version_on_disk == 80) { | |
349 if (MigrateVersion80To81()) | |
350 version_on_disk = 81; | |
351 } | |
352 | |
326 // If one of the migrations requested it, drop columns that aren't current. | 353 // If one of the migrations requested it, drop columns that aren't current. |
327 // It's only safe to do this after migrating all the way to the current | 354 // It's only safe to do this after migrating all the way to the current |
328 // version. | 355 // version. |
329 if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) { | 356 if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) { |
330 if (!RefreshColumns()) | 357 if (!RefreshColumns()) |
331 version_on_disk = 0; | 358 version_on_disk = 0; |
332 } | 359 } |
333 | 360 |
334 // A final, alternative catch-all migration to simply re-sync everything. | 361 // A final, alternative catch-all migration to simply re-sync everything. |
335 // | 362 // |
(...skipping 85 matching lines...)
421 bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) { | 448 bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) { |
422 string select; | 449 string select; |
423 select.reserve(kUpdateStatementBufferSize); | 450 select.reserve(kUpdateStatementBufferSize); |
424 select.append("SELECT "); | 451 select.append("SELECT "); |
425 AppendColumnList(&select); | 452 AppendColumnList(&select); |
426 select.append(" FROM metas "); | 453 select.append(" FROM metas "); |
427 | 454 |
428 sql::Statement s(db_->GetUniqueStatement(select.c_str())); | 455 sql::Statement s(db_->GetUniqueStatement(select.c_str())); |
429 | 456 |
430 while (s.Step()) { | 457 while (s.Step()) { |
431 EntryKernel *kernel = UnpackEntry(&s); | 458 EntryKernel *kernel = UnpackEntry(&s); |
akalin 2012/10/05 22:40:27: ' *' -> '* ', but this should be a scoped_ptr anyw
vishwath 2012/10/08 20:17:49: Done.
459 // A null kernel is evidence of external data corruption. | |
460 if(!kernel) | |
461 return false; | |
432 entry_bucket->insert(kernel); | 462 entry_bucket->insert(kernel); |
433 } | 463 } |
434 return s.Succeeded(); | 464 return s.Succeeded(); |
435 } | 465 } |
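
The thread above also asks for a scoped_ptr on the caller side. A hedged sketch of what the LoadEntries() loop could look like with that change — again only an illustration of the suggestion, reusing calls already present in this file:

```cpp
// Sketch: own the unpacked kernel until it is safely handed to the index.
bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM metas ");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel(UnpackEntry(&s));
    // A null kernel is evidence of external data corruption.
    if (!kernel.get())
      return false;
    entry_bucket->insert(kernel.release());  // The index takes ownership.
  }
  return s.Succeeded();
}
```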
436 | 466 |
437 bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) { | 467 bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) { |
438 { | 468 { |
439 sql::Statement s( | 469 sql::Statement s( |
440 db_->GetUniqueStatement( | 470 db_->GetUniqueStatement( |
441 "SELECT store_birthday, next_id, cache_guid, notification_state, " | 471 "SELECT store_birthday, next_id, cache_guid, notification_state, " |
(...skipping 54 matching lines...)
496 // and cache it ourselves the first time this function is called. | 526 // and cache it ourselves the first time this function is called. |
497 if (!save_entry_statement_.is_valid()) { | 527 if (!save_entry_statement_.is_valid()) { |
498 string query; | 528 string query; |
499 query.reserve(kUpdateStatementBufferSize); | 529 query.reserve(kUpdateStatementBufferSize); |
500 query.append("INSERT OR REPLACE INTO metas "); | 530 query.append("INSERT OR REPLACE INTO metas "); |
501 string values; | 531 string values; |
502 values.reserve(kUpdateStatementBufferSize); | 532 values.reserve(kUpdateStatementBufferSize); |
503 values.append("VALUES "); | 533 values.append("VALUES "); |
504 const char* separator = "( "; | 534 const char* separator = "( "; |
505 int i = 0; | 535 int i = 0; |
506 for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) { | 536 for (i = BEGIN_FIELDS; i < END_FIELDS; ++i) { |
507 query.append(separator); | 537 query.append(separator); |
508 values.append(separator); | 538 values.append(separator); |
509 separator = ", "; | 539 separator = ", "; |
510 query.append(ColumnName(i)); | 540 query.append(ColumnName(i)); |
511 values.append("?"); | 541 values.append("?"); |
512 } | 542 } |
513 query.append(" ) "); | 543 query.append(" ) "); |
514 values.append(" )"); | 544 values.append(" )"); |
515 query.append(values); | 545 query.append(values); |
516 | 546 |
(...skipping 444 matching lines...)
961 // Some users are stuck with a DB that causes them to reuse existing IDs. We | 991 // Some users are stuck with a DB that causes them to reuse existing IDs. We |
962 // perform this one-time fixup on all users to help the few that are stuck. | 992 // perform this one-time fixup on all users to help the few that are stuck. |
963 // See crbug.com/142987 for details. | 993 // See crbug.com/142987 for details. |
964 if (!db_->Execute( | 994 if (!db_->Execute( |
965 "UPDATE share_info SET next_id = next_id - 65536")) { | 995 "UPDATE share_info SET next_id = next_id - 65536")) { |
966 return false; | 996 return false; |
967 } | 997 } |
968 SetVersion(79); | 998 SetVersion(79); |
969 return true; | 999 return true; |
970 } | 1000 } |
1001 | |
971 bool DirectoryBackingStore::MigrateVersion79To80() { | 1002 bool DirectoryBackingStore::MigrateVersion79To80() { |
972 if (!db_->Execute( | 1003 if (!db_->Execute( |
973 "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB")) | 1004 "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB")) |
974 return false; | 1005 return false; |
975 sql::Statement update(db_->GetUniqueStatement( | 1006 sql::Statement update(db_->GetUniqueStatement( |
976 "UPDATE share_info SET bag_of_chips = ?")); | 1007 "UPDATE share_info SET bag_of_chips = ?")); |
977 // An empty message is serialized to an empty string. | 1008 // An empty message is serialized to an empty string. |
978 update.BindBlob(0, NULL, 0); | 1009 update.BindBlob(0, NULL, 0); |
979 if (!update.Run()) | 1010 if (!update.Run()) |
980 return false; | 1011 return false; |
981 SetVersion(80); | 1012 SetVersion(80); |
982 return true; | 1013 return true; |
983 } | 1014 } |
984 | 1015 |
1016 bool DirectoryBackingStore::MigrateVersion80To81() { | |
1017 if(!db_->Execute( | |
1018 "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB")) | |
1019 return false; | |
1020 | |
1021 sql::Statement get_positions(db_->GetUniqueStatement( | |
1022 "SELECT metahandle, server_position_in_parent FROM metas")); | |
1023 | |
1024 sql::Statement put_ordinals(db_->GetUniqueStatement( | |
1025 "UPDATE metas SET server_ordinal_in_parent = ?" | |
1026 "WHERE metahandle = ?")); | |
1027 | |
1028 while(get_positions.Step()) { | |
1029 int64 metahandle = get_positions.ColumnInt64(0); | |
1030 int64 position = get_positions.ColumnInt64(1); | |
1031 | |
1032 const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue(); | |
1033 put_ordinals.BindBlob(0, ordinal.data(), ordinal.length()); | |
1034 put_ordinals.BindInt64(1, metahandle); | |
1035 | |
1036 if(!put_ordinals.Run()) | |
1037 return false; | |
1038 put_ordinals.Reset(true); | |
1039 } | |
1040 | |
1041 SetVersion(81); | |
1042 needs_column_refresh_ = true; | |
1043 return true; | |
1044 } | |
1045 | |
985 bool DirectoryBackingStore::CreateTables() { | 1046 bool DirectoryBackingStore::CreateTables() { |
986 DVLOG(1) << "First run, creating tables"; | 1047 DVLOG(1) << "First run, creating tables"; |
987 // Create two little tables share_version and share_info | 1048 // Create two little tables share_version and share_info |
988 if (!db_->Execute( | 1049 if (!db_->Execute( |
989 "CREATE TABLE share_version (" | 1050 "CREATE TABLE share_version (" |
990 "id VARCHAR(128) primary key, data INT)")) { | 1051 "id VARCHAR(128) primary key, data INT)")) { |
991 return false; | 1052 return false; |
992 } | 1053 } |
993 | 1054 |
994 { | 1055 { |
(...skipping 42 matching lines...)
1037 | 1098 |
1038 // Create the big metas table. | 1099 // Create the big metas table. |
1039 if (!CreateMetasTable(false)) | 1100 if (!CreateMetasTable(false)) |
1040 return false; | 1101 return false; |
1041 | 1102 |
1042 { | 1103 { |
1043 // Insert the entry for the root into the metas table. | 1104 // Insert the entry for the root into the metas table. |
1044 const int64 now = TimeToProtoTime(base::Time::Now()); | 1105 const int64 now = TimeToProtoTime(base::Time::Now()); |
1045 sql::Statement s(db_->GetUniqueStatement( | 1106 sql::Statement s(db_->GetUniqueStatement( |
1046 "INSERT INTO metas " | 1107 "INSERT INTO metas " |
1047 "( id, metahandle, is_dir, ctime, mtime) " | 1108 "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) " |
1048 "VALUES ( \"r\", 1, 1, ?, ?)")); | 1109 "VALUES ( \"r\", 1, 1, ?, ?, ?)")); |
1049 s.BindInt64(0, now); | 1110 s.BindInt64(0, now); |
1050 s.BindInt64(1, now); | 1111 s.BindInt64(1, now); |
1112 std::string ord = NodeOrdinal::CreateInitialOrdinal().ToInternalValue(); | |
1113 s.BindBlob(2, ord.data(), ord.length()); | |
1051 | 1114 |
1052 if (!s.Run()) | 1115 if (!s.Run()) |
1053 return false; | 1116 return false; |
1054 } | 1117 } |
1055 | 1118 |
1056 return true; | 1119 return true; |
1057 } | 1120 } |
1058 | 1121 |
1059 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) { | 1122 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) { |
1060 const char* name = is_temporary ? "temp_metas" : "metas"; | 1123 const char* name = is_temporary ? "temp_metas" : "metas"; |
(...skipping 92 matching lines...)
1153 bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end); | 1216 bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end); |
1154 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end); | 1217 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end); |
1155 bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end); | 1218 bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end); |
1156 is_ok = is_ok && prev_exists && parent_exists && next_exists; | 1219 is_ok = is_ok && prev_exists && parent_exists && next_exists; |
1157 } | 1220 } |
1158 return is_ok; | 1221 return is_ok; |
1159 } | 1222 } |
1160 | 1223 |
1161 } // namespace syncable | 1224 } // namespace syncable |
1162 } // namespace syncer | 1225 } // namespace syncer |