OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "chrome/browser/sync/syncable/directory_backing_store.h" |
| 6 |
| 7 #ifdef OS_MACOSX |
| 8 #include <CoreFoundation/CoreFoundation.h> |
| 9 #elif defined(OS_LINUX) |
| 10 #include <glib.h> |
| 11 #endif |
| 12 |
| 13 #include <string> |
| 14 |
| 15 #include "base/hash_tables.h" |
| 16 #include "base/logging.h" |
| 17 #include "chrome/browser/sync/protocol/service_constants.h" |
| 18 #include "chrome/browser/sync/syncable/syncable-inl.h" |
| 19 #include "chrome/browser/sync/syncable/syncable_columns.h" |
| 20 #include "chrome/browser/sync/util/crypto_helpers.h" |
| 21 #include "chrome/browser/sync/util/path_helpers.h" |
| 22 #include "chrome/browser/sync/util/query_helpers.h" |
| 23 #include "third_party/sqlite/preprocessed/sqlite3.h" |
| 24 |
| 25 // If sizeof(time_t) != sizeof(int32) we need to alter or expand the sqlite |
| 26 // datatype. |
| 27 COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32); |
| 28 |
| 29 using std::string; |
| 30 |
| 31 namespace syncable { |
| 32 |
// This just has to be big enough to hold an UPDATE or
// INSERT statement that modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// InitializeTables() compares this against the value stored in the
// share_version table and recreates the database on any mismatch.
static const int32 kCurrentDBVersion = 67;
// TODO(sync): remove
// Sqlite scalar function backing the 2-argument LIKE operator for UTF-16
// databases (registered in RegisterPathNameMatch below).  argv[0] is the
// pattern (pathspec) and argv[1] the name being tested; the result of
// PathNameMatch() is written into |context| as an int.
static void PathNameMatch16(sqlite3_context *context, int argc,
                            sqlite3_value **argv) {
  // sqlite3_value_bytes16() returns a byte count; divide by 2 to get the
  // number of UTF-16 code units for the PathString constructor.
  const PathString pathspec(reinterpret_cast<const PathChar*>
      (sqlite3_value_text16(argv[0])), sqlite3_value_bytes16(argv[0]) / 2);

  const void* name_text = sqlite3_value_text16(argv[1]);
  // NOTE(review): name_bytes is computed but never used; the length is
  // re-fetched below when constructing |name|.
  int name_bytes = sqlite3_value_bytes16(argv[1]);
  // If the text is null, we need to avoid the PathString constructor.
  if (name_text != NULL) {
    // Have to copy to append a terminating 0 anyway.
    const PathString name(reinterpret_cast<const PathChar*>
        (sqlite3_value_text16(argv[1])),
        sqlite3_value_bytes16(argv[1]) / 2);
    sqlite3_result_int(context, PathNameMatch(name, pathspec));
  } else {
    // A NULL name is matched as if it were the empty string.
    sqlite3_result_int(context, PathNameMatch(PathString(), pathspec));
  }
}
| 59 |
// Sqlite allows setting of the escape character in an ESCAPE clause and
// this character is passed in as a third character to the like function.
// See: http://www.sqlite.org/lang_expr.html
// Placeholder registered for the 3-argument LIKE; intentionally fatal if it
// is ever invoked, since no query in this file uses ESCAPE.
static void PathNameMatch16WithEscape(sqlite3_context *context,
                                      int argc, sqlite3_value **argv) {
  // Never seen this called, but just in case.
  LOG(FATAL) << "PathNameMatch16WithEscape() not implemented";
}
| 68 |
// Registers the custom "PATHNAME" collation sequence on |dbhandle|, backed
// by ComparePathNames16().  Text encoding is UTF-16 on Windows and UTF-8
// elsewhere, matching the platform's PathString representation.
static void RegisterPathNameCollate(sqlite3* dbhandle) {
#ifdef OS_WINDOWS
  const int collate = SQLITE_UTF16;
#else
  const int collate = SQLITE_UTF8;
#endif
  CHECK(SQLITE_OK == sqlite3_create_collation(dbhandle, "PATHNAME", collate,
      NULL, &ComparePathNames16));
}
| 78 |
// Replace the LIKE operator with our own implementation that
// does file spec matching like "*.txt".
// Overrides both the 2-argument and 3-argument (ESCAPE clause) forms of
// sqlite's built-in like() on |dbhandle|.
static void RegisterPathNameMatch(sqlite3* dbhandle) {
  // We only register this on Windows. We use the normal sqlite
  // matching function on mac/linux.
  // note that the function PathNameMatch() does a simple ==
  // comparison on mac, so that would have to be fixed if
  // we really wanted to use PathNameMatch on mac/linux w/ the
  // same pattern strings as we do on windows.
#ifdef OS_WINDOWS
  CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
      2, SQLITE_ANY, NULL, &PathNameMatch16, NULL, NULL));
  CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
      3, SQLITE_ANY, NULL, &PathNameMatch16WithEscape, NULL, NULL));
#endif  // OS_WINDOWS
}
| 95 |
| 96 static inline bool IsSqliteErrorOurFault(int result) { |
| 97 switch (result) { |
| 98 case SQLITE_MISMATCH: |
| 99 case SQLITE_CONSTRAINT: |
| 100 case SQLITE_MISUSE: |
| 101 case SQLITE_RANGE: |
| 102 return true; |
| 103 default: |
| 104 return false; |
| 105 } |
| 106 } |
| 107 |
| 108 namespace { |
| 109 // This small helper class reduces the amount of code in the table upgrade code |
| 110 // below and also CHECKs as soon as there's an issue. |
| 111 class StatementExecutor { |
| 112 public: |
| 113 explicit StatementExecutor(sqlite3* dbhandle) : dbhandle_(dbhandle) { |
| 114 result_ = SQLITE_DONE; |
| 115 } |
| 116 int Exec(const char* query) { |
| 117 if (SQLITE_DONE != result_) |
| 118 return result_; |
| 119 result_ = ::Exec(dbhandle_, query); |
| 120 CHECK(!IsSqliteErrorOurFault(result_)) << query; |
| 121 return result_; |
| 122 } |
| 123 template <typename T1> |
| 124 int Exec(const char* query, T1 arg1) { |
| 125 if (SQLITE_DONE != result_) |
| 126 return result_; |
| 127 result_ = ::Exec(dbhandle_, query, arg1); |
| 128 CHECK(!IsSqliteErrorOurFault(result_)) << query; |
| 129 return result_; |
| 130 } |
| 131 int result() { |
| 132 return result_; |
| 133 } |
| 134 void set_result(int result) { |
| 135 result_ = result; |
| 136 CHECK(!IsSqliteErrorOurFault(result_)) << result_; |
| 137 } |
| 138 bool healthy() const { |
| 139 return SQLITE_DONE == result_; |
| 140 } |
| 141 private: |
| 142 sqlite3* dbhandle_; |
| 143 int result_; |
| 144 DISALLOW_COPY_AND_ASSIGN(StatementExecutor); |
| 145 }; |
| 146 |
| 147 } // namespace |
| 148 |
// Returns a fresh random identifier (128 bits, hex-encoded) used as the
// cache_guid value for a newly created database.
static string GenerateCacheGUID() {
  return Generate128BitRandomHexString();
}
| 152 |
// Iterate over the fields of |entry| and bind dirty ones to |statement| for
// updating. Returns the number of args bound.
// Relies on the field enums forming one contiguous sequence partitioned as
// int64s, ids, bits, strings, blobs (BEGIN_FIELDS .. BLOB_FIELDS_END); each
// loop below picks up exactly where the previous one stopped.  The order
// must match the statements built by SaveNewEntryToDB()/UpdateEntryToDB().
static int BindDirtyFields(const EntryKernel& entry, sqlite3_stmt* statement) {
  int index = 1;  // sqlite bind parameter indices are 1-based.
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    if (entry.dirty[i])
      BindArg(statement, entry.ref(static_cast<Int64Field>(i)), index++);
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    if (entry.dirty[i])
      BindArg(statement, entry.ref(static_cast<IdField>(i)), index++);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    if (entry.dirty[i])
      BindArg(statement, entry.ref(static_cast<BitField>(i)), index++);
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    if (entry.dirty[i])
      BindArg(statement, entry.ref(static_cast<StringField>(i)), index++);
  }
  for ( ; i < BLOB_FIELDS_END; ++i) {
    if (entry.dirty[i])
      BindArg(statement, entry.ref(static_cast<BlobField>(i)), index++);
  }
  // |index| is the next unused slot, so index - 1 args were bound.
  return index - 1;
}
| 180 |
// The caller owns the returned EntryKernel*.
// Steps |statement| once and materializes the next row into a heap-allocated
// EntryKernel, or returns NULL when the result set is exhausted.  Column
// order must match the SELECT built in LoadEntries() (same field-enum
// partitioning as BindDirtyFields).  CHECK-fails on any error other than
// SQLITE_DONE.
static EntryKernel* UnpackEntry(sqlite3_stmt* statement) {
  EntryKernel* result = NULL;
  int query_result = sqlite3_step(statement);
  if (SQLITE_ROW == query_result) {
    result = new EntryKernel;
    // Every field must be present as a column, in enum order.
    CHECK(sqlite3_column_count(statement) == static_cast<int>(FIELD_COUNT));
    int i = 0;
    for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
      result->ref(static_cast<Int64Field>(i)) =
          sqlite3_column_int64(statement, i);
    }
    for ( ; i < ID_FIELDS_END; ++i) {
      GetColumn(statement, i, &result->ref(static_cast<IdField>(i)));
    }
    for ( ; i < BIT_FIELDS_END; ++i) {
      // Normalize any nonzero stored value to true.
      result->ref(static_cast<BitField>(i)) =
          (0 != sqlite3_column_int(statement, i));
    }
    for ( ; i < STRING_FIELDS_END; ++i) {
      GetColumn(statement, i, &result->ref(static_cast<StringField>(i)));
    }
    for ( ; i < BLOB_FIELDS_END; ++i) {
      GetColumn(statement, i, &result->ref(static_cast<BlobField>(i)));
    }
    // Clear any remaining (non-persisted) fields past the last column read.
    ZeroFields(result, i);
  } else {
    CHECK(SQLITE_DONE == query_result);
    result = NULL;
  }
  return result;
}
| 213 |
// Steps |statement| once and returns true iff it ran to completion
// (SQLITE_DONE) and then reset cleanly.  Otherwise logs |failed_call| with
// the sqlite result code, CHECKs that the failure is environmental (e.g.
// disk full) rather than a programming error, and returns false.
static bool StepDone(sqlite3_stmt* statement, const char* failed_call) {
  int result = sqlite3_step(statement);
  if (SQLITE_DONE == result && SQLITE_OK == (result = sqlite3_reset(statement)))
    return true;
  // Some error code.
  LOG(WARNING) << failed_call << " failed with result " << result;
  CHECK(!IsSqliteErrorOurFault(result));
  return false;
}
| 223 |
| 224 static string ComposeCreateTableColumnSpecs(const ColumnSpec* begin, |
| 225 const ColumnSpec* end) { |
| 226 string query; |
| 227 query.reserve(kUpdateStatementBufferSize); |
| 228 char separator = '('; |
| 229 for (const ColumnSpec* column = begin; column != end; ++column) { |
| 230 query.push_back(separator); |
| 231 separator = ','; |
| 232 query.append(column->name); |
| 233 query.push_back(' '); |
| 234 query.append(column->spec); |
| 235 } |
| 236 query.push_back(')'); |
| 237 return query; |
| 238 } |
| 239 |
///////////////////////////////////////////////////////////////////////////////
// DirectoryBackingStore implementation.

// |dir_name| identifies this share inside the database rows; |backing_filepath|
// is the sqlite file on disk.  Both database handles are opened lazily:
// load_dbhandle_ in Load(), save_dbhandle_ via LazyGetSaveHandle().
DirectoryBackingStore::DirectoryBackingStore(const PathString& dir_name,
    const PathString& backing_filepath)
    : dir_name_(dir_name), backing_filepath_(backing_filepath),
      load_dbhandle_(NULL), save_dbhandle_(NULL) {
}
| 248 |
| 249 DirectoryBackingStore::~DirectoryBackingStore() { |
| 250 if (NULL != load_dbhandle_) { |
| 251 sqlite3_close(load_dbhandle_); |
| 252 load_dbhandle_ = NULL; |
| 253 } |
| 254 if (NULL != save_dbhandle_) { |
| 255 sqlite3_close(save_dbhandle_); |
| 256 save_dbhandle_ = NULL; |
| 257 } |
| 258 } |
| 259 |
| 260 bool DirectoryBackingStore::OpenAndConfigureHandleHelper( |
| 261 sqlite3** handle) const { |
| 262 if (SQLITE_OK == SqliteOpen(backing_filepath_.c_str(), handle)) { |
| 263 sqlite3_busy_timeout(*handle, kDirectoryBackingStoreBusyTimeoutMs); |
| 264 RegisterPathNameCollate(*handle); |
| 265 RegisterPathNameMatch(*handle); |
| 266 return true; |
| 267 } |
| 268 return false; |
| 269 } |
| 270 |
// Opens the database, migrates/creates tables as needed, purges deleted
// entries, and fills the three out-params with the persisted state.  The
// load handle is closed before returning; ownership of the EntryKernels
// inserted into |entry_bucket| passes to the caller.
DirOpenResult DirectoryBackingStore::Load(MetahandlesIndex* entry_bucket,
    ExtendedAttributes* xattrs_bucket,
    Directory::KernelLoadInfo* kernel_load_info) {
  // Load is only expected to be called once.
  DCHECK(load_dbhandle_ == NULL);
  if (!OpenAndConfigureHandleHelper(&load_dbhandle_))
    return FAILED_OPEN_DATABASE;

  DirOpenResult result = InitializeTables();
  if (OPENED != result)
    return result;

  DropDeletedEntries();
  LoadEntries(entry_bucket);
  LoadExtendedAttributes(xattrs_bucket);
  LoadInfo(kernel_load_info);

  sqlite3_close(load_dbhandle_);
  load_dbhandle_ = NULL;  // No longer used.

  return OPENED;
}
| 292 |
// Persists |snapshot| atomically: dirty metas, dirty extended attributes,
// and (when flagged dirty) the share_info row are written inside one
// EXCLUSIVE transaction.  Any write failure is treated as "disk full": the
// transaction is rolled back and false is returned.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  bool disk_full = false;
  sqlite3* dbhandle = LazyGetSaveHandle();
  {
    {
      // Inner scope so |begin| is finalized before we jump or fall through.
      ScopedStatement begin(PrepareQuery(dbhandle,
                                         "BEGIN EXCLUSIVE TRANSACTION"));
      if (!StepDone(begin.get(), "BEGIN")) {
        // Could not even open the transaction; nothing to roll back.
        disk_full = true;
        goto DoneDBTransaction;
      }
    }

    for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
         !disk_full && i != snapshot.dirty_metas.end(); ++i) {
      DCHECK(i->dirty.any());
      disk_full = !SaveEntryToDB(*i);
    }

    for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
         !disk_full && i != snapshot.dirty_xattrs.end(); ++i) {
      DCHECK(i->second.dirty);
      if (i->second.is_deleted) {
        disk_full = !DeleteExtendedAttributeFromDB(i);
      } else {
        disk_full = !SaveExtendedAttributeToDB(i);
      }
    }

    if (!disk_full && (Directory::KERNEL_SHARE_INFO_DIRTY ==
                       snapshot.kernel_info_status)) {
      const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
      ScopedStatement update(PrepareQuery(dbhandle, "UPDATE share_info "
          "SET last_sync_timestamp = ?, initial_sync_ended = ?, "
          "store_birthday = ?, "
          "next_id = ?",
          info.last_sync_timestamp,
          info.initial_sync_ended,
          info.store_birthday,
          info.next_id));
      // Exactly one share_info row must have been updated.
      disk_full = !(StepDone(update.get(), "UPDATE share_info")
                    && 1 == sqlite3_changes(dbhandle));
    }
    if (disk_full) {
      ExecOrDie(dbhandle, "ROLLBACK TRANSACTION");
    } else {
      ScopedStatement end_transaction(PrepareQuery(dbhandle,
                                                   "COMMIT TRANSACTION"));
      disk_full = !StepDone(end_transaction.get(), "COMMIT TRANSACTION");
    }
  }

 DoneDBTransaction:
  return !disk_full;
}
| 349 |
// Verifies the on-disk schema version inside an EXCLUSIVE transaction.
// A newer-than-expected version aborts with FAILED_NEWER_VERSION; an older
// or missing version causes the database to be dropped and recreated.
// Returns OPENED on success, FAILED_DISK_FULL on any write failure.
DirOpenResult DirectoryBackingStore::InitializeTables() {
  StatementExecutor se(load_dbhandle_);
  if (SQLITE_DONE != se.Exec("BEGIN EXCLUSIVE TRANSACTION")) {
    return FAILED_DISK_FULL;
  }
  int version_on_disk = 0;

  if (DoesTableExist(load_dbhandle_, "share_version")) {
    ScopedStatement version_query(
        PrepareQuery(load_dbhandle_, "SELECT data from share_version"));
    int query_result = sqlite3_step(version_query.get());
    if (SQLITE_ROW == query_result) {
      version_on_disk = sqlite3_column_int(version_query.get(), 0);
    }
    // Finalize before any DDL below touches the table.
    version_query.reset(NULL);
  }
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion) {
      // Refuse to touch a database written by a newer client.
      ExecOrDie(load_dbhandle_, "END TRANSACTION");
      return FAILED_NEWER_VERSION;
    }
    LOG(INFO) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    if (se.healthy()) {
      DropAllTables();
      se.set_result(CreateTables());
    }
  }
  if (SQLITE_DONE == se.result()) {
    {
      ScopedStatement statement(PrepareQuery(load_dbhandle_,
          "SELECT db_create_version, db_create_time FROM share_info"));
      CHECK(SQLITE_ROW == sqlite3_step(statement.get()));
      PathString db_create_version;
      int db_create_time;
      GetColumn(statement.get(), 0, &db_create_version);
      GetColumn(statement.get(), 1, &db_create_time);
      statement.reset(0);
      LOG(INFO) << "DB created at " << db_create_time << " by version " <<
          db_create_version;
    }
    // COMMIT TRANSACTION rolls back on failure.
    if (SQLITE_DONE == Exec(load_dbhandle_, "COMMIT TRANSACTION"))
      return OPENED;
  } else {
    ExecOrDie(load_dbhandle_, "ROLLBACK TRANSACTION");
  }
  return FAILED_DISK_FULL;
}
| 399 |
// Reads every row of the metas table into |entry_bucket|; each inserted
// EntryKernel is heap-allocated and owned by the caller.
void DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT");
  const char* joiner = " ";
  // Be explicit in SELECT order to match up with UnpackEntry.
  for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
    select.append(joiner);
    select.append(ColumnName(i));
    joiner = ", ";
  }
  select.append(" FROM metas ");
  ScopedStatement statement(PrepareQuery(load_dbhandle_, select.c_str()));
  base::hash_set<int> handles;
  while (EntryKernel* kernel = UnpackEntry(statement.get())) {
    // NOTE(review): the insert() happens inside DCHECK, so in builds where
    // DCHECK compiles away the expression it is not evaluated; |handles| is
    // only used for this uniqueness check, so that is harmless here.
    DCHECK(handles.insert(kernel->ref(META_HANDLE)).second);  // Only in debug.
    entry_bucket->insert(kernel);
  }
}
| 419 |
// Reads every row of the extended_attributes table into |xattrs_bucket|,
// keyed by (metahandle, key).  Loaded attributes start out non-deleted.
void DirectoryBackingStore::LoadExtendedAttributes(
    ExtendedAttributes* xattrs_bucket) {
  ScopedStatement statement(PrepareQuery(load_dbhandle_,
      "SELECT metahandle, key, value FROM extended_attributes"));
  int step_result = sqlite3_step(statement.get());
  while (SQLITE_ROW == step_result) {
    int64 metahandle;
    PathString path_string_key;
    ExtendedAttributeValue val;
    val.is_deleted = false;
    GetColumn(statement.get(), 0, &metahandle);
    GetColumn(statement.get(), 1, &path_string_key);
    GetColumn(statement.get(), 2, &(val.value));
    ExtendedAttributeKey key(metahandle, path_string_key);
    xattrs_bucket->insert(std::make_pair(key, val));
    step_result = sqlite3_step(statement.get());
  }
  // Anything other than a clean end-of-rows is fatal.
  CHECK(SQLITE_DONE == step_result);
}
| 439 |
// Loads the single share_info row plus the largest metahandle in use into
// |info|.  CHECK-fails if either query returns no row.
void DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  ScopedStatement query(PrepareQuery(load_dbhandle_,
      "SELECT last_sync_timestamp, initial_sync_ended, "
      "store_birthday, next_id, cache_guid "
      "FROM share_info"));
  CHECK(SQLITE_ROW == sqlite3_step(query.get()));
  GetColumn(query.get(), 0, &info->kernel_info.last_sync_timestamp);
  GetColumn(query.get(), 1, &info->kernel_info.initial_sync_ended);
  GetColumn(query.get(), 2, &info->kernel_info.store_birthday);
  GetColumn(query.get(), 3, &info->kernel_info.next_id);
  GetColumn(query.get(), 4, &info->cache_guid);
  // Reuse the scoped statement for the second query.
  query.reset(PrepareQuery(load_dbhandle_,
                           "SELECT MAX(metahandle) FROM metas"));
  CHECK(SQLITE_ROW == sqlite3_step(query.get()));
  GetColumn(query.get(), 0, &info->max_metahandle);
}
| 456 |
| 457 bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) { |
| 458 return entry.ref(IS_NEW) ? SaveNewEntryToDB(entry) : UpdateEntryToDB(entry); |
| 459 } |
| 460 |
// Inserts |entry| as a new metas row, writing only its dirty fields.
// Returns true iff exactly one row was inserted.
bool DirectoryBackingStore::SaveNewEntryToDB(const EntryKernel& entry) {
  DCHECK(save_dbhandle_);
  // TODO(timsteele): Should use INSERT OR REPLACE and eliminate one of
  // the SaveNew / UpdateEntry code paths.
  // Build "INSERT INTO metas ( col, ... ) VALUES ( ?, ... )" where the
  // column list and the placeholder list grow in lockstep over the dirty
  // fields, in the same order BindDirtyFields() binds them.
  string query;
  query.reserve(kUpdateStatementBufferSize);
  query.append("INSERT INTO metas ");
  string values;
  values.reserve(kUpdateStatementBufferSize);
  values.append("VALUES ");
  const char* separator = "( ";
  int i = 0;
  for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
    if (entry.dirty[i]) {
      query.append(separator);
      values.append(separator);
      separator = ", ";
      query.append(ColumnName(i));
      values.append("?");
    }
  }
  query.append(" ) ");
  values.append(" )");
  query.append(values);
  ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
  BindDirtyFields(entry, statement.get());
  return StepDone(statement.get(), "SaveNewEntryToDB()") &&
      1 == sqlite3_changes(save_dbhandle_);
}
| 490 |
// Updates the existing metas row for |entry|, writing only its dirty
// fields.  Returns true iff exactly one row was changed.
bool DirectoryBackingStore::UpdateEntryToDB(const EntryKernel& entry) {
  DCHECK(save_dbhandle_);
  // Build "UPDATE metas SET col = ?, ... WHERE metahandle = ?" over the
  // dirty fields, in the same order BindDirtyFields() binds them.
  string query;
  query.reserve(kUpdateStatementBufferSize);
  query.append("UPDATE metas ");
  const char* separator = "SET ";
  int i;
  for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
    if (entry.dirty[i]) {
      query.append(separator);
      separator = ", ";
      query.append(ColumnName(i));
      query.append(" = ? ");
    }
  }
  query.append("WHERE metahandle = ?");
  ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
  // The metahandle occupies the slot right after the last dirty field.
  const int var_count = BindDirtyFields(entry, statement.get());
  BindArg(statement.get(), entry.ref(META_HANDLE), var_count + 1);
  return StepDone(statement.get(), "UpdateEntryToDB()") &&
      1 == sqlite3_changes(save_dbhandle_);
}
| 513 |
| 514 bool DirectoryBackingStore::SaveExtendedAttributeToDB( |
| 515 ExtendedAttributes::const_iterator i) { |
| 516 DCHECK(save_dbhandle_); |
| 517 ScopedStatement insert(PrepareQuery(save_dbhandle_, |
| 518 "INSERT INTO extended_attributes " |
| 519 "(metahandle, key, value) " |
| 520 "values ( ?, ?, ? )", |
| 521 i->first.metahandle, i->first.key, i->second.value)); |
| 522 return StepDone(insert.get(), "SaveExtendedAttributeToDB()") |
| 523 && 1 == sqlite3_changes(LazyGetSaveHandle()); |
| 524 } |
| 525 |
// Deletes one extended attribute row identified by (metahandle, key).
// Returns false only if the DELETE statement itself failed.
bool DirectoryBackingStore::DeleteExtendedAttributeFromDB(
    ExtendedAttributes::const_iterator i) {
  DCHECK(save_dbhandle_);
  ScopedStatement delete_attribute(PrepareQuery(save_dbhandle_,
      "DELETE FROM extended_attributes "
      "WHERE metahandle = ? AND key = ? ",
      i->first.metahandle, i->first.key));
  if (!StepDone(delete_attribute.get(), "DeleteExtendedAttributeFromDB()")) {
    LOG(ERROR) << "DeleteExtendedAttributeFromDB(),StepDone() failed "
        << "for metahandle: " << i->first.metahandle << " key: "
        << i->first.key;
    return false;
  }
  // The attribute may have never been saved to the database if it was
  // created and then immediately deleted.  So don't check that we
  // deleted exactly 1 row.
  return true;
}
| 544 |
// Purges rows that are marked deleted and fully synced (neither unsynced
// nor carrying an unapplied update), along with their extended attributes.
// The doomed metahandles are staged in a TEMP table so both DELETEs see a
// consistent set.
void DirectoryBackingStore::DropDeletedEntries() {
  static const char delete_extended_attributes[] =
      "DELETE FROM extended_attributes WHERE metahandle IN "
      "(SELECT metahandle from death_row)";
  static const char delete_metas[] = "DELETE FROM metas WHERE metahandle IN "
      "(SELECT metahandle from death_row)";
  // Put all statements into a transaction for better performance
  ExecOrDie(load_dbhandle_, "BEGIN TRANSACTION");
  ExecOrDie(load_dbhandle_, "CREATE TEMP TABLE death_row (metahandle BIGINT)");
  ExecOrDie(load_dbhandle_, "INSERT INTO death_row "
      "SELECT metahandle from metas WHERE is_del > 0 "
      " AND is_unsynced < 1"
      " AND is_unapplied_update < 1");
  // The two DELETEs go through StatementExecutor so a disk-full failure on
  // the first skips the second instead of CHECK-failing.
  StatementExecutor x(load_dbhandle_);
  x.Exec(delete_extended_attributes);
  x.Exec(delete_metas);
  ExecOrDie(load_dbhandle_, "DROP TABLE death_row");
  ExecOrDie(load_dbhandle_, "COMMIT TRANSACTION");
}
| 564 |
// Drops |table_name| if it exists; silently ignores prepare failures.
// |table_name| must be trusted (it is concatenated into SQL unescaped);
// all callers pass string literals.
void DirectoryBackingStore::SafeDropTable(const char* table_name) {
  string query = "DROP TABLE IF EXISTS ";
  query.append(table_name);
  const char* tail;
  sqlite3_stmt* statement = NULL;
  if (SQLITE_OK == sqlite3_prepare(load_dbhandle_, query.data(),
                                   query.size(), &statement, &tail)) {
    CHECK(SQLITE_DONE == sqlite3_step(statement));
  }
  // sqlite3_finalize() is a harmless no-op on a NULL statement.
  sqlite3_finalize(statement);
}
| 576 |
// (Re)creates the extended_attributes table from scratch, dropping any
// existing one first.  Returns the sqlite result code of the CREATE.
int DirectoryBackingStore::CreateExtendedAttributeTable() {
  SafeDropTable("extended_attributes");
  LOG(INFO) << "CreateExtendedAttributeTable";
  // ON CONFLICT REPLACE lets SaveExtendedAttributeToDB() use a plain INSERT
  // to overwrite an existing (metahandle, key) pair.
  return Exec(load_dbhandle_, "CREATE TABLE extended_attributes("
      "metahandle bigint, "
      "key varchar(127), "
      "value blob, "
      "PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE)");
}
| 586 |
| 587 void DirectoryBackingStore::DropAllTables() { |
| 588 SafeDropTable("metas"); |
| 589 SafeDropTable("share_info"); |
| 590 SafeDropTable("share_version"); |
| 591 SafeDropTable("extended_attributes"); |
| 592 } |
| 593 |
// Builds a fresh database: share_version, share_info (seeded with this
// share's identity and a new cache GUID), the metas table with a root
// entry, and the extended_attributes table.  Each step runs only if all
// previous ones returned SQLITE_DONE; the first failure code is returned.
int DirectoryBackingStore::CreateTables() {
  LOG(INFO) << "First run, creating tables";
  // Create two little tables share_version and share_info
  int result = Exec(load_dbhandle_, "CREATE TABLE share_version ("
                    "id VARCHAR(128) primary key, data INT)");
  result = SQLITE_DONE != result ? result :
      Exec(load_dbhandle_, "INSERT INTO share_version VALUES(?, ?)",
           dir_name_, kCurrentDBVersion);
  result = SQLITE_DONE != result ? result :
      Exec(load_dbhandle_, "CREATE TABLE share_info ("
           "id VARCHAR(128) primary key, "
           "last_sync_timestamp INT, "
           "name VARCHAR(128), "
           // Gets set if the syncer ever gets updates from the
           // server and the server returns 0.  Lets us detect the
           // end of the initial sync.
           "initial_sync_ended BIT default 0, "
           "store_birthday VARCHAR(256), "
           "db_create_version VARCHAR(128), "
           "db_create_time int, "
           "next_id bigint default -2, "
           "cache_guid VARCHAR(32))");
  result = SQLITE_DONE != result ? result :
      Exec(load_dbhandle_, "INSERT INTO share_info VALUES"
           "(?, "  // id
           "0, "   // last_sync_timestamp
           "?, "   // name
           "?, "   // initial_sync_ended
           "?, "   // store_birthday
           "?, "   // db_create_version
           "?, "   // db_create_time
           "-2, "  // next_id
           "?)",   // cache_guid
           dir_name_,                    // id
           dir_name_,                    // name
           false,                        // initial_sync_ended
           "",                           // store_birthday
           SYNC_ENGINE_VERSION_STRING,   // db_create_version
           static_cast<int32>(time(0)),  // db_create_time
           GenerateCacheGUID());         // cache_guid
  // Create the big metas table.
  string query = "CREATE TABLE metas " + ComposeCreateTableColumnSpecs
      (g_metas_columns, g_metas_columns + ARRAYSIZE(g_metas_columns));
  result = SQLITE_DONE != result ? result : Exec(load_dbhandle_, query.c_str());
  // Insert the entry for the root into the metas table.
  const int64 now = Now();
  result = SQLITE_DONE != result ? result :
      Exec(load_dbhandle_, "INSERT INTO metas "
           "( id, metahandle, is_dir, ctime, mtime) "
           "VALUES ( \"r\", 1, 1, ?, ?)",
           now, now);
  result = SQLITE_DONE != result ? result : CreateExtendedAttributeTable();
  return result;
}
| 648 |
| 649 sqlite3* DirectoryBackingStore::LazyGetSaveHandle() { |
| 650 if (!save_dbhandle_ && !OpenAndConfigureHandleHelper(&save_dbhandle_)) { |
| 651 DCHECK(FALSE) << "Unable to open handle for saving"; |
| 652 return NULL; |
| 653 } |
| 654 return save_dbhandle_; |
| 655 } |
| 656 |
| 657 } // namespace syncable |
OLD | NEW |