OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sql/connection.h" | 5 #include "sql/connection.h" |
6 | 6 |
7 #include <string.h> | 7 #include <string.h> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/files/file_path.h" | 10 #include "base/files/file_path.h" |
11 #include "base/files/file_util.h" | 11 #include "base/files/file_util.h" |
| 12 #include "base/json/json_file_value_serializer.h" |
12 #include "base/lazy_instance.h" | 13 #include "base/lazy_instance.h" |
13 #include "base/logging.h" | 14 #include "base/logging.h" |
14 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
15 #include "base/metrics/histogram.h" | 16 #include "base/metrics/histogram.h" |
16 #include "base/metrics/sparse_histogram.h" | 17 #include "base/metrics/sparse_histogram.h" |
17 #include "base/strings/string_split.h" | 18 #include "base/strings/string_split.h" |
18 #include "base/strings/string_util.h" | 19 #include "base/strings/string_util.h" |
19 #include "base/strings/stringprintf.h" | 20 #include "base/strings/stringprintf.h" |
20 #include "base/strings/utf_string_conversions.h" | 21 #include "base/strings/utf_string_conversions.h" |
21 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
250 cache_size); | 251 cache_size); |
251 dump->AddScalar("schema_size", | 252 dump->AddScalar("schema_size", |
252 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | 253 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
253 schema_size); | 254 schema_size); |
254 dump->AddScalar("statement_size", | 255 dump->AddScalar("statement_size", |
255 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | 256 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
256 statement_size); | 257 statement_size); |
257 return true; | 258 return true; |
258 } | 259 } |
259 | 260 |
| 261 // Data is persisted in a file shared between databases in the same directory. |
| 262 // The "sqlite-diag" file contains a dictionary with the version number, and an |
| 263 // array of histogram tags for databases which have been dumped. |
| 264 bool Connection::RegisterIntentToUpload() const { |
| 265 static const char* kVersionKey = "version"; |
| 266 static const char* kDiagnosticDumpsKey = "DiagnosticDumps"; |
| 267 static int kVersion = 1; |
| 268 |
| 269 AssertIOAllowed(); |
| 270 |
| 271 if (histogram_tag_.empty()) |
| 272 return false; |
| 273 |
| 274 if (!is_open()) |
| 275 return false; |
| 276 |
| 277 if (in_memory_) |
| 278 return false; |
| 279 |
| 280 const base::FilePath db_path = DbPath(); |
| 281 if (db_path.empty()) |
| 282 return false; |
| 283 |
| 284 // Put the collection of diagnostic data next to the databases. In most |
| 285 // cases, this is the profile directory, but safe-browsing stores a Cookies |
| 286 // file in the directory above the profile directory. |
| 287 base::FilePath breadcrumb_path( |
| 288 db_path.DirName().Append(FILE_PATH_LITERAL("sqlite-diag"))); |
| 289 |
| 290 // Lock against multiple updates to the diagnostics file. This code should |
| 291 // seldom be called in the first place, and when called it should seldom be |
| 292 // called for multiple databases, and when called for multiple databases there |
| 293 // is _probably_ something systemic wrong with the user's system. So the lock |
| 294 // should never be contended, but when it is the database experience is |
| 295 // already bad. |
| 296 base::AutoLock lock(g_sqlite_init_lock.Get()); |
| 297 |
| 298 scoped_ptr<base::Value> root; |
| 299 if (!base::PathExists(breadcrumb_path)) { |
| 300 scoped_ptr<base::DictionaryValue> root_dict(new base::DictionaryValue()); |
| 301 root_dict->SetInteger(kVersionKey, kVersion); |
| 302 |
| 303 scoped_ptr<base::ListValue> dumps(new base::ListValue); |
| 304 dumps->AppendString(histogram_tag_); |
| 305 root_dict->Set(kDiagnosticDumpsKey, dumps.Pass()); |
| 306 |
| 307 root = root_dict.Pass(); |
| 308 } else { |
| 309 // Failure to read a valid dictionary implies that something is going wrong |
| 310 // on the system. |
| 311 JSONFileValueDeserializer deserializer(breadcrumb_path); |
| 312 scoped_ptr<base::Value> read_root( |
| 313 deserializer.Deserialize(nullptr, nullptr)); |
| 314 if (!read_root.get()) |
| 315 return false; |
| 316 scoped_ptr<base::DictionaryValue> root_dict = |
| 317 base::DictionaryValue::From(read_root.Pass()); |
| 318 if (!root_dict) |
| 319 return false; |
| 320 |
| 321 // Don't upload if the version is missing or newer. |
| 322 int version = 0; |
| 323 if (!root_dict->GetInteger(kVersionKey, &version) || version > kVersion) |
| 324 return false; |
| 325 |
| 326 base::ListValue* dumps = nullptr; |
| 327 if (!root_dict->GetList(kDiagnosticDumpsKey, &dumps)) |
| 328 return false; |
| 329 |
| 330 const size_t size = dumps->GetSize(); |
| 331 for (size_t i = 0; i < size; ++i) { |
| 332 std::string s; |
| 333 |
| 334 // Don't upload if the value isn't a string, or indicates a prior upload. |
| 335 if (!dumps->GetString(i, &s) || s == histogram_tag_) |
| 336 return false; |
| 337 } |
| 338 |
| 339 // Record intention to proceed with upload. |
| 340 dumps->AppendString(histogram_tag_); |
| 341 root = root_dict.Pass(); |
| 342 } |
| 343 |
| 344 const base::FilePath breadcrumb_new = |
| 345 breadcrumb_path.AddExtension(FILE_PATH_LITERAL("new")); |
| 346 base::DeleteFile(breadcrumb_new, false); |
| 347 |
| 348 // No upload if the breadcrumb file cannot be updated. |
| 349 // TODO(shess): Consider ImportantFileWriter::WriteFileAtomically() to land |
| 350 // the data on disk. For now, losing the data is not a big problem, so the |
| 351 // sync overhead would probably not be worth it. |
| 352 JSONFileValueSerializer serializer(breadcrumb_new); |
| 353 if (!serializer.Serialize(*root)) |
| 354 return false; |
| 355 if (!base::PathExists(breadcrumb_new)) |
| 356 return false; |
| 357 if (!base::ReplaceFile(breadcrumb_new, breadcrumb_path, nullptr)) { |
| 358 base::DeleteFile(breadcrumb_new, false); |
| 359 return false; |
| 360 } |
| 361 |
| 362 return true; |
| 363 } |
| 364 |
260 // static | 365 // static |
261 void Connection::SetErrorIgnorer(Connection::ErrorIgnorerCallback* cb) { | 366 void Connection::SetErrorIgnorer(Connection::ErrorIgnorerCallback* cb) { |
262 CHECK(current_ignorer_cb_ == NULL); | 367 CHECK(current_ignorer_cb_ == NULL); |
263 current_ignorer_cb_ = cb; | 368 current_ignorer_cb_ = cb; |
264 } | 369 } |
265 | 370 |
266 // static | 371 // static |
267 void Connection::ResetErrorIgnorer() { | 372 void Connection::ResetErrorIgnorer() { |
268 CHECK(current_ignorer_cb_); | 373 CHECK(current_ignorer_cb_); |
269 current_ignorer_cb_ = NULL; | 374 current_ignorer_cb_ = NULL; |
(...skipping 305 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
575 // If no changes have been made, skip flushing. This allows the first page of | 680 // If no changes have been made, skip flushing. This allows the first page of |
576 // the database to remain in cache across multiple reads. | 681 // the database to remain in cache across multiple reads. |
577 const int total_changes = sqlite3_total_changes(db_); | 682 const int total_changes = sqlite3_total_changes(db_); |
578 if (total_changes == total_changes_at_last_release_) | 683 if (total_changes == total_changes_at_last_release_) |
579 return; | 684 return; |
580 | 685 |
581 total_changes_at_last_release_ = total_changes; | 686 total_changes_at_last_release_ = total_changes; |
582 sqlite3_db_release_memory(db_); | 687 sqlite3_db_release_memory(db_); |
583 } | 688 } |
584 | 689 |
| 690 base::FilePath Connection::DbPath() const { |
| 691 if (!is_open()) |
| 692 return base::FilePath(); |
| 693 |
| 694 const char* path = sqlite3_db_filename(db_, "main"); |
| 695 const base::StringPiece db_path(path); |
| 696 #if defined(OS_WIN) |
| 697 return base::FilePath(base::UTF8ToWide(db_path)); |
| 698 #elif defined(OS_POSIX) |
| 699 return base::FilePath(db_path); |
| 700 #else |
| 701 NOTREACHED(); |
| 702 return base::FilePath(); |
| 703 #endif |
| 704 } |
| 705 |
585 void Connection::TrimMemory(bool aggressively) { | 706 void Connection::TrimMemory(bool aggressively) { |
586 if (!db_) | 707 if (!db_) |
587 return; | 708 return; |
588 | 709 |
589 // TODO(shess): investigate using sqlite3_db_release_memory() when possible. | 710 // TODO(shess): investigate using sqlite3_db_release_memory() when possible. |
590 int original_cache_size; | 711 int original_cache_size; |
591 { | 712 { |
592 Statement sql_get_original(GetUniqueStatement("PRAGMA cache_size")); | 713 Statement sql_get_original(GetUniqueStatement("PRAGMA cache_size")); |
593 if (!sql_get_original.Step()) { | 714 if (!sql_get_original.Step()) { |
594 DLOG(WARNING) << "Could not get cache size " << GetErrorMessage(); | 715 DLOG(WARNING) << "Could not get cache size " << GetErrorMessage(); |
(...skipping 932 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1527 ignore_result(Execute(kNoWritableSchema)); | 1648 ignore_result(Execute(kNoWritableSchema)); |
1528 | 1649 |
1529 return ret; | 1650 return ret; |
1530 } | 1651 } |
1531 | 1652 |
base::TimeTicks TimeSource::Now() {
  // Thin wrapper over the monotonic clock; NOTE(review): presumably exists so
  // tests can substitute a fake time source -- confirm against TimeSource's
  // declaration.
  return base::TimeTicks::Now();
}
1535 | 1656 |
1536 } // namespace sql | 1657 } // namespace sql |
OLD | NEW |