| OLD | NEW |
| 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/backend_impl.h" | 5 #include "net/disk_cache/backend_impl.h" |
| 6 | 6 |
| 7 #include "base/field_trial.h" | 7 #include "base/field_trial.h" |
| 8 #include "base/file_path.h" | 8 #include "base/file_path.h" |
| 9 #include "base/file_util.h" | 9 #include "base/file_util.h" |
| 10 #include "base/histogram.h" | 10 #include "base/histogram.h" |
| (...skipping 144 matching lines...) |
| 155 if (!first) | 155 if (!first) |
| 156 return; | 156 return; |
| 157 | 157 |
| 158 // Field trials involve static objects so we have to do this only once. | 158 // Field trials involve static objects so we have to do this only once. |
| 159 first = false; | 159 first = false; |
| 160 scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10); | 160 scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10); |
| 161 std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group); | 161 std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group); |
| 162 trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability); | 162 trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability); |
| 163 } | 163 } |
| 164 | 164 |
| 165 // ------------------------------------------------------------------------ |
| 166 |
| 167 // This class takes care of building an instance of the backend. |
| 168 class CacheCreator { |
| 169 public: |
| 170 CacheCreator(const FilePath& path, bool force, int max_bytes, |
| 171 net::CacheType type, uint32 flags, |
| 172 base::MessageLoopProxy* thread, disk_cache::Backend** backend, |
| 173 net::CompletionCallback* callback) |
| 174 : path_(path), force_(force), retry_(false), max_bytes_(max_bytes), |
| 175 type_(type), flags_(flags), thread_(thread), backend_(backend), |
| 176 callback_(callback), cache_(NULL), |
| 177 ALLOW_THIS_IN_INITIALIZER_LIST( |
| 178 my_callback_(this, &CacheCreator::OnIOComplete)) { |
| 179 } |
| 180 ~CacheCreator() {} |
| 181 |
| 182 // Creates the backend. |
| 183 int Run(); |
| 184 |
| 185 // Callback implementation. |
| 186 void OnIOComplete(int result); |
| 187 |
| 188 private: |
| 189 void DoCallback(int result); |
| 190 |
| 191 const FilePath& path_; |
| 192 bool force_; |
| 193 bool retry_; |
| 194 int max_bytes_; |
| 195 net::CacheType type_; |
| 196 uint32 flags_; |
| 197 scoped_refptr<base::MessageLoopProxy> thread_; |
| 198 disk_cache::Backend** backend_; |
| 199 net::CompletionCallback* callback_; |
| 200 disk_cache::BackendImpl* cache_; |
| 201 net::CompletionCallbackImpl<CacheCreator> my_callback_; |
| 202 |
| 203 DISALLOW_COPY_AND_ASSIGN(CacheCreator); |
| 204 }; |
| 205 |
| 206 int CacheCreator::Run() { |
| 207 cache_ = new disk_cache::BackendImpl(path_, thread_); |
| 208 cache_->SetMaxSize(max_bytes_); |
| 209 cache_->SetType(type_); |
| 210 cache_->SetFlags(flags_); |
| 211 int rv = cache_->Init(&my_callback_); |
| 212 DCHECK_EQ(net::ERR_IO_PENDING, rv); |
| 213 return rv; |
| 214 } |
| 215 |
| 216 void CacheCreator::OnIOComplete(int result) { |
| 217 if (result == net::OK || !force_ || retry_) |
| 218 return DoCallback(result); |
| 219 |
| 220 // This is a failure and we are supposed to try again, so delete the object, |
| 221 // delete all the files, and try again. |
| 222 retry_ = true; |
| 223 delete cache_; |
| 224 cache_ = NULL; |
| 225 if (!DelayedCacheCleanup(path_)) |
| 226 return DoCallback(result); |
| 227 |
| 228 // The worker thread will start deleting files soon, but the original folder |
| 229 // is not there anymore... let's create a new set of files. |
| 230 int rv = Run(); |
| 231 DCHECK_EQ(net::ERR_IO_PENDING, rv); |
| 232 } |
| 233 |
| 234 void CacheCreator::DoCallback(int result) { |
| 235 DCHECK_NE(net::ERR_IO_PENDING, result); |
| 236 if (result == net::OK) { |
| 237 *backend_ = cache_; |
| 238 } else { |
| 239 LOG(ERROR) << "Unable to create cache"; |
| 240 *backend_ = NULL; |
| 241 delete cache_; |
| 242 } |
| 243 callback_->Run(result); |
| 244 delete this; |
| 245 } |
| 246 |
| 247 // ------------------------------------------------------------------------ |
| 248 |
| 249 // A task to perform final cleanup on the background thread. |
| 250 class FinalCleanup : public Task { |
| 251 public: |
| 252 explicit FinalCleanup(disk_cache::BackendImpl* backend) : backend_(backend) {} |
| 253 ~FinalCleanup() {} |
| 254 |
| 255 virtual void Run(); |
| 256 private: |
| 257 disk_cache::BackendImpl* backend_; |
| 258 DISALLOW_EVIL_CONSTRUCTORS(FinalCleanup); |
| 259 }; |
| 260 |
| 261 void FinalCleanup::Run() { |
| 262 backend_->CleanupCache(); |
| 263 } |
| 264 |
| 165 } // namespace | 265 } // namespace |
| 166 | 266 |
| 167 // ------------------------------------------------------------------------ | 267 // ------------------------------------------------------------------------ |
| 168 | 268 |
| 169 namespace disk_cache { | 269 namespace disk_cache { |
| 170 | 270 |
| 171 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes, | 271 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes, |
| 172 bool force, base::MessageLoopProxy* thread, | 272 bool force, base::MessageLoopProxy* thread, |
| 173 Backend** backend, CompletionCallback* callback) { | 273 Backend** backend, CompletionCallback* callback) { |
| 174 DCHECK(callback); | 274 DCHECK(callback); |
| (...skipping 46 matching lines...) |
| 221 // still fail if we are not able to rename the cache folder (for instance due to | 321 // still fail if we are not able to rename the cache folder (for instance due to |
| 222 // a sharing violation), and in that case a cache for this profile (on the | 322 // a sharing violation), and in that case a cache for this profile (on the |
| 223 // desired path) cannot be created. | 323 // desired path) cannot be created. |
| 224 // | 324 // |
| 225 // Static. | 325 // Static. |
| 226 int BackendImpl::CreateBackend(const FilePath& full_path, bool force, | 326 int BackendImpl::CreateBackend(const FilePath& full_path, bool force, |
| 227 int max_bytes, net::CacheType type, | 327 int max_bytes, net::CacheType type, |
| 228 uint32 flags, base::MessageLoopProxy* thread, | 328 uint32 flags, base::MessageLoopProxy* thread, |
| 229 Backend** backend, | 329 Backend** backend, |
| 230 CompletionCallback* callback) { | 330 CompletionCallback* callback) { |
| 231 BackendImpl* cache = new BackendImpl(full_path, thread); | 331 CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type, |
| 232 cache->SetMaxSize(max_bytes); | 332 flags, thread, backend, callback); |
| 233 cache->SetType(type); | 333 // This object will self-destroy when finished. |
| 234 cache->SetFlags(flags); | 334 return creator->Run(); |
| 235 if (cache->Init()) { | 335 } |
| 236 *backend = cache; | 336 |
| 337 int BackendImpl::SyncInit() { |
| 338 if (Init()) |
| 237 return net::OK; | 339 return net::OK; |
| 238 } | |
| 239 | 340 |
| 240 *backend = NULL; | |
| 241 delete cache; | |
| 242 if (!force) | |
| 243 return net::ERR_FAILED; | |
| 244 | |
| 245 if (!DelayedCacheCleanup(full_path)) | |
| 246 return net::ERR_FAILED; | |
| 247 | |
| 248 // The worker thread will start deleting files soon, but the original folder | |
| 249 // is not there anymore... let's create a new set of files. | |
| 250 cache = new BackendImpl(full_path, thread); | |
| 251 cache->SetMaxSize(max_bytes); | |
| 252 cache->SetType(type); | |
| 253 cache->SetFlags(flags); | |
| 254 if (cache->Init()) { | |
| 255 *backend = cache; | |
| 256 return net::OK; | |
| 257 } | |
| 258 | |
| 259 delete cache; | |
| 260 LOG(ERROR) << "Unable to create cache"; | |
| 261 return net::ERR_FAILED; | 341 return net::ERR_FAILED; |
| 262 } | 342 } |
| 263 | 343 |
| 264 bool BackendImpl::Init() { | 344 bool BackendImpl::Init() { |
| 265 DCHECK(!init_); | 345 DCHECK(!init_); |
| 266 if (init_) | 346 if (init_) |
| 267 return false; | 347 return false; |
| 268 | 348 |
| 269 bool create_files = false; | 349 bool create_files = false; |
| 270 if (!InitBackingStore(&create_files)) { | 350 if (!InitBackingStore(&create_files)) { |
| (...skipping 26 matching lines...) |
| 297 new_eviction_ = (cache_type_ == net::DISK_CACHE); | 377 new_eviction_ = (cache_type_ == net::DISK_CACHE); |
| 298 } | 378 } |
| 299 | 379 |
| 300 if (!CheckIndex()) { | 380 if (!CheckIndex()) { |
| 301 ReportError(ERR_INIT_FAILED); | 381 ReportError(ERR_INIT_FAILED); |
| 302 return false; | 382 return false; |
| 303 } | 383 } |
| 304 | 384 |
| 305 // We don't care if the value overflows. The only thing we care about is that | 385 // We don't care if the value overflows. The only thing we care about is that |
| 306 // the id cannot be zero, because that value is used as "not dirty". | 386 // the id cannot be zero, because that value is used as "not dirty". |
| 307 // Increasing the value once per second gives us many years before a we start | 387 // Increasing the value once per second gives us many years before we start |
| 308 // having collisions. | 388 // having collisions. |
| 309 data_->header.this_id++; | 389 data_->header.this_id++; |
| 310 if (!data_->header.this_id) | 390 if (!data_->header.this_id) |
| 311 data_->header.this_id++; | 391 data_->header.this_id++; |
| 312 | 392 |
| 313 if (data_->header.crash) { | 393 if (data_->header.crash) { |
| 314 ReportError(ERR_PREVIOUS_CRASH); | 394 ReportError(ERR_PREVIOUS_CRASH); |
| 315 } else { | 395 } else { |
| 316 ReportError(0); | 396 ReportError(0); |
| 317 data_->header.crash = 1; | 397 data_->header.crash = 1; |
| (...skipping 10 matching lines...) |
| 328 disabled_ = !rankings_.Init(this, new_eviction_); | 408 disabled_ = !rankings_.Init(this, new_eviction_); |
| 329 eviction_.Init(this); | 409 eviction_.Init(this); |
| 330 | 410 |
| 331 // Setup load-time data only for the main cache. | 411 // Setup load-time data only for the main cache. |
| 332 if (cache_type() == net::DISK_CACHE) | 412 if (cache_type() == net::DISK_CACHE) |
| 333 SetFieldTrialInfo(GetSizeGroup()); | 413 SetFieldTrialInfo(GetSizeGroup()); |
| 334 | 414 |
| 335 return !disabled_; | 415 return !disabled_; |
| 336 } | 416 } |
| 337 | 417 |
| 418 int BackendImpl::Init(CompletionCallback* callback) { |
| 419 background_queue_.Init(callback); |
| 420 return net::ERR_IO_PENDING; |
| 421 } |
| 422 |
| 338 BackendImpl::~BackendImpl() { | 423 BackendImpl::~BackendImpl() { |
| 339 Trace("Backend destructor"); | 424 background_queue_.WaitForPendingIO(); |
| 340 if (!init_) | |
| 341 return; | |
| 342 | 425 |
| 343 if (data_) | 426 if (background_queue_.BackgroundIsCurrentThread()) { |
| 344 data_->header.crash = 0; | 427 // Unit tests may use the same thread for everything. |
| 428 CleanupCache(); |
| 429 } else { |
| 430 background_queue_.background_thread()->PostTask(FROM_HERE, |
| 431 new FinalCleanup(this)); |
| 432 done_.Wait(); |
| 433 } |
| 434 } |
| 345 | 435 |
| 346 timer_.Stop(); | 436 void BackendImpl::CleanupCache() { |
| 437 Trace("Backend Cleanup"); |
| 438 if (init_) { |
| 439 if (data_) |
| 440 data_->header.crash = 0; |
| 347 | 441 |
| 348 File::WaitForPendingIO(&num_pending_io_); | 442 timer_.Stop(); |
| 349 DCHECK(!num_refs_); | 443 File::WaitForPendingIO(&num_pending_io_); |
| 444 DCHECK(!num_refs_); |
| 445 } |
| 446 factory_.RevokeAll(); |
| 447 done_.Signal(); |
| 350 } | 448 } |
| 351 | 449 |
| 352 // ------------------------------------------------------------------------ | 450 // ------------------------------------------------------------------------ |
| 353 | 451 |
| 354 int32 BackendImpl::GetEntryCount() const { | 452 int32 BackendImpl::GetEntryCount() const { |
| 355 if (!index_) | 453 if (!index_) |
| 356 return 0; | 454 return 0; |
| 357 // num_entries includes entries already evicted. | 455 // num_entries includes entries already evicted. |
| 358 int32 not_deleted = data_->header.num_entries - | 456 int32 not_deleted = data_->header.num_entries - |
| 359 data_->header.lru.sizes[Rankings::DELETED]; | 457 data_->header.lru.sizes[Rankings::DELETED]; |
| (...skipping 26 matching lines...) |
| 386 return NULL; | 484 return NULL; |
| 387 } | 485 } |
| 388 | 486 |
| 389 eviction_.OnOpenEntry(cache_entry); | 487 eviction_.OnOpenEntry(cache_entry); |
| 390 | 488 |
| 391 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start); | 489 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start); |
| 392 stats_.OnEvent(Stats::OPEN_HIT); | 490 stats_.OnEvent(Stats::OPEN_HIT); |
| 393 return cache_entry; | 491 return cache_entry; |
| 394 } | 492 } |
| 395 | 493 |
| 396 bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) { | 494 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) { |
| 397 DCHECK(entry); | 495 DCHECK(entry); |
| 398 *entry = OpenEntryImpl(key); | 496 *entry = OpenEntryImpl(key); |
| 399 return (*entry) ? true : false; | 497 return (*entry) ? net::OK : net::ERR_FAILED; |
| 400 } | 498 } |
| 401 | 499 |
| 402 int BackendImpl::OpenEntry(const std::string& key, Entry** entry, | 500 int BackendImpl::OpenEntry(const std::string& key, Entry** entry, |
| 403 CompletionCallback* callback) { | 501 CompletionCallback* callback) { |
| 404 if (OpenEntry(key, entry)) | 502 DCHECK(callback); |
| 405 return net::OK; | 503 background_queue_.OpenEntry(key, entry, callback); |
| 406 | 504 return net::ERR_IO_PENDING; |
| 407 return net::ERR_FAILED; | |
| 408 } | 505 } |
| 409 | 506 |
| 410 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { | 507 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { |
| 411 if (disabled_ || key.empty()) | 508 if (disabled_ || key.empty()) |
| 412 return NULL; | 509 return NULL; |
| 413 | 510 |
| 414 TimeTicks start = TimeTicks::Now(); | 511 TimeTicks start = TimeTicks::Now(); |
| 415 uint32 hash = Hash(key); | 512 uint32 hash = Hash(key); |
| 416 | 513 |
| 417 scoped_refptr<EntryImpl> parent; | 514 scoped_refptr<EntryImpl> parent; |
| (...skipping 59 matching lines...) |
| 477 eviction_.OnCreateEntry(cache_entry); | 574 eviction_.OnCreateEntry(cache_entry); |
| 478 if (!parent.get()) | 575 if (!parent.get()) |
| 479 data_->table[hash & mask_] = entry_address.value(); | 576 data_->table[hash & mask_] = entry_address.value(); |
| 480 | 577 |
| 481 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start); | 578 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start); |
| 482 stats_.OnEvent(Stats::CREATE_HIT); | 579 stats_.OnEvent(Stats::CREATE_HIT); |
| 483 Trace("create entry hit "); | 580 Trace("create entry hit "); |
| 484 return cache_entry.release(); | 581 return cache_entry.release(); |
| 485 } | 582 } |
| 486 | 583 |
| 487 bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) { | 584 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) { |
| 488 DCHECK(entry); | 585 DCHECK(entry); |
| 489 *entry = CreateEntryImpl(key); | 586 *entry = CreateEntryImpl(key); |
| 490 return (*entry) ? true : false; | 587 return (*entry) ? net::OK : net::ERR_FAILED; |
| 491 } | 588 } |
| 492 | 589 |
| 493 int BackendImpl::CreateEntry(const std::string& key, Entry** entry, | 590 int BackendImpl::CreateEntry(const std::string& key, Entry** entry, |
| 494 CompletionCallback* callback) { | 591 CompletionCallback* callback) { |
| 495 if (CreateEntry(key, entry)) | 592 DCHECK(callback); |
| 593 background_queue_.CreateEntry(key, entry, callback); |
| 594 return net::ERR_IO_PENDING; |
| 595 } |
| 596 |
| 597 int BackendImpl::SyncDoomEntry(const std::string& key) { |
| 598 if (DoomEntry(key)) |
| 496 return net::OK; | 599 return net::OK; |
| 497 | 600 |
| 498 return net::ERR_FAILED; | 601 return net::ERR_FAILED; |
| 499 } | 602 } |
| 500 | 603 |
| 501 bool BackendImpl::DoomEntry(const std::string& key) { | 604 bool BackendImpl::DoomEntry(const std::string& key) { |
| 502 if (disabled_) | 605 if (disabled_) |
| 503 return false; | 606 return false; |
| 504 | 607 |
| 505 Entry* entry; | 608 EntryImpl* entry = OpenEntryImpl(key); |
| 506 if (!OpenEntry(key, &entry)) | 609 if (!entry) |
| 507 return false; | 610 return false; |
| 508 | 611 |
| 509 // Note that you'd think you could just pass &entry_impl to OpenEntry, | 612 entry->DoomImpl(); |
| 510 // but that triggers strict aliasing problems with gcc. | 613 entry->Release(); |
| 511 EntryImpl* entry_impl = reinterpret_cast<EntryImpl*>(entry); | |
| 512 entry_impl->Doom(); | |
| 513 entry_impl->Release(); | |
| 514 return true; | 614 return true; |
| 515 } | 615 } |
| 516 | 616 |
| 517 int BackendImpl::DoomEntry(const std::string& key, | 617 int BackendImpl::DoomEntry(const std::string& key, |
| 518 CompletionCallback* callback) { | 618 CompletionCallback* callback) { |
| 519 if (DoomEntry(key)) | 619 DCHECK(callback); |
| 620 background_queue_.DoomEntry(key, callback); |
| 621 return net::ERR_IO_PENDING; |
| 622 } |
| 623 |
| 624 int BackendImpl::SyncDoomAllEntries() { |
| 625 if (DoomAllEntries()) |
| 520 return net::OK; | 626 return net::OK; |
| 521 | 627 |
| 522 return net::ERR_FAILED; | 628 return net::ERR_FAILED; |
| 523 } | 629 } |
| 524 | 630 |
| 525 bool BackendImpl::DoomAllEntries() { | 631 bool BackendImpl::DoomAllEntries() { |
| 526 if (!num_refs_) { | 632 if (!num_refs_) { |
| 527 PrepareForRestart(); | 633 PrepareForRestart(); |
| 528 DeleteCache(path_, false); | 634 DeleteCache(path_, false); |
| 529 return Init(); | 635 return Init(); |
| 530 } else { | 636 } else { |
| 531 if (disabled_) | 637 if (disabled_) |
| 532 return false; | 638 return false; |
| 533 | 639 |
| 534 eviction_.TrimCache(true); | 640 eviction_.TrimCache(true); |
| 535 stats_.OnEvent(Stats::DOOM_CACHE); | 641 stats_.OnEvent(Stats::DOOM_CACHE); |
| 536 return true; | 642 return true; |
| 537 } | 643 } |
| 538 } | 644 } |
| 539 | 645 |
| 540 int BackendImpl::DoomAllEntries(CompletionCallback* callback) { | 646 int BackendImpl::DoomAllEntries(CompletionCallback* callback) { |
| 541 if (DoomAllEntries()) | 647 DCHECK(callback); |
| 648 background_queue_.DoomAllEntries(callback); |
| 649 return net::ERR_IO_PENDING; |
| 650 } |
| 651 |
| 652 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time, |
| 653 const base::Time end_time) { |
| 654 if (DoomEntriesBetween(initial_time, end_time)) |
| 542 return net::OK; | 655 return net::OK; |
| 543 | 656 |
| 544 return net::ERR_FAILED; | 657 return net::ERR_FAILED; |
| 545 } | 658 } |
| 546 | 659 |
| 547 bool BackendImpl::DoomEntriesBetween(const Time initial_time, | 660 bool BackendImpl::DoomEntriesBetween(const Time initial_time, |
| 548 const Time end_time) { | 661 const Time end_time) { |
| 549 if (end_time.is_null()) | 662 if (end_time.is_null()) |
| 550 return DoomEntriesSince(initial_time); | 663 return DoomEntriesSince(initial_time); |
| 551 | 664 |
| 552 DCHECK(end_time >= initial_time); | 665 DCHECK(end_time >= initial_time); |
| 553 | 666 |
| 554 if (disabled_) | 667 if (disabled_) |
| 555 return false; | 668 return false; |
| 556 | 669 |
| 557 Entry* node, *next; | 670 EntryImpl* node; |
| 558 void* iter = NULL; | 671 void* iter = NULL; |
| 559 if (!OpenNextEntry(&iter, &next)) | 672 EntryImpl* next = OpenNextEntryImpl(&iter); |
| 673 if (!next) |
| 560 return true; | 674 return true; |
| 561 | 675 |
| 562 while (next) { | 676 while (next) { |
| 563 node = next; | 677 node = next; |
| 564 if (!OpenNextEntry(&iter, &next)) | 678 next = OpenNextEntryImpl(&iter); |
| 565 next = NULL; | |
| 566 | 679 |
| 567 if (node->GetLastUsed() >= initial_time && | 680 if (node->GetLastUsed() >= initial_time && |
| 568 node->GetLastUsed() < end_time) { | 681 node->GetLastUsed() < end_time) { |
| 569 node->Doom(); | 682 node->DoomImpl(); |
| 570 } else if (node->GetLastUsed() < initial_time) { | 683 } else if (node->GetLastUsed() < initial_time) { |
| 571 if (next) | 684 if (next) |
| 572 next->Close(); | 685 next->Release(); |
| 573 next = NULL; | 686 next = NULL; |
| 574 EndEnumeration(&iter); | 687 SyncEndEnumeration(iter); |
| 575 } | 688 } |
| 576 | 689 |
| 577 node->Close(); | 690 node->Release(); |
| 578 } | 691 } |
| 579 | 692 |
| 580 return true; | 693 return true; |
| 581 } | 694 } |
| 582 | 695 |
| 583 int BackendImpl::DoomEntriesBetween(const base::Time initial_time, | 696 int BackendImpl::DoomEntriesBetween(const base::Time initial_time, |
| 584 const base::Time end_time, | 697 const base::Time end_time, |
| 585 CompletionCallback* callback) { | 698 CompletionCallback* callback) { |
| 586 if (DoomEntriesBetween(initial_time, end_time)) | 699 DCHECK(callback); |
| 700 background_queue_.DoomEntriesBetween(initial_time, end_time, callback); |
| 701 return net::ERR_IO_PENDING; |
| 702 } |
| 703 |
| 704 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) { |
| 705 if (DoomEntriesSince(initial_time)) |
| 587 return net::OK; | 706 return net::OK; |
| 588 | 707 |
| 589 return net::ERR_FAILED; | 708 return net::ERR_FAILED; |
| 590 } | 709 } |
| 591 | 710 |
| 592 // We use OpenNextEntry to retrieve elements from the cache, until we get | 711 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get |
| 593 // entries that are too old. | 712 // entries that are too old. |
| 594 bool BackendImpl::DoomEntriesSince(const Time initial_time) { | 713 bool BackendImpl::DoomEntriesSince(const Time initial_time) { |
| 595 if (disabled_) | 714 if (disabled_) |
| 596 return false; | 715 return false; |
| 597 | 716 |
| 598 for (;;) { | 717 for (;;) { |
| 599 Entry* entry; | |
| 600 void* iter = NULL; | 718 void* iter = NULL; |
| 601 if (!OpenNextEntry(&iter, &entry)) | 719 EntryImpl* entry = OpenNextEntryImpl(&iter); |
| 720 if (!entry) |
| 602 return true; | 721 return true; |
| 603 | 722 |
| 604 if (initial_time > entry->GetLastUsed()) { | 723 if (initial_time > entry->GetLastUsed()) { |
| 605 entry->Close(); | 724 entry->Release(); |
| 606 EndEnumeration(&iter); | 725 SyncEndEnumeration(iter); |
| 607 return true; | 726 return true; |
| 608 } | 727 } |
| 609 | 728 |
| 610 entry->Doom(); | 729 entry->DoomImpl(); |
| 611 entry->Close(); | 730 entry->Release(); |
| 612 EndEnumeration(&iter); // Dooming the entry invalidates the iterator. | 731 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator. |
| 613 } | 732 } |
| 614 } | 733 } |
| 615 | 734 |
| 616 int BackendImpl::DoomEntriesSince(const base::Time initial_time, | 735 int BackendImpl::DoomEntriesSince(const base::Time initial_time, |
| 617 CompletionCallback* callback) { | 736 CompletionCallback* callback) { |
| 618 if (DoomEntriesSince(initial_time)) | 737 DCHECK(callback); |
| 619 return net::OK; | 738 background_queue_.DoomEntriesSince(initial_time, callback); |
| 620 | 739 return net::ERR_IO_PENDING; |
| 621 return net::ERR_FAILED; | |
| 622 } | 740 } |
| 623 | 741 |
| 624 bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) { | 742 int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) { |
| 625 return OpenFollowingEntry(true, iter, next_entry); | 743 *next_entry = OpenNextEntryImpl(iter); |
| 744 return (*next_entry) ? net::OK : net::ERR_FAILED; |
| 745 } |
| 746 |
| 747 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) { |
| 748 return OpenFollowingEntry(true, iter); |
| 626 } | 749 } |
| 627 | 750 |
| 628 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, | 751 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, |
| 629 CompletionCallback* callback) { | 752 CompletionCallback* callback) { |
| 630 if (OpenNextEntry(iter, next_entry)) | 753 DCHECK(callback); |
| 631 return net::OK; | 754 background_queue_.OpenNextEntry(iter, next_entry, callback); |
| 755 return net::ERR_IO_PENDING; |
| 756 } |
| 632 | 757 |
| 633 return net::ERR_FAILED; | 758 void BackendImpl::SyncEndEnumeration(void* iter) { |
| 759 scoped_ptr<Rankings::Iterator> iterator( |
| 760 reinterpret_cast<Rankings::Iterator*>(iter)); |
| 634 } | 761 } |
| 635 | 762 |
| 636 void BackendImpl::EndEnumeration(void** iter) { | 763 void BackendImpl::EndEnumeration(void** iter) { |
| 637 scoped_ptr<Rankings::Iterator> iterator( | 764 background_queue_.EndEnumeration(*iter); |
| 638 reinterpret_cast<Rankings::Iterator*>(*iter)); | |
| 639 *iter = NULL; | 765 *iter = NULL; |
| 640 } | 766 } |
| 641 | 767 |
| 642 void BackendImpl::GetStats(StatsItems* stats) { | 768 void BackendImpl::GetStats(StatsItems* stats) { |
| 643 if (disabled_) | 769 if (disabled_) |
| 644 return; | 770 return; |
| 645 | 771 |
| 646 std::pair<std::string, std::string> item; | 772 std::pair<std::string, std::string> item; |
| 647 | 773 |
| 648 item.first = "Entries"; | 774 item.first = "Entries"; |
| (...skipping 360 matching lines...) |
| 1009 } | 1135 } |
| 1010 | 1136 |
| 1011 void BackendImpl::SetFlags(uint32 flags) { | 1137 void BackendImpl::SetFlags(uint32 flags) { |
| 1012 user_flags_ |= flags; | 1138 user_flags_ |= flags; |
| 1013 } | 1139 } |
| 1014 | 1140 |
| 1015 void BackendImpl::ClearRefCountForTest() { | 1141 void BackendImpl::ClearRefCountForTest() { |
| 1016 num_refs_ = 0; | 1142 num_refs_ = 0; |
| 1017 } | 1143 } |
| 1018 | 1144 |
| 1145 int BackendImpl::FlushQueueForTest(CompletionCallback* callback) { |
| 1146 background_queue_.FlushQueue(callback); |
| 1147 return net::ERR_IO_PENDING; |
| 1148 } |
| 1149 |
| 1019 int BackendImpl::SelfCheck() { | 1150 int BackendImpl::SelfCheck() { |
| 1020 if (!init_) { | 1151 if (!init_) { |
| 1021 LOG(ERROR) << "Init failed"; | 1152 LOG(ERROR) << "Init failed"; |
| 1022 return ERR_INIT_FAILED; | 1153 return ERR_INIT_FAILED; |
| 1023 } | 1154 } |
| 1024 | 1155 |
| 1025 int num_entries = rankings_.SelfCheck(); | 1156 int num_entries = rankings_.SelfCheck(); |
| 1026 if (num_entries < 0) { | 1157 if (num_entries < 0) { |
| 1027 LOG(ERROR) << "Invalid rankings list, error " << num_entries; | 1158 LOG(ERROR) << "Invalid rankings list, error " << num_entries; |
| 1028 return num_entries; | 1159 return num_entries; |
| 1029 } | 1160 } |
| 1030 | 1161 |
| 1031 if (num_entries != data_->header.num_entries) { | 1162 if (num_entries != data_->header.num_entries) { |
| 1032 LOG(ERROR) << "Number of entries mismatch"; | 1163 LOG(ERROR) << "Number of entries mismatch"; |
| 1033 return ERR_NUM_ENTRIES_MISMATCH; | 1164 return ERR_NUM_ENTRIES_MISMATCH; |
| 1034 } | 1165 } |
| 1035 | 1166 |
| 1036 return CheckAllEntries(); | 1167 return CheckAllEntries(); |
| 1037 } | 1168 } |
| 1038 | 1169 |
| 1039 bool BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry) { | 1170 int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) { |
| 1040 return OpenFollowingEntry(false, iter, prev_entry); | 1171 *prev_entry = OpenPrevEntryImpl(iter); |
| 1172 return (*prev_entry) ? net::OK : net::ERR_FAILED; |
| 1173 } |
| 1174 |
| 1175 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry, |
| 1176 CompletionCallback* callback) { |
| 1177 DCHECK(callback); |
| 1178 background_queue_.OpenPrevEntry(iter, prev_entry, callback); |
| 1179 return net::ERR_IO_PENDING; |
| 1180 } |
| 1181 |
| 1182 EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) { |
| 1183 return OpenFollowingEntry(false, iter); |
| 1041 } | 1184 } |
| 1042 | 1185 |
| 1043 // ------------------------------------------------------------------------ | 1186 // ------------------------------------------------------------------------ |
| 1044 | 1187 |
| 1045 // We just created a new file so we're going to write the header and set the | 1188 // We just created a new file so we're going to write the header and set the |
| 1046 // file length to include the hash table (zero filled). | 1189 // file length to include the hash table (zero filled). |
| 1047 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { | 1190 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { |
| 1048 AdjustMaxCacheSize(0); | 1191 AdjustMaxCacheSize(0); |
| 1049 | 1192 |
| 1050 IndexHeader header; | 1193 IndexHeader header; |
| (...skipping 93 matching lines...) |
| 1144 DCHECK(!open_entries_.size()); | 1287 DCHECK(!open_entries_.size()); |
| 1145 PrepareForRestart(); | 1288 PrepareForRestart(); |
| 1146 DelayedCacheCleanup(path_); | 1289 DelayedCacheCleanup(path_); |
| 1147 | 1290 |
| 1148 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); | 1291 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); |
| 1149 | 1292 |
| 1150 // Don't call Init() if directed by the unit test: we are simulating a failure | 1293 // Don't call Init() if directed by the unit test: we are simulating a failure |
| 1151 // trying to re-enable the cache. | 1294 // trying to re-enable the cache. |
| 1152 if (unit_test_) | 1295 if (unit_test_) |
| 1153 init_ = true; // Let the destructor do proper cleanup. | 1296 init_ = true; // Let the destructor do proper cleanup. |
| 1154 else if (Init()) | 1297 else if (SyncInit()) |
| 1155 stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); | 1298 stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); |
| 1156 } | 1299 } |
| 1157 | 1300 |
| 1158 void BackendImpl::PrepareForRestart() { | 1301 void BackendImpl::PrepareForRestart() { |
| 1159 // Reset the mask_ if it was not given by the user. | 1302 // Reset the mask_ if it was not given by the user. |
| 1160 if (!(user_flags_ & kMask)) | 1303 if (!(user_flags_ & kMask)) |
| 1161 mask_ = 0; | 1304 mask_ = 0; |
| 1162 | 1305 |
| 1163 if (!(user_flags_ & kNewEviction)) | 1306 if (!(user_flags_ & kNewEviction)) |
| 1164 new_eviction_ = false; | 1307 new_eviction_ = false; |
| (...skipping 129 matching lines...) |
| 1294 parent_entry = NULL; | 1437 parent_entry = NULL; |
| 1295 | 1438 |
| 1296 if (cache_entry && (find_parent || !found)) | 1439 if (cache_entry && (find_parent || !found)) |
| 1297 cache_entry = NULL; | 1440 cache_entry = NULL; |
| 1298 | 1441 |
| 1299 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp); | 1442 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp); |
| 1300 return tmp; | 1443 return tmp; |
| 1301 } | 1444 } |
| 1302 | 1445 |
| 1303 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. | 1446 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. |
| 1304 bool BackendImpl::OpenFollowingEntry(bool forward, void** iter, | 1447 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) { |
| 1305 Entry** next_entry) { | |
| 1306 if (disabled_) | 1448 if (disabled_) |
| 1307 return false; | 1449 return NULL; |
| 1308 | 1450 |
| 1309 DCHECK(iter); | 1451 DCHECK(iter); |
| 1310 DCHECK(next_entry); | |
| 1311 *next_entry = NULL; | |
| 1312 | 1452 |
| 1313 const int kListsToSearch = 3; | 1453 const int kListsToSearch = 3; |
| 1314 scoped_refptr<EntryImpl> entries[kListsToSearch]; | 1454 scoped_refptr<EntryImpl> entries[kListsToSearch]; |
| 1315 scoped_ptr<Rankings::Iterator> iterator( | 1455 scoped_ptr<Rankings::Iterator> iterator( |
| 1316 reinterpret_cast<Rankings::Iterator*>(*iter)); | 1456 reinterpret_cast<Rankings::Iterator*>(*iter)); |
| 1317 *iter = NULL; | 1457 *iter = NULL; |
| 1318 | 1458 |
| 1319 if (!iterator.get()) { | 1459 if (!iterator.get()) { |
| 1320 iterator.reset(new Rankings::Iterator(&rankings_)); | 1460 iterator.reset(new Rankings::Iterator(&rankings_)); |
| 1321 bool ret = false; | 1461 bool ret = false; |
| 1322 | 1462 |
| 1323 // Get an entry from each list. | 1463 // Get an entry from each list. |
| 1324 for (int i = 0; i < kListsToSearch; i++) { | 1464 for (int i = 0; i < kListsToSearch; i++) { |
| 1325 EntryImpl* temp = NULL; | 1465 EntryImpl* temp = NULL; |
| 1326 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i), | 1466 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i), |
| 1327 &iterator->nodes[i], &temp); | 1467 &iterator->nodes[i], &temp); |
| 1328 entries[i].swap(&temp); // The entry was already addref'd. | 1468 entries[i].swap(&temp); // The entry was already addref'd. |
| 1329 } | 1469 } |
| 1330 if (!ret) | 1470 if (!ret) |
| 1331 return false; | 1471 return NULL; |
| 1332 } else { | 1472 } else { |
| 1333 // Get the next entry from the last list, and the actual entries for the | 1473 // Get the next entry from the last list, and the actual entries for the |
| 1334 // elements on the other lists. | 1474 // elements on the other lists. |
| 1335 for (int i = 0; i < kListsToSearch; i++) { | 1475 for (int i = 0; i < kListsToSearch; i++) { |
| 1336 EntryImpl* temp = NULL; | 1476 EntryImpl* temp = NULL; |
| 1337 if (iterator->list == i) { | 1477 if (iterator->list == i) { |
| 1338 OpenFollowingEntryFromList(forward, iterator->list, | 1478 OpenFollowingEntryFromList(forward, iterator->list, |
| 1339 &iterator->nodes[i], &temp); | 1479 &iterator->nodes[i], &temp); |
| 1340 } else { | 1480 } else { |
| 1341 temp = GetEnumeratedEntry(iterator->nodes[i], false); | 1481 temp = GetEnumeratedEntry(iterator->nodes[i], false); |
| (...skipping 15 matching lines...) |
| 1357 continue; | 1497 continue; |
| 1358 } | 1498 } |
| 1359 if (access_times[i] > access_times[newest]) | 1499 if (access_times[i] > access_times[newest]) |
| 1360 newest = i; | 1500 newest = i; |
| 1361 if (access_times[i] < access_times[oldest]) | 1501 if (access_times[i] < access_times[oldest]) |
| 1362 oldest = i; | 1502 oldest = i; |
| 1363 } | 1503 } |
| 1364 } | 1504 } |
| 1365 | 1505 |
| 1366 if (newest < 0 || oldest < 0) | 1506 if (newest < 0 || oldest < 0) |
| 1367 return false; | 1507 return NULL; |
| 1368 | 1508 |
| 1509 EntryImpl* next_entry; |
| 1369 if (forward) { | 1510 if (forward) { |
| 1370 entries[newest].swap(reinterpret_cast<EntryImpl**>(next_entry)); | 1511 next_entry = entries[newest].release(); |
| 1371 iterator->list = static_cast<Rankings::List>(newest); | 1512 iterator->list = static_cast<Rankings::List>(newest); |
| 1372 } else { | 1513 } else { |
| 1373 entries[oldest].swap(reinterpret_cast<EntryImpl**>(next_entry)); | 1514 next_entry = entries[oldest].release(); |
| 1374 iterator->list = static_cast<Rankings::List>(oldest); | 1515 iterator->list = static_cast<Rankings::List>(oldest); |
| 1375 } | 1516 } |
| 1376 | 1517 |
| 1377 *iter = iterator.release(); | 1518 *iter = iterator.release(); |
| 1378 return true; | 1519 return next_entry; |
| 1379 } | 1520 } |
| 1380 | 1521 |
| 1381 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list, | 1522 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list, |
| 1382 CacheRankingsBlock** from_entry, | 1523 CacheRankingsBlock** from_entry, |
| 1383 EntryImpl** next_entry) { | 1524 EntryImpl** next_entry) { |
| 1384 if (disabled_) | 1525 if (disabled_) |
| 1385 return false; | 1526 return false; |
| 1386 | 1527 |
| 1387 if (!new_eviction_ && Rankings::NO_USE != list) | 1528 if (!new_eviction_ && Rankings::NO_USE != list) |
| 1388 return false; | 1529 return false; |
| (...skipping 28 matching lines...) Expand all Loading... |
| 1417 DestroyInvalidEntryFromEnumeration(entry); | 1558 DestroyInvalidEntryFromEnumeration(entry); |
| 1418 return NULL; | 1559 return NULL; |
| 1419 } | 1560 } |
| 1420 | 1561 |
| 1421 // There is no need to store the entry to disk if we want to delete it. | 1562 // There is no need to store the entry to disk if we want to delete it. |
| 1422 if (!to_evict && !entry->Update()) { | 1563 if (!to_evict && !entry->Update()) { |
| 1423 entry->Release(); | 1564 entry->Release(); |
| 1424 return NULL; | 1565 return NULL; |
| 1425 } | 1566 } |
| 1426 | 1567 |
| 1568 // Make sure that we save the key for later. |
| 1569 entry->GetKey(); |
| 1570 |
| 1427 return entry; | 1571 return entry; |
| 1428 } | 1572 } |
| 1429 | 1573 |
| 1430 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) { | 1574 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) { |
| 1431 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) { | 1575 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) { |
| 1432 deleted_entry->Release(); | 1576 deleted_entry->Release(); |
| 1433 stats_.OnEvent(Stats::CREATE_MISS); | 1577 stats_.OnEvent(Stats::CREATE_MISS); |
| 1434 Trace("create entry miss "); | 1578 Trace("create entry miss "); |
| 1435 return NULL; | 1579 return NULL; |
| 1436 } | 1580 } |
| (...skipping 272 matching lines...) |
| 1709 | 1853 |
| 1710 return num_dirty; | 1854 return num_dirty; |
| 1711 } | 1855 } |
| 1712 | 1856 |
| 1713 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { | 1857 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { |
| 1714 RankingsNode* rankings = cache_entry->rankings()->Data(); | 1858 RankingsNode* rankings = cache_entry->rankings()->Data(); |
| 1715 return !rankings->dummy; | 1859 return !rankings->dummy; |
| 1716 } | 1860 } |
| 1717 | 1861 |
| 1718 } // namespace disk_cache | 1862 } // namespace disk_cache |
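For context, a minimal caller-side sketch of how the now-asynchronous backend creation in this patch would be driven. This is not part of the change: the class name CacheUser, the cache path, and the include list are illustrative assumptions; only the disk_cache::CreateCacheBackend() signature, the net::CompletionCallbackImpl pattern, and the ERR_IO_PENDING contract are taken from the diff above.

// Hypothetical caller-side sketch; everything except the
// disk_cache::CreateCacheBackend() signature and the CompletionCallbackImpl
// pattern (both visible in the patch) is illustrative.
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/file_path.h"
#include "base/logging.h"
#include "base/message_loop_proxy.h"
#include "net/base/cache_type.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"

class CacheUser {
 public:
  explicit CacheUser(base::MessageLoopProxy* cache_thread)
      : backend_(NULL),
        ALLOW_THIS_IN_INITIALIZER_LIST(
            my_callback_(this, &CacheUser::OnBackendCreated)) {
    // Creation is asynchronous: the patch makes CreateCacheBackend() hand the
    // work to a CacheCreator and return ERR_IO_PENDING, so the result only
    // arrives through the callback.
    int rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, FilePath(FILE_PATH_LITERAL("/tmp/cache")),
        0 /* let the backend pick a size */, true /* force */,
        cache_thread, &backend_, &my_callback_);
    DCHECK_EQ(net::ERR_IO_PENDING, rv);
  }

  void OnBackendCreated(int result) {
    if (result != net::OK) {
      // backend_ is NULL: initialization failed even after the forced retry
      // performed by CacheCreator::OnIOComplete().
      return;
    }
    // backend_ now points to a fully initialized disk_cache::Backend.
  }

 private:
  disk_cache::Backend* backend_;
  net::CompletionCallbackImpl<CacheUser> my_callback_;

  DISALLOW_COPY_AND_ASSIGN(CacheUser);
};

The sketch mirrors CacheCreator itself: the callback object is a member so it stays alive while the operation is pending, and the Backend pointer is only meaningful once the callback reports net::OK.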