Chromium Code Reviews

Side by Side Diff: net/disk_cache/backend_impl.cc

Issue 2827043: Disk cache: Switch the disk cache to use the cache_thread. ... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: "... and the fix" (created 10 years, 5 months ago)
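For context, a minimal caller-side sketch of the asynchronous backend creation this patch introduces (not part of the change itself): CreateCacheBackend() now returns net::ERR_IO_PENDING and reports the final result through the supplied CompletionCallback once initialization finishes on the cache thread. The helper name, the use of TestCompletionCallback, and how the MessageLoopProxy for the cache thread is obtained are assumptions made for the example.

// Illustrative sketch only; not part of this patch. It blocks on the
// completion callback the way unit tests in net/ typically do.
#include "base/file_path.h"
#include "base/message_loop_proxy.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/disk_cache.h"

disk_cache::Backend* CreateBackendAndWait(const FilePath& path,
                                          base::MessageLoopProxy* cache_thread) {
  disk_cache::Backend* backend = NULL;
  TestCompletionCallback callback;  // Assumed test helper from net/base.
  int rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, path, 0 /* default max size */, false /* force */,
      cache_thread, &backend, &callback);
  if (rv == net::ERR_IO_PENDING)
    rv = callback.WaitForResult();  // Init() completes on the cache thread.
  return (rv == net::OK) ? backend : NULL;
}
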
1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/backend_impl.h" 5 #include "net/disk_cache/backend_impl.h"
6 6
7 #include "base/field_trial.h" 7 #include "base/field_trial.h"
8 #include "base/file_path.h" 8 #include "base/file_path.h"
9 #include "base/file_util.h" 9 #include "base/file_util.h"
10 #include "base/histogram.h" 10 #include "base/histogram.h"
(...skipping 144 matching lines...)
155 if (!first) 155 if (!first)
156 return; 156 return;
157 157
158 // Field trials involve static objects so we have to do this only once. 158 // Field trials involve static objects so we have to do this only once.
159 first = false; 159 first = false;
160 scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10); 160 scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10);
161 std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group); 161 std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group);
162 trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability); 162 trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability);
163 } 163 }
164 164
165 // ------------------------------------------------------------------------
166
167 // This class takes care of building an instance of the backend.
168 class CacheCreator {
169 public:
170 CacheCreator(const FilePath& path, bool force, int max_bytes,
171 net::CacheType type, uint32 flags,
172 base::MessageLoopProxy* thread, disk_cache::Backend** backend,
173 net::CompletionCallback* callback)
174 : path_(path), force_(force), retry_(false), max_bytes_(max_bytes),
175 type_(type), flags_(flags), thread_(thread), backend_(backend),
176 callback_(callback), cache_(NULL),
177 ALLOW_THIS_IN_INITIALIZER_LIST(
178 my_callback_(this, &CacheCreator::OnIOComplete)) {
179 }
180 ~CacheCreator() {}
181
182 // Creates the backend.
183 int Run();
184
185 // Callback implementation.
186 void OnIOComplete(int result);
187
188 private:
189 void DoCallback(int result);
190
191 const FilePath& path_;
192 bool force_;
193 bool retry_;
194 int max_bytes_;
195 net::CacheType type_;
196 uint32 flags_;
197 scoped_refptr<base::MessageLoopProxy> thread_;
198 disk_cache::Backend** backend_;
199 net::CompletionCallback* callback_;
200 disk_cache::BackendImpl* cache_;
201 net::CompletionCallbackImpl<CacheCreator> my_callback_;
202
203 DISALLOW_COPY_AND_ASSIGN(CacheCreator);
204 };
205
206 int CacheCreator::Run() {
207 cache_ = new disk_cache::BackendImpl(path_, thread_);
208 cache_->SetMaxSize(max_bytes_);
209 cache_->SetType(type_);
210 cache_->SetFlags(flags_);
211 int rv = cache_->Init(&my_callback_);
212 DCHECK_EQ(net::ERR_IO_PENDING, rv);
213 return rv;
214 }
215
216 void CacheCreator::OnIOComplete(int result) {
217 if (result == net::OK || !force_ || retry_)
218 return DoCallback(result);
219
220 // This is a failure and we are supposed to try again, so delete the object,
221 // delete all the files, and try again.
222 retry_ = true;
223 delete cache_;
224 if (!DelayedCacheCleanup(path_))
225 return DoCallback(result);
226
227 // The worker thread will start deleting files soon, but the original folder
228 // is not there anymore... let's create a new set of files.
229 int rv = Run();
230 DCHECK_EQ(net::ERR_IO_PENDING, rv);
231 }
232
233 void CacheCreator::DoCallback(int result) {
234 DCHECK_NE(net::ERR_IO_PENDING, result);
235 if (result == net::OK) {
236 *backend_ = cache_;
237 } else {
238 LOG(ERROR) << "Unable to create cache";
239 *backend_ = NULL;
240 delete cache_;
241 }
242 callback_->Run(result);
243 delete this;
244 }
245
246 // ------------------------------------------------------------------------
247
248 // A task to perform final cleanup on the background thread.
249 class FinalCleanup : public Task {
250 public:
251 explicit FinalCleanup(disk_cache::BackendImpl* backend) : backend_(backend) {}
252 ~FinalCleanup() {}
253
254 virtual void Run();
255 private:
256 disk_cache::BackendImpl* backend_;
257 DISALLOW_EVIL_CONSTRUCTORS(FinalCleanup);
258 };
259
260 void FinalCleanup::Run() {
261 backend_->CleanupCache();
262 }
263
165 } // namespace 264 } // namespace
166 265
167 // ------------------------------------------------------------------------ 266 // ------------------------------------------------------------------------
168 267
169 namespace disk_cache { 268 namespace disk_cache {
170 269
171 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes, 270 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes,
172 bool force, base::MessageLoopProxy* thread, 271 bool force, base::MessageLoopProxy* thread,
173 Backend** backend, CompletionCallback* callback) { 272 Backend** backend, CompletionCallback* callback) {
174 DCHECK(callback); 273 DCHECK(callback);
(...skipping 46 matching lines...)
221 // still fail if we are not able to rename the cache folder (for instance due to 320 // still fail if we are not able to rename the cache folder (for instance due to
222 // a sharing violation), and in that case a cache for this profile (on the 321 // a sharing violation), and in that case a cache for this profile (on the
223 // desired path) cannot be created. 322 // desired path) cannot be created.
224 // 323 //
225 // Static. 324 // Static.
226 int BackendImpl::CreateBackend(const FilePath& full_path, bool force, 325 int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
227 int max_bytes, net::CacheType type, 326 int max_bytes, net::CacheType type,
228 uint32 flags, base::MessageLoopProxy* thread, 327 uint32 flags, base::MessageLoopProxy* thread,
229 Backend** backend, 328 Backend** backend,
230 CompletionCallback* callback) { 329 CompletionCallback* callback) {
231 BackendImpl* cache = new BackendImpl(full_path, thread); 330 CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type,
232 cache->SetMaxSize(max_bytes); 331 flags, thread, backend, callback);
233 cache->SetType(type); 332 // This object will self-destroy when finished.
234 cache->SetFlags(flags); 333 return creator->Run();
235 if (cache->Init()) { 334 }
236 *backend = cache; 335
336 int BackendImpl::SyncInit() {
337 if (Init())
237 return net::OK; 338 return net::OK;
238 }
239 339
240 *backend = NULL;
241 delete cache;
242 if (!force)
243 return net::ERR_FAILED;
244
245 if (!DelayedCacheCleanup(full_path))
246 return net::ERR_FAILED;
247
248 // The worker thread will start deleting files soon, but the original folder
249 // is not there anymore... let's create a new set of files.
250 cache = new BackendImpl(full_path, thread);
251 cache->SetMaxSize(max_bytes);
252 cache->SetType(type);
253 cache->SetFlags(flags);
254 if (cache->Init()) {
255 *backend = cache;
256 return net::OK;
257 }
258
259 delete cache;
260 LOG(ERROR) << "Unable to create cache";
261 return net::ERR_FAILED; 340 return net::ERR_FAILED;
262 } 341 }
263 342
264 bool BackendImpl::Init() { 343 bool BackendImpl::Init() {
265 DCHECK(!init_); 344 DCHECK(!init_);
266 if (init_) 345 if (init_)
267 return false; 346 return false;
268 347
269 bool create_files = false; 348 bool create_files = false;
270 if (!InitBackingStore(&create_files)) { 349 if (!InitBackingStore(&create_files)) {
(...skipping 26 matching lines...)
297 new_eviction_ = (cache_type_ == net::DISK_CACHE); 376 new_eviction_ = (cache_type_ == net::DISK_CACHE);
298 } 377 }
299 378
300 if (!CheckIndex()) { 379 if (!CheckIndex()) {
301 ReportError(ERR_INIT_FAILED); 380 ReportError(ERR_INIT_FAILED);
302 return false; 381 return false;
303 } 382 }
304 383
305 // We don't care if the value overflows. The only thing we care about is that 384 // We don't care if the value overflows. The only thing we care about is that
306 // the id cannot be zero, because that value is used as "not dirty". 385 // the id cannot be zero, because that value is used as "not dirty".
307 // Increasing the value once per second gives us many years before a we start 386 // Increasing the value once per second gives us many years before we start
308 // having collisions. 387 // having collisions.
309 data_->header.this_id++; 388 data_->header.this_id++;
310 if (!data_->header.this_id) 389 if (!data_->header.this_id)
311 data_->header.this_id++; 390 data_->header.this_id++;
312 391
313 if (data_->header.crash) { 392 if (data_->header.crash) {
314 ReportError(ERR_PREVIOUS_CRASH); 393 ReportError(ERR_PREVIOUS_CRASH);
315 } else { 394 } else {
316 ReportError(0); 395 ReportError(0);
317 data_->header.crash = 1; 396 data_->header.crash = 1;
(...skipping 10 matching lines...)
328 disabled_ = !rankings_.Init(this, new_eviction_); 407 disabled_ = !rankings_.Init(this, new_eviction_);
329 eviction_.Init(this); 408 eviction_.Init(this);
330 409
331 // Setup load-time data only for the main cache. 410 // Setup load-time data only for the main cache.
332 if (cache_type() == net::DISK_CACHE) 411 if (cache_type() == net::DISK_CACHE)
333 SetFieldTrialInfo(GetSizeGroup()); 412 SetFieldTrialInfo(GetSizeGroup());
334 413
335 return !disabled_; 414 return !disabled_;
336 } 415 }
337 416
417 int BackendImpl::Init(CompletionCallback* callback) {
418 background_queue_.Init(callback);
419 return net::ERR_IO_PENDING;
420 }
421
338 BackendImpl::~BackendImpl() { 422 BackendImpl::~BackendImpl() {
339 Trace("Backend destructor"); 423 background_queue_.WaitForPendingIO();
340 if (!init_)
341 return;
342 424
343 if (data_) 425 if (background_queue_.BackgroundIsCurrentThread()) {
344 data_->header.crash = 0; 426 // Unit tests may use the same thread for everything.
427 CleanupCache();
428 } else {
429 background_queue_.background_thread()->PostTask(FROM_HERE,
430 new FinalCleanup(this));
431 done_.Wait();
432 }
433 }
345 434
346 timer_.Stop(); 435 void BackendImpl::CleanupCache() {
436 Trace("Backend Cleanup");
437 if (init_) {
438 if (data_)
439 data_->header.crash = 0;
347 440
348 File::WaitForPendingIO(&num_pending_io_); 441 timer_.Stop();
349 DCHECK(!num_refs_); 442 File::WaitForPendingIO(&num_pending_io_);
443 DCHECK(!num_refs_);
444 }
445 factory_.RevokeAll();
446 done_.Signal();
350 } 447 }
351 448
352 // ------------------------------------------------------------------------ 449 // ------------------------------------------------------------------------
353 450
354 int32 BackendImpl::GetEntryCount() const { 451 int32 BackendImpl::GetEntryCount() const {
355 if (!index_) 452 if (!index_)
356 return 0; 453 return 0;
357 // num_entries includes entries already evicted. 454 // num_entries includes entries already evicted.
358 int32 not_deleted = data_->header.num_entries - 455 int32 not_deleted = data_->header.num_entries -
359 data_->header.lru.sizes[Rankings::DELETED]; 456 data_->header.lru.sizes[Rankings::DELETED];
(...skipping 26 matching lines...)
386 return NULL; 483 return NULL;
387 } 484 }
388 485
389 eviction_.OnOpenEntry(cache_entry); 486 eviction_.OnOpenEntry(cache_entry);
390 487
391 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start); 488 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
392 stats_.OnEvent(Stats::OPEN_HIT); 489 stats_.OnEvent(Stats::OPEN_HIT);
393 return cache_entry; 490 return cache_entry;
394 } 491 }
395 492
396 bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) { 493 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
397 DCHECK(entry); 494 DCHECK(entry);
398 *entry = OpenEntryImpl(key); 495 *entry = OpenEntryImpl(key);
399 return (*entry) ? true : false; 496 return (*entry) ? net::OK : net::ERR_FAILED;
400 } 497 }
401 498
402 int BackendImpl::OpenEntry(const std::string& key, Entry** entry, 499 int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
403 CompletionCallback* callback) { 500 CompletionCallback* callback) {
404 if (OpenEntry(key, entry)) 501 DCHECK(callback);
405 return net::OK; 502 background_queue_.OpenEntry(key, entry, callback);
406 503 return net::ERR_IO_PENDING;
407 return net::ERR_FAILED;
408 } 504 }
409 505
410 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { 506 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
411 if (disabled_ || key.empty()) 507 if (disabled_ || key.empty())
412 return NULL; 508 return NULL;
413 509
414 TimeTicks start = TimeTicks::Now(); 510 TimeTicks start = TimeTicks::Now();
415 uint32 hash = Hash(key); 511 uint32 hash = Hash(key);
416 512
417 scoped_refptr<EntryImpl> parent; 513 scoped_refptr<EntryImpl> parent;
(...skipping 59 matching lines...)
477 eviction_.OnCreateEntry(cache_entry); 573 eviction_.OnCreateEntry(cache_entry);
478 if (!parent.get()) 574 if (!parent.get())
479 data_->table[hash & mask_] = entry_address.value(); 575 data_->table[hash & mask_] = entry_address.value();
480 576
481 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start); 577 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
482 stats_.OnEvent(Stats::CREATE_HIT); 578 stats_.OnEvent(Stats::CREATE_HIT);
483 Trace("create entry hit "); 579 Trace("create entry hit ");
484 return cache_entry.release(); 580 return cache_entry.release();
485 } 581 }
486 582
487 bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) { 583 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
488 DCHECK(entry); 584 DCHECK(entry);
489 *entry = CreateEntryImpl(key); 585 *entry = CreateEntryImpl(key);
490 return (*entry) ? true : false; 586 return (*entry) ? net::OK : net::ERR_FAILED;
491 } 587 }
492 588
493 int BackendImpl::CreateEntry(const std::string& key, Entry** entry, 589 int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
494 CompletionCallback* callback) { 590 CompletionCallback* callback) {
495 if (CreateEntry(key, entry)) 591 DCHECK(callback);
592 background_queue_.CreateEntry(key, entry, callback);
593 return net::ERR_IO_PENDING;
594 }
595
596 int BackendImpl::SyncDoomEntry(const std::string& key) {
597 if (DoomEntry(key))
496 return net::OK; 598 return net::OK;
497 599
498 return net::ERR_FAILED; 600 return net::ERR_FAILED;
499 } 601 }
500 602
501 bool BackendImpl::DoomEntry(const std::string& key) { 603 bool BackendImpl::DoomEntry(const std::string& key) {
502 if (disabled_) 604 if (disabled_)
503 return false; 605 return false;
504 606
505 Entry* entry; 607 EntryImpl* entry = OpenEntryImpl(key);
506 if (!OpenEntry(key, &entry)) 608 if (!entry)
507 return false; 609 return false;
508 610
509 // Note that you'd think you could just pass &entry_impl to OpenEntry, 611 entry->DoomImpl();
510 // but that triggers strict aliasing problems with gcc. 612 entry->Release();
511 EntryImpl* entry_impl = reinterpret_cast<EntryImpl*>(entry);
512 entry_impl->Doom();
513 entry_impl->Release();
514 return true; 613 return true;
515 } 614 }
516 615
517 int BackendImpl::DoomEntry(const std::string& key, 616 int BackendImpl::DoomEntry(const std::string& key,
518 CompletionCallback* callback) { 617 CompletionCallback* callback) {
519 if (DoomEntry(key)) 618 DCHECK(callback);
619 background_queue_.DoomEntry(key, callback);
620 return net::ERR_IO_PENDING;
621 }
622
623 int BackendImpl::SyncDoomAllEntries() {
624 if (DoomAllEntries())
520 return net::OK; 625 return net::OK;
521 626
522 return net::ERR_FAILED; 627 return net::ERR_FAILED;
523 } 628 }
524 629
525 bool BackendImpl::DoomAllEntries() { 630 bool BackendImpl::DoomAllEntries() {
526 if (!num_refs_) { 631 if (!num_refs_) {
527 PrepareForRestart(); 632 PrepareForRestart();
528 DeleteCache(path_, false); 633 DeleteCache(path_, false);
529 return Init(); 634 return Init();
530 } else { 635 } else {
531 if (disabled_) 636 if (disabled_)
532 return false; 637 return false;
533 638
534 eviction_.TrimCache(true); 639 eviction_.TrimCache(true);
535 stats_.OnEvent(Stats::DOOM_CACHE); 640 stats_.OnEvent(Stats::DOOM_CACHE);
536 return true; 641 return true;
537 } 642 }
538 } 643 }
539 644
540 int BackendImpl::DoomAllEntries(CompletionCallback* callback) { 645 int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
541 if (DoomAllEntries()) 646 DCHECK(callback);
647 background_queue_.DoomAllEntries(callback);
648 return net::ERR_IO_PENDING;
649 }
650
651 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
652 const base::Time end_time) {
653 if (DoomEntriesBetween(initial_time, end_time))
542 return net::OK; 654 return net::OK;
543 655
544 return net::ERR_FAILED; 656 return net::ERR_FAILED;
545 } 657 }
546 658
547 bool BackendImpl::DoomEntriesBetween(const Time initial_time, 659 bool BackendImpl::DoomEntriesBetween(const Time initial_time,
548 const Time end_time) { 660 const Time end_time) {
549 if (end_time.is_null()) 661 if (end_time.is_null())
550 return DoomEntriesSince(initial_time); 662 return DoomEntriesSince(initial_time);
551 663
552 DCHECK(end_time >= initial_time); 664 DCHECK(end_time >= initial_time);
553 665
554 if (disabled_) 666 if (disabled_)
555 return false; 667 return false;
556 668
557 Entry* node, *next; 669 EntryImpl* node;
558 void* iter = NULL; 670 void* iter = NULL;
559 if (!OpenNextEntry(&iter, &next)) 671 EntryImpl* next = OpenNextEntryImpl(&iter);
672 if (!next)
560 return true; 673 return true;
561 674
562 while (next) { 675 while (next) {
563 node = next; 676 node = next;
564 if (!OpenNextEntry(&iter, &next)) 677 next = OpenNextEntryImpl(&iter);
565 next = NULL;
566 678
567 if (node->GetLastUsed() >= initial_time && 679 if (node->GetLastUsed() >= initial_time &&
568 node->GetLastUsed() < end_time) { 680 node->GetLastUsed() < end_time) {
569 node->Doom(); 681 node->DoomImpl();
570 } else if (node->GetLastUsed() < initial_time) { 682 } else if (node->GetLastUsed() < initial_time) {
571 if (next) 683 if (next)
572 next->Close(); 684 next->Release();
573 next = NULL; 685 next = NULL;
574 EndEnumeration(&iter); 686 SyncEndEnumeration(iter);
575 } 687 }
576 688
577 node->Close(); 689 node->Release();
578 } 690 }
579 691
580 return true; 692 return true;
581 } 693 }
582 694
583 int BackendImpl::DoomEntriesBetween(const base::Time initial_time, 695 int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
584 const base::Time end_time, 696 const base::Time end_time,
585 CompletionCallback* callback) { 697 CompletionCallback* callback) {
586 if (DoomEntriesBetween(initial_time, end_time)) 698 DCHECK(callback);
699 background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
700 return net::ERR_IO_PENDING;
701 }
702
703 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
704 if (DoomEntriesSince(initial_time))
587 return net::OK; 705 return net::OK;
588 706
589 return net::ERR_FAILED; 707 return net::ERR_FAILED;
590 } 708 }
591 709
592 // We use OpenNextEntry to retrieve elements from the cache, until we get 710 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get
593 // entries that are too old. 711 // entries that are too old.
594 bool BackendImpl::DoomEntriesSince(const Time initial_time) { 712 bool BackendImpl::DoomEntriesSince(const Time initial_time) {
595 if (disabled_) 713 if (disabled_)
596 return false; 714 return false;
597 715
598 for (;;) { 716 for (;;) {
599 Entry* entry;
600 void* iter = NULL; 717 void* iter = NULL;
601 if (!OpenNextEntry(&iter, &entry)) 718 EntryImpl* entry = OpenNextEntryImpl(&iter);
719 if (!entry)
602 return true; 720 return true;
603 721
604 if (initial_time > entry->GetLastUsed()) { 722 if (initial_time > entry->GetLastUsed()) {
605 entry->Close(); 723 entry->Release();
606 EndEnumeration(&iter); 724 SyncEndEnumeration(iter);
607 return true; 725 return true;
608 } 726 }
609 727
610 entry->Doom(); 728 entry->DoomImpl();
611 entry->Close(); 729 entry->Release();
612 EndEnumeration(&iter); // Dooming the entry invalidates the iterator. 730 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
613 } 731 }
614 } 732 }
615 733
616 int BackendImpl::DoomEntriesSince(const base::Time initial_time, 734 int BackendImpl::DoomEntriesSince(const base::Time initial_time,
617 CompletionCallback* callback) { 735 CompletionCallback* callback) {
618 if (DoomEntriesSince(initial_time)) 736 DCHECK(callback);
619 return net::OK; 737 background_queue_.DoomEntriesSince(initial_time, callback);
620 738 return net::ERR_IO_PENDING;
621 return net::ERR_FAILED;
622 } 739 }
623 740
624 bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) { 741 int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
625 return OpenFollowingEntry(true, iter, next_entry); 742 *next_entry = OpenNextEntryImpl(iter);
743 return (*next_entry) ? net::OK : net::ERR_FAILED;
744 }
745
746 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
747 return OpenFollowingEntry(true, iter);
626 } 748 }
627 749
628 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, 750 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
629 CompletionCallback* callback) { 751 CompletionCallback* callback) {
630 if (OpenNextEntry(iter, next_entry)) 752 DCHECK(callback);
631 return net::OK; 753 background_queue_.OpenNextEntry(iter, next_entry, callback);
754 return net::ERR_IO_PENDING;
755 }
632 756
633 return net::ERR_FAILED; 757 void BackendImpl::SyncEndEnumeration(void* iter) {
758 scoped_ptr<Rankings::Iterator> iterator(
759 reinterpret_cast<Rankings::Iterator*>(iter));
634 } 760 }
635 761
636 void BackendImpl::EndEnumeration(void** iter) { 762 void BackendImpl::EndEnumeration(void** iter) {
637 scoped_ptr<Rankings::Iterator> iterator( 763 background_queue_.EndEnumeration(*iter);
638 reinterpret_cast<Rankings::Iterator*>(*iter));
639 *iter = NULL; 764 *iter = NULL;
640 } 765 }
641 766
642 void BackendImpl::GetStats(StatsItems* stats) { 767 void BackendImpl::GetStats(StatsItems* stats) {
643 if (disabled_) 768 if (disabled_)
644 return; 769 return;
645 770
646 std::pair<std::string, std::string> item; 771 std::pair<std::string, std::string> item;
647 772
648 item.first = "Entries"; 773 item.first = "Entries";
(...skipping 360 matching lines...)
1009 } 1134 }
1010 1135
1011 void BackendImpl::SetFlags(uint32 flags) { 1136 void BackendImpl::SetFlags(uint32 flags) {
1012 user_flags_ |= flags; 1137 user_flags_ |= flags;
1013 } 1138 }
1014 1139
1015 void BackendImpl::ClearRefCountForTest() { 1140 void BackendImpl::ClearRefCountForTest() {
1016 num_refs_ = 0; 1141 num_refs_ = 0;
1017 } 1142 }
1018 1143
1144 int BackendImpl::FlushQueueForTest(CompletionCallback* callback) {
1145 background_queue_.FlushQueue(callback);
1146 return net::ERR_IO_PENDING;
1147 }
1148
1019 int BackendImpl::SelfCheck() { 1149 int BackendImpl::SelfCheck() {
1020 if (!init_) { 1150 if (!init_) {
1021 LOG(ERROR) << "Init failed"; 1151 LOG(ERROR) << "Init failed";
1022 return ERR_INIT_FAILED; 1152 return ERR_INIT_FAILED;
1023 } 1153 }
1024 1154
1025 int num_entries = rankings_.SelfCheck(); 1155 int num_entries = rankings_.SelfCheck();
1026 if (num_entries < 0) { 1156 if (num_entries < 0) {
1027 LOG(ERROR) << "Invalid rankings list, error " << num_entries; 1157 LOG(ERROR) << "Invalid rankings list, error " << num_entries;
1028 return num_entries; 1158 return num_entries;
1029 } 1159 }
1030 1160
1031 if (num_entries != data_->header.num_entries) { 1161 if (num_entries != data_->header.num_entries) {
1032 LOG(ERROR) << "Number of entries mismatch"; 1162 LOG(ERROR) << "Number of entries mismatch";
1033 return ERR_NUM_ENTRIES_MISMATCH; 1163 return ERR_NUM_ENTRIES_MISMATCH;
1034 } 1164 }
1035 1165
1036 return CheckAllEntries(); 1166 return CheckAllEntries();
1037 } 1167 }
1038 1168
1039 bool BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry) { 1169 int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
1040 return OpenFollowingEntry(false, iter, prev_entry); 1170 *prev_entry = OpenPrevEntryImpl(iter);
1171 return (*prev_entry) ? net::OK : net::ERR_FAILED;
1172 }
1173
1174 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
1175 CompletionCallback* callback) {
1176 DCHECK(callback);
1177 background_queue_.OpenPrevEntry(iter, prev_entry, callback);
1178 return net::ERR_IO_PENDING;
1179 }
1180
1181 EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
1182 return OpenFollowingEntry(false, iter);
1041 } 1183 }
1042 1184
1043 // ------------------------------------------------------------------------ 1185 // ------------------------------------------------------------------------
1044 1186
1045 // We just created a new file so we're going to write the header and set the 1187 // We just created a new file so we're going to write the header and set the
1046 // file length to include the hash table (zero filled). 1188 // file length to include the hash table (zero filled).
1047 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { 1189 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
1048 AdjustMaxCacheSize(0); 1190 AdjustMaxCacheSize(0);
1049 1191
1050 IndexHeader header; 1192 IndexHeader header;
(...skipping 93 matching lines...)
1144 DCHECK(!open_entries_.size()); 1286 DCHECK(!open_entries_.size());
1145 PrepareForRestart(); 1287 PrepareForRestart();
1146 DelayedCacheCleanup(path_); 1288 DelayedCacheCleanup(path_);
1147 1289
1148 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); 1290 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
1149 1291
1150 // Don't call Init() if directed by the unit test: we are simulating a failure 1292 // Don't call Init() if directed by the unit test: we are simulating a failure
1151 // trying to re-enable the cache. 1293 // trying to re-enable the cache.
1152 if (unit_test_) 1294 if (unit_test_)
1153 init_ = true; // Let the destructor do proper cleanup. 1295 init_ = true; // Let the destructor do proper cleanup.
1154 else if (Init()) 1296 else if (SyncInit())
1155 stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); 1297 stats_.SetCounter(Stats::FATAL_ERROR, errors + 1);
1156 } 1298 }
1157 1299
1158 void BackendImpl::PrepareForRestart() { 1300 void BackendImpl::PrepareForRestart() {
1159 // Reset the mask_ if it was not given by the user. 1301 // Reset the mask_ if it was not given by the user.
1160 if (!(user_flags_ & kMask)) 1302 if (!(user_flags_ & kMask))
1161 mask_ = 0; 1303 mask_ = 0;
1162 1304
1163 if (!(user_flags_ & kNewEviction)) 1305 if (!(user_flags_ & kNewEviction))
1164 new_eviction_ = false; 1306 new_eviction_ = false;
(...skipping 129 matching lines...)
1294 parent_entry = NULL; 1436 parent_entry = NULL;
1295 1437
1296 if (cache_entry && (find_parent || !found)) 1438 if (cache_entry && (find_parent || !found))
1297 cache_entry = NULL; 1439 cache_entry = NULL;
1298 1440
1299 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp); 1441 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
1300 return tmp; 1442 return tmp;
1301 } 1443 }
1302 1444
1303 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. 1445 // This is the actual implementation for OpenNextEntry and OpenPrevEntry.
1304 bool BackendImpl::OpenFollowingEntry(bool forward, void** iter, 1446 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
1305 Entry** next_entry) {
1306 if (disabled_) 1447 if (disabled_)
1307 return false; 1448 return NULL;
1308 1449
1309 DCHECK(iter); 1450 DCHECK(iter);
1310 DCHECK(next_entry);
1311 *next_entry = NULL;
1312 1451
1313 const int kListsToSearch = 3; 1452 const int kListsToSearch = 3;
1314 scoped_refptr<EntryImpl> entries[kListsToSearch]; 1453 scoped_refptr<EntryImpl> entries[kListsToSearch];
1315 scoped_ptr<Rankings::Iterator> iterator( 1454 scoped_ptr<Rankings::Iterator> iterator(
1316 reinterpret_cast<Rankings::Iterator*>(*iter)); 1455 reinterpret_cast<Rankings::Iterator*>(*iter));
1317 *iter = NULL; 1456 *iter = NULL;
1318 1457
1319 if (!iterator.get()) { 1458 if (!iterator.get()) {
1320 iterator.reset(new Rankings::Iterator(&rankings_)); 1459 iterator.reset(new Rankings::Iterator(&rankings_));
1321 bool ret = false; 1460 bool ret = false;
1322 1461
1323 // Get an entry from each list. 1462 // Get an entry from each list.
1324 for (int i = 0; i < kListsToSearch; i++) { 1463 for (int i = 0; i < kListsToSearch; i++) {
1325 EntryImpl* temp = NULL; 1464 EntryImpl* temp = NULL;
1326 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i), 1465 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
1327 &iterator->nodes[i], &temp); 1466 &iterator->nodes[i], &temp);
1328 entries[i].swap(&temp); // The entry was already addref'd. 1467 entries[i].swap(&temp); // The entry was already addref'd.
1329 } 1468 }
1330 if (!ret) 1469 if (!ret)
1331 return false; 1470 return NULL;
1332 } else { 1471 } else {
1333 // Get the next entry from the last list, and the actual entries for the 1472 // Get the next entry from the last list, and the actual entries for the
1334 // elements on the other lists. 1473 // elements on the other lists.
1335 for (int i = 0; i < kListsToSearch; i++) { 1474 for (int i = 0; i < kListsToSearch; i++) {
1336 EntryImpl* temp = NULL; 1475 EntryImpl* temp = NULL;
1337 if (iterator->list == i) { 1476 if (iterator->list == i) {
1338 OpenFollowingEntryFromList(forward, iterator->list, 1477 OpenFollowingEntryFromList(forward, iterator->list,
1339 &iterator->nodes[i], &temp); 1478 &iterator->nodes[i], &temp);
1340 } else { 1479 } else {
1341 temp = GetEnumeratedEntry(iterator->nodes[i], false); 1480 temp = GetEnumeratedEntry(iterator->nodes[i], false);
(...skipping 15 matching lines...)
1357 continue; 1496 continue;
1358 } 1497 }
1359 if (access_times[i] > access_times[newest]) 1498 if (access_times[i] > access_times[newest])
1360 newest = i; 1499 newest = i;
1361 if (access_times[i] < access_times[oldest]) 1500 if (access_times[i] < access_times[oldest])
1362 oldest = i; 1501 oldest = i;
1363 } 1502 }
1364 } 1503 }
1365 1504
1366 if (newest < 0 || oldest < 0) 1505 if (newest < 0 || oldest < 0)
1367 return false; 1506 return NULL;
1368 1507
1508 EntryImpl* next_entry;
1369 if (forward) { 1509 if (forward) {
1370 entries[newest].swap(reinterpret_cast<EntryImpl**>(next_entry)); 1510 next_entry = entries[newest].release();
1371 iterator->list = static_cast<Rankings::List>(newest); 1511 iterator->list = static_cast<Rankings::List>(newest);
1372 } else { 1512 } else {
1373 entries[oldest].swap(reinterpret_cast<EntryImpl**>(next_entry)); 1513 next_entry = entries[oldest].release();
1374 iterator->list = static_cast<Rankings::List>(oldest); 1514 iterator->list = static_cast<Rankings::List>(oldest);
1375 } 1515 }
1376 1516
1377 *iter = iterator.release(); 1517 *iter = iterator.release();
1378 return true; 1518 return next_entry;
1379 } 1519 }
1380 1520
1381 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list, 1521 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
1382 CacheRankingsBlock** from_entry, 1522 CacheRankingsBlock** from_entry,
1383 EntryImpl** next_entry) { 1523 EntryImpl** next_entry) {
1384 if (disabled_) 1524 if (disabled_)
1385 return false; 1525 return false;
1386 1526
1387 if (!new_eviction_ && Rankings::NO_USE != list) 1527 if (!new_eviction_ && Rankings::NO_USE != list)
1388 return false; 1528 return false;
(...skipping 320 matching lines...)
1709 1849
1710 return num_dirty; 1850 return num_dirty;
1711 } 1851 }
1712 1852
1713 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { 1853 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
1714 RankingsNode* rankings = cache_entry->rankings()->Data(); 1854 RankingsNode* rankings = cache_entry->rankings()->Data();
1715 return !rankings->dummy; 1855 return !rankings->dummy;
1716 } 1856 }
1717 1857
1718 } // namespace disk_cache 1858 } // namespace disk_cache