OLD | NEW |
1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/backend_impl.h" | 5 #include "net/disk_cache/backend_impl.h" |
6 | 6 |
7 #include "base/file_util.h" | 7 #include "base/file_util.h" |
8 #include "base/histogram.h" | 8 #include "base/histogram.h" |
9 #include "base/message_loop.h" | 9 #include "base/message_loop.h" |
10 #include "base/string_util.h" | 10 #include "base/string_util.h" |
(...skipping 141 matching lines...)
152 | 152 |
153 // If the initialization of the cache fails, and force is true, we will discard | 153 // If the initialization of the cache fails, and force is true, we will discard |
154 // the whole cache and create a new one. In order to process a potentially large | 154 // the whole cache and create a new one. In order to process a potentially large |
155 // number of files, we'll rename the cache folder to old_ + original_name + | 155 // number of files, we'll rename the cache folder to old_ + original_name + |
156 // number, (located on the same parent folder), and spawn a worker thread to | 156 // number, (located on the same parent folder), and spawn a worker thread to |
157 // delete all the files on all the stale cache folders. The whole process can | 157 // delete all the files on all the stale cache folders. The whole process can |
158 // still fail if we are not able to rename the cache folder (for instance due to | 158 // still fail if we are not able to rename the cache folder (for instance due to |
159 // a sharing violation), and in that case a cache for this profile (on the | 159 // a sharing violation), and in that case a cache for this profile (on the |
160 // desired path) cannot be created. | 160 // desired path) cannot be created. |
161 Backend* CreateCacheBackend(const std::wstring& full_path, bool force, | 161 Backend* CreateCacheBackend(const std::wstring& full_path, bool force, |
162 int max_bytes) { | 162 int max_bytes, net::CacheType type) { |
163 BackendImpl* cache = new BackendImpl(full_path); | 163 BackendImpl* cache = new BackendImpl(full_path); |
164 cache->SetMaxSize(max_bytes); | 164 cache->SetMaxSize(max_bytes); |
| 165 cache->SetType(type); |
165 if (cache->Init()) | 166 if (cache->Init()) |
166 return cache; | 167 return cache; |
167 | 168 |
168 delete cache; | 169 delete cache; |
169 if (!force) | 170 if (!force) |
170 return NULL; | 171 return NULL; |
171 | 172 |
172 if (!DelayedCacheCleanup(full_path)) | 173 if (!DelayedCacheCleanup(full_path)) |
173 return NULL; | 174 return NULL; |
174 | 175 |
175 // The worker thread will start deleting files soon, but the original folder | 176 // The worker thread will start deleting files soon, but the original folder |
176 // is not there anymore... let's create a new set of files. | 177 // is not there anymore... let's create a new set of files. |
177 cache = new BackendImpl(full_path); | 178 cache = new BackendImpl(full_path); |
178 cache->SetMaxSize(max_bytes); | 179 cache->SetMaxSize(max_bytes); |
| 180 cache->SetType(type); |
179 if (cache->Init()) | 181 if (cache->Init()) |
180 return cache; | 182 return cache; |
181 | 183 |
182 delete cache; | 184 delete cache; |
183 LOG(ERROR) << "Unable to create cache"; | 185 LOG(ERROR) << "Unable to create cache"; |
184 return NULL; | 186 return NULL; |
185 } | 187 } |
186 | 188 |
187 // ------------------------------------------------------------------------ | 189 // ------------------------------------------------------------------------ |
188 | 190 |
189 bool BackendImpl::Init() { | 191 bool BackendImpl::Init() { |
190 DCHECK(!init_); | 192 DCHECK(!init_); |
191 if (init_) | 193 if (init_) |
192 return false; | 194 return false; |
193 | 195 |
194 #ifdef USE_NEW_EVICTION | 196 #ifdef USE_NEW_EVICTION |
195 new_eviction_ = true; | 197 new_eviction_ = true; |
196 #endif | 198 #endif |
197 | 199 |
198 bool create_files = false; | 200 bool create_files = false; |
199 if (!InitBackingStore(&create_files)) { | 201 if (!InitBackingStore(&create_files)) { |
200 ReportError(ERR_STORAGE_ERROR); | 202 ReportError(ERR_STORAGE_ERROR); |
201 return false; | 203 return false; |
202 } | 204 } |
203 | 205 |
204 num_refs_ = num_pending_io_ = max_refs_ = 0; | 206 num_refs_ = num_pending_io_ = max_refs_ = 0; |
205 | 207 |
206 if (!restarted_) { | 208 if (!restarted_) { |
| 209 trace_object_ = TraceObject::GetTraceObject(); |
207 // Create a recurrent timer of 30 secs. | 210 // Create a recurrent timer of 30 secs. |
208 int timer_delay = unit_test_ ? 1000 : 30000; | 211 int timer_delay = unit_test_ ? 1000 : 30000; |
209 timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this, | 212 timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this, |
210 &BackendImpl::OnStatsTimer); | 213 &BackendImpl::OnStatsTimer); |
211 } | 214 } |
212 | 215 |
213 init_ = true; | 216 init_ = true; |
214 if (data_) | 217 if (data_) |
215 InitExperiment(&data_->header.experiment); | 218 InitExperiment(&data_->header.experiment); |
216 | 219 |
(...skipping 304 matching lines...)
521 return false; | 524 return false; |
522 | 525 |
523 // Zero size means use the default. | 526 // Zero size means use the default. |
524 if (!max_bytes) | 527 if (!max_bytes) |
525 return true; | 528 return true; |
526 | 529 |
527 max_size_ = max_bytes; | 530 max_size_ = max_bytes; |
528 return true; | 531 return true; |
529 } | 532 } |
530 | 533 |
| 534 void BackendImpl::SetType(net::CacheType type) { |
| 535 DCHECK(type != net::MEMORY_CACHE); |
| 536 cache_type_ = type; |
| 537 } |
| 538 |
531 std::wstring BackendImpl::GetFileName(Addr address) const { | 539 std::wstring BackendImpl::GetFileName(Addr address) const { |
532 if (!address.is_separate_file() || !address.is_initialized()) { | 540 if (!address.is_separate_file() || !address.is_initialized()) { |
533 NOTREACHED(); | 541 NOTREACHED(); |
534 return std::wstring(); | 542 return std::wstring(); |
535 } | 543 } |
536 | 544 |
537 std::wstring name(path_); | 545 std::wstring name(path_); |
538 std::wstring tmp = StringPrintf(L"f_%06x", address.FileNumber()); | 546 std::wstring tmp = StringPrintf(L"f_%06x", address.FileNumber()); |
539 file_util::AppendToPath(&name, tmp); | 547 file_util::AppendToPath(&name, tmp); |
540 return name; | 548 return name; |
(...skipping 135 matching lines...)
676 stats_.ModifyStorageStats(old_size, new_size); | 684 stats_.ModifyStorageStats(old_size, new_size); |
677 } | 685 } |
678 | 686 |
679 void BackendImpl::TooMuchStorageRequested(int32 size) { | 687 void BackendImpl::TooMuchStorageRequested(int32 size) { |
680 stats_.ModifyStorageStats(0, size); | 688 stats_.ModifyStorageStats(0, size); |
681 } | 689 } |
682 | 690 |
683 // We want to remove biases from some histograms so we only send data once per | 691 // We want to remove biases from some histograms so we only send data once per |
684 // week. | 692 // week. |
685 bool BackendImpl::ShouldReportAgain() { | 693 bool BackendImpl::ShouldReportAgain() { |
686 static bool first_time = true; | 694 if (uma_report_) |
687 static bool should_send = false; | 695 return uma_report_ == 2; |
688 | 696 |
689 if (!first_time) | 697 uma_report_++; |
690 return should_send; | |
691 | |
692 first_time = false; | |
693 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); | 698 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); |
694 Time last_time = Time::FromInternalValue(last_report); | 699 Time last_time = Time::FromInternalValue(last_report); |
695 if (!last_report || (Time::Now() - last_time).InDays() >= 7) { | 700 if (!last_report || (Time::Now() - last_time).InDays() >= 7) { |
696 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue()); | 701 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue()); |
697 should_send = true; | 702 uma_report_++; |
698 return true; | 703 return true; |
699 } | 704 } |
700 return false; | 705 return false; |
701 } | 706 } |
702 | 707 |
703 void BackendImpl::FirstEviction() { | 708 void BackendImpl::FirstEviction() { |
704 DCHECK(data_->header.create_time); | 709 DCHECK(data_->header.create_time); |
705 | 710 |
706 Time create_time = Time::FromInternalValue(data_->header.create_time); | 711 Time create_time = Time::FromInternalValue(data_->header.create_time); |
707 UMA_HISTOGRAM_HOURS("DiskCache.FillupAge", | 712 UMA_HISTOGRAM_HOURS("DiskCache.FillupAge", |
708 (Time::Now() - create_time).InHours()); | 713 (Time::Now() - create_time).InHours()); |
709 | 714 |
710 int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120; | 715 int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120; |
711 UMA_HISTOGRAM_HOURS("DiskCache.FillupTime", static_cast<int>(use_hours)); | 716 UMA_HISTOGRAM_HOURS("DiskCache.FillupTime", static_cast<int>(use_hours)); |
712 UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstHitRatio", stats_.GetHitRatio()); | 717 UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstHitRatio", stats_.GetHitRatio()); |
713 | 718 |
714 int avg_size = data_->header.num_bytes / GetEntryCount(); | 719 int avg_size = data_->header.num_bytes / GetEntryCount(); |
715 UMA_HISTOGRAM_COUNTS("DiskCache.FirstEntrySize", avg_size); | 720 UMA_HISTOGRAM_COUNTS("DiskCache.FirstEntrySize", avg_size); |
716 | 721 |
717 int large_entries_bytes = stats_.GetLargeEntriesSize(); | 722 int large_entries_bytes = stats_.GetLargeEntriesSize(); |
718 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; | 723 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; |
719 UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstLargeEntriesRatio", large_ratio); | 724 UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstLargeEntriesRatio", large_ratio); |
(...skipping 33 matching lines...)
753 void BackendImpl::OnStatsTimer() { | 758 void BackendImpl::OnStatsTimer() { |
754 stats_.OnEvent(Stats::TIMER); | 759 stats_.OnEvent(Stats::TIMER); |
755 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); | 760 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); |
756 int64 time = stats_.GetCounter(Stats::TIMER); | 761 int64 time = stats_.GetCounter(Stats::TIMER); |
757 | 762 |
758 current = current * (time - 1) + num_refs_; | 763 current = current * (time - 1) + num_refs_; |
759 current /= time; | 764 current /= time; |
760 stats_.SetCounter(Stats::OPEN_ENTRIES, current); | 765 stats_.SetCounter(Stats::OPEN_ENTRIES, current); |
761 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); | 766 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); |
762 | 767 |
763 static bool first_time = true; | |
764 if (!data_) | 768 if (!data_) |
765 first_time = false; | 769 first_timer_ = false; |
766 if (first_time) { | 770 if (first_timer_) { |
767 first_time = false; | 771 first_timer_ = false; |
768 if (ShouldReportAgain()) | 772 if (ShouldReportAgain()) |
769 ReportStats(); | 773 ReportStats(); |
770 } | 774 } |
771 | 775 |
772 // Save stats to disk at 5 min intervals. | 776 // Save stats to disk at 5 min intervals. |
773 if (time % 10 == 0) | 777 if (time % 10 == 0) |
774 stats_.Store(); | 778 stats_.Store(); |
775 } | 779 } |
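(Aside, not part of this change: OnStatsTimer() keeps a cumulative running average of open entries; after the t-th tick, avg_t = (avg_{t-1} * (t - 1) + num_refs_) / t, in integer arithmetic. Since Init() creates the timer with a 30-second period (1 second under unit tests), 120 ticks make one hour, which is where the "/ 120" conversions in FirstEviction() and the reporting code come from, and 10 ticks make the 5-minute stats-save interval. A standalone restatement of the update step, illustrative only:)

  // Standalone restatement of the running-average step; not BackendImpl code.
  #include <cstdint>

  int64_t UpdateRunningAverage(int64_t prev_avg, int64_t sample, int64_t tick) {
    // tick is the 1-based count of timer ticks recorded so far (Stats::TIMER).
    return (prev_avg * (tick - 1) + sample) / tick;
  }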
776 | 780 |
777 void BackendImpl::IncrementIoCount() { | 781 void BackendImpl::IncrementIoCount() { |
(...skipping 522 matching lines...)
1300 } | 1304 } |
1301 | 1305 |
1302 if (!data_->header.lru.filled) | 1306 if (!data_->header.lru.filled) |
1303 return; | 1307 return; |
1304 | 1308 |
1305 // This is an up to date client that will report FirstEviction() data. After | 1309 // This is an up to date client that will report FirstEviction() data. After |
1306 // that event, start reporting this: | 1310 // that event, start reporting this: |
1307 | 1311 |
1308 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; | 1312 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; |
1309 UMA_HISTOGRAM_HOURS("DiskCache.TotalTime", static_cast<int>(total_hours)); | 1313 UMA_HISTOGRAM_HOURS("DiskCache.TotalTime", static_cast<int>(total_hours)); |
1310 | 1314 |
1311 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; | 1315 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; |
1312 if (!use_hours || !GetEntryCount() || !data_->header.num_bytes) | 1316 if (!use_hours || !GetEntryCount() || !data_->header.num_bytes) |
1313 return; | 1317 return; |
1314 | 1318 |
1315 UMA_HISTOGRAM_HOURS("DiskCache.UseTime", static_cast<int>(use_hours)); | 1319 UMA_HISTOGRAM_HOURS("DiskCache.UseTime", static_cast<int>(use_hours)); |
1316 UMA_HISTOGRAM_PERCENTAGE("DiskCache.HitRatio", stats_.GetHitRatio()); | 1320 UMA_HISTOGRAM_PERCENTAGE("DiskCache.HitRatio", stats_.GetHitRatio()); |
1317 UMA_HISTOGRAM_PERCENTAGE("DiskCache.ResurrectRatio", | 1321 UMA_HISTOGRAM_PERCENTAGE("DiskCache.ResurrectRatio", |
1318 stats_.GetResurrectRatio()); | 1322 stats_.GetResurrectRatio()); |
1319 | 1323 |
1320 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours; | 1324 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours; |
1321 UMA_HISTOGRAM_COUNTS("DiskCache.TrimRate", static_cast<int>(trim_rate)); | 1325 UMA_HISTOGRAM_COUNTS("DiskCache.TrimRate", static_cast<int>(trim_rate)); |
1322 | 1326 |
1323 int avg_size = data_->header.num_bytes / GetEntryCount(); | 1327 int avg_size = data_->header.num_bytes / GetEntryCount(); |
1324 UMA_HISTOGRAM_COUNTS("DiskCache.EntrySize", avg_size); | 1328 UMA_HISTOGRAM_COUNTS("DiskCache.EntrySize", avg_size); |
1325 | 1329 |
(...skipping 122 matching lines...)
1448 | 1452 |
1449 return num_dirty; | 1453 return num_dirty; |
1450 } | 1454 } |
1451 | 1455 |
1452 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { | 1456 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { |
1453 RankingsNode* rankings = cache_entry->rankings()->Data(); | 1457 RankingsNode* rankings = cache_entry->rankings()->Data(); |
1454 return !rankings->pointer; | 1458 return !rankings->pointer; |
1455 } | 1459 } |
1456 | 1460 |
1457 } // namespace disk_cache | 1461 } // namespace disk_cache |