OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/backend_impl.h" | 5 #include "net/disk_cache/v3/backend_worker.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/bind_helpers.h" | |
9 #include "base/file_util.h" | 8 #include "base/file_util.h" |
10 #include "base/files/file_path.h" | |
11 #include "base/hash.h" | |
12 #include "base/message_loop/message_loop.h" | 9 #include "base/message_loop/message_loop.h" |
13 #include "base/metrics/field_trial.h" | |
14 #include "base/metrics/histogram.h" | |
15 #include "base/metrics/stats_counters.h" | |
16 #include "base/rand_util.h" | |
17 #include "base/strings/string_util.h" | |
18 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
19 #include "base/sys_info.h" | |
20 #include "base/threading/thread_restrictions.h" | |
21 #include "base/time/time.h" | |
22 #include "base/timer/timer.h" | |
23 #include "net/base/net_errors.h" | 11 #include "net/base/net_errors.h" |
24 #include "net/disk_cache/cache_util.h" | 12 #include "net/disk_cache/cache_util.h" |
25 #include "net/disk_cache/entry_impl.h" | |
26 #include "net/disk_cache/errors.h" | 13 #include "net/disk_cache/errors.h" |
27 #include "net/disk_cache/experiments.h" | 14 #include "net/disk_cache/experiments.h" |
28 #include "net/disk_cache/file.h" | 15 #include "net/disk_cache/mapped_file.h" |
29 | 16 #include "net/disk_cache/v3/backend_work_item.h" |
30 // This has to be defined before including histogram_macros.h from this file. | 17 #include "net/disk_cache/v3/disk_format_v3.h" |
31 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ | |
32 #include "net/disk_cache/histogram_macros.h" | |
33 | 18 |
34 using base::Time; | 19 using base::Time; |
35 using base::TimeDelta; | 20 using base::TimeDelta; |
36 using base::TimeTicks; | 21 using base::TimeTicks; |
37 | 22 |
38 namespace { | 23 namespace { |
39 | 24 |
40 const char* kIndexName = "index"; | 25 const char kIndexName[] = "index"; |
| 26 const char kIndexBackupName[] = "index_bak"; |
| 27 const char kTable1Name[] = "index_tb1"; |
| 28 const char kTable2Name[] = "index_tb2"; |
| 29 const char kTable2TempName[] = "index_tb2_tmp"; |
| 30 const int kMaxOldFolders = 100; |
41 | 31 |
42 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. | 32 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. |
43 // Note that the actual target is to keep the index table load factor under 55% | 33 // Note that the actual target is to keep the index table load factor under 55% |
44 // for most users. | 34 // for most users. |
45 const int k64kEntriesStore = 240 * 1000 * 1000; | 35 const int k64kEntriesStore = 240 * 1000 * 1000; |
46 const int kBaseTableLen = 64 * 1024; | 36 const int kBaseTableLen = 64 * 1024; |
47 const int kDefaultCacheSize = 80 * 1024 * 1024; | 37 const int kDefaultCacheSize = 80 * 1024 * 1024; |
48 | 38 |
49 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). | 39 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). |
50 const int kTrimDelay = 10; | 40 const int kTrimDelay = 10; |
(...skipping 10 matching lines...) Expand all Loading... |
61 | 51 |
62 // The biggest storage_size for int32 requires a 4 MB table. | 52 // The biggest storage_size for int32 requires a 4 MB table. |
63 return kBaseTableLen * 16; | 53 return kBaseTableLen * 16; |
64 } | 54 } |
65 | 55 |
66 int MaxStorageSizeForTable(int table_len) { | 56 int MaxStorageSizeForTable(int table_len) { |
67 return table_len * (k64kEntriesStore / kBaseTableLen); | 57 return table_len * (k64kEntriesStore / kBaseTableLen); |
68 } | 58 } |
69 | 59 |
70 size_t GetIndexSize(int table_len) { | 60 size_t GetIndexSize(int table_len) { |
71 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; | 61 // |
72 return sizeof(disk_cache::IndexHeader) + table_size; | 62 //size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; |
| 63 //return sizeof(disk_cache::IndexHeaderV3) + table_size; |
| 64 return 0; |
| 65 } |
| 66 |
| 67 size_t GetIndexBitmapSize(int table_len) { |
| 68 DCHECK_LT(table_len, 1 << 22); |
| 69 size_t base_bits = disk_cache::kBaseBitmapBytes * 8; |
| 70 if (table_len < static_cast<int>(base_bits)) |
| 71 return sizeof(disk_cache::IndexBitmap); |
| 72 |
| 73 size_t extra_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes; |
| 74 extra_pages = (extra_pages + 4095) / 4096; |
| 75 return sizeof(disk_cache::IndexBitmap) + extra_pages * 4096; |
73 } | 76 } |
74 | 77 |
75 // ------------------------------------------------------------------------ | 78 // ------------------------------------------------------------------------ |
76 | 79 |
77 // Sets group for the current experiment. Returns false if the files should be | 80 // Sets group for the current experiment. Returns false if the files should be |
78 // discarded. | 81 // discarded. |
79 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) { | 82 bool InitExperiment(disk_cache::IndexHeaderV3* header) { |
80 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 || | |
81 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) { | |
82 // Discard current cache. | |
83 return false; | |
84 } | |
85 | |
86 if (base::FieldTrialList::FindFullName("SimpleCacheTrial") == | |
87 "ExperimentControl") { | |
88 if (cache_created) { | |
89 header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL; | |
90 return true; | |
91 } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) { | |
92 return false; | |
93 } | |
94 } | |
95 | |
96 header->experiment = disk_cache::NO_EXPERIMENT; | 83 header->experiment = disk_cache::NO_EXPERIMENT; |
97 return true; | 84 return true; |
98 } | 85 } |
99 | 86 |
100 } // namespace | 87 } // namespace |
101 | 88 |
102 // ------------------------------------------------------------------------ | 89 // ------------------------------------------------------------------------ |
103 | 90 |
104 namespace disk_cache { | 91 namespace disk_cache { |
105 | 92 |
106 BackendImpl::BackendImpl(const base::FilePath& path, | 93 BackendImplV3::Worker::Worker(const base::FilePath& path, |
107 base::MessageLoopProxy* cache_thread, | 94 base::MessageLoopProxy* main_thread) |
108 net::NetLog* net_log) | 95 : path_(path), |
109 : background_queue_(this, cache_thread), | 96 main_thread_(main_thread), |
110 path_(path), | 97 cleanup_work_item_(NULL), |
111 block_files_(path), | 98 init_(false), |
112 mask_(0), | 99 doubling_index_(false), |
113 max_size_(0), | 100 user_flags_(0) { |
114 up_ticks_(0), | 101 } |
115 cache_type_(net::DISK_CACHE), | 102 |
116 uma_report_(0), | 103 int BackendImplV3::Worker::Init(uint32 flags, scoped_ptr<InitResult>* result) { |
117 user_flags_(0), | |
118 init_(false), | |
119 restarted_(false), | |
120 unit_test_(false), | |
121 read_only_(false), | |
122 disabled_(false), | |
123 new_eviction_(false), | |
124 first_timer_(true), | |
125 user_load_(false), | |
126 net_log_(net_log), | |
127 done_(true, false), | |
128 ptr_factory_(this) { | |
129 } | |
130 | |
131 int BackendImpl::SyncInit() { | |
132 #if defined(NET_BUILD_STRESS_CACHE) | |
133 // Start evictions right away. | |
134 up_ticks_ = kTrimDelay * 2; | |
135 #endif | |
136 DCHECK(!init_); | 104 DCHECK(!init_); |
137 if (init_) | 105 if (init_) |
138 return net::ERR_FAILED; | 106 return ERR_INIT_FAILED; |
| 107 |
| 108 user_flags_ = flags; |
| 109 result->reset(new InitResult); |
139 | 110 |
140 bool create_files = false; | 111 bool create_files = false; |
141 if (!InitBackingStore(&create_files)) { | 112 if (!InitBackingStore(&create_files)) |
142 ReportError(ERR_STORAGE_ERROR); | 113 return ERR_STORAGE_ERROR; |
143 return net::ERR_FAILED; | |
144 } | |
145 | |
146 num_refs_ = num_pending_io_ = max_refs_ = 0; | |
147 entry_count_ = byte_count_ = 0; | |
148 | |
149 if (!restarted_) { | |
150 buffer_bytes_ = 0; | |
151 trace_object_ = TraceObject::GetTraceObject(); | |
152 // Create a recurrent timer of 30 secs. | |
153 int timer_delay = unit_test_ ? 1000 : 30000; | |
154 timer_.reset(new base::RepeatingTimer<BackendImpl>()); | |
155 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, | |
156 &BackendImpl::OnStatsTimer); | |
157 } | |
158 | 114 |
159 init_ = true; | 115 init_ = true; |
160 Trace("Init"); | 116 if (!LoadIndex(result->get())) |
161 | 117 return ERR_INIT_FAILED; |
162 if (data_->header.experiment != NO_EXPERIMENT && | 118 |
163 cache_type_ != net::DISK_CACHE) { | 119 int rv = ERR_NO_ERROR; |
164 // No experiment for other caches. | 120 IndexHeaderV3* index = |
165 return net::ERR_FAILED; | 121 reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
166 } | 122 if (create_files || !index->num_entries) |
167 | 123 rv = ERR_CACHE_CREATED; |
168 if (!(user_flags_ & kNoRandom)) { | 124 |
169 // The unit test controls directly what to test. | 125 if (create_files && (flags & EVICTION_V2)) { |
170 new_eviction_ = (cache_type_ == net::DISK_CACHE); | 126 index->flags |= CACHE_EVICTION_2; |
171 } | 127 } |
172 | 128 |
173 if (!CheckIndex()) { | 129 if (!(flags & BASIC_UNIT_TEST) && !InitExperiment(index)) |
174 ReportError(ERR_INIT_FAILED); | 130 return ERR_INIT_FAILED; |
175 return net::ERR_FAILED; | 131 |
176 } | 132 if (index->crash != 0) |
177 | 133 rv = ERR_PREVIOUS_CRASH; |
178 if (!restarted_ && (create_files || !data_->header.num_entries)) | 134 index->crash = 1; |
179 ReportError(ERR_CACHE_CREATED); | 135 |
180 | 136 block_files_.reset(new BlockFiles(path_)); |
181 if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE && | 137 if (flags & BASIC_UNIT_TEST) |
182 !InitExperiment(&data_->header, create_files)) { | 138 block_files_->UseSmallSizeIncrementsForTest(); |
183 return net::ERR_FAILED; | 139 |
184 } | 140 if (!block_files_->Init(create_files, kFirstAdditionalBlockFileV3)) |
185 | 141 return ERR_INIT_FAILED; |
186 // We don't care if the value overflows. The only thing we care about is that | 142 |
187 // the id cannot be zero, because that value is used as "not dirty". | 143 block_files_->GetBitmaps(index->max_block_file, |
188 // Increasing the value once per second gives us many years before we start | 144 &result->get()->block_bitmaps); |
189 // having collisions. | 145 index->max_block_file = static_cast<int>(result->get()->block_bitmaps.size()); |
190 data_->header.this_id++; | 146 |
191 if (!data_->header.this_id) | 147 if (!InitStats(index, result->get())) |
192 data_->header.this_id++; | 148 return ERR_INIT_FAILED; |
193 | |
194 bool previous_crash = (data_->header.crash != 0); | |
195 data_->header.crash = 1; | |
196 | |
197 if (!block_files_.Init(create_files)) | |
198 return net::ERR_FAILED; | |
199 | |
200 // We want to minimize the changes to cache for an AppCache. | |
201 if (cache_type() == net::APP_CACHE) { | |
202 DCHECK(!new_eviction_); | |
203 read_only_ = true; | |
204 } else if (cache_type() == net::SHADER_CACHE) { | |
205 DCHECK(!new_eviction_); | |
206 } | |
207 | |
208 eviction_.Init(this); | |
209 | |
210 // stats_ and rankings_ may end up calling back to us so we better be enabled. | |
211 disabled_ = false; | |
212 if (!InitStats()) | |
213 return net::ERR_FAILED; | |
214 | |
215 disabled_ = !rankings_.Init(this, new_eviction_); | |
216 | 149 |
217 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) | 150 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) |
218 trace_object_->EnableTracing(false); | 151 trace_object_->EnableTracing(false); |
219 int sc = SelfCheck(); | 152 int sc = SelfCheck(); |
220 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) | 153 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) |
221 NOTREACHED(); | 154 NOTREACHED(); |
222 trace_object_->EnableTracing(true); | 155 trace_object_->EnableTracing(true); |
223 #endif | 156 #endif |
224 | 157 |
225 if (previous_crash) { | 158 return rv; |
226 ReportError(ERR_PREVIOUS_CRASH); | 159 } |
227 } else if (!restarted_) { | 160 |
228 ReportError(ERR_NO_ERROR); | 161 int BackendImplV3::Worker::Restart(uint32 flags, |
229 } | 162 scoped_ptr<InitResult>* result) { |
230 | 163 Trace("Worker::Restart"); |
231 FlushIndex(); | 164 if (init_) { |
232 | 165 init_ = false; |
233 return disabled_ ? net::ERR_FAILED : net::OK; | 166 } |
234 } | 167 |
235 | 168 CloseFiles(); |
236 void BackendImpl::PrepareForRestart() { | 169 DeleteCache(path_, false); |
237 // Reset the mask_ if it was not given by the user. | 170 |
238 if (!(user_flags_ & kMask)) | 171 return Init(flags, result); |
239 mask_ = 0; | 172 } |
240 | 173 |
241 if (!(user_flags_ & kNewEviction)) | 174 int BackendImplV3::Worker::GrowIndex(uint32 flags, |
242 new_eviction_ = false; | 175 scoped_ptr<InitResult>* result) { |
243 | 176 Trace("Worker::GrowIndex, flags 0x%x", flags); |
244 disabled_ = true; | 177 if (!init_) |
245 data_->header.crash = 0; | 178 return ERR_OPERATION_FAILED; |
246 index_->Flush(); | 179 |
247 index_ = NULL; | 180 if (flags & WorkItem::WORK_COMPLETE) { |
248 data_ = NULL; | 181 index_header_ = big_index_header_; |
249 block_files_.CloseFiles(); | 182 big_index_header_ = NULL; |
250 rankings_.Reset(); | 183 if (big_main_table_) { |
| 184 main_table_ = big_main_table_; |
| 185 big_main_table_ = NULL; |
| 186 } |
| 187 if (!big_extra_temp_table_) |
| 188 extra_table_ = big_extra_table_; |
| 189 big_extra_table_ = NULL; |
| 190 |
| 191 // If the index takes time to move the cells, it creates a new work item to |
| 192 // notify completion, which executes this code. |
| 193 if (big_extra_temp_table_) |
| 194 return GrowDone(); |
| 195 |
| 196 return ERR_NO_ERROR; |
| 197 } |
| 198 |
| 199 IndexHeaderV3* header = |
| 200 reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
| 201 |
| 202 int current_main_len = header->table_len / kBaseTableLen * kBaseTableLen; |
| 203 int step_size = std::min(8192, current_main_len / 8); |
| 204 if (user_flags_ & BASIC_UNIT_TEST) |
| 205 step_size = 8; |
| 206 if ((user_flags_ & UNIT_TEST_MODE) && !doubling_index_) |
| 207 step_size = (header->table_len * 3 / 2) & 0x7ffffff0; |
| 208 int new_len = header->table_len + step_size; |
| 209 |
| 210 bool double_index = false; |
| 211 if (!doubling_index_) { |
| 212 DCHECK(!big_extra_table_); |
| 213 DCHECK(!big_main_table_); |
| 214 double_index = (new_len / kBaseTableLen != |
| 215 header->table_len / kBaseTableLen); |
| 216 } |
| 217 |
| 218 int extra_len = new_len - kBaseTableLen; |
| 219 if (double_index) { |
| 220 // We double the table when the extra table is about to reach the size of |
| 221 // the main table. That means that right after this, the new extra table |
| 222 // should be between 19% and 23% of the main table so we start with 25%. |
| 223 extra_len = std::min(8192, current_main_len / 4); |
| 224 extra_len = (user_flags_ & BASIC_UNIT_TEST) ? 128 : extra_len; |
| 225 int main_len = (header->table_len / kBaseTableLen + 1) * kBaseTableLen; |
| 226 new_len = main_len + extra_len; |
| 227 |
| 228 if (!CreateExtraTable(extra_len * kBytesPerCell)) |
| 229 return ERR_OPERATION_FAILED; |
| 230 |
| 231 if (!main_table_->SetLength(main_len * kBytesPerCell)) |
| 232 return ERR_OPERATION_FAILED; |
| 233 } else if (doubling_index_) { |
| 234 if (!big_extra_temp_table_->SetLength(extra_len * kBytesPerCell)) |
| 235 return ERR_OPERATION_FAILED; |
| 236 } else { |
| 237 if (!extra_table_->SetLength(extra_len * kBytesPerCell)) |
| 238 return ERR_OPERATION_FAILED; |
| 239 } |
| 240 |
| 241 if (!index_header_->SetLength(GetIndexBitmapSize(new_len))) |
| 242 return ERR_OPERATION_FAILED; |
| 243 |
| 244 scoped_refptr<MappedFile> big_index_header = new MappedFile(); |
| 245 if (!big_index_header->Init(path_.AppendASCII(kIndexName), 0)) { |
| 246 LOG(ERROR) << "Unable to remap index"; |
| 247 return ERR_OPERATION_FAILED; |
| 248 } |
| 249 |
| 250 scoped_refptr<MappedFile> big_extra_table = new MappedFile(); |
| 251 const char* extra_name = (double_index || doubling_index_) ? kTable2TempName : |
| 252 kTable2Name; |
| 253 if (!big_extra_table->Init(path_.AppendASCII(extra_name), 0)) { |
| 254 LOG(ERROR) << "Unable to remap index_tb2"; |
| 255 return ERR_OPERATION_FAILED; |
| 256 } |
| 257 |
| 258 if (double_index) { |
| 259 scoped_refptr<MappedFile> big_main_table = new MappedFile(); |
| 260 if (!big_main_table->Init(path_.AppendASCII(kTable1Name), 0)) { |
| 261 LOG(ERROR) << "Unable to remap index_tb1"; |
| 262 return ERR_OPERATION_FAILED; |
| 263 } |
| 264 big_main_table_.swap(big_main_table); |
| 265 |
| 266 // Grab an extra reference to the new extra table that can be used for an |
| 267 // extended period, while the index is being rebuilt. The normal reference |
| 268 // (big_extra_table_) will be released when the work item is completed, but |
| 269 // that doesn't mean the index is done with it. |
| 270 // Note that we are able to process slow grow requests even when the index |
| 271 // is being doubled. |
| 272 big_extra_temp_table_ = big_extra_table; |
| 273 } |
| 274 big_index_header_.swap(big_index_header); |
| 275 big_extra_table_.swap(big_extra_table); |
| 276 |
| 277 header = reinterpret_cast<IndexHeaderV3*>(big_index_header_->buffer()); |
| 278 header->table_len = new_len; |
| 279 |
| 280 result->reset(new InitResult); |
| 281 result->get()->index_data.main_table = NULL; |
| 282 |
| 283 result->get()->index_data.index_bitmap = |
| 284 reinterpret_cast<IndexBitmap*>(big_index_header_->buffer()); |
| 285 result->get()->index_data.extra_table = |
| 286 reinterpret_cast<IndexBucket*>(big_extra_table_->buffer()); |
| 287 |
| 288 if (double_index) { |
| 289 result->get()->index_data.main_table = |
| 290 reinterpret_cast<IndexBucket*>(big_main_table_->buffer()); |
| 291 doubling_index_ = true; |
| 292 } |
| 293 |
| 294 return ERR_NO_ERROR; |
| 295 } |
| 296 |
| 297 int BackendImplV3::Worker::GrowFiles(uint32 flags, |
| 298 scoped_ptr<InitResult>* result) { |
| 299 Trace("Worker::GrowFiles, flags 0x%x", flags); |
| 300 if (!init_) |
| 301 return ERR_OPERATION_FAILED; |
| 302 |
| 303 if (flags & WorkItem::WORK_COMPLETE) { |
| 304 block_files_.reset(); |
| 305 block_files_.swap(big_block_files_); |
| 306 return ERR_NO_ERROR; |
| 307 } |
| 308 |
| 309 big_block_files_.reset(new BlockFiles(path_)); |
| 310 if (user_flags_ & BASIC_UNIT_TEST) |
| 311 big_block_files_->UseSmallSizeIncrementsForTest(); |
| 312 |
| 313 if (!big_block_files_->Init(false, kFirstAdditionalBlockFileV3)) |
| 314 return ERR_INIT_FAILED; |
| 315 |
| 316 IndexHeaderV3* index = |
| 317 reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
| 318 |
| 319 result->reset(new InitResult); |
| 320 big_block_files_->GetBitmaps(index->max_block_file, |
| 321 &result->get()->block_bitmaps); |
| 322 index->max_block_file = static_cast<int>(result->get()->block_bitmaps.size()); |
| 323 return ERR_NO_ERROR; |
| 324 } |
| 325 |
| 326 int BackendImplV3::Worker::Delete(Addr address) { |
| 327 if (address.is_block_file()) |
| 328 return ERR_OPERATION_FAILED; |
| 329 |
| 330 if (DeleteCacheFile(GetFileName(address))) |
| 331 return ERR_NO_ERROR; |
| 332 |
| 333 return ERR_OPERATION_FAILED; |
| 334 } |
| 335 |
| 336 int BackendImplV3::Worker::Close(Addr address) { |
| 337 if (address.is_block_file()) |
| 338 return ERR_OPERATION_FAILED; |
| 339 |
| 340 FilesMap::iterator it = files_.find(address.value()); |
| 341 if (it != files_.end()) |
| 342 files_.erase(it); |
| 343 |
| 344 return ERR_NO_ERROR; |
| 345 } |
| 346 |
| 347 void BackendImplV3::Worker::OnDoWork(WorkItem* work_item) { |
| 348 if (work_item->type() == WorkItem::WORK_CLEANUP) |
| 349 return Cleanup(work_item); |
| 350 |
| 351 work_item->Start(this); |
| 352 } |
| 353 |
| 354 void BackendImplV3::Worker::DoneWithItem(WorkItem* work_item) { |
| 355 bool rv = main_thread_->PostTask(FROM_HERE, |
| 356 base::Bind(&WorkItem::OnDone, work_item)); |
| 357 DCHECK(rv); |
| 358 } |
| 359 |
| 360 File* BackendImplV3::Worker::GetBackingFile(Addr address, bool for_write) { |
| 361 disk_cache::File* file; |
| 362 if (address.is_separate_file()) |
| 363 file = GetExternalFile(address, for_write); |
| 364 else |
| 365 file = block_files_->GetFile(address); |
| 366 return file; |
| 367 } |
| 368 |
| 369 File* BackendImplV3::Worker::GetBackupIndexFile() { |
| 370 DCHECK(!index_backup_.get()); |
| 371 index_backup_ = new MappedFile(); |
| 372 index_backup_->set_force_creation(); |
| 373 if (!index_backup_->InitNoMap(path_.AppendASCII(kIndexBackupName))) { |
| 374 LOG(ERROR) << "Unable to open index_bak"; |
| 375 return NULL; |
| 376 } |
| 377 return index_backup_.get(); |
| 378 } |
| 379 |
| 380 void BackendImplV3::Worker::CloseBackupIndexFile() { |
| 381 index_backup_ = NULL; |
| 382 } |
| 383 |
| 384 bool BackendImplV3::Worker::IsValid() { |
| 385 return init_; |
| 386 } |
| 387 |
| 388 // ------------------------------------------------------------------------ |
| 389 |
| 390 BackendImplV3::Worker::~Worker() { |
| 391 if (cleanup_work_item_) |
| 392 main_thread_->PostTask(FROM_HERE, |
| 393 base::Bind(&WorkItem::OnDone, cleanup_work_item_)); |
| 394 } |
| 395 |
| 396 void BackendImplV3::Worker::Cleanup(WorkItem* work_item) { |
| 397 Trace("Worker::Cleanup"); |
| 398 if (!work_item->user_callback().is_null()) |
| 399 cleanup_work_item_ = work_item; |
| 400 |
| 401 if (init_) { |
| 402 IndexHeaderV3* index = |
| 403 reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
| 404 index->crash = 0; |
| 405 } |
| 406 |
| 407 CloseFiles(); |
251 init_ = false; | 408 init_ = false; |
252 restarted_ = true; | 409 |
253 } | 410 if (work_item->user_callback().is_null()) { |
254 | 411 // This is the only message we don't return to the main thread, we are done |
255 BackendImpl::~BackendImpl() { | 412 // with the work item for good. |
256 if (user_flags_ & kNoRandom) { | 413 work_item->Release(); |
257 // This is a unit test, so we want to be strict about not leaking entries | 414 } |
258 // and completing all the work. | 415 } |
259 background_queue_.WaitForPendingIO(); | 416 |
260 } else { | 417 void BackendImplV3::Worker::CloseFiles() { |
261 // This is most likely not a test, so we want to do as little work as | 418 index_header_ = NULL; |
262 // possible at this time, at the price of leaving dirty entries behind. | 419 main_table_ = NULL; |
263 background_queue_.DropPendingIO(); | 420 extra_table_ = NULL; |
264 } | 421 index_backup_ = NULL; |
265 | 422 block_files_->CloseFiles(); |
266 if (background_queue_.BackgroundIsCurrentThread()) { | 423 files_.clear(); |
267 // Unit tests may use the same thread for everything. | 424 |
268 CleanupCache(); | 425 big_index_header_ = NULL; |
269 } else { | 426 big_main_table_ = NULL; |
270 background_queue_.background_thread()->PostTask( | 427 big_extra_table_ = NULL; |
271 FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this))); | 428 big_extra_temp_table_ = NULL; |
272 // http://crbug.com/74623 | 429 if (big_block_files_.get()) |
273 base::ThreadRestrictions::ScopedAllowWait allow_wait; | 430 big_block_files_->CloseFiles(); |
274 done_.Wait(); | 431 } |
275 } | 432 |
276 } | 433 File* BackendImplV3::Worker::GetExternalFile(Addr address, bool for_write) { |
277 | 434 FilesMap::iterator it = files_.find(address.value()); |
278 void BackendImpl::CleanupCache() { | 435 if (it != files_.end()) |
279 Trace("Backend Cleanup"); | 436 return it->second; |
280 eviction_.Stop(); | 437 |
281 timer_.reset(); | 438 scoped_refptr<disk_cache::File> file(new disk_cache::File(false)); |
282 | 439 if (for_write) |
283 if (init_) { | 440 file->set_force_creation(); |
284 StoreStats(); | 441 if (file->Init(GetFileName(address))) |
285 if (data_) | 442 files_[address.value()] = file.get(); |
286 data_->header.crash = 0; | 443 else |
287 | 444 file = NULL; |
288 if (user_flags_ & kNoRandom) { | 445 |
289 // This is a net_unittest, verify that we are not 'leaking' entries. | 446 return file; |
290 File::WaitForPendingIO(&num_pending_io_); | 447 } |
291 DCHECK(!num_refs_); | 448 |
292 } else { | 449 base::FilePath BackendImplV3::Worker::GetFileName(Addr address) const { |
293 File::DropPendingIO(); | |
294 } | |
295 } | |
296 block_files_.CloseFiles(); | |
297 FlushIndex(); | |
298 index_ = NULL; | |
299 ptr_factory_.InvalidateWeakPtrs(); | |
300 done_.Signal(); | |
301 } | |
302 | |
303 base::FilePath BackendImpl::GetFileName(Addr address) const { | |
304 if (!address.is_separate_file() || !address.is_initialized()) { | 450 if (!address.is_separate_file() || !address.is_initialized()) { |
305 NOTREACHED(); | 451 NOTREACHED(); |
306 return base::FilePath(); | 452 return base::FilePath(); |
307 } | 453 } |
308 | 454 |
309 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber()); | 455 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber()); |
310 return path_.AppendASCII(tmp); | 456 return path_.AppendASCII(tmp); |
311 } | 457 } |
312 | 458 |
313 // We just created a new file so we're going to write the header and set the | 459 // We just created a new file so we're going to write the header and set the |
314 // file length to include the hash table (zero filled). | 460 // file length to include the hash table (zero filled). |
315 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { | 461 bool BackendImplV3::Worker::CreateBackingStore(disk_cache::File* file) { |
316 AdjustMaxCacheSize(0); | 462 IndexHeaderV3 header; |
317 | 463 memset(&header, 0, sizeof(header)); |
318 IndexHeader header; | 464 header.magic = kIndexMagicV3; |
319 header.table_len = DesiredIndexTableLen(max_size_); | 465 header.version = kVersion3; |
320 | 466 header.max_block_file = kFirstAdditionalBlockFileV3; |
321 // We need file version 2.1 for the new eviction algorithm. | 467 |
322 if (new_eviction_) | 468 // Start with 12.5% of the size of the main table. |
323 header.version = 0x20001; | 469 int extra_len = (user_flags_ & BASIC_UNIT_TEST) ? 8 : kBaseTableLen / 8; |
| 470 header.table_len = kBaseTableLen + extra_len; |
| 471 header.max_bucket = kBaseTableLen / 4 - 1; |
| 472 header.flags = SMALL_CACHE; |
324 | 473 |
325 header.create_time = Time::Now().ToInternalValue(); | 474 header.create_time = Time::Now().ToInternalValue(); |
| 475 header.base_time = (Time::Now() - TimeDelta::FromDays(20)).ToInternalValue(); |
326 | 476 |
327 if (!file->Write(&header, sizeof(header), 0)) | 477 if (!file->Write(&header, sizeof(header), 0)) |
328 return false; | 478 return false; |
329 | 479 |
330 return file->SetLength(GetIndexSize(header.table_len)); | 480 if (!file->SetLength(GetIndexBitmapSize(header.table_len))) |
| 481 return false; |
| 482 |
| 483 int flags = base::PLATFORM_FILE_READ | |
| 484 base::PLATFORM_FILE_WRITE | |
| 485 base::PLATFORM_FILE_CREATE | |
| 486 base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
| 487 |
| 488 base::FilePath name = path_.AppendASCII(kIndexBackupName); |
| 489 scoped_refptr<disk_cache::File> file2(new disk_cache::File( |
| 490 base::CreatePlatformFile(name, flags, NULL, NULL))); |
| 491 |
| 492 if (!file2->IsValid()) |
| 493 return false; |
| 494 |
| 495 if (!file2->Write(&header, sizeof(header), 0)) |
| 496 return false; |
| 497 |
| 498 if (!file2->SetLength(GetIndexBitmapSize(header.table_len))) |
| 499 return false; |
| 500 |
| 501 name = path_.AppendASCII(kTable1Name); |
| 502 file2 = new disk_cache::File(base::CreatePlatformFile(name, flags, NULL, |
| 503 NULL)); |
| 504 if (!file2->IsValid()) |
| 505 return false; |
| 506 |
| 507 if (!file2->SetLength(kBaseTableLen * kBytesPerCell)) |
| 508 return false; |
| 509 |
| 510 name = path_.AppendASCII(kTable2Name); |
| 511 file2 = new disk_cache::File(base::CreatePlatformFile(name, flags, NULL, |
| 512 NULL)); |
| 513 if (!file2->IsValid()) |
| 514 return false; |
| 515 |
| 516 if (!file2->SetLength(extra_len * kBytesPerCell)) |
| 517 return false; |
| 518 |
| 519 return true; |
331 } | 520 } |
332 | 521 |
333 bool BackendImpl::InitBackingStore(bool* file_created) { | 522 bool BackendImplV3::Worker::CreateExtraTable(int extra_len) { |
| 523 int flags = base::PLATFORM_FILE_READ | |
| 524 base::PLATFORM_FILE_WRITE | |
| 525 base::PLATFORM_FILE_CREATE | |
| 526 base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
| 527 |
| 528 base::FilePath name = path_.AppendASCII(kTable2TempName); |
| 529 scoped_refptr<disk_cache::File> file(new disk_cache::File( |
| 530 base::CreatePlatformFile(name, flags, NULL, NULL))); |
| 531 if (!file->IsValid()) |
| 532 return false; |
| 533 |
| 534 if (!file->SetLength(extra_len * kBytesPerCell)) |
| 535 return false; |
| 536 |
| 537 return true; |
| 538 } |
| 539 |
| 540 bool BackendImplV3::Worker::InitBackingStore(bool* file_created) { |
334 if (!file_util::CreateDirectory(path_)) | 541 if (!file_util::CreateDirectory(path_)) |
335 return false; | 542 return false; |
336 | 543 |
337 base::FilePath index_name = path_.AppendASCII(kIndexName); | 544 base::FilePath index_name = path_.AppendASCII(kIndexName); |
338 | 545 |
339 int flags = base::PLATFORM_FILE_READ | | 546 int flags = base::PLATFORM_FILE_READ | |
340 base::PLATFORM_FILE_WRITE | | 547 base::PLATFORM_FILE_WRITE | |
341 base::PLATFORM_FILE_OPEN_ALWAYS | | 548 base::PLATFORM_FILE_OPEN_ALWAYS | |
342 base::PLATFORM_FILE_EXCLUSIVE_WRITE; | 549 base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
343 scoped_refptr<disk_cache::File> file(new disk_cache::File( | 550 scoped_refptr<disk_cache::File> file(new disk_cache::File( |
344 base::CreatePlatformFile(index_name, flags, file_created, NULL))); | 551 base::CreatePlatformFile(index_name, flags, file_created, NULL))); |
345 | 552 |
346 if (!file->IsValid()) | 553 if (!file->IsValid()) |
347 return false; | 554 return false; |
348 | 555 |
349 bool ret = true; | 556 bool ret = true; |
350 if (*file_created) | 557 if (*file_created) |
351 ret = CreateBackingStore(file.get()); | 558 ret = CreateBackingStore(file); |
352 | 559 |
353 file = NULL; | 560 file = NULL; |
354 if (!ret) | 561 if (!ret) |
355 return false; | 562 return false; |
356 | 563 |
357 index_ = new MappedFile(); | 564 index_header_ = new MappedFile(); |
358 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0)); | 565 if (!index_header_->Init(index_name, 0)) { |
359 if (!data_) { | 566 LOG(ERROR) << "Unable to map index"; |
360 LOG(ERROR) << "Unable to map Index file"; | |
361 return false; | 567 return false; |
362 } | 568 } |
363 | 569 |
364 if (index_->GetLength() < sizeof(Index)) { | 570 if (index_header_->GetLength() < sizeof(IndexBitmap)) { |
365 // We verify this again on CheckIndex() but it's easier to make sure now | 571 // We verify this again on CheckIndex() but it's easier to make sure now |
366 // that the header is there. | 572 // that the header is there. |
367 LOG(ERROR) << "Corrupt Index file"; | 573 LOG(ERROR) << "Corrupt index file"; |
| 574 return false; |
| 575 } |
| 576 |
| 577 main_table_ = new MappedFile(); |
| 578 if (!main_table_->Init(path_.AppendASCII(kTable1Name), 0)) { |
| 579 LOG(ERROR) << "Unable to map index_tb1"; |
| 580 return false; |
| 581 } |
| 582 |
| 583 extra_table_ = new MappedFile(); |
| 584 if (!extra_table_->Init(path_.AppendASCII(kTable2Name), 0)) { |
| 585 LOG(ERROR) << "Unable to map index_tb2"; |
| 586 return false; |
| 587 } |
| 588 |
| 589 index_backup_ = new MappedFile(); |
| 590 if (!index_backup_->Init(path_.AppendASCII(kIndexBackupName), 0)) { |
| 591 LOG(ERROR) << "Unable to map index_bak"; |
368 return false; | 592 return false; |
369 } | 593 } |
370 | 594 |
371 return true; | 595 return true; |
372 } | 596 } |
373 | 597 |
// Maps the index tables into |init_result| and snapshots the backup index
// (header + bitmap) into heap-allocated copies, closing the backup file on
// success. Returns false if either index file fails validation or a table
// file is shorter than the length recorded in the index header.
bool BackendImplV3::Worker::LoadIndex(InitResult* init_result) {
  // Raw views over the mapped files. Note these are taken before the files
  // are validated below, so they are only meaningful if we return true.
  init_result->index_data.index_bitmap =
      reinterpret_cast<IndexBitmap*>(index_header_->buffer());
  init_result->index_data.main_table =
      reinterpret_cast<IndexBucket*>(main_table_->buffer());
  init_result->index_data.extra_table =
      reinterpret_cast<IndexBucket*>(extra_table_->buffer());

  // The live index and its backup must both pass the same sanity checks.
  if (!CheckIndexFile(index_header_))
    return false;

  if (!CheckIndexFile(index_backup_))
    return false;

  IndexHeaderV3& header = init_result->index_data.index_bitmap->header;

  // table_len is split between the main and extra tables; the remainder
  // modulo kBaseTableLen belongs to the extra table.
  // NOTE(review): main_table_len is derived from kBaseTableLen rather than
  // header.table_len -- confirm this is intended once table_len grows past
  // kBaseTableLen.
  size_t extra_table_len = header.table_len % kBaseTableLen;
  size_t main_table_len = (kBaseTableLen - extra_table_len) * kBytesPerCell;
  extra_table_len *= kBytesPerCell;

  if (main_table_->GetLength() < main_table_len ||
      extra_table_->GetLength() < extra_table_len) {
    LOG(ERROR) << "Truncated table";
    return false;
  }

  IndexBitmap* index = reinterpret_cast<IndexBitmap*>(index_backup_->buffer());

  // Copy the backup header and bitmap out of the mapped file so the backing
  // file can be released below.
  init_result->index_data.backup_header.reset(new IndexHeaderV3);
  memcpy(init_result->index_data.backup_header.get(), &index->header,
         sizeof(index->header));

  // The bitmap is everything in the backup past the header; the backup copy
  // is sized from the backup's own table_len.
  size_t bitmap_len = GetIndexBitmapSize(index->header.table_len) -
                      sizeof(index->header);
  init_result->index_data.backup_bitmap.reset(new uint32[bitmap_len / 4]);
  memcpy(init_result->index_data.backup_bitmap.get(), &index->bitmap,
         bitmap_len);

  // Close the backup.
  index_backup_ = NULL;
  return true;
}
382 | 640 |
383 | 641 bool BackendImplV3::Worker::CheckIndexFile(MappedFile* file) { |
384 bool BackendImpl::CheckIndex() { | 642 size_t current_size = file->GetLength(); |
385 DCHECK(data_); | 643 if (current_size < sizeof(IndexBitmap)) { |
386 | |
387 size_t current_size = index_->GetLength(); | |
388 if (current_size < sizeof(Index)) { | |
389 LOG(ERROR) << "Corrupt Index file"; | 644 LOG(ERROR) << "Corrupt Index file"; |
390 return false; | 645 return false; |
391 } | 646 } |
392 | 647 |
393 if (new_eviction_) { | 648 IndexHeaderV3* header = reinterpret_cast<IndexHeaderV3*>(file->buffer()); |
394 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1. | 649 |
395 if (kIndexMagic != data_->header.magic || | 650 if (kIndexMagicV3 != header->magic || kVersion3 != header->version) { |
396 kCurrentVersion >> 16 != data_->header.version >> 16) { | 651 LOG(ERROR) << "Invalid file version or magic"; |
397 LOG(ERROR) << "Invalid file version or magic"; | 652 return false; |
398 return false; | |
399 } | |
400 if (kCurrentVersion == data_->header.version) { | |
401 // We need file version 2.1 for the new eviction algorithm. | |
402 UpgradeTo2_1(); | |
403 } | |
404 } else { | |
405 if (kIndexMagic != data_->header.magic || | |
406 kCurrentVersion != data_->header.version) { | |
407 LOG(ERROR) << "Invalid file version or magic"; | |
408 return false; | |
409 } | |
410 } | 653 } |
411 | 654 |
412 if (!data_->header.table_len) { | 655 if (header->table_len <= 0 || header->table_len > 1 << 22) { |
413 LOG(ERROR) << "Invalid table size"; | 656 LOG(ERROR) << "Invalid table size"; |
414 return false; | 657 return false; |
415 } | 658 } |
416 | 659 |
417 if (current_size < GetIndexSize(data_->header.table_len) || | 660 int min_mask = (user_flags_ & BASIC_UNIT_TEST) ? 0x3 : 0xff; |
418 data_->header.table_len & (kBaseTableLen - 1)) { | 661 if (current_size < GetIndexBitmapSize(header->table_len) || |
| 662 header->table_len & (min_mask)) { |
419 LOG(ERROR) << "Corrupt Index file"; | 663 LOG(ERROR) << "Corrupt Index file"; |
420 return false; | 664 return false; |
421 } | 665 } |
422 | 666 |
423 AdjustMaxCacheSize(data_->header.table_len); | 667 //AdjustMaxCacheSize(header->table_len); |
424 | 668 |
425 #if !defined(NET_BUILD_STRESS_CACHE) | 669 #if !defined(NET_BUILD_STRESS_CACHE) |
426 if (data_->header.num_bytes < 0 || | 670 if (header->num_bytes < 0 || header->max_bytes < 0 || |
427 (max_size_ < kint32max - kDefaultCacheSize && | 671 header->num_bytes > header->max_bytes + kDefaultCacheSize) { |
428 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) { | 672 LOG(ERROR) << "Invalid cache size"; |
429 LOG(ERROR) << "Invalid cache (current) size"; | |
430 return false; | 673 return false; |
431 } | 674 } |
432 #endif | 675 #endif |
433 | 676 |
434 if (data_->header.num_entries < 0) { | 677 if (header->num_entries < 0) { |
435 LOG(ERROR) << "Invalid number of entries"; | 678 LOG(ERROR) << "Invalid number of entries"; |
436 return false; | 679 return false; |
437 } | 680 } |
438 | 681 |
439 if (!mask_) | |
440 mask_ = data_->header.table_len - 1; | |
441 | |
442 // Load the table into memory with a single read. | 682 // Load the table into memory with a single read. |
443 scoped_ptr<char[]> buf(new char[current_size]); | 683 //scoped_array<char> buf(new char[current_size]); |
444 return index_->Read(buf.get(), current_size, 0); | 684 //return index_->Read(buf.get(), current_size, 0); |
| 685 |
| 686 return true; |
445 } | 687 } |
446 | 688 |
447 bool BackendImpl::InitStats() { | 689 bool BackendImplV3::Worker::InitStats(IndexHeaderV3* index, |
448 Addr address(data_->header.stats); | 690 InitResult* result) { |
449 int size = stats_.StorageSize(); | 691 Addr address(index->stats); |
450 | 692 if (!address.is_initialized()) |
451 if (!address.is_initialized()) { | 693 return true; |
452 FileType file_type = Addr::RequiredFileType(size); | |
453 DCHECK_NE(file_type, EXTERNAL); | |
454 int num_blocks = Addr::RequiredBlocks(size, file_type); | |
455 | |
456 if (!CreateBlock(file_type, num_blocks, &address)) | |
457 return false; | |
458 return stats_.Init(NULL, 0, address); | |
459 } | |
460 | 694 |
461 if (!address.is_block_file()) { | 695 if (!address.is_block_file()) { |
462 NOTREACHED(); | 696 NOTREACHED(); |
463 return false; | 697 return false; |
464 } | 698 } |
465 | 699 |
| 700 int size = address.num_blocks() * address.BlockSize(); |
| 701 |
466 // Load the required data. | 702 // Load the required data. |
467 size = address.num_blocks() * address.BlockSize(); | 703 MappedFile* file = GetMappedFile(address); |
468 MappedFile* file = File(address); | |
469 if (!file) | 704 if (!file) |
470 return false; | 705 return false; |
471 | 706 |
472 scoped_ptr<char[]> data(new char[size]); | 707 scoped_ptr<char[]> data(new char[size]); |
473 size_t offset = address.start_block() * address.BlockSize() + | 708 size_t offset = address.start_block() * address.BlockSize() + |
474 kBlockHeaderSize; | 709 kBlockHeaderSize; |
475 if (!file->Read(data.get(), size, offset)) | 710 if (!file->Read(data.get(), size, offset)) |
476 return false; | 711 return false; |
477 | 712 |
478 if (!stats_.Init(data.get(), size, address)) | 713 result->stats_data = data.Pass(); |
479 return false; | |
480 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain()) | |
481 stats_.InitSizeHistogram(); | |
482 return true; | 714 return true; |
483 } | 715 } |
484 | 716 |
| 717 int BackendImplV3::Worker::GrowDone() { |
| 718 Trace("Worker::GrowDone"); |
| 719 if (!init_) |
| 720 return ERR_OPERATION_FAILED; |
| 721 |
| 722 DCHECK(doubling_index_); |
| 723 doubling_index_ = false; |
| 724 |
| 725 extra_table_ = big_extra_temp_table_; |
| 726 big_extra_temp_table_ = NULL; |
| 727 |
| 728 return ERR_NO_ERROR; |
| 729 } |
| 730 |
485 } // namespace disk_cache | 731 } // namespace disk_cache |
OLD | NEW |