Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(55)

Side by Side Diff: net/disk_cache/v3/backend_worker.cc

Issue 121643003: Reorganize net/disk_cache into backend specific directories. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebase & remediate Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/backend_impl.h"
6
7 #include "base/bind.h"
8 #include "base/bind_helpers.h"
9 #include "base/file_util.h"
10 #include "base/files/file_path.h"
11 #include "base/hash.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram.h"
15 #include "base/metrics/stats_counters.h"
16 #include "base/rand_util.h"
17 #include "base/strings/string_util.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/sys_info.h"
20 #include "base/threading/thread_restrictions.h"
21 #include "base/time/time.h"
22 #include "base/timer/timer.h"
23 #include "net/base/net_errors.h"
24 #include "net/disk_cache/cache_util.h"
25 #include "net/disk_cache/entry_impl.h"
26 #include "net/disk_cache/errors.h"
27 #include "net/disk_cache/experiments.h"
28 #include "net/disk_cache/file.h"
29
30 // Define BLOCKFILE_BACKEND_IMPL_OBJ to be a disk_cache::BackendImpl* in order
31 // to use the CACHE_UMA histogram macro.
32 #define BLOCKFILE_BACKEND_IMPL_OBJ this
33 #include "net/disk_cache/histogram_macros.h"
34
35 using base::Time;
36 using base::TimeDelta;
37 using base::TimeTicks;
38
39 namespace {
40
41 const char* kIndexName = "index";
42
43 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
44 // Note that the actual target is to keep the index table load factor under 55%
45 // for most users.
46 const int k64kEntriesStore = 240 * 1000 * 1000;
47 const int kBaseTableLen = 64 * 1024;
48 const int kDefaultCacheSize = 80 * 1024 * 1024;
49
50 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
51 const int kTrimDelay = 10;
52
53 int DesiredIndexTableLen(int32 storage_size) {
54 if (storage_size <= k64kEntriesStore)
55 return kBaseTableLen;
56 if (storage_size <= k64kEntriesStore * 2)
57 return kBaseTableLen * 2;
58 if (storage_size <= k64kEntriesStore * 4)
59 return kBaseTableLen * 4;
60 if (storage_size <= k64kEntriesStore * 8)
61 return kBaseTableLen * 8;
62
63 // The biggest storage_size for int32 requires a 4 MB table.
64 return kBaseTableLen * 16;
65 }
66
67 int MaxStorageSizeForTable(int table_len) {
68 return table_len * (k64kEntriesStore / kBaseTableLen);
69 }
70
71 size_t GetIndexSize(int table_len) {
72 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
73 return sizeof(disk_cache::IndexHeader) + table_size;
74 }
75
76 // ------------------------------------------------------------------------
77
78 // Sets group for the current experiment. Returns false if the files should be
79 // discarded.
80 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
81 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
82 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
83 // Discard current cache.
84 return false;
85 }
86
87 if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
88 "ExperimentControl") {
89 if (cache_created) {
90 header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
91 return true;
92 } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
93 return false;
94 }
95 }
96
97 header->experiment = disk_cache::NO_EXPERIMENT;
98 return true;
99 }
100
101 } // namespace
102
103 // ------------------------------------------------------------------------
104
105 namespace disk_cache {
106
// Constructs a backend rooted at |path| whose blocking work runs on
// |cache_thread|. Heavy initialization is deferred to SyncInit(); this only
// wires the background queue and zeroes counters/flags.
BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),  // Waited on in ~BackendImpl, signaled by CleanupCache().
      ptr_factory_(this) {
}
131
// Performs the actual (blocking) initialization: opens and maps the index
// file, validates it, then brings up block files, eviction, stats and
// rankings. Returns net::OK on success, net::ERR_FAILED otherwise. Must not
// be called on an already-initialized backend.
int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  // A brand new index (or one with no entries) counts as "cache created".
  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  // A non-zero crash flag means the previous run never reached CleanupCache();
  // re-arm it now so the next run can detect a crash during this one.
  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}
236
// Drops all in-memory state tied to the current backing files so that
// SyncInit() can run again after the cache is recreated. The on-disk crash
// flag is cleared and flushed before the index mapping is released.
void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;  // Record a clean state before unmapping.
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;  // SyncInit() skips timer/trace setup on restart.
}
255
// Tears the backend down. Cleanup must run on the background thread; when the
// destructor runs elsewhere it posts the work and blocks on |done_| until
// CleanupCache() signals completion.
BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    // FinalCleanupCallback is defined elsewhere in this file; presumably it
    // invokes CleanupCache() on the background thread -- confirm there.
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}
278
// Releases everything the backend owns: stops eviction and the stats timer,
// persists stats, clears the on-disk crash flag, closes block files and the
// index, and finally signals |done_| so a destructor blocked on another
// thread can return.
void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;  // Mark a clean shutdown on disk.

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  // Invalidate weak pointers before signaling so no callback can reach a
  // half-destroyed object.
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}
303
304 base::FilePath BackendImpl::GetFileName(Addr address) const {
305 if (!address.is_separate_file() || !address.is_initialized()) {
306 NOTREACHED();
307 return base::FilePath();
308 }
309
310 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
311 return path_.AppendASCII(tmp);
312 }
313
314 // We just created a new file so we're going to write the header and set the
315 // file length to include the hash table (zero filled).
316 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
317 AdjustMaxCacheSize(0);
318
319 IndexHeader header;
320 header.table_len = DesiredIndexTableLen(max_size_);
321
322 // We need file version 2.1 for the new eviction algorithm.
323 if (new_eviction_)
324 header.version = 0x20001;
325
326 header.create_time = Time::Now().ToInternalValue();
327
328 if (!file->Write(&header, sizeof(header), 0))
329 return false;
330
331 return file->SetLength(GetIndexSize(header.table_len));
332 }
333
// Creates the cache directory if needed, opens (or creates) the index file,
// and maps it into |index_|/|data_|. On return |*file_created| says whether a
// brand new index was created (and therefore initialized through
// CreateBackingStore()). Returns false on any I/O or mapping failure.
bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  // Drop the plain handle before mapping the same file below.
  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}
374
// Records |error| (0 or a negative code from disk_cache/errors.h) in the
// cache error histogram.
void BackendImpl::ReportError(int error) {
  // Under stress builds only "clean" events are expected here.
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}
383
384
// Validates the mapped index: magic, version (upgrading 2.0 to 2.1 when the
// new eviction algorithm is active), table length and stored counters. Also
// derives |mask_| from the table length and pre-reads the whole table so it
// is resident. Returns false if the index is unusable.
bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  // The table must fit inside the file and be a multiple of kBaseTableLen.
  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  // Sanity-check the stored byte count against the configured maximum (with
  // kDefaultCacheSize of slack), guarding the addition against int32 overflow.
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  // NOTE(review): assumes table_len is a power of two (as produced by
  // DesiredIndexTableLen); a plain multiple of kBaseTableLen would give a
  // non-contiguous mask -- confirm.
  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}
447
// Loads (or, on first run, allocates) the on-disk stats record referenced by
// the index header and hands it to |stats_|. Returns false on allocation or
// read failure.
bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    // No stats stored yet: allocate a block-file record large enough and let
    // stats_ start from scratch.
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  // Offset of the record inside the block file: skip the file header, then
  // the blocks that precede this record.
  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}
485
486 } // namespace disk_cache
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698