Chromium Code Reviews

Side by Side Diff: net/disk_cache/v3/backend_worker.cc

Issue 121643003: Reorganize net/disk_cache into backend specific directories. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebase to upstream Created 6 years, 10 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/backend_impl.h"
6
7 #include "base/bind.h"
8 #include "base/bind_helpers.h"
9 #include "base/file_util.h"
10 #include "base/files/file_path.h"
11 #include "base/hash.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram.h"
15 #include "base/metrics/stats_counters.h"
16 #include "base/rand_util.h"
17 #include "base/strings/string_util.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/sys_info.h"
20 #include "base/threading/thread_restrictions.h"
21 #include "base/time/time.h"
22 #include "base/timer/timer.h"
23 #include "net/base/net_errors.h"
24 #include "net/disk_cache/cache_util.h"
25 #include "net/disk_cache/entry_impl.h"
26 #include "net/disk_cache/errors.h"
27 #include "net/disk_cache/experiments.h"
28 #include "net/disk_cache/file.h"
29
30 #define CACHE_HISTOGRAM_MACROS_BACKEND_OBJ this
31 #include "net/disk_cache/histogram_macros.h"
32
33 using base::Time;
34 using base::TimeDelta;
35 using base::TimeTicks;
36
37 namespace {
38
39 const char* kIndexName = "index";
40
41 // Empirically, ~240 MB corresponds to fewer than 50k entries for 99% of users.
42 // Note that the actual target is to keep the index table load factor under 55%
43 // for most users.
44 const int k64kEntriesStore = 240 * 1000 * 1000;
45 const int kBaseTableLen = 64 * 1024;
46 const int kDefaultCacheSize = 80 * 1024 * 1024;
47
48 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
49 const int kTrimDelay = 10;
50
51 int DesiredIndexTableLen(int32 storage_size) {
52 if (storage_size <= k64kEntriesStore)
53 return kBaseTableLen;
54 if (storage_size <= k64kEntriesStore * 2)
55 return kBaseTableLen * 2;
56 if (storage_size <= k64kEntriesStore * 4)
57 return kBaseTableLen * 4;
58 if (storage_size <= k64kEntriesStore * 8)
59 return kBaseTableLen * 8;
60
61 // The largest storage_size representable by int32 requires a 4 MB table.
62 return kBaseTableLen * 16;
63 }
64
65 int MaxStorageSizeForTable(int table_len) {
66 return table_len * (k64kEntriesStore / kBaseTableLen);
67 }
68
69 size_t GetIndexSize(int table_len) {
70 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
71 return sizeof(disk_cache::IndexHeader) + table_size;
72 }
73
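A quick sanity check of the sizing constants above (illustrative arithmetic only, not part of the patch; it assumes CacheAddr is the 4-byte address type that GetIndexSize() sums over):

  // Largest table DesiredIndexTableLen() can return:
  //   kBaseTableLen * 16 = 65,536 * 16 = 1,048,576 buckets
  // At 4 bytes per CacheAddr:
  //   1,048,576 * 4 = 4,194,304 bytes (~4 MB) of table,
  // plus sizeof(IndexHeader), which is what GetIndexSize() reports and what
  // the "4 MB table" comment refers to.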
74 // ------------------------------------------------------------------------
75
76 // Sets the group for the current experiment. Returns false if the files should
77 // be discarded.
78 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
79 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
80 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
81 // Discard current cache.
82 return false;
83 }
84
85 if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
86 "ExperimentControl") {
87 if (cache_created) {
88 header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
89 return true;
90 } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
91 return false;
92 }
93 }
94
95 header->experiment = disk_cache::NO_EXPERIMENT;
96 return true;
97 }
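The branches above are easier to scan as a decision list (a restatement of the code, not new behavior):

  // - Header tagged EXPERIMENT_OLD_FILE1/2 (retired experiments): return false,
  //   i.e. the current cache files are discarded.
  // - Client in the "ExperimentControl" group of SimpleCacheTrial:
  //     freshly created cache -> tag it EXPERIMENT_SIMPLE_CONTROL and keep it;
  //     existing cache not tagged EXPERIMENT_SIMPLE_CONTROL -> return false.
  // - Any other case falls through: the tag is reset to NO_EXPERIMENT and the
  //   cache is kept.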
98
99 } // namespace
100
101 // ------------------------------------------------------------------------
102
103 namespace disk_cache {
104
105 BackendImpl::BackendImpl(const base::FilePath& path,
106 base::MessageLoopProxy* cache_thread,
107 net::NetLog* net_log)
108 : background_queue_(this, cache_thread),
109 path_(path),
110 block_files_(path),
111 mask_(0),
112 max_size_(0),
113 up_ticks_(0),
114 cache_type_(net::DISK_CACHE),
115 uma_report_(0),
116 user_flags_(0),
117 init_(false),
118 restarted_(false),
119 unit_test_(false),
120 read_only_(false),
121 disabled_(false),
122 new_eviction_(false),
123 first_timer_(true),
124 user_load_(false),
125 net_log_(net_log),
126 done_(true, false),
127 ptr_factory_(this) {
128 }
129
130 int BackendImpl::SyncInit() {
131 #if defined(NET_BUILD_STRESS_CACHE)
132 // Start evictions right away.
133 up_ticks_ = kTrimDelay * 2;
134 #endif
135 DCHECK(!init_);
136 if (init_)
137 return net::ERR_FAILED;
138
139 bool create_files = false;
140 if (!InitBackingStore(&create_files)) {
141 ReportError(ERR_STORAGE_ERROR);
142 return net::ERR_FAILED;
143 }
144
145 num_refs_ = num_pending_io_ = max_refs_ = 0;
146 entry_count_ = byte_count_ = 0;
147
148 if (!restarted_) {
149 buffer_bytes_ = 0;
150 trace_object_ = TraceObject::GetTraceObject();
151 // Create a recurring timer: 30 seconds normally, 1 second for unit tests.
152 int timer_delay = unit_test_ ? 1000 : 30000;
153 timer_.reset(new base::RepeatingTimer<BackendImpl>());
154 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
155 &BackendImpl::OnStatsTimer);
156 }
157
158 init_ = true;
159 Trace("Init");
160
161 if (data_->header.experiment != NO_EXPERIMENT &&
162 cache_type_ != net::DISK_CACHE) {
163 // No experiment for other caches.
164 return net::ERR_FAILED;
165 }
166
167 if (!(user_flags_ & kNoRandom)) {
168 // The unit test directly controls what to test.
169 new_eviction_ = (cache_type_ == net::DISK_CACHE);
170 }
171
172 if (!CheckIndex()) {
173 ReportError(ERR_INIT_FAILED);
174 return net::ERR_FAILED;
175 }
176
177 if (!restarted_ && (create_files || !data_->header.num_entries))
178 ReportError(ERR_CACHE_CREATED);
179
180 if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
181 !InitExperiment(&data_->header, create_files)) {
182 return net::ERR_FAILED;
183 }
184
185 // We don't care if the value overflows. The only thing we care about is that
186 // the id cannot be zero, because that value is used as "not dirty".
187 // Increasing the value once per second gives us many years before we start
188 // having collisions.
189 data_->header.this_id++;
190 if (!data_->header.this_id)
191 data_->header.this_id++;
192
193 bool previous_crash = (data_->header.crash != 0);
194 data_->header.crash = 1;
195
196 if (!block_files_.Init(create_files))
197 return net::ERR_FAILED;
198
199 // We want to minimize the changes to the cache for an AppCache.
200 if (cache_type() == net::APP_CACHE) {
201 DCHECK(!new_eviction_);
202 read_only_ = true;
203 } else if (cache_type() == net::SHADER_CACHE) {
204 DCHECK(!new_eviction_);
205 }
206
207 eviction_.Init(this);
208
209 // stats_ and rankings_ may end up calling back to us, so we had better be enabled.
210 disabled_ = false;
211 if (!InitStats())
212 return net::ERR_FAILED;
213
214 disabled_ = !rankings_.Init(this, new_eviction_);
215
216 #if defined(STRESS_CACHE_EXTENDED_VALIDATION)
217 trace_object_->EnableTracing(false);
218 int sc = SelfCheck();
219 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
220 NOTREACHED();
221 trace_object_->EnableTracing(true);
222 #endif
223
224 if (previous_crash) {
225 ReportError(ERR_PREVIOUS_CRASH);
226 } else if (!restarted_) {
227 ReportError(ERR_NO_ERROR);
228 }
229
230 FlushIndex();
231
232 return disabled_ ? net::ERR_FAILED : net::OK;
233 }
234
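The this_id bump above is easy to miss: zero is reserved as the "not dirty" marker, so the counter has to skip it when it wraps. A minimal standalone sketch of that rule (hypothetical helper name, not part of the patch):

  #include <stdint.h>

  // Returns the next backend id; never returns zero, which means "not dirty".
  uint32_t NextBackendId(uint32_t current) {
    uint32_t next = current + 1;
    return next ? next : 1;
  }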
235 void BackendImpl::PrepareForRestart() {
236 // Reset the mask_ if it was not given by the user.
237 if (!(user_flags_ & kMask))
238 mask_ = 0;
239
240 if (!(user_flags_ & kNewEviction))
241 new_eviction_ = false;
242
243 disabled_ = true;
244 data_->header.crash = 0;
245 index_->Flush();
246 index_ = NULL;
247 data_ = NULL;
248 block_files_.CloseFiles();
249 rankings_.Reset();
250 init_ = false;
251 restarted_ = true;
252 }
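PrepareForRestart() has no caller in this file; the assumed restart sequence elsewhere in the backend looks roughly like this (a sketch, not the actual code):

  // backend->PrepareForRestart();  // unmap the index, mark restarted_
  // ...delete or recreate the cache files...
  // backend->SyncInit();           // restarted_ == true skips the timer and
  //                                // buffer re-initialization above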
253
254 BackendImpl::~BackendImpl() {
255 if (user_flags_ & kNoRandom) {
256 // This is a unit test, so we want to be strict about not leaking entries
257 // and completing all the work.
258 background_queue_.WaitForPendingIO();
259 } else {
260 // This is most likely not a test, so we want to do as little work as
261 // possible at this time, at the price of leaving dirty entries behind.
262 background_queue_.DropPendingIO();
263 }
264
265 if (background_queue_.BackgroundIsCurrentThread()) {
266 // Unit tests may use the same thread for everything.
267 CleanupCache();
268 } else {
269 background_queue_.background_thread()->PostTask(
270 FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
271 // http://crbug.com/74623
272 base::ThreadRestrictions::ScopedAllowWait allow_wait;
273 done_.Wait();
274 }
275 }
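FinalCleanupCallback is not defined in this file; based on how the destructor and CleanupCache() pair up, it is assumed to be little more than the following sketch:

  void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
    // Runs on the cache thread; CleanupCache() signals |done_| when finished,
    // which releases the Wait() in the destructor above.
    backend->CleanupCache();
  }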
276
277 void BackendImpl::CleanupCache() {
278 Trace("Backend Cleanup");
279 eviction_.Stop();
280 timer_.reset();
281
282 if (init_) {
283 StoreStats();
284 if (data_)
285 data_->header.crash = 0;
286
287 if (user_flags_ & kNoRandom) {
288 // This is a net_unittests run; verify that we are not 'leaking' entries.
289 File::WaitForPendingIO(&num_pending_io_);
290 DCHECK(!num_refs_);
291 } else {
292 File::DropPendingIO();
293 }
294 }
295 block_files_.CloseFiles();
296 FlushIndex();
297 index_ = NULL;
298 ptr_factory_.InvalidateWeakPtrs();
299 done_.Signal();
300 }
301
302 base::FilePath BackendImpl::GetFileName(Addr address) const {
303 if (!address.is_separate_file() || !address.is_initialized()) {
304 NOTREACHED();
305 return base::FilePath();
306 }
307
308 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
309 return path_.AppendASCII(tmp);
310 }
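A concrete example of the naming scheme (illustrative values only):

  std::string a = base::StringPrintf("f_%06x", 3);     // "f_000003"
  std::string b = base::StringPrintf("f_%06x", 0x2a);  // "f_00002a"

so an external-file address with FileNumber() == 3 resolves to f_000003 inside the cache directory.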
311
312 // We just created a new file, so we write the header and set the file length
313 // to include the (zero-filled) hash table.
314 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
315 AdjustMaxCacheSize(0);
316
317 IndexHeader header;
318 header.table_len = DesiredIndexTableLen(max_size_);
319
320 // We need file version 2.1 for the new eviction algorithm.
321 if (new_eviction_)
322 header.version = 0x20001;
323
324 header.create_time = Time::Now().ToInternalValue();
325
326 if (!file->Write(&header, sizeof(header), 0))
327 return false;
328
329 return file->SetLength(GetIndexSize(header.table_len));
330 }
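A note on the version constant (descriptive only): the on-disk version packs major.minor into one 32-bit value, so 0x20000 is 2.0 and 0x20001 is 2.1. CheckIndex() below compares only the major half under the new eviction scheme, which is how a 2.0 file gets accepted and then upgraded.

  // Illustrative decoding of the packed version:
  //   major = header.version >> 16;     // 2
  //   minor = header.version & 0xffff;  // 0 for 2.0, 1 for 2.1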
331
332 bool BackendImpl::InitBackingStore(bool* file_created) {
333 if (!base::CreateDirectory(path_))
334 return false;
335
336 base::FilePath index_name = path_.AppendASCII(kIndexName);
337
338 int flags = base::PLATFORM_FILE_READ |
339 base::PLATFORM_FILE_WRITE |
340 base::PLATFORM_FILE_OPEN_ALWAYS |
341 base::PLATFORM_FILE_EXCLUSIVE_WRITE;
342 scoped_refptr<disk_cache::File> file(new disk_cache::File(
343 base::CreatePlatformFile(index_name, flags, file_created, NULL)));
344
345 if (!file->IsValid())
346 return false;
347
348 bool ret = true;
349 if (*file_created)
350 ret = CreateBackingStore(file.get());
351
352 file = NULL;
353 if (!ret)
354 return false;
355
356 index_ = new MappedFile();
357 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
358 if (!data_) {
359 LOG(ERROR) << "Unable to map Index file";
360 return false;
361 }
362
363 if (index_->GetLength() < sizeof(Index)) {
364 // We verify this again in CheckIndex(), but it is easier to make sure now
365 // that the header is there.
366 LOG(ERROR) << "Corrupt Index file";
367 return false;
368 }
369
370 return true;
371 }
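A note on the open flags (based on the base platform-file semantics of this era, stated as an assumption): PLATFORM_FILE_OPEN_ALWAYS opens the index if it already exists and creates it otherwise, and the |file_created| out-parameter reports which of the two happened.

  // *file_created == true  -> brand new index: CreateBackingStore() writes the
  //                           header and sizes the zero-filled table.
  // *file_created == false -> existing index: contents are left untouched and
  //                           validated later by CheckIndex().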
372
373 void BackendImpl::ReportError(int error) {
374 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
375 error == ERR_CACHE_CREATED);
376
377 // We transmit positive numbers instead of direct error codes.
378 DCHECK_LE(error, 0);
379 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
380 }
381
382
383 bool BackendImpl::CheckIndex() {
384 DCHECK(data_);
385
386 size_t current_size = index_->GetLength();
387 if (current_size < sizeof(Index)) {
388 LOG(ERROR) << "Corrupt Index file";
389 return false;
390 }
391
392 if (new_eviction_) {
393 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
394 if (kIndexMagic != data_->header.magic ||
395 kCurrentVersion >> 16 != data_->header.version >> 16) {
396 LOG(ERROR) << "Invalid file version or magic";
397 return false;
398 }
399 if (kCurrentVersion == data_->header.version) {
400 // We need file version 2.1 for the new eviction algorithm.
401 UpgradeTo2_1();
402 }
403 } else {
404 if (kIndexMagic != data_->header.magic ||
405 kCurrentVersion != data_->header.version) {
406 LOG(ERROR) << "Invalid file version or magic";
407 return false;
408 }
409 }
410
411 if (!data_->header.table_len) {
412 LOG(ERROR) << "Invalid table size";
413 return false;
414 }
415
416 if (current_size < GetIndexSize(data_->header.table_len) ||
417 data_->header.table_len & (kBaseTableLen - 1)) {
418 LOG(ERROR) << "Corrupt Index file";
419 return false;
420 }
421
422 AdjustMaxCacheSize(data_->header.table_len);
423
424 #if !defined(NET_BUILD_STRESS_CACHE)
425 if (data_->header.num_bytes < 0 ||
426 (max_size_ < kint32max - kDefaultCacheSize &&
427 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
428 LOG(ERROR) << "Invalid cache (current) size";
429 return false;
430 }
431 #endif
432
433 if (data_->header.num_entries < 0) {
434 LOG(ERROR) << "Invalid number of entries";
435 return false;
436 }
437
438 if (!mask_)
439 mask_ = data_->header.table_len - 1;
440
441 // Load the table into memory with a single read.
442 scoped_ptr<char[]> buf(new char[current_size]);
443 return index_->Read(buf.get(), current_size, 0);
444 }
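Why mask_ = table_len - 1 is a valid hash mask (descriptive note): DesiredIndexTableLen() only ever returns power-of-two multiples of kBaseTableLen, and the (table_len & (kBaseTableLen - 1)) check above rejects anything that is not a multiple of 64 Ki, so in practice table_len is a power of two and the subtraction yields an all-ones mask.

  // Illustrative: table_len == 131,072 (kBaseTableLen * 2) gives
  // mask_ == 0x1FFFF, and a bucket is chosen as (hash & mask_).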
445
446 bool BackendImpl::InitStats() {
447 Addr address(data_->header.stats);
448 int size = stats_.StorageSize();
449
450 if (!address.is_initialized()) {
451 FileType file_type = Addr::RequiredFileType(size);
452 DCHECK_NE(file_type, EXTERNAL);
453 int num_blocks = Addr::RequiredBlocks(size, file_type);
454
455 if (!CreateBlock(file_type, num_blocks, &address))
456 return false;
457 return stats_.Init(NULL, 0, address);
458 }
459
460 if (!address.is_block_file()) {
461 NOTREACHED();
462 return false;
463 }
464
465 // Load the required data.
466 size = address.num_blocks() * address.BlockSize();
467 MappedFile* file = File(address);
468 if (!file)
469 return false;
470
471 scoped_ptr<char[]> data(new char[size]);
472 size_t offset = address.start_block() * address.BlockSize() +
473 kBlockHeaderSize;
474 if (!file->Read(data.get(), size, offset))
475 return false;
476
477 if (!stats_.Init(data.get(), size, address))
478 return false;
479 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
480 stats_.InitSizeHistogram();
481 return true;
482 }
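The read above pulls the stats record straight out of a block file; the offset arithmetic, spelled out (descriptive note):

  // The record starts kBlockHeaderSize bytes into the file, plus the offset of
  // its first block:
  //   offset = start_block * BlockSize() + kBlockHeaderSize
  //   length = num_blocks * BlockSize()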
483
484 } // namespace disk_cache
