// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

// Define BLOCKFILE_BACKEND_IMPL_OBJ to be a disk_cache::BackendImpl* in order
// to use the CACHE_UMA histogram macro.
#define BLOCKFILE_BACKEND_IMPL_OBJ backend_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to relay the completion of a file IO
// operation to the actual net callback. It owns itself and is deleted once
// the operation completes or is discarded.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  virtual ~SyncCallback() {}

  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}
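
// A minimal lifecycle sketch for SyncCallback (illustrative only; |file|,
// |data|, |len| and |offset| are placeholders, not code from this file). The
// object is heap-allocated right before an asynchronous file operation and
// owns itself afterwards:
//
//   SyncCallback* cb = new SyncCallback(entry, buf, callback, event_type);
//   bool completed;
//   if (!file->Read(data, len, offset, cb, &completed))
//     cb->Discard();  // The operation failed; run no user callback.
//   else if (completed)
//     cb->Discard();  // Completed synchronously; the callback is not needed.
//   // Otherwise OnFileIOComplete() runs later and deletes |cb|.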

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|. Returns the
  // number of bytes copied.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};
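
// A minimal usage sketch for UserBuffer (illustrative only; the class is
// private to EntryImpl, and |backend| and |io_buf| are assumptions). Data
// stays in memory until EntryImpl flushes it to disk:
//
//   EntryImpl::UserBuffer buffer(backend);
//   if (buffer.PreWrite(0, 4096))
//     buffer.Write(0, io_buf, 4096);  // Buffered; nothing hits the disk yet.
//   int len = 4096;
//   if (buffer.PreRead(0, 0, &len))
//     buffer.Read(0, io_buf, len);    // Served straight from memory.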

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
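
// Worked example for PreRead (illustrative numbers): suppose the buffer holds
// bytes [1000, 3000) (offset_ == 1000, Size() == 2000) and the file stores
// 1000 bytes (eof == 1000).
//   - PreRead(1000, 1500, &len) returns true: the read starts inside the
//     buffer, so it can be served from memory.
//   - PreRead(1000, 200, &len) with len == 2000 clamps len to 800 and returns
//     false: the caller should read the first 800 bytes from disk, stopping
//     where the buffer begins.
//   - PreRead(0, 200, &len) returns true: there is no file data, so the gap
//     before the buffer reads back as zeros (see Read() below).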

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}
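
// Worked example for GrowBuffer (illustrative numbers, with kMaxBlockSize at
// 16 KB): a 16 KB buffer asked to hold 20 KB computes
//   to_add   = max(20 KB - 16 KB, 4 * 16 KB) = 64 KB
//   to_add   = max(16 KB, 64 KB)             = 64 KB
//   required = min(16 KB + 64 KB, limit)     = 80 KB (if the limit allows)
// so the buffer at least doubles on each growth, subject to the backend
// approving the allocation via IsAllocAllowed().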

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}
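
// The cross-link established above (sketch): the entry block and its rankings
// node record each other's addresses, which is what SanityCheck() and the
// recovery paths later rely on.
//
//   EntryStore (entry_)             RankingsNode (node_)
//     rankings_node  ------------->   (node block address)
//     (entry block address) <------   contents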

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}
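
// Worked example for NumBlocksForEntry (illustrative sizes): with 256-byte
// blocks, |key1_len| is the space left in the first block after the fixed
// EntryStore fields; assuming, say, key1_len == 160, a 300-byte key needs
// (300 - 160) / 256 + 2 == 2 blocks, and a 700-byte key needs
// (700 - 160) / 256 + 2 == 4 blocks.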

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}
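
// Illustrative caller pattern for the async paths above (hypothetical
// snippet; |entry|, |callback| and kSize are assumptions, not part of this
// file). A null callback runs the operation synchronously on this thread;
// otherwise the work is posted to the background queue:
//
//   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
//   int rv = entry->ReadData(0, 0, buf.get(), kSize, callback);
//   if (rv != net::ERR_IO_PENDING)
//     callback.Run(rv);  // Completed (or failed) up front.
//   // Otherwise |callback| runs once the background queue finishes the read.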

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback,
                  &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset and buf_len are known to be non-negative here, but their sum could
  // overflow and become negative, so check the sum as well.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}
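
// A sketch of the common sequential-write path through PrepareTarget
// (illustrative sizes): the first write to a new stream creates a UserBuffer,
// later appends extend it in memory, and only when the buffer cannot grow any
// further is the data flushed and the write sent to disk.
//
//   Write(0, 4 KB)   -> new UserBuffer; buffered in memory.
//   Write(4 KB, ...) -> PrepareBuffer() succeeds; still in memory.
//   ...
//   Write(N, ...)    -> PreWrite() fails; Flush() writes the buffer out,
//                       then the buffer is reused or dropped (see
//                       PrepareBuffer() below).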

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache