// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

#define CACHE_HISTOGRAM_MACROS_BACKEND_IMPL_OBJ backend_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to relay the completion of a file IO
// operation to the actual net completion callback.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  virtual ~SyncCallback() {}

  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

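// Discard() drops the buffer and the user callback, then goes through the
// normal completion path, so the IO count, the entry reference taken in the
// constructor, and |this| are all released in one place.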
void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
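// For example, with a 16 KB kMaxBlockSize: if the first write lands at offset
// 20000, the buffer starts there (Start() == 20000) and covers nothing before
// it; if the first write lands at offset 10000, the buffer starts at zero and
// bytes 0..9999 are zero-filled.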
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);
  // Reads up to |len| bytes into |buf| starting at the given |offset|, and
  // returns the number of bytes copied.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

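  // Otherwise grow the window from its current start, allowing up to 20%
  // slack over kMaxBufferSize; presumably the slack avoids an early flush
  // when a write slightly overruns an almost-full buffer.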
  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // There is no data on disk for this range, so let's fill the first part
    // with zeros.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

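  // Grow by at least the requested delta, at least four blocks
  // (4 * kMaxBlockSize), and at least the current capacity (so the buffer at
  // least doubles), but never beyond |limit|.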
  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
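    // The key does not fit in the entry block itself, so store it in its own
    // block (or external file) and record only its address here.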
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

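  // The key must be stored in exactly one place: inline in the entry block
  // when it fits, at |long_key| otherwise, and the kind of address (block
  // file vs. separate file) must be consistent with the key length.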
  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

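  // Each extra 256-byte block stores another 256 bytes of key. For example,
  // assuming key1_len is 160: a 200-byte key needs 2 blocks and a 500-byte
  // key needs 3.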
  return ((key_size - key1_len) / 256 + 2);
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
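  // A null callback means a synchronous request, so handle it in place;
  // otherwise validate the arguments here and post the operation to the cache
  // thread through the background queue.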
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify reuse of the block (we know that any
// unused block is filled with zeros), and to simplify reading and writing
// partial information from an entry (we don't have to worry about returning
// data that belongs to a previous cache entry because a range was never fully
// written).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

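  // For a block file, translate the logical offset into a file offset: skip
  // the block-file header and the blocks that precede this entry's data.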
  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

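  // If the read completed synchronously the callback will never run, so
  // discard it; that releases the IO count and the entry reference that
  // SyncCallback holds.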
  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset and buf_len are known to be non-negative here, but their sum can
  // overflow to a negative number; |size| below accounts for that case.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
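  // A zero-length write at offset zero stores nothing; if this was a
  // truncation to zero, PrepareTarget() already released the stored data
  // (hence the DCHECK below).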
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that end up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

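  // The delta is accumulated rather than reported right away; the pending
  // amount is folded into a later ModifyStorageSize() call (e.g. in the
  // destructor or in MoveToLocalBuffer()).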
  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache