1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
6 | 6 |
7 #include "base/histogram.h" | 7 #include "base/histogram.h" |
8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
(...skipping 59 matching lines...)
70 DCHECK_GE(offset, 0); | 70 DCHECK_GE(offset, 0); |
71 DCHECK_GE(valid_len, 0); | 71 DCHECK_GE(valid_len, 0); |
72 DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len); | 72 DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len); |
73 if (offset) | 73 if (offset) |
74 memset(buffer, 0, offset); | 74 memset(buffer, 0, offset); |
75 int end = disk_cache::kMaxBlockSize - offset - valid_len; | 75 int end = disk_cache::kMaxBlockSize - offset - valid_len; |
76 if (end) | 76 if (end) |
77 memset(buffer + offset + valid_len, 0, end); | 77 memset(buffer + offset + valid_len, 0, end); |
78 } | 78 } |
79 | 79 |
| 80 const int kMaxBufferSize = 1024 * 1024; // 1 MB. |
| 81 |
80 } // namespace | 82 } // namespace |
81 | 83 |
82 namespace disk_cache { | 84 namespace disk_cache { |
83 | 85 |
| 86 // This class handles individual memory buffers that store data before it is |
| 87 // sent to disk. The buffer can start at any offset, but if we try to write |
| 88 // anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to |
| 89 // zero. The buffer grows up to a size determined by the backend, to keep the |
| 90 // total memory used under control. |
| 91 class EntryImpl::UserBuffer { |
| 92 public: |
| 93 explicit UserBuffer(BackendImpl* backend) |
| 94 : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) { |
| 95 buffer_.reserve(kMaxBlockSize); |
| 96 } |
| 97 ~UserBuffer() { |
| 98 if (backend_) |
| 99 backend_->BufferDeleted(capacity() - kMaxBlockSize); |
| 100 } |
| 101 |
| 102 // Returns true if we can handle writing |len| bytes to |offset|. |
| 103 bool PreWrite(int offset, int len); |
| 104 |
| 105 // Truncates the buffer to |offset| bytes. |
| 106 void Truncate(int offset); |
| 107 |
| 108 // Writes |len| bytes from |buf| at the given |offset|. |
| 109 void Write(int offset, net::IOBuffer* buf, int len); |
| 110 |
| 111 // Returns true if we can read |len| bytes from |offset|, given that the |
| 112 // actual file has |eof| bytes stored. Note that the number of bytes to read |
| 113 // may be reduced by this method even when it returns false: that means the |
| 114 // caller should do a smaller read from disk. |
| 115 bool PreRead(int eof, int offset, int* len); |
| 116 |
| 117 // Reads up to |len| bytes into |buf| from the given |offset|. |
| 118 int Read(int offset, net::IOBuffer* buf, int len); |
| 119 |
| 120 // Prepares this buffer for reuse. |
| 121 void Reset(); |
| 122 |
| 123 char* Data() { return buffer_.size() ? &buffer_[0] : NULL; } |
| 124 int Size() { return static_cast<int>(buffer_.size()); } |
| 125 int Start() { return offset_; } |
| 126 int End() { return offset_ + Size(); } |
| 127 |
| 128 private: |
| 129 int capacity() { return static_cast<int>(buffer_.capacity()); } |
| 130 bool GrowBuffer(int required, int limit); |
| 131 |
| 132 base::WeakPtr<BackendImpl> backend_; |
| 133 int offset_; |
| 134 std::vector<char> buffer_; |
| 135 bool grow_allowed_; |
| 136 DISALLOW_COPY_AND_ASSIGN(UserBuffer); |
| 137 }; |
| 138 |
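Note: as a rough sketch of the window bookkeeping above (a standalone toy model, not the real class; names are illustrative), the buffer simply holds the byte range [Start(), End()) of a stream, and a first write past kMaxBlockSize lets that window float:

  // Toy model of UserBuffer's window over a stream.
  #include <cassert>
  #include <vector>

  struct ToyWindow {
    int offset = 0;                  // stream position of bytes[0]
    std::vector<char> bytes;         // contents of [offset, offset + size)
    int Start() const { return offset; }
    int End() const { return offset + static_cast<int>(bytes.size()); }
  };

  int main() {
    ToyWindow w;
    w.offset = 32 * 1024;            // first write landed past 16KB
    w.bytes.assign(100, 'x');        // now holds stream bytes [32768, 32868)
    assert(w.Start() == 32 * 1024 && w.End() == 32 * 1024 + 100);
    return 0;
  }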
| 139 bool EntryImpl::UserBuffer::PreWrite(int offset, int len) { |
| 140 DCHECK_GE(offset, 0); |
| 141 DCHECK_GE(len, 0); |
| 142 DCHECK_GE(offset + len, 0); |
| 143 |
| 144 // We don't want to write before our current start. |
| 145 if (offset < offset_) |
| 146 return false; |
| 147 |
| 148 // Let's get the common case out of the way. |
| 149 if (offset + len <= capacity()) |
| 150 return true; |
| 151 |
| 152 // If we are writing to the first 16K (kMaxBlockSize), we want to keep the |
| 153 // buffer offset_ at 0. |
| 154 if (!Size() && offset > kMaxBlockSize) |
| 155 return GrowBuffer(len, kMaxBufferSize); |
| 156 |
| 157 int required = offset - offset_ + len; |
| 158 return GrowBuffer(required, kMaxBufferSize * 6 / 5); |
| 159 } |
| 160 |
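Note: the two limits above differ because an empty buffer that starts past kMaxBlockSize only ever has to hold the new data, while a buffer pinned at (or overlapping) the start of the file must cover everything up to the end of the write. A minimal standalone sketch of that decision, with the constants assumed from this file:

  const int kMaxBlockSize = 16 * 1024;     // 16KB, from the disk_cache headers
  const int kMaxBufferSize = 1024 * 1024;  // 1 MB, defined above

  // Mirrors PreWrite()'s request to GrowBuffer(); not the real method.
  int RequiredCapacity(int buffer_offset, int buffer_size,
                       int offset, int len, int* limit) {
    if (!buffer_size && offset > kMaxBlockSize) {
      *limit = kMaxBufferSize;             // the window will float to |offset|
      return len;
    }
    *limit = kMaxBufferSize * 6 / 5;       // 20% slack in the general case
    return offset - buffer_offset + len;
  }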
| 161 void EntryImpl::UserBuffer::Truncate(int offset) { |
| 162 DCHECK_GE(offset, 0); |
| 163 DCHECK_GE(offset, offset_); |
| 164 |
| 165 offset -= offset_; |
| 166 if (Size() >= offset) |
| 167 buffer_.resize(offset); |
| 168 } |
| 169 |
| 170 void EntryImpl::UserBuffer::Write(int offset, net::IOBuffer* buf, int len) { |
| 171 DCHECK_GE(offset, 0); |
| 172 DCHECK_GE(len, 0); |
| 173 DCHECK_GE(offset + len, 0); |
| 174 DCHECK_GE(offset, offset_); |
| 175 |
| 176 if (!Size() && offset > kMaxBlockSize) |
| 177 offset_ = offset; |
| 178 |
| 179 offset -= offset_; |
| 180 |
| 181 if (offset > Size()) |
| 182 buffer_.resize(offset); |
| 183 |
| 184 if (!len) |
| 185 return; |
| 186 |
| 187 char* buffer = buf->data(); |
| 188 int valid_len = Size() - offset; |
| 189 int copy_len = std::min(valid_len, len); |
| 190 if (copy_len) { |
| 191 memcpy(&buffer_[offset], buffer, copy_len); |
| 192 len -= copy_len; |
| 193 buffer += copy_len; |
| 194 } |
| 195 if (!len) |
| 196 return; |
| 197 |
| 198 buffer_.insert(buffer_.end(), buffer, buffer + len); |
| 199 } |
| 200 |
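Note that when |offset| lands past the current end of the buffer, the resize() above value-initializes the gap, so the skipped bytes read back as zeros. A tiny standalone check of that std::vector behavior:

  #include <cassert>
  #include <vector>

  int main() {
    std::vector<char> buf(4, 'x');
    buf.resize(8);                   // elements 4..7 are value-initialized
    assert(buf[5] == 0 && buf[7] == 0);
    return 0;
  }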
| 201 bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) { |
| 202 DCHECK_GE(offset, 0); |
| 203 DCHECK_GT(*len, 0); |
| 204 |
| 205 if (offset < offset_) { |
| 206 // We are reading before this buffer. |
| 207 if (offset >= eof) |
| 208 return true; |
| 209 |
| 210 // If the read overlaps with the buffer or runs past the end of the file, |
| 211 // shorten it so that we only read data actually stored on disk. |
| 212 *len = std::min(*len, offset_ - offset); |
| 213 *len = std::min(*len, eof - offset); |
| 214 |
| 215 // We should read from disk. |
| 216 return false; |
| 217 } |
| 218 |
| 219 if (!Size()) |
| 220 return false; |
| 221 |
| 222 // See if we can fulfill the first part of the operation. |
| 223 return (offset - offset_ < Size()); |
| 224 } |
| 225 |
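As a worked example (numbers assumed): with eof = 100 and a buffer holding bytes [50, 130), a 60-byte read at offset 20 is clipped to min(60, 50 - 20, 100 - 20) = 30 bytes and sent to disk (PreRead returns false); a 10-byte read at offset 110 lands inside the buffer (110 - 50 < 80), so it returns true; and a read at an offset past eof but before the buffer also returns true, because Read() below can synthesize that data with zeros.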
| 226 int EntryImpl::UserBuffer::Read(int offset, net::IOBuffer* buf, int len) { |
| 227 DCHECK_GE(offset, 0); |
| 228 DCHECK_GT(len, 0); |
| 229 DCHECK(Size() || offset < offset_); |
| 230 |
| 231 int clean_bytes = 0; |
| 232 if (offset < offset_) { |
| 233 // There is no data on disk before the buffer, so fill that part with zeros. |
| 234 clean_bytes = std::min(offset_ - offset, len); |
| 235 memset(buf->data(), 0, clean_bytes); |
| 236 if (len == clean_bytes) |
| 237 return len; |
| 238 offset = offset_; |
| 239 len -= clean_bytes; |
| 240 } |
| 241 |
| 242 int start = offset - offset_; |
| 243 int available = Size() - start; |
| 244 DCHECK_GE(start, 0); |
| 245 DCHECK_GE(available, 0); |
| 246 len = std::min(len, available); |
| 247 memcpy(buf->data() + clean_bytes, &buffer_[start], len); |
| 248 return len + clean_bytes; |
| 249 } |
| 250 |
| 251 void EntryImpl::UserBuffer::Reset() { |
| 252 if (!grow_allowed_) { |
| 253 if (backend_) |
| 254 backend_->BufferDeleted(capacity() - kMaxBlockSize); |
| 255 grow_allowed_ = true; |
| 256 std::vector<char> tmp; |
| 257 buffer_.swap(tmp); |
| 258 } |
| 259 offset_ = 0; |
| 260 buffer_.clear(); |
| 261 } |
| 262 |
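The swap with a temporary above is deliberate: clear() alone keeps the vector's capacity, while swapping with an empty vector actually releases the memory that was reported to the backend.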
| 263 bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) { |
| 264 DCHECK_GE(required, 0); |
| 265 int current_size = capacity(); |
| 266 if (required <= current_size) |
| 267 return true; |
| 268 |
| 269 if (required > limit) |
| 270 return false; |
| 271 |
| 272 if (!backend_) |
| 273 return false; |
| 274 |
| 275 int to_add = std::max(required - current_size, kMaxBlockSize * 4); |
| 276 to_add = std::max(current_size, to_add); |
| 277 required = std::min(current_size + to_add, limit); |
| 278 |
| 279 grow_allowed_ = backend_->IsAllocAllowed(current_size, required); |
| 280 if (!grow_allowed_) |
| 281 return false; |
| 282 |
| 283 buffer_.reserve(required); |
| 284 return true; |
| 285 } |
| 286 |
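For instance (values assumed), growing a 16KB buffer to satisfy required = 20KB: to_add = max(20KB - 16KB, 64KB) = 64KB, then max(16KB, 64KB) = 64KB, so the reservation target becomes min(16KB + 64KB, limit) = 80KB. Each growth therefore at least doubles the buffer and adds no less than kMaxBlockSize * 4, keeping reserve() calls infrequent.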
| 287 // ------------------------------------------------------------------------ |
| 288 |
84 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) | 289 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) |
85 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { | 290 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { |
86 entry_.LazyInit(backend->File(address), address); | 291 entry_.LazyInit(backend->File(address), address); |
87 doomed_ = false; | 292 doomed_ = false; |
88 backend_ = backend; | 293 backend_ = backend; |
89 for (int i = 0; i < kNumStreams; i++) { | 294 for (int i = 0; i < kNumStreams; i++) { |
90 unreported_size_[i] = 0; | 295 unreported_size_[i] = 0; |
91 } | 296 } |
92 } | 297 } |
93 | 298 |
94 // When an entry is deleted from the cache, we clean up all the data associated | 299 // When an entry is deleted from the cache, we clean up all the data associated |
95 // with it for two reasons: to simplify the reuse of the block (we know that any | 300 // with it for two reasons: to simplify the reuse of the block (we know that any |
96 // unused block is filled with zeros), and to simplify the handling of write / | 301 // unused block is filled with zeros), and to simplify the handling of write / |
97 // read partial information from an entry (don't have to worry about returning | 302 // read partial information from an entry (don't have to worry about returning |
98 // data related to a previous cache entry because the range was not fully | 303 // data related to a previous cache entry because the range was not fully |
99 // written before). | 304 // written before). |
100 EntryImpl::~EntryImpl() { | 305 EntryImpl::~EntryImpl() { |
101 // Save the sparse info to disk before deleting this entry. | 306 // Save the sparse info to disk before deleting this entry. |
102 sparse_.reset(); | 307 sparse_.reset(); |
103 | 308 |
104 if (doomed_) { | 309 if (doomed_) { |
105 DeleteEntryData(true); | 310 DeleteEntryData(true); |
106 } else { | 311 } else { |
107 bool ret = true; | 312 bool ret = true; |
108 for (int index = 0; index < kNumStreams; index++) { | 313 for (int index = 0; index < kNumStreams; index++) { |
109 if (user_buffers_[index].get()) { | 314 if (user_buffers_[index].get()) { |
110 if (!(ret = Flush(index, entry_.Data()->data_size[index], false))) | 315 if (!(ret = Flush(index))) |
111 LOG(ERROR) << "Failed to save user data"; | 316 LOG(ERROR) << "Failed to save user data"; |
112 } else if (unreported_size_[index]) { | 317 } |
| 318 if (unreported_size_[index]) { |
113 backend_->ModifyStorageSize( | 319 backend_->ModifyStorageSize( |
114 entry_.Data()->data_size[index] - unreported_size_[index], | 320 entry_.Data()->data_size[index] - unreported_size_[index], |
115 entry_.Data()->data_size[index]); | 321 entry_.Data()->data_size[index]); |
116 } | 322 } |
117 } | 323 } |
118 | 324 |
119 if (!ret) { | 325 if (!ret) { |
120 // There was a failure writing the actual data. Mark the entry as dirty. | 326 // There was a failure writing the actual data. Mark the entry as dirty. |
121 int current_id = backend_->GetCurrentEntryId(); | 327 int current_id = backend_->GetCurrentEntryId(); |
122 node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1; | 328 node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1; |
(...skipping 172 matching lines...)
295 TimeTicks start = TimeTicks::Now(); | 501 TimeTicks start = TimeTicks::Now(); |
296 | 502 |
297 if (offset + buf_len > entry_size) | 503 if (offset + buf_len > entry_size) |
298 buf_len = entry_size - offset; | 504 buf_len = entry_size - offset; |
299 | 505 |
300 UpdateRank(false); | 506 UpdateRank(false); |
301 | 507 |
302 backend_->OnEvent(Stats::READ_DATA); | 508 backend_->OnEvent(Stats::READ_DATA); |
303 backend_->OnRead(buf_len); | 509 backend_->OnRead(buf_len); |
304 | 510 |
305 if (user_buffers_[index].get()) { | 511 // We need the current size on disk. |
| 512 int eof = entry_size - unreported_size_[index]; |
| 513 if (user_buffers_[index].get() && |
| 514 user_buffers_[index]->PreRead(eof, offset, &buf_len)) { |
306 // Complete the operation locally. | 515 // Complete the operation locally. |
307 DCHECK(kMaxBlockSize >= offset + buf_len); | 516 buf_len = user_buffers_[index]->Read(offset, buf, buf_len); |
308 memcpy(buf->data() , user_buffers_[index].get() + offset, buf_len); | |
309 ReportIOTime(kRead, start); | 517 ReportIOTime(kRead, start); |
310 return buf_len; | 518 return buf_len; |
311 } | 519 } |
312 | 520 |
313 Addr address(entry_.Data()->data_addr[index]); | 521 Addr address(entry_.Data()->data_addr[index]); |
314 DCHECK(address.is_initialized()); | 522 DCHECK(address.is_initialized()); |
315 if (!address.is_initialized()) | 523 if (!address.is_initialized()) |
316 return net::ERR_FAILED; | 524 return net::ERR_FAILED; |
317 | 525 |
318 File* file = GetBackingFile(address, index); | 526 File* file = GetBackingFile(address, index); |
(...skipping 42 matching lines...)
361 if (size <= max_file_size) | 569 if (size <= max_file_size) |
362 size = kint32max; | 570 size = kint32max; |
363 backend_->TooMuchStorageRequested(size); | 571 backend_->TooMuchStorageRequested(size); |
364 return net::ERR_FAILED; | 572 return net::ERR_FAILED; |
365 } | 573 } |
366 | 574 |
367 TimeTicks start = TimeTicks::Now(); | 575 TimeTicks start = TimeTicks::Now(); |
368 | 576 |
369 // Read the size at this point (it may change inside prepare). | 577 // Read the size at this point (it may change inside prepare). |
370 int entry_size = entry_.Data()->data_size[index]; | 578 int entry_size = entry_.Data()->data_size[index]; |
| 579 bool extending = entry_size < offset + buf_len; |
| 580 truncate = truncate && entry_size > offset + buf_len; |
371 if (!PrepareTarget(index, offset, buf_len, truncate)) | 581 if (!PrepareTarget(index, offset, buf_len, truncate)) |
372 return net::ERR_FAILED; | 582 return net::ERR_FAILED; |
373 | 583 |
374 if (entry_size < offset + buf_len) { | 584 if (extending || truncate) |
375 unreported_size_[index] += offset + buf_len - entry_size; | 585 UpdateSize(index, entry_size, offset + buf_len); |
376 entry_.Data()->data_size[index] = offset + buf_len; | |
377 entry_.set_modified(); | |
378 if (!buf_len) | |
379 truncate = true; // Force file extension. | |
380 } else if (truncate) { | |
381 // If the size was modified inside PrepareTarget, we should not do | |
382 // anything here. | |
383 if ((entry_size > offset + buf_len) && | |
384 (entry_size == entry_.Data()->data_size[index])) { | |
385 unreported_size_[index] += offset + buf_len - entry_size; | |
386 entry_.Data()->data_size[index] = offset + buf_len; | |
387 entry_.set_modified(); | |
388 } else { | |
389 // Nothing to truncate. | |
390 truncate = false; | |
391 } | |
392 } | |
393 | 586 |
394 UpdateRank(true); | 587 UpdateRank(true); |
395 | 588 |
396 backend_->OnEvent(Stats::WRITE_DATA); | 589 backend_->OnEvent(Stats::WRITE_DATA); |
397 backend_->OnWrite(buf_len); | 590 backend_->OnWrite(buf_len); |
398 | 591 |
399 if (user_buffers_[index].get()) { | 592 if (user_buffers_[index].get()) { |
400 // Complete the operation locally. | 593 // Complete the operation locally. |
401 if (!buf_len) | 594 user_buffers_[index]->Write(offset, buf, buf_len); |
402 return 0; | |
403 | |
404 DCHECK(kMaxBlockSize >= offset + buf_len); | |
405 memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len); | |
406 ReportIOTime(kWrite, start); | 595 ReportIOTime(kWrite, start); |
407 return buf_len; | 596 return buf_len; |
408 } | 597 } |
409 | 598 |
410 Addr address(entry_.Data()->data_addr[index]); | 599 Addr address(entry_.Data()->data_addr[index]); |
| 600 if (truncate && offset + buf_len == 0) { |
| 601 DCHECK(!address.is_initialized()); |
| 602 return 0; |
| 603 } |
| 604 |
411 File* file = GetBackingFile(address, index); | 605 File* file = GetBackingFile(address, index); |
412 if (!file) | 606 if (!file) |
413 return net::ERR_FAILED; | 607 return net::ERR_FAILED; |
414 | 608 |
415 size_t file_offset = offset; | 609 size_t file_offset = offset; |
416 if (address.is_block_file()) { | 610 if (address.is_block_file()) { |
417 file_offset += address.start_block() * address.BlockSize() + | 611 file_offset += address.start_block() * address.BlockSize() + |
418 kBlockHeaderSize; | 612 kBlockHeaderSize; |
419 } else if (truncate) { | 613 } else if (truncate || (extending && !buf_len)) { |
420 if (!file->SetLength(offset + buf_len)) | 614 if (!file->SetLength(offset + buf_len)) |
421 return net::ERR_FAILED; | 615 return net::ERR_FAILED; |
422 } | 616 } |
423 | 617 |
424 if (!buf_len) | 618 if (!buf_len) |
425 return 0; | 619 return 0; |
426 | 620 |
427 SyncCallback* io_callback = NULL; | 621 SyncCallback* io_callback = NULL; |
428 if (callback) | 622 if (callback) |
429 io_callback = new SyncCallback(this, buf, callback); | 623 io_callback = new SyncCallback(this, buf, callback); |
(...skipping 356 matching lines...)
786 DCHECK(index >= 0 && index <= kKeyFileIndex); | 980 DCHECK(index >= 0 && index <= kKeyFileIndex); |
787 if (!files_[index].get()) { | 981 if (!files_[index].get()) { |
788 // For a key file, use mixed mode IO. | 982 // For a key file, use mixed mode IO. |
789 scoped_refptr<File> file(new File(kKeyFileIndex == index)); | 983 scoped_refptr<File> file(new File(kKeyFileIndex == index)); |
790 if (file->Init(backend_->GetFileName(address))) | 984 if (file->Init(backend_->GetFileName(address))) |
791 files_[index].swap(file); | 985 files_[index].swap(file); |
792 } | 986 } |
793 return files_[index].get(); | 987 return files_[index].get(); |
794 } | 988 } |
795 | 989 |
| 990 // We keep a memory buffer for everything that ends up stored on a block file |
| 991 // (because we don't know yet the final data size), and for some of the data |
| 992 // that ends up on external files. This function will initialize that memory |
| 993 // buffer and / or the files needed to store the data. |
| 994 // |
| 995 // In general, a buffer may overlap data already stored on disk, and in that |
| 996 // case, the contents of the buffer are the most accurate. It may also extend |
| 997 // the file, but we don't want to read from disk just to keep the buffer up to |
| 998 // date. This means that as soon as there is a chance to get confused about what |
| 999 // is the most recent version of some part of a file, we'll flush the buffer and |
| 1000 // reuse it for the new data. Keep in mind that the normal use pattern is quite |
| 1001 // simple (write sequentially from the beginning), so we optimize for handling |
| 1002 // that case. |
796 bool EntryImpl::PrepareTarget(int index, int offset, int buf_len, | 1003 bool EntryImpl::PrepareTarget(int index, int offset, int buf_len, |
797 bool truncate) { | 1004 bool truncate) { |
798 Addr address(entry_.Data()->data_addr[index]); | 1005 if (truncate) |
799 | 1006 return HandleTruncation(index, offset, buf_len); |
800 if (address.is_initialized() || user_buffers_[index].get()) | 1007 |
801 return GrowUserBuffer(index, offset, buf_len, truncate); | 1008 Addr address(entry_.Data()->data_addr[index]); |
802 | 1009 if (address.is_initialized()) { |
803 if (offset + buf_len > kMaxBlockSize) | 1010 if (address.is_block_file() && !MoveToLocalBuffer(index)) |
804 return CreateDataBlock(index, offset + buf_len); | 1011 return false; |
805 | 1012 |
806 user_buffers_[index].reset(new char[kMaxBlockSize]); | 1013 if (!user_buffers_[index].get() && offset < kMaxBlockSize) { |
807 | 1014 // We are about to create a buffer for the first 16KB, make sure that we |
808 // Overwrite the parts of the buffer that are not going to be written | 1015 // preserve existing data. |
809 // by the current operation (and yes, let's assume that nothing is going | 1016 if (!CopyToLocalBuffer(index)) |
810 // to fail, and we'll actually write over the part that we are not cleaning | 1017 return false; |
811 // here). The point is to avoid writing random stuff to disk later on. | 1018 } |
812 ClearInvalidData(user_buffers_[index].get(), offset, buf_len); | 1019 } |
813 | 1020 |
814 return true; | 1021 if (!user_buffers_[index].get()) |
| 1022 user_buffers_[index].reset(new UserBuffer(backend_)); |
| 1023 |
| 1024 return PrepareBuffer(index, offset, buf_len); |
815 } | 1025 } |
816 | 1026 |
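To make the fast path concrete (a hypothetical call pattern): three sequential 10KB writes at offsets 0, 10KB and 20KB never see an initialized address, so each call falls through to PrepareBuffer() and the data accumulates in memory; nothing touches disk until the buffer is flushed or the entry goes away.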
817 // We get to this function with some data already stored. If there is a | 1027 // We get to this function with some data already stored. If there is a |
818 // truncation that results in data stored internally, we'll explicitly | 1028 // truncation that results in data stored internally, we'll explicitly |
819 // handle the case here. | 1029 // handle the case here. |
820 bool EntryImpl::GrowUserBuffer(int index, int offset, int buf_len, | 1030 bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) { |
821 bool truncate) { | 1031 Addr address(entry_.Data()->data_addr[index]); |
822 Addr address(entry_.Data()->data_addr[index]); | 1032 |
823 | 1033 int current_size = entry_.Data()->data_size[index]; |
824 if (offset + buf_len > kMaxBlockSize) { | 1034 int new_size = offset + buf_len; |
825 // The data has to be stored externally. | 1035 |
826 if (address.is_initialized()) { | 1036 if (!new_size) { |
827 if (address.is_separate_file()) | 1037 // This is by far the most common scenario. |
| 1038 DeleteData(address, index); |
| 1039 backend_->ModifyStorageSize(current_size - unreported_size_[index], 0); |
| 1040 entry_.Data()->data_addr[index] = 0; |
| 1041 entry_.Data()->data_size[index] = 0; |
| 1042 unreported_size_[index] = 0; |
| 1043 entry_.Store(); |
| 1044 |
| 1045 user_buffers_[index].reset(); |
| 1046 return true; |
| 1047 } |
| 1048 |
| 1049 // We never postpone truncating a file, if there is one, but we may postpone |
| 1050 // telling the backend about the size reduction. |
| 1051 if (user_buffers_[index].get()) { |
| 1052 DCHECK_GE(current_size, user_buffers_[index]->Start()); |
| 1053 if (!address.is_initialized()) { |
| 1054 // There is no overlap between the buffer and disk. |
| 1055 if (new_size > user_buffers_[index]->Start()) { |
| 1056 // Just truncate our buffer. |
| 1057 DCHECK_LT(new_size, user_buffers_[index]->End()); |
| 1058 user_buffers_[index]->Truncate(new_size); |
828 return true; | 1059 return true; |
829 if (!MoveToLocalBuffer(index)) | 1060 } |
830 return false; | 1061 |
831 } | 1062 // Just discard our buffer. |
832 return Flush(index, offset + buf_len, true); | 1063 user_buffers_[index]->Reset(); |
833 } | 1064 return PrepareBuffer(index, offset, buf_len); |
834 | 1065 } |
835 if (!address.is_initialized()) { | 1066 |
836 DCHECK(user_buffers_[index].get()); | 1067 // There is some overlap or we need to extend the file before the |
837 if (truncate) | 1068 // truncation. |
838 ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len); | 1069 if (offset > user_buffers_[index]->Start()) |
839 return true; | 1070 user_buffers_[index]->Truncate(new_size); |
840 } | 1071 UpdateSize(index, current_size, new_size); |
841 if (address.is_separate_file()) { | 1072 if (!Flush(index)) |
842 if (!truncate) | 1073 return false; |
843 return true; | 1074 user_buffers_[index].reset(); |
844 return ImportSeparateFile(index, offset, buf_len); | 1075 } |
845 } | 1076 |
846 | 1077 // We have data somewhere, and it is not in a buffer. |
847 // At this point we are dealing with data stored on disk, inside a block file. | |
848 if (offset + buf_len <= address.BlockSize() * address.num_blocks()) | |
849 return true; | |
850 | |
851 // ... and the allocated block has to change. | |
852 if (!MoveToLocalBuffer(index)) | |
853 return false; | |
854 | |
855 int clear_start = entry_.Data()->data_size[index]; | |
856 if (truncate) | |
857 clear_start = std::min(clear_start, offset + buf_len); | |
858 else if (offset < clear_start) | |
859 clear_start = std::max(offset + buf_len, clear_start); | |
860 | |
861 // Clear the end of the buffer. | |
862 ClearInvalidData(user_buffers_[index].get(), 0, clear_start); | |
863 return true; | |
864 } | |
865 | |
866 bool EntryImpl::MoveToLocalBuffer(int index) { | |
867 Addr address(entry_.Data()->data_addr[index]); | |
868 DCHECK(!user_buffers_[index].get()); | 1078 DCHECK(!user_buffers_[index].get()); |
869 DCHECK(address.is_initialized()); | 1079 DCHECK(address.is_initialized()); |
870 scoped_array<char> buffer(new char[kMaxBlockSize]); | 1080 |
| 1081 if (new_size > kMaxBlockSize) |
| 1082 return true; // Let the operation go directly to disk. |
| 1083 |
| 1084 return ImportSeparateFile(index, offset + buf_len); |
| 1085 } |
| 1086 |
| 1087 bool EntryImpl::CopyToLocalBuffer(int index) { |
| 1088 Addr address(entry_.Data()->data_addr[index]); |
| 1089 DCHECK(!user_buffers_[index].get()); |
| 1090 DCHECK(address.is_initialized()); |
| 1091 |
| 1092 int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize); |
| 1093 user_buffers_[index].reset(new UserBuffer(backend_)); |
| 1094 user_buffers_[index]->Write(len, NULL, 0); |
871 | 1095 |
872 File* file = GetBackingFile(address, index); | 1096 File* file = GetBackingFile(address, index); |
873 size_t len = entry_.Data()->data_size[index]; | 1097 int offset = 0; |
874 size_t offset = 0; | |
875 | 1098 |
876 if (address.is_block_file()) | 1099 if (address.is_block_file()) |
877 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 1100 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
878 | 1101 |
879 if (!file || !file->Read(buffer.get(), len, offset, NULL, NULL)) | 1102 if (!file || |
880 return false; | 1103 !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) { |
881 | 1104 user_buffers_[index].reset(); |
| 1105 return false; |
| 1106 } |
| 1107 return true; |
| 1108 } |
| 1109 |
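The Write(len, NULL, 0) call above looks odd but is safe: with a zero length, Write() never touches |buf|; it only resizes the buffer to |len| zero-filled bytes so that file->Read() has a destination of the right size. And since |len| is clamped to kMaxBlockSize, the buffer's offset stays at zero.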
| 1110 bool EntryImpl::MoveToLocalBuffer(int index) { |
| 1111 if (!CopyToLocalBuffer(index)) |
| 1112 return false; |
| 1113 |
| 1114 Addr address(entry_.Data()->data_addr[index]); |
882 DeleteData(address, index); | 1115 DeleteData(address, index); |
883 entry_.Data()->data_addr[index] = 0; | 1116 entry_.Data()->data_addr[index] = 0; |
884 entry_.Store(); | 1117 entry_.Store(); |
885 | 1118 |
886 // If we lose this entry we'll see it as zero sized. | 1119 // If we lose this entry we'll see it as zero sized. |
887 backend_->ModifyStorageSize(static_cast<int>(len) - unreported_size_[index], | 1120 int len = entry_.Data()->data_size[index]; |
888 0); | 1121 backend_->ModifyStorageSize(len - unreported_size_[index], 0); |
889 unreported_size_[index] = static_cast<int>(len); | 1122 unreported_size_[index] = len; |
890 | 1123 return true; |
891 user_buffers_[index].swap(buffer); | 1124 } |
892 return true; | 1125 |
893 } | 1126 bool EntryImpl::ImportSeparateFile(int index, int new_size) { |
894 | 1127 if (entry_.Data()->data_size[index] > new_size) |
895 bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) { | 1128 UpdateSize(index, entry_.Data()->data_size[index], new_size); |
896 if (entry_.Data()->data_size[index] > offset + buf_len) { | 1129 |
897 unreported_size_[index] += offset + buf_len - | 1130 return MoveToLocalBuffer(index); |
898 entry_.Data()->data_size[index]; | 1131 } |
899 entry_.Data()->data_size[index] = offset + buf_len; | 1132 |
900 } | 1133 bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) { |
901 | |
902 if (!MoveToLocalBuffer(index)) | |
903 return false; | |
904 | |
905 // Clear the end of the buffer. | |
906 ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len); | |
907 return true; | |
908 } | |
909 | |
910 // The common scenario is that this is called from the destructor of the entry, | |
911 // to write to disk what we have buffered. We don't want to hold the destructor | |
912 // until the actual IO finishes, so we'll send an asynchronous write that will | |
913 // free up the memory containing the data. To be consistent, this method always | |
914 // returns with the buffer freed up (on success). | |
915 bool EntryImpl::Flush(int index, int size, bool async) { | |
916 Addr address(entry_.Data()->data_addr[index]); | |
917 DCHECK(user_buffers_[index].get()); | 1134 DCHECK(user_buffers_[index].get()); |
918 DCHECK(!address.is_initialized()); | 1135 if (offset > user_buffers_[index]->End()) { |
919 | 1136 // We are about to extend the buffer (with zeros), so make sure that we are |
920 if (!size) | 1137 // not overwriting anything. |
| 1138 Addr address(entry_.Data()->data_addr[index]); |
| 1139 if (address.is_initialized() && address.is_separate_file()) { |
| 1140 int eof = entry_.Data()->data_size[index]; |
| 1141 if (eof > user_buffers_[index]->Start() && !Flush(index)) |
| 1142 return false; |
| 1143 } |
| 1144 } |
| 1145 |
| 1146 if (!user_buffers_[index]->PreWrite(offset, buf_len)) { |
| 1147 if (!Flush(index)) |
| 1148 return false; |
| 1149 |
| 1150 // Let's try again. |
| 1151 if (!user_buffers_[index]->PreWrite(offset, buf_len)) { |
| 1152 // We cannot complete the operation with a buffer. |
| 1153 DCHECK(!user_buffers_[index]->Size()); |
| 1154 DCHECK(!user_buffers_[index]->Start()); |
| 1155 user_buffers_[index].reset(); |
| 1156 } |
| 1157 } |
| 1158 return true; |
| 1159 } |
| 1160 |
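If the second PreWrite() still fails, the write is either bigger than the buffer limits or the backend denied the allocation; dropping user_buffers_ here simply routes this operation straight to disk.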
| 1161 bool EntryImpl::Flush(int index) { |
| 1162 Addr address(entry_.Data()->data_addr[index]); |
| 1163 DCHECK(user_buffers_[index].get()); |
| 1164 |
| 1165 if (!entry_.Data()->data_size[index]) { |
| 1166 DCHECK(!user_buffers_[index]->Size()); |
921 return true; | 1167 return true; |
922 | 1168 } |
923 if (!CreateDataBlock(index, size)) | 1169 |
| 1170 if (!address.is_initialized() && |
| 1171 !CreateDataBlock(index, entry_.Data()->data_size[index])) |
924 return false; | 1172 return false; |
925 | 1173 |
926 address.set_value(entry_.Data()->data_addr[index]); | 1174 address.set_value(entry_.Data()->data_addr[index]); |
927 | 1175 |
928 File* file = GetBackingFile(address, index); | 1176 File* file = GetBackingFile(address, index); |
929 size_t len = entry_.Data()->data_size[index]; | 1177 int len = user_buffers_[index]->Size(); |
930 size_t offset = 0; | 1178 int offset = user_buffers_[index]->Start(); |
931 if (address.is_block_file()) | 1179 if (address.is_block_file()) { |
| 1180 DCHECK_EQ(len, entry_.Data()->data_size[index]); |
| 1181 DCHECK(!offset); |
932 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 1182 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
933 | 1183 } |
934 // We just told the backend to store len bytes for real. | |
935 DCHECK(len == static_cast<size_t>(unreported_size_[index])); | |
936 backend_->ModifyStorageSize(0, static_cast<int>(len)); | |
937 unreported_size_[index] = 0; | |
938 | 1184 |
939 if (!file) | 1185 if (!file) |
940 return false; | 1186 return false; |
941 | 1187 |
942 // TODO(rvargas): figure out if it's worth to re-enable posting operations. | 1188 if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL)) |
943 // Right now it is only used from GrowUserBuffer, not the destructor, and | 1189 return false; |
944 // it is not accounted for from the point of view of the total number of | 1190 user_buffers_[index]->Reset(); |
945 // pending operations of the cache. It is also racing with the actual write | 1191 |
946 // on the GrowUserBuffer path because there is no code to exclude the range | 1192 return true; |
947 // that is going to be written. | 1193 } |
948 async = false; | 1194 |
949 if (async) { | 1195 void EntryImpl::UpdateSize(int index, int old_size, int new_size) { |
950 if (!file->PostWrite(user_buffers_[index].get(), len, offset)) | 1196 if (entry_.Data()->data_size[index] == new_size) |
951 return false; | 1197 return; |
952 // The buffer is deleted from the PostWrite operation. | 1198 |
953 ignore_result(user_buffers_[index].release()); | 1199 unreported_size_[index] += new_size - old_size; |
954 } else { | 1200 entry_.Data()->data_size[index] = new_size; |
955 if (!file->Write(user_buffers_[index].get(), len, offset, NULL, NULL)) | 1201 entry_.set_modified(); |
956 return false; | |
957 user_buffers_[index].reset(NULL); | |
958 } | |
959 | |
960 return true; | |
961 } | 1202 } |
962 | 1203 |
963 int EntryImpl::InitSparseData() { | 1204 int EntryImpl::InitSparseData() { |
964 if (sparse_.get()) | 1205 if (sparse_.get()) |
965 return net::OK; | 1206 return net::OK; |
966 | 1207 |
967 // Use a local variable so that sparse_ never goes from 'valid' to NULL. | 1208 // Use a local variable so that sparse_ never goes from 'valid' to NULL. |
968 scoped_ptr<SparseControl> sparse(new SparseControl(this)); | 1209 scoped_ptr<SparseControl> sparse(new SparseControl(this)); |
969 int result = sparse->Init(); | 1210 int result = sparse->Init(); |
970 if (net::OK == result) | 1211 if (net::OK == result) |
971 sparse_.swap(sparse); | 1212 sparse_.swap(sparse); |
972 | 1213 |
973 return result; | 1214 return result; |
974 } | 1215 } |
975 | 1216 |
976 void EntryImpl::SetEntryFlags(uint32 flags) { | 1217 void EntryImpl::SetEntryFlags(uint32 flags) { |
977 entry_.Data()->flags |= flags; | 1218 entry_.Data()->flags |= flags; |
978 entry_.set_modified(); | 1219 entry_.set_modified(); |
979 } | 1220 } |
980 | 1221 |
981 uint32 EntryImpl::GetEntryFlags() { | 1222 uint32 EntryImpl::GetEntryFlags() { |
982 return entry_.Data()->flags; | 1223 return entry_.Data()->flags; |
983 } | 1224 } |
984 | 1225 |
985 void EntryImpl::GetData(int index, char** buffer, Addr* address) { | 1226 void EntryImpl::GetData(int index, char** buffer, Addr* address) { |
986 if (user_buffers_[index].get()) { | 1227 if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
| 1228 !user_buffers_[index]->Start()) { |
987 // The data is already in memory, just copy it and we're done. | 1229 // The data is already in memory, just copy it and we're done. |
988 int data_len = entry_.Data()->data_size[index]; | 1230 int data_len = entry_.Data()->data_size[index]; |
989 DCHECK(data_len <= kMaxBlockSize); | 1231 if (data_len <= user_buffers_[index]->Size()) { |
990 *buffer = new char[data_len]; | 1232 DCHECK(!user_buffers_[index]->Start()); |
991 memcpy(*buffer, user_buffers_[index].get(), data_len); | 1233 *buffer = new char[data_len]; |
992 return; | 1234 memcpy(*buffer, user_buffers_[index]->Data(), data_len); |
| 1235 return; |
| 1236 } |
993 } | 1237 } |
994 | 1238 |
995 // Bad news: we'd have to read the info from disk so instead we'll just tell | 1239 // Bad news: we'd have to read the info from disk so instead we'll just tell |
996 // the caller where to read from. | 1240 // the caller where to read from. |
997 *buffer = NULL; | 1241 *buffer = NULL; |
998 address->set_value(entry_.Data()->data_addr[index]); | 1242 address->set_value(entry_.Data()->data_addr[index]); |
999 if (address->is_initialized()) { | 1243 if (address->is_initialized()) { |
1000 // Prevent us from deleting the block from the backing store. | 1244 // Prevent us from deleting the block from the backing store. |
1001 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | 1245 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - |
1002 unreported_size_[index], 0); | 1246 unreported_size_[index], 0); |
(...skipping 11 matching lines...)
1014 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1258 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
1015 entry_.address().value(), node_.address().value()); | 1259 entry_.address().value(), node_.address().value()); |
1016 | 1260 |
1017 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1261 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
1018 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1262 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
1019 | 1263 |
1020 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1264 Trace(" doomed: %d 0x%x", doomed_, dirty); |
1021 } | 1265 } |
1022 | 1266 |
1023 } // namespace disk_cache | 1267 } // namespace disk_cache |