| OLD | NEW |
| 1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/histogram.h" | 7 #include "base/histogram.h" |
| 8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
| 9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
| (...skipping 63 matching lines...) |
| 74 | 74 |
| 75 } // namespace | 75 } // namespace |
| 76 | 76 |
| 77 namespace disk_cache { | 77 namespace disk_cache { |
| 78 | 78 |
| 79 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) | 79 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) |
| 80 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { | 80 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { |
| 81 entry_.LazyInit(backend->File(address), address); | 81 entry_.LazyInit(backend->File(address), address); |
| 82 doomed_ = false; | 82 doomed_ = false; |
| 83 backend_ = backend; | 83 backend_ = backend; |
| 84 for (int i = 0; i < NUM_STREAMS; i++) { | 84 for (int i = 0; i < kNumStreams; i++) { |
| 85 unreported_size_[i] = 0; | 85 unreported_size_[i] = 0; |
| 86 } | 86 } |
| 87 } | 87 } |
| 88 | 88 |
| 89 // When an entry is deleted from the cache, we clean up all the data associated | 89 // When an entry is deleted from the cache, we clean up all the data associated |
| 90 // with it for two reasons: to simplify the reuse of the block (we know that any | 90 // with it for two reasons: to simplify the reuse of the block (we know that any |
| 91 // unused block is filled with zeros), and to simplify the handling of write / | 91 // unused block is filled with zeros), and to simplify the handling of write / |
| 92 // read partial information from an entry (don't have to worry about returning | 92 // read partial information from an entry (don't have to worry about returning |
| 93 // data related to a previous cache entry because the range was not fully | 93 // data related to a previous cache entry because the range was not fully |
| 94 // written before). | 94 // written before). |
| 95 EntryImpl::~EntryImpl() { | 95 EntryImpl::~EntryImpl() { |
| 96 // Save the sparse info to disk before deleting this entry. | 96 // Save the sparse info to disk before deleting this entry. |
| 97 sparse_.reset(); | 97 sparse_.reset(); |
| 98 | 98 |
| 99 if (doomed_) { | 99 if (doomed_) { |
| 100 DeleteEntryData(true); | 100 DeleteEntryData(true); |
| 101 } else { | 101 } else { |
| 102 bool ret = true; | 102 bool ret = true; |
| 103 for (int index = 0; index < NUM_STREAMS; index++) { | 103 for (int index = 0; index < kNumStreams; index++) { |
| 104 if (user_buffers_[index].get()) { | 104 if (user_buffers_[index].get()) { |
| 105 if (!(ret = Flush(index, entry_.Data()->data_size[index], false))) | 105 if (!(ret = Flush(index, entry_.Data()->data_size[index], false))) |
| 106 LOG(ERROR) << "Failed to save user data"; | 106 LOG(ERROR) << "Failed to save user data"; |
| 107 } else if (unreported_size_[index]) { | 107 } else if (unreported_size_[index]) { |
| 108 backend_->ModifyStorageSize( | 108 backend_->ModifyStorageSize( |
| 109 entry_.Data()->data_size[index] - unreported_size_[index], | 109 entry_.Data()->data_size[index] - unreported_size_[index], |
| 110 entry_.Data()->data_size[index]); | 110 entry_.Data()->data_size[index]); |
| 111 } | 111 } |
| 112 } | 112 } |
| 113 if (node_.HasData() && this == node_.Data()->pointer) { | 113 if (node_.HasData() && this == node_.Data()->pointer) { |
| (...skipping 27 matching lines...) |
| 141 | 141 |
| 142 void EntryImpl::Close() { | 142 void EntryImpl::Close() { |
| 143 Release(); | 143 Release(); |
| 144 } | 144 } |
| 145 | 145 |
| 146 std::string EntryImpl::GetKey() const { | 146 std::string EntryImpl::GetKey() const { |
| 147 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 147 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 148 if (entry->Data()->key_len > kMaxInternalKeyLength) { | 148 if (entry->Data()->key_len > kMaxInternalKeyLength) { |
| 149 Addr address(entry->Data()->long_key); | 149 Addr address(entry->Data()->long_key); |
| 150 DCHECK(address.is_initialized()); | 150 DCHECK(address.is_initialized()); |
| 151 COMPILE_ASSERT(NUM_STREAMS == kKeyFileIndex, invalid_key_index); | 151 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); |
| 152 File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address, | 152 File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address, |
| 153 kKeyFileIndex); | 153 kKeyFileIndex); |
| 154 | 154 |
| 155 size_t offset = 0; | 155 size_t offset = 0; |
| 156 if (address.is_block_file()) | 156 if (address.is_block_file()) |
| 157 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 157 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 158 | 158 |
| 159 std::string key; | 159 std::string key; |
| 160 if (!file || !file->Read(WriteInto(&key, entry->Data()->key_len + 1), | 160 if (!file || !file->Read(WriteInto(&key, entry->Data()->key_len + 1), |
| 161 entry->Data()->key_len + 1, offset)) | 161 entry->Data()->key_len + 1, offset)) |
| 162 key.clear(); | 162 key.clear(); |
| 163 return key; | 163 return key; |
| 164 } else { | 164 } else { |
| 165 return std::string(entry->Data()->key); | 165 return std::string(entry->Data()->key); |
| 166 } | 166 } |
| 167 } | 167 } |
| 168 | 168 |
| 169 Time EntryImpl::GetLastUsed() const { | 169 Time EntryImpl::GetLastUsed() const { |
| 170 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 170 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 171 return Time::FromInternalValue(node->Data()->last_used); | 171 return Time::FromInternalValue(node->Data()->last_used); |
| 172 } | 172 } |
| 173 | 173 |
| 174 Time EntryImpl::GetLastModified() const { | 174 Time EntryImpl::GetLastModified() const { |
| 175 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 175 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 176 return Time::FromInternalValue(node->Data()->last_modified); | 176 return Time::FromInternalValue(node->Data()->last_modified); |
| 177 } | 177 } |
| 178 | 178 |
| 179 int32 EntryImpl::GetDataSize(int index) const { | 179 int32 EntryImpl::GetDataSize(int index) const { |
| 180 if (index < 0 || index >= NUM_STREAMS) | 180 if (index < 0 || index >= kNumStreams) |
| 181 return 0; | 181 return 0; |
| 182 | 182 |
| 183 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 183 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 184 return entry->Data()->data_size[index]; | 184 return entry->Data()->data_size[index]; |
| 185 } | 185 } |
| 186 | 186 |
| 187 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, | 187 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 188 net::CompletionCallback* completion_callback) { | 188 net::CompletionCallback* completion_callback) { |
| 189 DCHECK(node_.Data()->dirty); | 189 DCHECK(node_.Data()->dirty); |
| 190 if (index < 0 || index >= NUM_STREAMS) | 190 if (index < 0 || index >= kNumStreams) |
| 191 return net::ERR_INVALID_ARGUMENT; | 191 return net::ERR_INVALID_ARGUMENT; |
| 192 | 192 |
| 193 int entry_size = entry_.Data()->data_size[index]; | 193 int entry_size = entry_.Data()->data_size[index]; |
| 194 if (offset >= entry_size || offset < 0 || !buf_len) | 194 if (offset >= entry_size || offset < 0 || !buf_len) |
| 195 return 0; | 195 return 0; |
| 196 | 196 |
| 197 if (buf_len < 0) | 197 if (buf_len < 0) |
| 198 return net::ERR_INVALID_ARGUMENT; | 198 return net::ERR_INVALID_ARGUMENT; |
| 199 | 199 |
| 200 Time start = Time::Now(); | 200 Time start = Time::Now(); |
| 201 static Histogram stats("DiskCache.ReadTime", TimeDelta::FromMilliseconds(1), | |
| 202 TimeDelta::FromSeconds(10), 50); | |
| 203 stats.SetFlags(kUmaTargetedHistogramFlag); | |
| 204 | 201 |
| 205 if (offset + buf_len > entry_size) | 202 if (offset + buf_len > entry_size) |
| 206 buf_len = entry_size - offset; | 203 buf_len = entry_size - offset; |
| 207 | 204 |
| 208 UpdateRank(false); | 205 UpdateRank(false); |
| 209 | 206 |
| 210 backend_->OnEvent(Stats::READ_DATA); | 207 backend_->OnEvent(Stats::READ_DATA); |
| 211 | 208 |
| 212 if (user_buffers_[index].get()) { | 209 if (user_buffers_[index].get()) { |
| 213 // Complete the operation locally. | 210 // Complete the operation locally. |
| 214 DCHECK(kMaxBlockSize >= offset + buf_len); | 211 DCHECK(kMaxBlockSize >= offset + buf_len); |
| 215 memcpy(buf->data(), user_buffers_[index].get() + offset, buf_len); | 212 memcpy(buf->data(), user_buffers_[index].get() + offset, buf_len); |
| 216 if (backend_->cache_type() == net::DISK_CACHE) | 213 ReportIOTime(kRead, start); |
| 217 stats.AddTime(Time::Now() - start); | |
| 218 return buf_len; | 214 return buf_len; |
| 219 } | 215 } |
| 220 | 216 |
| 221 Addr address(entry_.Data()->data_addr[index]); | 217 Addr address(entry_.Data()->data_addr[index]); |
| 222 DCHECK(address.is_initialized()); | 218 DCHECK(address.is_initialized()); |
| 223 if (!address.is_initialized()) | 219 if (!address.is_initialized()) |
| 224 return net::ERR_FAILED; | 220 return net::ERR_FAILED; |
| 225 | 221 |
| 226 File* file = GetBackingFile(address, index); | 222 File* file = GetBackingFile(address, index); |
| 227 if (!file) | 223 if (!file) |
| (...skipping 11 matching lines...) |
| 239 bool completed; | 235 bool completed; |
| 240 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { | 236 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { |
| 241 if (io_callback) | 237 if (io_callback) |
| 242 io_callback->Discard(); | 238 io_callback->Discard(); |
| 243 return net::ERR_FAILED; | 239 return net::ERR_FAILED; |
| 244 } | 240 } |
| 245 | 241 |
| 246 if (io_callback && completed) | 242 if (io_callback && completed) |
| 247 io_callback->Discard(); | 243 io_callback->Discard(); |
| 248 | 244 |
| 249 if (backend_->cache_type() == net::DISK_CACHE) | 245 ReportIOTime(kRead, start); |
| 250 stats.AddTime(Time::Now() - start); | |
| 251 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; | 246 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; |
| 252 } | 247 } |
| 253 | 248 |
| 254 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, | 249 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 255 net::CompletionCallback* completion_callback, | 250 net::CompletionCallback* completion_callback, |
| 256 bool truncate) { | 251 bool truncate) { |
| 257 DCHECK(node_.Data()->dirty); | 252 DCHECK(node_.Data()->dirty); |
| 258 if (index < 0 || index >= NUM_STREAMS) | 253 if (index < 0 || index >= kNumStreams) |
| 259 return net::ERR_INVALID_ARGUMENT; | 254 return net::ERR_INVALID_ARGUMENT; |
| 260 | 255 |
| 261 if (offset < 0 || buf_len < 0) | 256 if (offset < 0 || buf_len < 0) |
| 262 return net::ERR_INVALID_ARGUMENT; | 257 return net::ERR_INVALID_ARGUMENT; |
| 263 | 258 |
| 264 int max_file_size = backend_->MaxFileSize(); | 259 int max_file_size = backend_->MaxFileSize(); |
| 265 | 260 |
| 266 // offset or buf_len could be negative numbers. | 261 // offset or buf_len could be negative numbers. |
| 267 if (offset > max_file_size || buf_len > max_file_size || | 262 if (offset > max_file_size || buf_len > max_file_size || |
| 268 offset + buf_len > max_file_size) { | 263 offset + buf_len > max_file_size) { |
| 269 int size = offset + buf_len; | 264 int size = offset + buf_len; |
| 270 if (size <= max_file_size) | 265 if (size <= max_file_size) |
| 271 size = kint32max; | 266 size = kint32max; |
| 272 backend_->TooMuchStorageRequested(size); | 267 backend_->TooMuchStorageRequested(size); |
| 273 return net::ERR_FAILED; | 268 return net::ERR_FAILED; |
| 274 } | 269 } |
| 275 | 270 |
| 276 Time start = Time::Now(); | 271 Time start = Time::Now(); |
| 277 static Histogram stats("DiskCache.WriteTime", TimeDelta::FromMilliseconds(1), | |
| 278 TimeDelta::FromSeconds(10), 50); | |
| 279 stats.SetFlags(kUmaTargetedHistogramFlag); | |
| 280 | 272 |
| 281 // Read the size at this point (it may change inside prepare). | 273 // Read the size at this point (it may change inside prepare). |
| 282 int entry_size = entry_.Data()->data_size[index]; | 274 int entry_size = entry_.Data()->data_size[index]; |
| 283 if (!PrepareTarget(index, offset, buf_len, truncate)) | 275 if (!PrepareTarget(index, offset, buf_len, truncate)) |
| 284 return net::ERR_FAILED; | 276 return net::ERR_FAILED; |
| 285 | 277 |
| 286 if (entry_size < offset + buf_len) { | 278 if (entry_size < offset + buf_len) { |
| 287 unreported_size_[index] += offset + buf_len - entry_size; | 279 unreported_size_[index] += offset + buf_len - entry_size; |
| 288 entry_.Data()->data_size[index] = offset + buf_len; | 280 entry_.Data()->data_size[index] = offset + buf_len; |
| 289 entry_.set_modified(); | 281 entry_.set_modified(); |
| (...skipping 17 matching lines...) |
| 307 | 299 |
| 308 backend_->OnEvent(Stats::WRITE_DATA); | 300 backend_->OnEvent(Stats::WRITE_DATA); |
| 309 | 301 |
| 310 if (user_buffers_[index].get()) { | 302 if (user_buffers_[index].get()) { |
| 311 // Complete the operation locally. | 303 // Complete the operation locally. |
| 312 if (!buf_len) | 304 if (!buf_len) |
| 313 return 0; | 305 return 0; |
| 314 | 306 |
| 315 DCHECK(kMaxBlockSize >= offset + buf_len); | 307 DCHECK(kMaxBlockSize >= offset + buf_len); |
| 316 memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len); | 308 memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len); |
| 317 if (backend_->cache_type() == net::DISK_CACHE) | 309 ReportIOTime(kWrite, start); |
| 318 stats.AddTime(Time::Now() - start); | |
| 319 return buf_len; | 310 return buf_len; |
| 320 } | 311 } |
| 321 | 312 |
| 322 Addr address(entry_.Data()->data_addr[index]); | 313 Addr address(entry_.Data()->data_addr[index]); |
| 323 File* file = GetBackingFile(address, index); | 314 File* file = GetBackingFile(address, index); |
| 324 if (!file) | 315 if (!file) |
| 325 return net::ERR_FAILED; | 316 return net::ERR_FAILED; |
| 326 | 317 |
| 327 size_t file_offset = offset; | 318 size_t file_offset = offset; |
| 328 if (address.is_block_file()) { | 319 if (address.is_block_file()) { |
| (...skipping 15 matching lines...) |
| 344 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, | 335 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, |
| 345 &completed)) { | 336 &completed)) { |
| 346 if (io_callback) | 337 if (io_callback) |
| 347 io_callback->Discard(); | 338 io_callback->Discard(); |
| 348 return net::ERR_FAILED; | 339 return net::ERR_FAILED; |
| 349 } | 340 } |
| 350 | 341 |
| 351 if (io_callback && completed) | 342 if (io_callback && completed) |
| 352 io_callback->Discard(); | 343 io_callback->Discard(); |
| 353 | 344 |
| 354 if (backend_->cache_type() == net::DISK_CACHE) | 345 ReportIOTime(kWrite, start); |
| 355 stats.AddTime(Time::Now() - start); | |
| 356 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; | 346 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; |
| 357 } | 347 } |
| 358 | 348 |
| 359 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 349 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 360 net::CompletionCallback* completion_callback) { | 350 net::CompletionCallback* completion_callback) { |
| 361 DCHECK(node_.Data()->dirty); | 351 DCHECK(node_.Data()->dirty); |
| 362 int result = InitSparseData(); | 352 int result = InitSparseData(); |
| 363 if (net::OK != result) | 353 if (net::OK != result) |
| 364 return result; | 354 return result; |
| 365 | 355 |
| 366 return sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 356 Time start = Time::Now(); |
| 367 completion_callback); | 357 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 358 completion_callback); |
| 359 ReportIOTime(kSparseRead, start); |
| 360 return result; |
| 368 } | 361 } |
| 369 | 362 |
| 370 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 363 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 371 net::CompletionCallback* completion_callback) { | 364 net::CompletionCallback* completion_callback) { |
| 372 DCHECK(node_.Data()->dirty); | 365 DCHECK(node_.Data()->dirty); |
| 373 int result = InitSparseData(); | 366 int result = InitSparseData(); |
| 374 if (net::OK != result) | 367 if (net::OK != result) |
| 375 return result; | 368 return result; |
| 376 | 369 |
| 377 return sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, buf_len, | 370 Time start = Time::Now(); |
| 378 completion_callback); | 371 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 372 buf_len, completion_callback); |
| 373 ReportIOTime(kSparseWrite, start); |
| 374 return result; |
| 379 } | 375 } |
| 380 | 376 |
| 381 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { | 377 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { |
| 382 int result = InitSparseData(); | 378 int result = InitSparseData(); |
| 383 if (net::OK != result) | 379 if (net::OK != result) |
| 384 return result; | 380 return result; |
| 385 | 381 |
| 386 return sparse_->GetAvailableRange(offset, len, start); | 382 return sparse_->GetAvailableRange(offset, len, start); |
| 387 } | 383 } |
| 388 | 384 |
| (...skipping 65 matching lines...) |
| 454 doomed_ = true; | 450 doomed_ = true; |
| 455 } | 451 } |
| 456 | 452 |
| 457 void EntryImpl::DeleteEntryData(bool everything) { | 453 void EntryImpl::DeleteEntryData(bool everything) { |
| 458 DCHECK(doomed_ || !everything); | 454 DCHECK(doomed_ || !everything); |
| 459 | 455 |
| 460 if (GetDataSize(0)) | 456 if (GetDataSize(0)) |
| 461 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); | 457 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); |
| 462 if (GetDataSize(1)) | 458 if (GetDataSize(1)) |
| 463 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); | 459 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); |
| 464 for (int index = 0; index < NUM_STREAMS; index++) { | 460 for (int index = 0; index < kNumStreams; index++) { |
| 465 Addr address(entry_.Data()->data_addr[index]); | 461 Addr address(entry_.Data()->data_addr[index]); |
| 466 if (address.is_initialized()) { | 462 if (address.is_initialized()) { |
| 467 DeleteData(address, index); | 463 DeleteData(address, index); |
| 468 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | 464 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - |
| 469 unreported_size_[index], 0); | 465 unreported_size_[index], 0); |
| 470 entry_.Data()->data_addr[index] = 0; | 466 entry_.Data()->data_addr[index] = 0; |
| 471 entry_.Data()->data_size[index] = 0; | 467 entry_.Data()->data_size[index] = 0; |
| 472 } | 468 } |
| 473 } | 469 } |
| 474 | 470 |
| (...skipping 103 matching lines...) |
| 578 backend_->DecrementIoCount(); | 574 backend_->DecrementIoCount(); |
| 579 } | 575 } |
| 580 | 576 |
| 581 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { | 577 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { |
| 582 node_.Data()->last_used = last_used.ToInternalValue(); | 578 node_.Data()->last_used = last_used.ToInternalValue(); |
| 583 node_.Data()->last_modified = last_modified.ToInternalValue(); | 579 node_.Data()->last_modified = last_modified.ToInternalValue(); |
| 584 node_.set_modified(); | 580 node_.set_modified(); |
| 585 } | 581 } |
| 586 | 582 |
| 587 bool EntryImpl::CreateDataBlock(int index, int size) { | 583 bool EntryImpl::CreateDataBlock(int index, int size) { |
| 588 DCHECK(index >= 0 && index < NUM_STREAMS); | 584 DCHECK(index >= 0 && index < kNumStreams); |
| 589 | 585 |
| 590 Addr address(entry_.Data()->data_addr[index]); | 586 Addr address(entry_.Data()->data_addr[index]); |
| 591 if (!CreateBlock(size, &address)) | 587 if (!CreateBlock(size, &address)) |
| 592 return false; | 588 return false; |
| 593 | 589 |
| 594 entry_.Data()->data_addr[index] = address.value(); | 590 entry_.Data()->data_addr[index] = address.value(); |
| 595 entry_.Store(); | 591 entry_.Store(); |
| 596 return true; | 592 return true; |
| 597 } | 593 } |
| 598 | 594 |
| (...skipping 239 matching lines...) |
| 838 if (sparse_.get()) | 834 if (sparse_.get()) |
| 839 return net::OK; | 835 return net::OK; |
| 840 | 836 |
| 841 sparse_.reset(new SparseControl(this)); | 837 sparse_.reset(new SparseControl(this)); |
| 842 int result = sparse_->Init(); | 838 int result = sparse_->Init(); |
| 843 if (net::OK != result) | 839 if (net::OK != result) |
| 844 sparse_.reset(); | 840 sparse_.reset(); |
| 845 return result; | 841 return result; |
| 846 } | 842 } |
| 847 | 843 |
| 844 void EntryImpl::ReportIOTime(Operation op, const base::Time& start) { |
| 845 int group = backend_->GetSizeGroup(); |
| 846 switch (op) { |
| 847 case kRead: |
| 848 CACHE_UMA(AGE_MS, "ReadTime", group, start); |
| 849 break; |
| 850 case kWrite: |
| 851 CACHE_UMA(AGE_MS, "WriteTime", group, start); |
| 852 break; |
| 853 case kSparseRead: |
| 854 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); |
| 855 break; |
| 856 case kSparseWrite: |
| 857 CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start); |
| 858 break; |
| 859 default: |
| 860 NOTREACHED(); |
| 861 } |
| 862 } |
| 863 |
| 848 void EntryImpl::Log(const char* msg) { | 864 void EntryImpl::Log(const char* msg) { |
| 849 void* pointer = NULL; | 865 void* pointer = NULL; |
| 850 int dirty = 0; | 866 int dirty = 0; |
| 851 if (node_.HasData()) { | 867 if (node_.HasData()) { |
| 852 pointer = node_.Data()->pointer; | 868 pointer = node_.Data()->pointer; |
| 853 dirty = node_.Data()->dirty; | 869 dirty = node_.Data()->dirty; |
| 854 } | 870 } |
| 855 | 871 |
| 856 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 872 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 857 entry_.address().value(), node_.address().value()); | 873 entry_.address().value(), node_.address().value()); |
| 858 | 874 |
| 859 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 875 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 860 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 876 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 861 | 877 |
| 862 Trace(" doomed: %d 0x%p 0x%x", doomed_, pointer, dirty); | 878 Trace(" doomed: %d 0x%p 0x%x", doomed_, pointer, dirty); |
| 863 } | 879 } |
| 864 | 880 |
| 865 } // namespace disk_cache | 881 } // namespace disk_cache |
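
Note on the pattern: the diff above removes the per-call-site static Histogram objects ("DiskCache.ReadTime", "DiskCache.WriteTime") and funnels all I/O timing through the new EntryImpl::ReportIOTime() helper, which dispatches on the operation type and emits a CACHE_UMA(AGE_MS, ...) metric, keyed by the backend's size group for regular reads and writes. Below is a minimal, self-contained sketch of that pattern, not the real Chromium API: RecordAgeMs() and the std::chrono plumbing stand in for the CACHE_UMA macro and base::Time, and the "DiskCache."-prefixed metric names are assumptions carried over from the removed histograms.

    #include <chrono>
    #include <cstdio>
    #include <string>

    enum class Operation { kRead, kWrite, kSparseRead, kSparseWrite };

    // Hypothetical stand-in for CACHE_UMA(AGE_MS, name, group, start):
    // records the elapsed time since |start| in milliseconds.
    void RecordAgeMs(const std::string& name, int group,
                     std::chrono::steady_clock::time_point start) {
      auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - start);
      std::printf("%s.%d: %lld ms\n", name.c_str(), group,
                  static_cast<long long>(elapsed.count()));
    }

    // Mirrors the shape of EntryImpl::ReportIOTime(): one switch, one metric
    // per operation, with the size group folded in for plain reads/writes only.
    void ReportIOTime(Operation op, int size_group,
                      std::chrono::steady_clock::time_point start) {
      switch (op) {
        case Operation::kRead:
          RecordAgeMs("DiskCache.ReadTime", size_group, start);
          break;
        case Operation::kWrite:
          RecordAgeMs("DiskCache.WriteTime", size_group, start);
          break;
        case Operation::kSparseRead:
          RecordAgeMs("DiskCache.SparseReadTime", 0, start);
          break;
        case Operation::kSparseWrite:
          RecordAgeMs("DiskCache.SparseWriteTime", 0, start);
          break;
      }
    }

    int main() {
      auto start = std::chrono::steady_clock::now();
      // ... perform the read ...
      ReportIOTime(Operation::kRead, /*size_group=*/1, start);
      return 0;
    }

Centralizing the switch keeps metric names and group handling in one place, which is what lets the sparse read/write paths report timings without each call site owning its own histogram setup.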