| OLD | NEW |
| 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/histogram.h" | 7 #include "base/histogram.h" |
| 8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
| 9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
| (...skipping 71 matching lines...) |
| 82 namespace disk_cache { | 82 namespace disk_cache { |
| 83 | 83 |
| 84 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) | 84 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) |
| 85 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { | 85 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { |
| 86 entry_.LazyInit(backend->File(address), address); | 86 entry_.LazyInit(backend->File(address), address); |
| 87 doomed_ = false; | 87 doomed_ = false; |
| 88 backend_ = backend; | 88 backend_ = backend; |
| 89 for (int i = 0; i < kNumStreams; i++) { | 89 for (int i = 0; i < kNumStreams; i++) { |
| 90 unreported_size_[i] = 0; | 90 unreported_size_[i] = 0; |
| 91 } | 91 } |
| 92 key_file_ = NULL; | |
| 93 } | 92 } |
| 94 | 93 |
| 95 // When an entry is deleted from the cache, we clean up all the data associated | 94 // When an entry is deleted from the cache, we clean up all the data associated |
| 96 // with it for two reasons: to simplify the reuse of the block (we know that any | 95 // with it for two reasons: to simplify the reuse of the block (we know that any |
| 97 // unused block is filled with zeros), and to simplify the handling of write / | 96 // unused block is filled with zeros), and to simplify the handling of write / |
| 98 // read partial information from an entry (don't have to worry about returning | 97 // read partial information from an entry (don't have to worry about returning |
| 99 // data related to a previous cache entry because the range was not fully | 98 // data related to a previous cache entry because the range was not fully |
| 100 // written before). | 99 // written before). |
| 101 EntryImpl::~EntryImpl() { | 100 EntryImpl::~EntryImpl() { |
| 102 // Save the sparse info to disk before deleting this entry. | 101 // Save the sparse info to disk before deleting this entry. |
| (...skipping 21 matching lines...) |
| 124 node_.Store(); | 123 node_.Store(); |
| 125 } else if (node_.HasData() && node_.Data()->dirty) { | 124 } else if (node_.HasData() && node_.Data()->dirty) { |
| 126 node_.Data()->dirty = 0; | 125 node_.Data()->dirty = 0; |
| 127 node_.Store(); | 126 node_.Store(); |
| 128 } | 127 } |
| 129 } | 128 } |
| 130 | 129 |
| 131 backend_->CacheEntryDestroyed(entry_.address()); | 130 backend_->CacheEntryDestroyed(entry_.address()); |
| 132 } | 131 } |
| 133 | 132 |
| 134 void EntryImpl::Doom() { | 133 void EntryImpl::DoomImpl() { |
| 135 if (doomed_) | 134 if (doomed_) |
| 136 return; | 135 return; |
| 137 | 136 |
| 138 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | 137 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 139 backend_->InternalDoomEntry(this); | 138 backend_->InternalDoomEntry(this); |
| 140 } | 139 } |
| 141 | 140 |
| 141 void EntryImpl::Doom() { |
| 142 backend_->background_queue()->DoomEntryImpl(this); |
| 143 } |
| 144 |
| 142 void EntryImpl::Close() { | 145 void EntryImpl::Close() { |
| 143 Release(); | 146 backend_->background_queue()->CloseEntryImpl(this); |
| 144 } | 147 } |
| 145 | 148 |
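Note: the Doom/Close changes above, and the ReadData/WriteData/sparse changes further down, all apply the same split: the public entry point now only forwards the call to backend_->background_queue(), and the old synchronous body survives as a *Impl method that the queue invokes later on the cache thread. A minimal sketch of that proxy shape, with a stand-in queue and modern C++ for brevity (the real queue's plumbing lives elsewhere and is not part of this file):

#include <functional>
#include <iostream>
#include <queue>

// Stand-in for the backend's background queue; it only records closures here.
class BackgroundQueue {
 public:
  void Post(std::function<void()> task) { tasks_.push(std::move(task)); }
  // In the real cache, draining happens on the dedicated cache thread.
  void RunAll() {
    while (!tasks_.empty()) {
      tasks_.front()();
      tasks_.pop();
    }
  }
 private:
  std::queue<std::function<void()>> tasks_;
};

class Entry {
 public:
  explicit Entry(BackgroundQueue* queue) : queue_(queue) {}

  // Public API: enqueue the operation and return immediately.
  void Doom() { queue_->Post([this] { DoomImpl(); }); }

  // The former body of Doom(), run later by the queue.
  void DoomImpl() {
    doomed_ = true;
    std::cout << "entry doomed\n";
  }

 private:
  BackgroundQueue* queue_;
  bool doomed_ = false;
};

int main() {
  BackgroundQueue queue;
  Entry entry(&queue);
  entry.Doom();    // returns right away, nothing has run yet
  queue.RunAll();  // DoomImpl() executes here
  return 0;
}

ReadData and WriteData below also keep a synchronous fast path: when the caller passes no callback, they run the *Impl body directly instead of going through the queue.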
| 146 std::string EntryImpl::GetKey() const { | 149 std::string EntryImpl::GetKey() const { |
| 147 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 150 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 148 if (entry->Data()->key_len <= kMaxInternalKeyLength) | 151 if (entry->Data()->key_len <= kMaxInternalKeyLength) |
| 149 return std::string(entry->Data()->key); | 152 return std::string(entry->Data()->key); |
| 150 | 153 |
| 154 // We keep a copy of the key so that we can always return it, even if the |
| 155 // backend is disabled. |
| 156 if (!key_.empty()) |
| 157 return key_; |
| 158 |
| 151 Addr address(entry->Data()->long_key); | 159 Addr address(entry->Data()->long_key); |
| 152 DCHECK(address.is_initialized()); | 160 DCHECK(address.is_initialized()); |
| 153 size_t offset = 0; | 161 size_t offset = 0; |
| 154 if (address.is_block_file()) | 162 if (address.is_block_file()) |
| 155 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 163 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 156 | 164 |
| 157 if (!key_file_) { | 165 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); |
| 158 // We keep a copy of the file needed to access the key so that we can | 166 File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address, |
| 159 // always return this object's key, even if the backend is disabled. | 167 kKeyFileIndex); |
| 160 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); | |
| 161 key_file_ = const_cast<EntryImpl*>(this)->GetBackingFile(address, | |
| 162 kKeyFileIndex); | |
| 163 } | |
| 164 | 168 |
| 165 std::string key; | 169 if (!key_file || |
| 166 if (!key_file_ || | 170 !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1), |
| 167 !key_file_->Read(WriteInto(&key, entry->Data()->key_len + 1), | 171 entry->Data()->key_len + 1, offset)) |
| 168 entry->Data()->key_len + 1, offset)) | 172 key_.clear(); |
| 169 key.clear(); | 173 return key_; |
| 170 return key; | |
| 171 } | 174 } |
| 172 | 175 |
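GetKey() now caches the long key in a member instead of holding on to the backing File, per the new comment above: once the key has been read it can be returned even if the backend is disabled. Because GetKey() is const, the cached member must be writable from a const context (presumably declared mutable, or written through the existing const_cast; the header is not part of this diff). A small sketch of that lazy cache-in-a-const-getter shape:

#include <iostream>
#include <string>

class KeyedThing {
 public:
  std::string GetKey() const {
    if (!key_.empty())
      return key_;               // already loaded once, no disk access needed
    key_ = LoadKeyFromDisk();    // cache the result for later calls
    return key_;
  }

 private:
  // Placeholder for the real read through File::Read at the long-key address.
  static std::string LoadKeyFromDisk() { return "http://example.com/resource"; }

  mutable std::string key_;      // writable even though GetKey() is const
};

int main() {
  KeyedThing thing;
  std::cout << thing.GetKey() << "\n";  // loads and caches
  std::cout << thing.GetKey() << "\n";  // served from the cache
  return 0;
}

The real code reads straight into the member with WriteInto(&key_, key_len + 1), which sizes the string and hands back its buffer, so no temporary copy is needed.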
| 173 Time EntryImpl::GetLastUsed() const { | 176 Time EntryImpl::GetLastUsed() const { |
| 174 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 177 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 175 return Time::FromInternalValue(node->Data()->last_used); | 178 return Time::FromInternalValue(node->Data()->last_used); |
| 176 } | 179 } |
| 177 | 180 |
| 178 Time EntryImpl::GetLastModified() const { | 181 Time EntryImpl::GetLastModified() const { |
| 179 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 182 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 180 return Time::FromInternalValue(node->Data()->last_modified); | 183 return Time::FromInternalValue(node->Data()->last_modified); |
| 181 } | 184 } |
| 182 | 185 |
| 183 int32 EntryImpl::GetDataSize(int index) const { | 186 int32 EntryImpl::GetDataSize(int index) const { |
| 184 if (index < 0 || index >= kNumStreams) | 187 if (index < 0 || index >= kNumStreams) |
| 185 return 0; | 188 return 0; |
| 186 | 189 |
| 187 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 190 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 188 return entry->Data()->data_size[index]; | 191 return entry->Data()->data_size[index]; |
| 189 } | 192 } |
| 190 | 193 |
| 191 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, | 194 int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf, |
| 192 net::CompletionCallback* completion_callback) { | 195 int buf_len, CompletionCallback* callback) { |
| 193 DCHECK(node_.Data()->dirty); | 196 DCHECK(node_.Data()->dirty); |
| 194 if (index < 0 || index >= kNumStreams) | 197 if (index < 0 || index >= kNumStreams) |
| 195 return net::ERR_INVALID_ARGUMENT; | 198 return net::ERR_INVALID_ARGUMENT; |
| 196 | 199 |
| 197 int entry_size = entry_.Data()->data_size[index]; | 200 int entry_size = entry_.Data()->data_size[index]; |
| 198 if (offset >= entry_size || offset < 0 || !buf_len) | 201 if (offset >= entry_size || offset < 0 || !buf_len) |
| 199 return 0; | 202 return 0; |
| 200 | 203 |
| 201 if (buf_len < 0) | 204 if (buf_len < 0) |
| 202 return net::ERR_INVALID_ARGUMENT; | 205 return net::ERR_INVALID_ARGUMENT; |
| (...skipping 23 matching lines...) |
| 226 File* file = GetBackingFile(address, index); | 229 File* file = GetBackingFile(address, index); |
| 227 if (!file) | 230 if (!file) |
| 228 return net::ERR_FAILED; | 231 return net::ERR_FAILED; |
| 229 | 232 |
| 230 size_t file_offset = offset; | 233 size_t file_offset = offset; |
| 231 if (address.is_block_file()) | 234 if (address.is_block_file()) |
| 232 file_offset += address.start_block() * address.BlockSize() + | 235 file_offset += address.start_block() * address.BlockSize() + |
| 233 kBlockHeaderSize; | 236 kBlockHeaderSize; |
| 234 | 237 |
| 235 SyncCallback* io_callback = NULL; | 238 SyncCallback* io_callback = NULL; |
| 236 if (completion_callback) | 239 if (callback) |
| 237 io_callback = new SyncCallback(this, buf, completion_callback); | 240 io_callback = new SyncCallback(this, buf, callback); |
| 238 | 241 |
| 239 bool completed; | 242 bool completed; |
| 240 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { | 243 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { |
| 241 if (io_callback) | 244 if (io_callback) |
| 242 io_callback->Discard(); | 245 io_callback->Discard(); |
| 243 return net::ERR_FAILED; | 246 return net::ERR_FAILED; |
| 244 } | 247 } |
| 245 | 248 |
| 246 if (io_callback && completed) | 249 if (io_callback && completed) |
| 247 io_callback->Discard(); | 250 io_callback->Discard(); |
| 248 | 251 |
| 249 ReportIOTime(kRead, start); | 252 ReportIOTime(kRead, start); |
| 250 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; | 253 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING; |
| 251 } | 254 } |
| 252 | 255 |
| 253 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, | 256 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 254 net::CompletionCallback* completion_callback, | 257 net::CompletionCallback* callback) { |
| 255 bool truncate) { | 258 if (!callback) |
| 259 return ReadDataImpl(index, offset, buf, buf_len, callback); |
| 260 |
| 261 DCHECK(node_.Data()->dirty); |
| 262 if (index < 0 || index >= kNumStreams) |
| 263 return net::ERR_INVALID_ARGUMENT; |
| 264 |
| 265 int entry_size = entry_.Data()->data_size[index]; |
| 266 if (offset >= entry_size || offset < 0 || !buf_len) |
| 267 return 0; |
| 268 |
| 269 if (buf_len < 0) |
| 270 return net::ERR_INVALID_ARGUMENT; |
| 271 |
| 272 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len, |
| 273 callback); |
| 274 return net::ERR_IO_PENDING; |
| 275 } |
| 276 |
| 277 int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf, |
| 278 int buf_len, CompletionCallback* callback, |
| 279 bool truncate) { |
| 256 DCHECK(node_.Data()->dirty); | 280 DCHECK(node_.Data()->dirty); |
| 257 if (index < 0 || index >= kNumStreams) | 281 if (index < 0 || index >= kNumStreams) |
| 258 return net::ERR_INVALID_ARGUMENT; | 282 return net::ERR_INVALID_ARGUMENT; |
| 259 | 283 |
| 260 if (offset < 0 || buf_len < 0) | 284 if (offset < 0 || buf_len < 0) |
| 261 return net::ERR_INVALID_ARGUMENT; | 285 return net::ERR_INVALID_ARGUMENT; |
| 262 | 286 |
| 263 int max_file_size = backend_->MaxFileSize(); | 287 int max_file_size = backend_->MaxFileSize(); |
| 264 | 288 |
| 265 // offset of buf_len could be negative numbers. | 289 // offset or buf_len could be negative numbers. |
| 266 if (offset > max_file_size || buf_len > max_file_size || | 290 if (offset > max_file_size || buf_len > max_file_size || |
| 267 offset + buf_len > max_file_size) { | 291 offset + buf_len > max_file_size) { |
| 268 int size = offset + buf_len; | 292 int size = offset + buf_len; |
| 269 if (size <= max_file_size) | 293 if (size <= max_file_size) |
| 270 size = kint32max; | 294 size = kint32max; |
| 271 backend_->TooMuchStorageRequested(size); | 295 backend_->TooMuchStorageRequested(size); |
| 272 return net::ERR_FAILED; | 296 return net::ERR_FAILED; |
| 273 } | 297 } |
| 274 | 298 |
| 275 TimeTicks start = TimeTicks::Now(); | 299 TimeTicks start = TimeTicks::Now(); |
| (...skipping 49 matching lines...) |
| 325 kBlockHeaderSize; | 349 kBlockHeaderSize; |
| 326 } else if (truncate) { | 350 } else if (truncate) { |
| 327 if (!file->SetLength(offset + buf_len)) | 351 if (!file->SetLength(offset + buf_len)) |
| 328 return net::ERR_FAILED; | 352 return net::ERR_FAILED; |
| 329 } | 353 } |
| 330 | 354 |
| 331 if (!buf_len) | 355 if (!buf_len) |
| 332 return 0; | 356 return 0; |
| 333 | 357 |
| 334 SyncCallback* io_callback = NULL; | 358 SyncCallback* io_callback = NULL; |
| 335 if (completion_callback) | 359 if (callback) |
| 336 io_callback = new SyncCallback(this, buf, completion_callback); | 360 io_callback = new SyncCallback(this, buf, callback); |
| 337 | 361 |
| 338 bool completed; | 362 bool completed; |
| 339 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, | 363 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, |
| 340 &completed)) { | 364 &completed)) { |
| 341 if (io_callback) | 365 if (io_callback) |
| 342 io_callback->Discard(); | 366 io_callback->Discard(); |
| 343 return net::ERR_FAILED; | 367 return net::ERR_FAILED; |
| 344 } | 368 } |
| 345 | 369 |
| 346 if (io_callback && completed) | 370 if (io_callback && completed) |
| 347 io_callback->Discard(); | 371 io_callback->Discard(); |
| 348 | 372 |
| 349 ReportIOTime(kWrite, start); | 373 ReportIOTime(kWrite, start); |
| 350 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; | 374 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING; |
| 351 } | 375 } |
| 352 | 376 |
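The size check near the top of WriteDataImpl rejects oversized requests and also copes with offset + buf_len wrapping: inside the error branch, if the recomputed sum still looks small it is taken to have overflowed and kint32max is reported to TooMuchStorageRequested instead. The same guard can be written without ever forming the wrapping sum; a sketch under illustrative limits (these are not the backend's MaxFileSize values):

#include <cstdint>
#include <iostream>
#include <limits>

// True when writing |buf_len| bytes at |offset| stays under |max|,
// computed without an overflowing addition.
bool FitsUnderLimit(int32_t offset, int32_t buf_len, int32_t max) {
  if (offset < 0 || buf_len < 0 || offset > max || buf_len > max)
    return false;
  return buf_len <= max - offset;  // both sides are in [0, max], so this is safe
}

int main() {
  const int32_t kMax = 8 * 1024 * 1024;  // illustrative, not MaxFileSize()
  std::cout << FitsUnderLimit(100, 200, kMax) << "\n";        // 1
  std::cout << FitsUnderLimit(kMax - 10, 20, kMax) << "\n";   // 0
  std::cout << FitsUnderLimit(std::numeric_limits<int32_t>::max(), 16, kMax)
            << "\n";                                          // 0
  return 0;
}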
| 353 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 377 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 354 net::CompletionCallback* completion_callback) { | 378 CompletionCallback* callback, bool truncate) { |
| 379 if (!callback) |
| 380 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); |
| 381 |
| 382 DCHECK(node_.Data()->dirty); |
| 383 if (index < 0 || index >= kNumStreams) |
| 384 return net::ERR_INVALID_ARGUMENT; |
| 385 |
| 386 if (offset < 0 || buf_len < 0) |
| 387 return net::ERR_INVALID_ARGUMENT; |
| 388 |
| 389 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len, |
| 390 truncate, callback); |
| 391 return net::ERR_IO_PENDING; |
| 392 } |
| 393 |
| 394 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len, |
| 395 CompletionCallback* callback) { |
| 355 DCHECK(node_.Data()->dirty); | 396 DCHECK(node_.Data()->dirty); |
| 356 int result = InitSparseData(); | 397 int result = InitSparseData(); |
| 357 if (net::OK != result) | 398 if (net::OK != result) |
| 358 return result; | 399 return result; |
| 359 | 400 |
| 360 TimeTicks start = TimeTicks::Now(); | 401 TimeTicks start = TimeTicks::Now(); |
| 361 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 402 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 362 completion_callback); | 403 callback); |
| 363 ReportIOTime(kSparseRead, start); | 404 ReportIOTime(kSparseRead, start); |
| 364 return result; | 405 return result; |
| 365 } | 406 } |
| 366 | 407 |
| 367 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 408 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 368 net::CompletionCallback* completion_callback) { | 409 net::CompletionCallback* callback) { |
| 410 if (!callback) |
| 411 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
| 412 |
| 413 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len, |
| 414 callback); |
| 415 return net::ERR_IO_PENDING; |
| 416 } |
| 417 |
| 418 int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf, |
| 419 int buf_len, CompletionCallback* callback) { |
| 369 DCHECK(node_.Data()->dirty); | 420 DCHECK(node_.Data()->dirty); |
| 370 int result = InitSparseData(); | 421 int result = InitSparseData(); |
| 371 if (net::OK != result) | 422 if (net::OK != result) |
| 372 return result; | 423 return result; |
| 373 | 424 |
| 374 TimeTicks start = TimeTicks::Now(); | 425 TimeTicks start = TimeTicks::Now(); |
| 375 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | 426 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 376 buf_len, completion_callback); | 427 buf_len, callback); |
| 377 ReportIOTime(kSparseWrite, start); | 428 ReportIOTime(kSparseWrite, start); |
| 378 return result; | 429 return result; |
| 379 } | 430 } |
| 380 | 431 |
| 432 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 433 net::CompletionCallback* callback) { |
| 434 if (!callback) |
| 435 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
| 436 |
| 437 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len, |
| 438 callback); |
| 439 return net::ERR_IO_PENDING; |
| 440 } |
| 441 |
| 442 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { |
| 443 return GetAvailableRange(offset, len, start); |
| 444 } |
| 445 |
| 381 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { | 446 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { |
| 382 int result = InitSparseData(); | 447 int result = InitSparseData(); |
| 383 if (net::OK != result) | 448 if (net::OK != result) |
| 384 return result; | 449 return result; |
| 385 | 450 |
| 386 return sparse_->GetAvailableRange(offset, len, start); | 451 return sparse_->GetAvailableRange(offset, len, start); |
| 387 } | 452 } |
| 388 | 453 |
| 389 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 454 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, |
| 390 CompletionCallback* callback) { | 455 CompletionCallback* callback) { |
| 391 return GetAvailableRange(offset, len, start); | 456 backend_->background_queue()->GetAvailableRange(this, offset, len, start, |
| 457 callback); |
| 458 return net::ERR_IO_PENDING; |
| 392 } | 459 } |
| 393 | 460 |
| 394 bool EntryImpl::CouldBeSparse() const { | 461 bool EntryImpl::CouldBeSparse() const { |
| 395 if (sparse_.get()) | 462 if (sparse_.get()) |
| 396 return true; | 463 return true; |
| 397 | 464 |
| 398 scoped_ptr<SparseControl> sparse; | 465 scoped_ptr<SparseControl> sparse; |
| 399 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); | 466 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); |
| 400 return sparse->CouldBeSparse(); | 467 return sparse->CouldBeSparse(); |
| 401 } | 468 } |
| 402 | 469 |
| 403 void EntryImpl::CancelSparseIO() { | 470 void EntryImpl::CancelSparseIO() { |
| 471 backend_->background_queue()->CancelSparseIO(this); |
| 472 } |
| 473 |
| 474 void EntryImpl::CancelSparseIOImpl() { |
| 404 if (!sparse_.get()) | 475 if (!sparse_.get()) |
| 405 return; | 476 return; |
| 406 | 477 |
| 407 sparse_->CancelIO(); | 478 sparse_->CancelIO(); |
| 408 } | 479 } |
| 409 | 480 |
| 410 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* completion_callback) { | 481 int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) { |
| 482 return sparse_->ReadyToUse(callback); |
| 483 } |
| 484 |
| 485 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) { |
| 411 if (!sparse_.get()) | 486 if (!sparse_.get()) |
| 412 return net::OK; | 487 return net::OK; |
| 413 | 488 |
| 414 return sparse_->ReadyToUse(completion_callback); | 489 backend_->background_queue()->ReadyForSparseIO(this, callback); |
| 490 return net::ERR_IO_PENDING; |
| 415 } | 491 } |
| 416 | 492 |
| 417 // ------------------------------------------------------------------------ | 493 // ------------------------------------------------------------------------ |
| 418 | 494 |
| 419 uint32 EntryImpl::GetHash() { | 495 uint32 EntryImpl::GetHash() { |
| 420 return entry_.Data()->hash; | 496 return entry_.Data()->hash; |
| 421 } | 497 } |
| 422 | 498 |
| 423 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 499 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, |
| 424 uint32 hash) { | 500 uint32 hash) { |
| (...skipping 10 matching lines...) |
| 435 | 511 |
| 436 entry_store->hash = hash; | 512 entry_store->hash = hash; |
| 437 entry_store->creation_time = Time::Now().ToInternalValue(); | 513 entry_store->creation_time = Time::Now().ToInternalValue(); |
| 438 entry_store->key_len = static_cast<int32>(key.size()); | 514 entry_store->key_len = static_cast<int32>(key.size()); |
| 439 if (entry_store->key_len > kMaxInternalKeyLength) { | 515 if (entry_store->key_len > kMaxInternalKeyLength) { |
| 440 Addr address(0); | 516 Addr address(0); |
| 441 if (!CreateBlock(entry_store->key_len + 1, &address)) | 517 if (!CreateBlock(entry_store->key_len + 1, &address)) |
| 442 return false; | 518 return false; |
| 443 | 519 |
| 444 entry_store->long_key = address.value(); | 520 entry_store->long_key = address.value(); |
| 445 key_file_ = GetBackingFile(address, kKeyFileIndex); | 521 File* key_file = GetBackingFile(address, kKeyFileIndex); |
| 522 key_ = key; |
| 446 | 523 |
| 447 size_t offset = 0; | 524 size_t offset = 0; |
| 448 if (address.is_block_file()) | 525 if (address.is_block_file()) |
| 449 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 526 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 450 | 527 |
| 451 if (!key_file_ || !key_file_->Write(key.data(), key.size(), offset)) { | 528 if (!key_file || !key_file->Write(key.data(), key.size(), offset)) { |
| 452 DeleteData(address, kKeyFileIndex); | 529 DeleteData(address, kKeyFileIndex); |
| 453 return false; | 530 return false; |
| 454 } | 531 } |
| 455 | 532 |
| 456 if (address.is_separate_file()) | 533 if (address.is_separate_file()) |
| 457 key_file_->SetLength(key.size() + 1); | 534 key_file->SetLength(key.size() + 1); |
| 458 } else { | 535 } else { |
| 459 memcpy(entry_store->key, key.data(), key.size()); | 536 memcpy(entry_store->key, key.data(), key.size()); |
| 460 entry_store->key[key.size()] = '\0'; | 537 entry_store->key[key.size()] = '\0'; |
| 461 } | 538 } |
| 462 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); | 539 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); |
| 463 node->dirty = backend_->GetCurrentEntryId(); | 540 node->dirty = backend_->GetCurrentEntryId(); |
| 464 Log("Create Entry "); | 541 Log("Create Entry "); |
| 465 return true; | 542 return true; |
| 466 } | 543 } |
| 467 | 544 |
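CreateEntry writes a long key either to a separate file or into a block file, and the same offset arithmetic appears here as in GetKey and ReadDataImpl: records in a block file start after a fixed header, at start_block * BlockSize(). A tiny sketch of that addressing (header and block sizes are illustrative; the real values come from the cache's disk format, not from this file):

#include <cstddef>
#include <iostream>

// Byte offset of a record inside a block file: skip the file header, then
// advance by whole blocks.
size_t BlockFileOffset(int start_block, int block_size, size_t header_size) {
  return header_size + static_cast<size_t>(start_block) * block_size;
}

int main() {
  const size_t kAssumedHeader = 8192;  // illustrative header size
  std::cout << BlockFileOffset(0, 256, kAssumedHeader) << "\n";  // 8192
  std::cout << BlockFileOffset(3, 256, kAssumedHeader) << "\n";  // 8960
  return 0;
}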
| (...skipping 465 matching lines...) |
| 933 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1010 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 934 entry_.address().value(), node_.address().value()); | 1011 entry_.address().value(), node_.address().value()); |
| 935 | 1012 |
| 936 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1013 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 937 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1014 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 938 | 1015 |
| 939 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1016 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 940 } | 1017 } |
| 941 | 1018 |
| 942 } // namespace disk_cache | 1019 } // namespace disk_cache |