| OLD | NEW |
| 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/histogram.h" | 7 #include "base/histogram.h" |
| 8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
| 9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
| (...skipping 71 matching lines...) |
| 82 namespace disk_cache { | 82 namespace disk_cache { |
| 83 | 83 |
| 84 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) | 84 EntryImpl::EntryImpl(BackendImpl* backend, Addr address) |
| 85 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { | 85 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) { |
| 86 entry_.LazyInit(backend->File(address), address); | 86 entry_.LazyInit(backend->File(address), address); |
| 87 doomed_ = false; | 87 doomed_ = false; |
| 88 backend_ = backend; | 88 backend_ = backend; |
| 89 for (int i = 0; i < kNumStreams; i++) { | 89 for (int i = 0; i < kNumStreams; i++) { |
| 90 unreported_size_[i] = 0; | 90 unreported_size_[i] = 0; |
| 91 } | 91 } |
| 92 key_file_ = NULL; |
| 92 } | 93 } |
| 93 | 94 |
| 94 // When an entry is deleted from the cache, we clean up all the data associated | 95 // When an entry is deleted from the cache, we clean up all the data associated |
| 95 // with it for two reasons: to simplify the reuse of the block (we know that any | 96 // with it for two reasons: to simplify the reuse of the block (we know that any |
| 96 // unused block is filled with zeros), and to simplify the handling of write / | 97 // unused block is filled with zeros), and to simplify the handling of write / |
| 97 // read partial information from an entry (don't have to worry about returning | 98 // read partial information from an entry (don't have to worry about returning |
| 98 // data related to a previous cache entry because the range was not fully | 99 // data related to a previous cache entry because the range was not fully |
| 99 // written before). | 100 // written before). |
| 100 EntryImpl::~EntryImpl() { | 101 EntryImpl::~EntryImpl() { |
| 101 // Save the sparse info to disk before deleting this entry. | 102 // Save the sparse info to disk before deleting this entry. |
| (...skipping 21 matching lines...) |
| 123 node_.Store(); | 124 node_.Store(); |
| 124 } else if (node_.HasData() && node_.Data()->dirty) { | 125 } else if (node_.HasData() && node_.Data()->dirty) { |
| 125 node_.Data()->dirty = 0; | 126 node_.Data()->dirty = 0; |
| 126 node_.Store(); | 127 node_.Store(); |
| 127 } | 128 } |
| 128 } | 129 } |
| 129 | 130 |
| 130 backend_->CacheEntryDestroyed(entry_.address()); | 131 backend_->CacheEntryDestroyed(entry_.address()); |
| 131 } | 132 } |
| 132 | 133 |
| 133 void EntryImpl::DoomImpl() { | 134 void EntryImpl::Doom() { |
| 134 if (doomed_) | 135 if (doomed_) |
| 135 return; | 136 return; |
| 136 | 137 |
| 137 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | 138 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 138 backend_->InternalDoomEntry(this); | 139 backend_->InternalDoomEntry(this); |
| 139 } | 140 } |
| 140 | 141 |
| 141 void EntryImpl::Doom() { | |
| 142 backend_->background_queue()->DoomEntryImpl(this); | |
| 143 } | |
| 144 | |
| 145 void EntryImpl::Close() { | 142 void EntryImpl::Close() { |
| 146 backend_->background_queue()->CloseEntryImpl(this); | 143 Release(); |
| 147 } | 144 } |
| 148 | 145 |
| 149 std::string EntryImpl::GetKey() const { | 146 std::string EntryImpl::GetKey() const { |
| 150 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 147 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 151 if (entry->Data()->key_len <= kMaxInternalKeyLength) | 148 if (entry->Data()->key_len <= kMaxInternalKeyLength) |
| 152 return std::string(entry->Data()->key); | 149 return std::string(entry->Data()->key); |
| 153 | 150 |
| 154 // We keep a copy of the key so that we can always return it, even if the | |
| 155 // backend is disabled. | |
| 156 if (!key_.empty()) | |
| 157 return key_; | |
| 158 | |
| 159 Addr address(entry->Data()->long_key); | 151 Addr address(entry->Data()->long_key); |
| 160 DCHECK(address.is_initialized()); | 152 DCHECK(address.is_initialized()); |
| 161 size_t offset = 0; | 153 size_t offset = 0; |
| 162 if (address.is_block_file()) | 154 if (address.is_block_file()) |
| 163 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 155 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 164 | 156 |
| 165 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); | 157 if (!key_file_) { |
| 166 File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address, | 158 // We keep a copy of the file needed to access the key so that we can |
| 167 kKeyFileIndex); | 159 // always return this object's key, even if the backend is disabled. |
| 160 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); |
| 161 key_file_ = const_cast<EntryImpl*>(this)->GetBackingFile(address, |
| 162 kKeyFileIndex); |
| 163 } |
| 168 | 164 |
| 169 if (!key_file || | 165 std::string key; |
| 170 !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1), | 166 if (!key_file_ || |
| 171 entry->Data()->key_len + 1, offset)) | 167 !key_file_->Read(WriteInto(&key, entry->Data()->key_len + 1), |
| 172 key_.clear(); | 168 entry->Data()->key_len + 1, offset)) |
| 173 return key_; | 169 key.clear(); |
| 170 return key; |
| 174 } | 171 } |
| 175 | 172 |
| 176 Time EntryImpl::GetLastUsed() const { | 173 Time EntryImpl::GetLastUsed() const { |
| 177 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 174 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 178 return Time::FromInternalValue(node->Data()->last_used); | 175 return Time::FromInternalValue(node->Data()->last_used); |
| 179 } | 176 } |
| 180 | 177 |
| 181 Time EntryImpl::GetLastModified() const { | 178 Time EntryImpl::GetLastModified() const { |
| 182 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 179 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 183 return Time::FromInternalValue(node->Data()->last_modified); | 180 return Time::FromInternalValue(node->Data()->last_modified); |
| 184 } | 181 } |
| 185 | 182 |
| 186 int32 EntryImpl::GetDataSize(int index) const { | 183 int32 EntryImpl::GetDataSize(int index) const { |
| 187 if (index < 0 || index >= kNumStreams) | 184 if (index < 0 || index >= kNumStreams) |
| 188 return 0; | 185 return 0; |
| 189 | 186 |
| 190 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 187 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 191 return entry->Data()->data_size[index]; | 188 return entry->Data()->data_size[index]; |
| 192 } | 189 } |
| 193 | 190 |
| 194 int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf, | 191 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 195 int buf_len, CompletionCallback* callback) { | 192 net::CompletionCallback* completion_callback) { |
| 196 DCHECK(node_.Data()->dirty); | 193 DCHECK(node_.Data()->dirty); |
| 197 if (index < 0 || index >= kNumStreams) | 194 if (index < 0 || index >= kNumStreams) |
| 198 return net::ERR_INVALID_ARGUMENT; | 195 return net::ERR_INVALID_ARGUMENT; |
| 199 | 196 |
| 200 int entry_size = entry_.Data()->data_size[index]; | 197 int entry_size = entry_.Data()->data_size[index]; |
| 201 if (offset >= entry_size || offset < 0 || !buf_len) | 198 if (offset >= entry_size || offset < 0 || !buf_len) |
| 202 return 0; | 199 return 0; |
| 203 | 200 |
| 204 if (buf_len < 0) | 201 if (buf_len < 0) |
| 205 return net::ERR_INVALID_ARGUMENT; | 202 return net::ERR_INVALID_ARGUMENT; |
| (...skipping 23 matching lines...) |
| 229 File* file = GetBackingFile(address, index); | 226 File* file = GetBackingFile(address, index); |
| 230 if (!file) | 227 if (!file) |
| 231 return net::ERR_FAILED; | 228 return net::ERR_FAILED; |
| 232 | 229 |
| 233 size_t file_offset = offset; | 230 size_t file_offset = offset; |
| 234 if (address.is_block_file()) | 231 if (address.is_block_file()) |
| 235 file_offset += address.start_block() * address.BlockSize() + | 232 file_offset += address.start_block() * address.BlockSize() + |
| 236 kBlockHeaderSize; | 233 kBlockHeaderSize; |
| 237 | 234 |
| 238 SyncCallback* io_callback = NULL; | 235 SyncCallback* io_callback = NULL; |
| 239 if (callback) | 236 if (completion_callback) |
| 240 io_callback = new SyncCallback(this, buf, callback); | 237 io_callback = new SyncCallback(this, buf, completion_callback); |
| 241 | 238 |
| 242 bool completed; | 239 bool completed; |
| 243 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { | 240 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { |
| 244 if (io_callback) | 241 if (io_callback) |
| 245 io_callback->Discard(); | 242 io_callback->Discard(); |
| 246 return net::ERR_FAILED; | 243 return net::ERR_FAILED; |
| 247 } | 244 } |
| 248 | 245 |
| 249 if (io_callback && completed) | 246 if (io_callback && completed) |
| 250 io_callback->Discard(); | 247 io_callback->Discard(); |
| 251 | 248 |
| 252 ReportIOTime(kRead, start); | 249 ReportIOTime(kRead, start); |
| 253 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING; | 250 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; |
| 254 } | 251 } |
| 255 | 252 |
| 256 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, | 253 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, |
| 257 net::CompletionCallback* callback) { | 254 net::CompletionCallback* completion_callback, |
| 258 if (!callback) | 255 bool truncate) { |
| 259 return ReadDataImpl(index, offset, buf, buf_len, callback); | |
| 260 | |
| 261 DCHECK(node_.Data()->dirty); | |
| 262 if (index < 0 || index >= kNumStreams) | |
| 263 return net::ERR_INVALID_ARGUMENT; | |
| 264 | |
| 265 int entry_size = entry_.Data()->data_size[index]; | |
| 266 if (offset >= entry_size || offset < 0 || !buf_len) | |
| 267 return 0; | |
| 268 | |
| 269 if (buf_len < 0) | |
| 270 return net::ERR_INVALID_ARGUMENT; | |
| 271 | |
| 272 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len, | |
| 273 callback); | |
| 274 return net::ERR_IO_PENDING; | |
| 275 } | |
| 276 | |
| 277 int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf, | |
| 278 int buf_len, CompletionCallback* callback, | |
| 279 bool truncate) { | |
| 280 DCHECK(node_.Data()->dirty); | 256 DCHECK(node_.Data()->dirty); |
| 281 if (index < 0 || index >= kNumStreams) | 257 if (index < 0 || index >= kNumStreams) |
| 282 return net::ERR_INVALID_ARGUMENT; | 258 return net::ERR_INVALID_ARGUMENT; |
| 283 | 259 |
| 284 if (offset < 0 || buf_len < 0) | 260 if (offset < 0 || buf_len < 0) |
| 285 return net::ERR_INVALID_ARGUMENT; | 261 return net::ERR_INVALID_ARGUMENT; |
| 286 | 262 |
| 287 int max_file_size = backend_->MaxFileSize(); | 263 int max_file_size = backend_->MaxFileSize(); |
| 288 | 264 |
| 289 // offset or buf_len could be negative numbers. | 265 // offset or buf_len could be negative numbers. |
| 290 if (offset > max_file_size || buf_len > max_file_size || | 266 if (offset > max_file_size || buf_len > max_file_size || |
| 291 offset + buf_len > max_file_size) { | 267 offset + buf_len > max_file_size) { |
| 292 int size = offset + buf_len; | 268 int size = offset + buf_len; |
| 293 if (size <= max_file_size) | 269 if (size <= max_file_size) |
| 294 size = kint32max; | 270 size = kint32max; |
| 295 backend_->TooMuchStorageRequested(size); | 271 backend_->TooMuchStorageRequested(size); |
| 296 return net::ERR_FAILED; | 272 return net::ERR_FAILED; |
| 297 } | 273 } |
| 298 | 274 |
| 299 TimeTicks start = TimeTicks::Now(); | 275 TimeTicks start = TimeTicks::Now(); |
| (...skipping 49 matching lines...) |
| 349 kBlockHeaderSize; | 325 kBlockHeaderSize; |
| 350 } else if (truncate) { | 326 } else if (truncate) { |
| 351 if (!file->SetLength(offset + buf_len)) | 327 if (!file->SetLength(offset + buf_len)) |
| 352 return net::ERR_FAILED; | 328 return net::ERR_FAILED; |
| 353 } | 329 } |
| 354 | 330 |
| 355 if (!buf_len) | 331 if (!buf_len) |
| 356 return 0; | 332 return 0; |
| 357 | 333 |
| 358 SyncCallback* io_callback = NULL; | 334 SyncCallback* io_callback = NULL; |
| 359 if (callback) | 335 if (completion_callback) |
| 360 io_callback = new SyncCallback(this, buf, callback); | 336 io_callback = new SyncCallback(this, buf, completion_callback); |
| 361 | 337 |
| 362 bool completed; | 338 bool completed; |
| 363 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, | 339 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, |
| 364 &completed)) { | 340 &completed)) { |
| 365 if (io_callback) | 341 if (io_callback) |
| 366 io_callback->Discard(); | 342 io_callback->Discard(); |
| 367 return net::ERR_FAILED; | 343 return net::ERR_FAILED; |
| 368 } | 344 } |
| 369 | 345 |
| 370 if (io_callback && completed) | 346 if (io_callback && completed) |
| 371 io_callback->Discard(); | 347 io_callback->Discard(); |
| 372 | 348 |
| 373 ReportIOTime(kWrite, start); | 349 ReportIOTime(kWrite, start); |
| 374 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING; | 350 return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; |
| 375 } | 351 } |
| 376 | 352 |
| 377 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, | 353 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 378 CompletionCallback* callback, bool truncate) { | 354 net::CompletionCallback* completion_callback) { |
| 379 if (!callback) | |
| 380 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); | |
| 381 | |
| 382 DCHECK(node_.Data()->dirty); | |
| 383 if (index < 0 || index >= kNumStreams) | |
| 384 return net::ERR_INVALID_ARGUMENT; | |
| 385 | |
| 386 if (offset < 0 || buf_len < 0) | |
| 387 return net::ERR_INVALID_ARGUMENT; | |
| 388 | |
| 389 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len, | |
| 390 truncate, callback); | |
| 391 return net::ERR_IO_PENDING; | |
| 392 } | |
| 393 | |
| 394 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len, | |
| 395 CompletionCallback* callback) { | |
| 396 DCHECK(node_.Data()->dirty); | 355 DCHECK(node_.Data()->dirty); |
| 397 int result = InitSparseData(); | 356 int result = InitSparseData(); |
| 398 if (net::OK != result) | 357 if (net::OK != result) |
| 399 return result; | 358 return result; |
| 400 | 359 |
| 401 TimeTicks start = TimeTicks::Now(); | 360 TimeTicks start = TimeTicks::Now(); |
| 402 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 361 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 403 callback); | 362 completion_callback); |
| 404 ReportIOTime(kSparseRead, start); | 363 ReportIOTime(kSparseRead, start); |
| 405 return result; | 364 return result; |
| 406 } | 365 } |
| 407 | 366 |
| 408 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 367 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 409 net::CompletionCallback* callback) { | 368 net::CompletionCallback* completion_callback) { |
| 410 if (!callback) | |
| 411 return ReadSparseDataImpl(offset, buf, buf_len, callback); | |
| 412 | |
| 413 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len, | |
| 414 callback); | |
| 415 return net::ERR_IO_PENDING; | |
| 416 } | |
| 417 | |
| 418 int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf, | |
| 419 int buf_len, CompletionCallback* callback) { | |
| 420 DCHECK(node_.Data()->dirty); | 369 DCHECK(node_.Data()->dirty); |
| 421 int result = InitSparseData(); | 370 int result = InitSparseData(); |
| 422 if (net::OK != result) | 371 if (net::OK != result) |
| 423 return result; | 372 return result; |
| 424 | 373 |
| 425 TimeTicks start = TimeTicks::Now(); | 374 TimeTicks start = TimeTicks::Now(); |
| 426 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | 375 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 427 buf_len, callback); | 376 buf_len, completion_callback); |
| 428 ReportIOTime(kSparseWrite, start); | 377 ReportIOTime(kSparseWrite, start); |
| 429 return result; | 378 return result; |
| 430 } | 379 } |
| 431 | 380 |
| 432 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | |
| 433 net::CompletionCallback* callback) { | |
| 434 if (!callback) | |
| 435 return WriteSparseDataImpl(offset, buf, buf_len, callback); | |
| 436 | |
| 437 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len, | |
| 438 callback); | |
| 439 return net::ERR_IO_PENDING; | |
| 440 } | |
| 441 | |
| 442 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { | |
| 443 return GetAvailableRange(offset, len, start); | |
| 444 } | |
| 445 | |
| 446 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { | 381 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { |
| 447 int result = InitSparseData(); | 382 int result = InitSparseData(); |
| 448 if (net::OK != result) | 383 if (net::OK != result) |
| 449 return result; | 384 return result; |
| 450 | 385 |
| 451 return sparse_->GetAvailableRange(offset, len, start); | 386 return sparse_->GetAvailableRange(offset, len, start); |
| 452 } | 387 } |
| 453 | 388 |
| 454 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 389 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, |
| 455 CompletionCallback* callback) { | 390 CompletionCallback* callback) { |
| 456 backend_->background_queue()->GetAvailableRange(this, offset, len, start, | 391 return GetAvailableRange(offset, len, start); |
| 457 callback); | |
| 458 return net::ERR_IO_PENDING; | |
| 459 } | 392 } |
| 460 | 393 |
| 461 bool EntryImpl::CouldBeSparse() const { | 394 bool EntryImpl::CouldBeSparse() const { |
| 462 if (sparse_.get()) | 395 if (sparse_.get()) |
| 463 return true; | 396 return true; |
| 464 | 397 |
| 465 scoped_ptr<SparseControl> sparse; | 398 scoped_ptr<SparseControl> sparse; |
| 466 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); | 399 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); |
| 467 return sparse->CouldBeSparse(); | 400 return sparse->CouldBeSparse(); |
| 468 } | 401 } |
| 469 | 402 |
| 470 void EntryImpl::CancelSparseIO() { | 403 void EntryImpl::CancelSparseIO() { |
| 471 backend_->background_queue()->CancelSparseIO(this); | |
| 472 } | |
| 473 | |
| 474 void EntryImpl::CancelSparseIOImpl() { | |
| 475 if (!sparse_.get()) | 404 if (!sparse_.get()) |
| 476 return; | 405 return; |
| 477 | 406 |
| 478 sparse_->CancelIO(); | 407 sparse_->CancelIO(); |
| 479 } | 408 } |
| 480 | 409 |
| 481 int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) { | 410 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* completion_callback) { |
| 482 return sparse_->ReadyToUse(callback); | |
| 483 } | |
| 484 | |
| 485 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) { | |
| 486 if (!sparse_.get()) | 411 if (!sparse_.get()) |
| 487 return net::OK; | 412 return net::OK; |
| 488 | 413 |
| 489 backend_->background_queue()->ReadyForSparseIO(this, callback); | 414 return sparse_->ReadyToUse(completion_callback); |
| 490 return net::ERR_IO_PENDING; | |
| 491 } | 415 } |
| 492 | 416 |
| 493 // ------------------------------------------------------------------------ | 417 // ------------------------------------------------------------------------ |
| 494 | 418 |
| 495 uint32 EntryImpl::GetHash() { | 419 uint32 EntryImpl::GetHash() { |
| 496 return entry_.Data()->hash; | 420 return entry_.Data()->hash; |
| 497 } | 421 } |
| 498 | 422 |
| 499 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 423 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, |
| 500 uint32 hash) { | 424 uint32 hash) { |
| (...skipping 10 matching lines...) Expand all Loading... |
| 511 | 435 |
| 512 entry_store->hash = hash; | 436 entry_store->hash = hash; |
| 513 entry_store->creation_time = Time::Now().ToInternalValue(); | 437 entry_store->creation_time = Time::Now().ToInternalValue(); |
| 514 entry_store->key_len = static_cast<int32>(key.size()); | 438 entry_store->key_len = static_cast<int32>(key.size()); |
| 515 if (entry_store->key_len > kMaxInternalKeyLength) { | 439 if (entry_store->key_len > kMaxInternalKeyLength) { |
| 516 Addr address(0); | 440 Addr address(0); |
| 517 if (!CreateBlock(entry_store->key_len + 1, &address)) | 441 if (!CreateBlock(entry_store->key_len + 1, &address)) |
| 518 return false; | 442 return false; |
| 519 | 443 |
| 520 entry_store->long_key = address.value(); | 444 entry_store->long_key = address.value(); |
| 521 File* key_file = GetBackingFile(address, kKeyFileIndex); | 445 key_file_ = GetBackingFile(address, kKeyFileIndex); |
| 522 key_ = key; | |
| 523 | 446 |
| 524 size_t offset = 0; | 447 size_t offset = 0; |
| 525 if (address.is_block_file()) | 448 if (address.is_block_file()) |
| 526 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 449 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 527 | 450 |
| 528 if (!key_file || !key_file->Write(key.data(), key.size(), offset)) { | 451 if (!key_file_ || !key_file_->Write(key.data(), key.size(), offset)) { |
| 529 DeleteData(address, kKeyFileIndex); | 452 DeleteData(address, kKeyFileIndex); |
| 530 return false; | 453 return false; |
| 531 } | 454 } |
| 532 | 455 |
| 533 if (address.is_separate_file()) | 456 if (address.is_separate_file()) |
| 534 key_file->SetLength(key.size() + 1); | 457 key_file_->SetLength(key.size() + 1); |
| 535 } else { | 458 } else { |
| 536 memcpy(entry_store->key, key.data(), key.size()); | 459 memcpy(entry_store->key, key.data(), key.size()); |
| 537 entry_store->key[key.size()] = '\0'; | 460 entry_store->key[key.size()] = '\0'; |
| 538 } | 461 } |
| 539 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); | 462 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); |
| 540 node->dirty = backend_->GetCurrentEntryId(); | 463 node->dirty = backend_->GetCurrentEntryId(); |
| 541 Log("Create Entry "); | 464 Log("Create Entry "); |
| 542 return true; | 465 return true; |
| 543 } | 466 } |
| 544 | 467 |
| (...skipping 465 matching lines...) |
| 1010 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 933 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 1011 entry_.address().value(), node_.address().value()); | 934 entry_.address().value(), node_.address().value()); |
| 1012 | 935 |
| 1013 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 936 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 1014 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 937 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 1015 | 938 |
| 1016 Trace(" doomed: %d 0x%x", doomed_, dirty); | 939 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 1017 } | 940 } |
| 1018 | 941 |
| 1019 } // namespace disk_cache | 942 } // namespace disk_cache |
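The read and write paths touched by this change all follow the same convention: the call returns the byte count (or net::OK) when the underlying File operation completes inline, and net::ERR_IO_PENDING when the supplied net::CompletionCallback will be run later. As a minimal, hypothetical caller-side sketch (not part of this patch; the helper name, buffer size, and exact header paths are assumptions for a Chromium tree of this era), a read of stream 0 that handles both completion modes could look like this:

    // Hypothetical usage sketch, not from the patch above. Assumes the
    // net::CompletionCallback-based Entry interface shown in this file.
    #include "base/ref_counted.h"
    #include "net/base/io_buffer.h"
    #include "net/base/net_errors.h"
    #include "net/disk_cache/disk_cache.h"

    void ReadStreamZero(disk_cache::Entry* entry,
                        net::CompletionCallback* callback) {
      const int kBufSize = 4096;  // arbitrary size for the sketch
      scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));

      // ReadData returns the number of bytes read when the operation
      // completes synchronously, net::ERR_IO_PENDING when |callback| will be
      // run later, or a negative net error code on failure
      // (e.g. net::ERR_INVALID_ARGUMENT, net::ERR_FAILED).
      int rv = entry->ReadData(0, 0, buf.get(), kBufSize, callback);
      if (rv == net::ERR_IO_PENDING)
        return;  // |callback| is invoked once the read finishes.
      if (rv < 0)
        return;  // synchronous failure
      // rv bytes are now available in buf->data().
    }

WriteData, ReadSparseData, and WriteSparseData in the new code use the same return convention, so a caller can treat them uniformly.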