OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/blockfile/sparse_control.h" | 5 #include "net/disk_cache/blockfile/sparse_control.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/format_macros.h" | 8 #include "base/format_macros.h" |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "base/message_loop/message_loop.h" | 10 #include "base/message_loop/message_loop.h" |
(...skipping 24 matching lines...) |
35 const int kMaxEntrySize = 0x100000; | 35 const int kMaxEntrySize = 0x100000; |
36 | 36 |
37 // The size of each data block (tracked by the child allocation bitmap). | 37 // The size of each data block (tracked by the child allocation bitmap). |
38 const int kBlockSize = 1024; | 38 const int kBlockSize = 1024; |
39 | 39 |
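For context, a quick sanity check of these two constants (an illustrative sketch, not code from this file; it assumes kMaxEntrySize is the span covered by one child entry, which the offset_ >> 20 arithmetic later in the file suggests): each child tracks its data in kBlockSize chunks, so its allocation bitmap needs 0x100000 / 1024 = 1024 bits, i.e. 128 bytes.

  // Illustrative arithmetic only; the constants mirror the ones defined above.
  constexpr int kMaxEntrySizeSketch = 0x100000;  // 1 MB covered per child entry.
  constexpr int kBlockSizeSketch = 1024;         // 1 KB per tracked block.

  constexpr int kBlocksPerChild = kMaxEntrySizeSketch / kBlockSizeSketch;
  constexpr int kChildBitmapBytes = kBlocksPerChild / 8;
  static_assert(kBlocksPerChild == 1024, "one bit per 1 KB block");
  static_assert(kChildBitmapBytes == 128, "child bitmap fits in 128 bytes");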
40 // Returns the name of a child entry given the base_name and signature of the | 40 // Returns the name of a child entry given the base_name and signature of the |
41 // parent and the child_id. | 41 // parent and the child_id. |
42 // If the entry is called entry_name, child entries will be named something | 42 // If the entry is called entry_name, child entries will be named something |
43 // like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the | 43 // like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the |
44 // number of the particular child. | 44 // number of the particular child. |
45 std::string GenerateChildName(const std::string& base_name, int64 signature, | 45 std::string GenerateChildName(const std::string& base_name, |
| 46 int64 signature, |
46 int64 child_id) { | 47 int64 child_id) { |
47 return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(), | 48 return base::StringPrintf( |
48 signature, child_id); | 49 "Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(), signature, child_id); |
49 } | 50 } |
50 | 51 |
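As an illustration of the naming scheme described above, a minimal standalone sketch (the inputs are invented for the example; the real code path is GenerateChildName() as shown in the diff):

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>
  #include <string>

  // Builds "Range_<base_name>:<signature>:<child_id>" with both numbers in
  // hex, mirroring the format string used by GenerateChildName() above.
  std::string ExampleChildName(const std::string& base_name,
                               int64_t signature,
                               int64_t child_id) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), "Range_%s:%" PRIx64 ":%" PRIx64,
                  base_name.c_str(), signature, child_id);
    return std::string(buf);
  }

  // ExampleChildName("http://example.com/", 0x1a2b, 3)
  //   returns "Range_http://example.com/:1a2b:3"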
51 // This class deletes the children of a sparse entry. | 52 // This class deletes the children of a sparse entry. |
52 class ChildrenDeleter | 53 class ChildrenDeleter : public base::RefCounted<ChildrenDeleter>, |
53 : public base::RefCounted<ChildrenDeleter>, | 54 public disk_cache::FileIOCallback { |
54 public disk_cache::FileIOCallback { | |
55 public: | 55 public: |
56 ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name) | 56 ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name) |
57 : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {} | 57 : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {} |
58 | 58 |
59 virtual void OnFileIOComplete(int bytes_copied) OVERRIDE; | 59 virtual void OnFileIOComplete(int bytes_copied) OVERRIDE; |
60 | 60 |
61 // Two ways of deleting the children: if we have the children map, use Start() | 61 // Two ways of deleting the children: if we have the children map, use Start() |
 62 // directly; otherwise, pass the data address to ReadData(). | 62 // directly; otherwise, pass the data address to ReadData(). |
63 void Start(char* buffer, int len); | 63 void Start(char* buffer, int len); |
64 void ReadData(disk_cache::Addr address, int len); | 64 void ReadData(disk_cache::Addr address, int len); |
(...skipping 169 matching lines...) |
234 bool SparseControl::CouldBeSparse() const { | 234 bool SparseControl::CouldBeSparse() const { |
235 DCHECK(!init_); | 235 DCHECK(!init_); |
236 | 236 |
237 if (entry_->GetDataSize(kSparseData)) | 237 if (entry_->GetDataSize(kSparseData)) |
238 return false; | 238 return false; |
239 | 239 |
240 // We don't verify the data, just see if it could be there. | 240 // We don't verify the data, just see if it could be there. |
241 return (entry_->GetDataSize(kSparseIndex) != 0); | 241 return (entry_->GetDataSize(kSparseIndex) != 0); |
242 } | 242 } |
243 | 243 |
244 int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf, | 244 int SparseControl::StartIO(SparseOperation op, |
245 int buf_len, const CompletionCallback& callback) { | 245 int64 offset, |
| 246 net::IOBuffer* buf, |
| 247 int buf_len, |
| 248 const CompletionCallback& callback) { |
246 DCHECK(init_); | 249 DCHECK(init_); |
247 // We don't support simultaneous IO for sparse data. | 250 // We don't support simultaneous IO for sparse data. |
248 if (operation_ != kNoOperation) | 251 if (operation_ != kNoOperation) |
249 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 252 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
250 | 253 |
251 if (offset < 0 || buf_len < 0) | 254 if (offset < 0 || buf_len < 0) |
252 return net::ERR_INVALID_ARGUMENT; | 255 return net::ERR_INVALID_ARGUMENT; |
253 | 256 |
254 // We only support up to 64 GB. | 257 // We only support up to 64 GB. |
255 if (static_cast<uint64>(offset) + static_cast<unsigned int>(buf_len) >= | 258 if (static_cast<uint64>(offset) + static_cast<unsigned int>(buf_len) >= |
(...skipping 39 matching lines...) |
295 | 298 |
296 int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) { | 299 int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) { |
297 DCHECK(init_); | 300 DCHECK(init_); |
298 // We don't support simultaneous IO for sparse data. | 301 // We don't support simultaneous IO for sparse data. |
299 if (operation_ != kNoOperation) | 302 if (operation_ != kNoOperation) |
300 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 303 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
301 | 304 |
302 DCHECK(start); | 305 DCHECK(start); |
303 | 306 |
304 range_found_ = false; | 307 range_found_ = false; |
305 int result = StartIO( | 308 int result = |
306 kGetRangeOperation, offset, NULL, len, CompletionCallback()); | 309 StartIO(kGetRangeOperation, offset, NULL, len, CompletionCallback()); |
307 if (range_found_) { | 310 if (range_found_) { |
308 *start = offset_; | 311 *start = offset_; |
309 return result; | 312 return result; |
310 } | 313 } |
311 | 314 |
312 // This is a failure. We want to return a valid start value in any case. | 315 // This is a failure. We want to return a valid start value in any case. |
313 *start = offset; | 316 *start = offset; |
314 return result < 0 ? result : 0; // Don't mask error codes to the caller. | 317 return result < 0 ? result : 0; // Don't mask error codes to the caller. |
315 } | 318 } |
316 | 319 |
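A hedged sketch of how a caller might interpret the pair produced by GetAvailableRange(); ReportRange and its messages are invented for illustration and are not part of the disk_cache API:

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>

  // |result| is either a negative net error code or the number of contiguous
  // bytes found, and |start| is where those bytes begin; as the code above
  // guarantees, |start| holds a valid offset even when nothing is found.
  void ReportRange(int64_t requested_offset, int result, int64_t start) {
    if (result < 0)
      std::printf("error %d\n", result);
    else if (result == 0)
      std::printf("no data in the range starting at %" PRId64 "\n",
                  requested_offset);
    else
      std::printf("%d bytes available at offset %" PRId64 "\n", result, start);
  }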
(...skipping 29 matching lines...) |
346 | 349 |
347 char* buffer; | 350 char* buffer; |
348 Addr address; | 351 Addr address; |
349 entry->GetData(kSparseIndex, &buffer, &address); | 352 entry->GetData(kSparseIndex, &buffer, &address); |
350 if (!buffer && !address.is_initialized()) | 353 if (!buffer && !address.is_initialized()) |
351 return; | 354 return; |
352 | 355 |
353 entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN); | 356 entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN); |
354 | 357 |
355 DCHECK(entry->backend_.get()); | 358 DCHECK(entry->backend_.get()); |
356 ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_.get(), | 359 ChildrenDeleter* deleter = |
357 entry->GetKey()); | 360 new ChildrenDeleter(entry->backend_.get(), entry->GetKey()); |
 358 // The object will self-destruct when finished. | 361 // The object will self-destruct when finished. |
359 deleter->AddRef(); | 362 deleter->AddRef(); |
360 | 363 |
361 if (buffer) { | 364 if (buffer) { |
362 base::MessageLoop::current()->PostTask( | 365 base::MessageLoop::current()->PostTask( |
363 FROM_HERE, | 366 FROM_HERE, |
364 base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len)); | 367 base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len)); |
365 } else { | 368 } else { |
366 base::MessageLoop::current()->PostTask( | 369 base::MessageLoop::current()->PostTask( |
367 FROM_HERE, | 370 FROM_HERE, |
(...skipping 10 matching lines...) |
378 memset(&sparse_header_, 0, sizeof(sparse_header_)); | 381 memset(&sparse_header_, 0, sizeof(sparse_header_)); |
379 sparse_header_.signature = Time::Now().ToInternalValue(); | 382 sparse_header_.signature = Time::Now().ToInternalValue(); |
380 sparse_header_.magic = kIndexMagic; | 383 sparse_header_.magic = kIndexMagic; |
381 sparse_header_.parent_key_len = entry_->GetKey().size(); | 384 sparse_header_.parent_key_len = entry_->GetKey().size(); |
382 children_map_.Resize(kNumSparseBits, true); | 385 children_map_.Resize(kNumSparseBits, true); |
383 | 386 |
384 // Save the header. The bitmap is saved in the destructor. | 387 // Save the header. The bitmap is saved in the destructor. |
385 scoped_refptr<net::IOBuffer> buf( | 388 scoped_refptr<net::IOBuffer> buf( |
386 new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_))); | 389 new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_))); |
387 | 390 |
388 int rv = entry_->WriteData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_), | 391 int rv = entry_->WriteData(kSparseIndex, |
389 CompletionCallback(), false); | 392 0, |
| 393 buf.get(), |
| 394 sizeof(sparse_header_), |
| 395 CompletionCallback(), |
| 396 false); |
390 if (rv != sizeof(sparse_header_)) { | 397 if (rv != sizeof(sparse_header_)) { |
391 DLOG(ERROR) << "Unable to save sparse_header_"; | 398 DLOG(ERROR) << "Unable to save sparse_header_"; |
392 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 399 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
393 } | 400 } |
394 | 401 |
395 entry_->SetEntryFlags(PARENT_ENTRY); | 402 entry_->SetEntryFlags(PARENT_ENTRY); |
396 return net::OK; | 403 return net::OK; |
397 } | 404 } |
398 | 405 |
399 // We are opening an entry from disk. Make sure that our control data is there. | 406 // We are opening an entry from disk. Make sure that our control data is there. |
400 int SparseControl::OpenSparseEntry(int data_len) { | 407 int SparseControl::OpenSparseEntry(int data_len) { |
401 if (data_len < static_cast<int>(sizeof(SparseData))) | 408 if (data_len < static_cast<int>(sizeof(SparseData))) |
402 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 409 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
403 | 410 |
404 if (entry_->GetDataSize(kSparseData)) | 411 if (entry_->GetDataSize(kSparseData)) |
405 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 412 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
406 | 413 |
407 if (!(PARENT_ENTRY & entry_->GetEntryFlags())) | 414 if (!(PARENT_ENTRY & entry_->GetEntryFlags())) |
408 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 415 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
409 | 416 |
 410 // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB. | 417 // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB. |
411 int map_len = data_len - sizeof(sparse_header_); | 418 int map_len = data_len - sizeof(sparse_header_); |
412 if (map_len > kMaxMapSize || map_len % 4) | 419 if (map_len > kMaxMapSize || map_len % 4) |
413 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 420 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
414 | 421 |
415 scoped_refptr<net::IOBuffer> buf( | 422 scoped_refptr<net::IOBuffer> buf( |
416 new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_))); | 423 new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_))); |
417 | 424 |
418 // Read header. | 425 // Read header. |
419 int rv = entry_->ReadData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_), | 426 int rv = entry_->ReadData( |
420 CompletionCallback()); | 427 kSparseIndex, 0, buf.get(), sizeof(sparse_header_), CompletionCallback()); |
421 if (rv != static_cast<int>(sizeof(sparse_header_))) | 428 if (rv != static_cast<int>(sizeof(sparse_header_))) |
422 return net::ERR_CACHE_READ_FAILURE; | 429 return net::ERR_CACHE_READ_FAILURE; |
423 | 430 |
424 // The real validation should be performed by the caller. This is just to | 431 // The real validation should be performed by the caller. This is just to |
 425 // double-check. | 432 // double-check. |
426 if (sparse_header_.magic != kIndexMagic || | 433 if (sparse_header_.magic != kIndexMagic || |
427 sparse_header_.parent_key_len != | 434 sparse_header_.parent_key_len != |
428 static_cast<int>(entry_->GetKey().size())) | 435 static_cast<int>(entry_->GetKey().size())) |
429 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 436 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
430 | 437 |
431 // Read the actual bitmap. | 438 // Read the actual bitmap. |
432 buf = new net::IOBuffer(map_len); | 439 buf = new net::IOBuffer(map_len); |
433 rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf.get(), | 440 rv = entry_->ReadData(kSparseIndex, |
434 map_len, CompletionCallback()); | 441 sizeof(sparse_header_), |
| 442 buf.get(), |
| 443 map_len, |
| 444 CompletionCallback()); |
435 if (rv != map_len) | 445 if (rv != map_len) |
436 return net::ERR_CACHE_READ_FAILURE; | 446 return net::ERR_CACHE_READ_FAILURE; |
437 | 447 |
438 // Grow the bitmap to the current size and copy the bits. | 448 // Grow the bitmap to the current size and copy the bits. |
439 children_map_.Resize(map_len * 8, false); | 449 children_map_.Resize(map_len * 8, false); |
440 children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len); | 450 children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len); |
441 return net::OK; | 451 return net::OK; |
442 } | 452 } |
443 | 453 |
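A back-of-the-envelope check of the "8 KB gives us offsets up to 64 GB" comment above (a sketch under the assumption that each bit of the parent's children map corresponds to one 1 MB child entry):

  #include <cstdint>

  // One bit per child entry, 1 MB per child: an 8 KB map addresses 64 GB.
  constexpr int64_t kMapBytesSketch = 8 * 1024;
  constexpr int64_t kBitsPerByte = 8;
  constexpr int64_t kChildSpanBytes = 0x100000;  // 1 MB per child entry.
  constexpr int64_t kMaxSparseBytes =
      kMapBytesSketch * kBitsPerByte * kChildSpanBytes;
  static_assert(kMaxSparseBytes == (int64_t{64} << 30), "expected 64 GB");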
444 bool SparseControl::OpenChild() { | 454 bool SparseControl::OpenChild() { |
(...skipping 13 matching lines...) |
458 | 468 |
459 if (!entry_->backend_.get()) | 469 if (!entry_->backend_.get()) |
460 return false; | 470 return false; |
461 | 471 |
462 child_ = entry_->backend_->OpenEntryImpl(key); | 472 child_ = entry_->backend_->OpenEntryImpl(key); |
463 if (!child_) | 473 if (!child_) |
464 return ContinueWithoutChild(key); | 474 return ContinueWithoutChild(key); |
465 | 475 |
466 EntryImpl* child = static_cast<EntryImpl*>(child_); | 476 EntryImpl* child = static_cast<EntryImpl*>(child_); |
467 if (!(CHILD_ENTRY & child->GetEntryFlags()) || | 477 if (!(CHILD_ENTRY & child->GetEntryFlags()) || |
468 child->GetDataSize(kSparseIndex) < | 478 child->GetDataSize(kSparseIndex) < static_cast<int>(sizeof(child_data_))) |
469 static_cast<int>(sizeof(child_data_))) | |
470 return KillChildAndContinue(key, false); | 479 return KillChildAndContinue(key, false); |
471 | 480 |
472 scoped_refptr<net::WrappedIOBuffer> buf( | 481 scoped_refptr<net::WrappedIOBuffer> buf( |
473 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); | 482 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); |
474 | 483 |
475 // Read signature. | 484 // Read signature. |
476 int rv = child_->ReadData(kSparseIndex, 0, buf.get(), sizeof(child_data_), | 485 int rv = child_->ReadData( |
477 CompletionCallback()); | 486 kSparseIndex, 0, buf.get(), sizeof(child_data_), CompletionCallback()); |
478 if (rv != sizeof(child_data_)) | 487 if (rv != sizeof(child_data_)) |
479 return KillChildAndContinue(key, true); // This is a fatal failure. | 488 return KillChildAndContinue(key, true); // This is a fatal failure. |
480 | 489 |
481 if (child_data_.header.signature != sparse_header_.signature || | 490 if (child_data_.header.signature != sparse_header_.signature || |
482 child_data_.header.magic != kIndexMagic) | 491 child_data_.header.magic != kIndexMagic) |
483 return KillChildAndContinue(key, false); | 492 return KillChildAndContinue(key, false); |
484 | 493 |
485 if (child_data_.header.last_block_len < 0 || | 494 if (child_data_.header.last_block_len < 0 || |
486 child_data_.header.last_block_len > kBlockSize) { | 495 child_data_.header.last_block_len > kBlockSize) { |
487 // Make sure these values are always within range. | 496 // Make sure these values are always within range. |
488 child_data_.header.last_block_len = 0; | 497 child_data_.header.last_block_len = 0; |
489 child_data_.header.last_block = -1; | 498 child_data_.header.last_block = -1; |
490 } | 499 } |
491 | 500 |
492 return true; | 501 return true; |
493 } | 502 } |
494 | 503 |
495 void SparseControl::CloseChild() { | 504 void SparseControl::CloseChild() { |
496 scoped_refptr<net::WrappedIOBuffer> buf( | 505 scoped_refptr<net::WrappedIOBuffer> buf( |
497 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); | 506 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); |
498 | 507 |
499 // Save the allocation bitmap before closing the child entry. | 508 // Save the allocation bitmap before closing the child entry. |
500 int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_), | 509 int rv = child_->WriteData(kSparseIndex, |
501 CompletionCallback(), false); | 510 0, |
| 511 buf.get(), |
| 512 sizeof(child_data_), |
| 513 CompletionCallback(), |
| 514 false); |
502 if (rv != sizeof(child_data_)) | 515 if (rv != sizeof(child_data_)) |
503 DLOG(ERROR) << "Failed to save child data"; | 516 DLOG(ERROR) << "Failed to save child data"; |
504 child_->Release(); | 517 child_->Release(); |
505 child_ = NULL; | 518 child_ = NULL; |
506 } | 519 } |
507 | 520 |
508 std::string SparseControl::GenerateChildKey() { | 521 std::string SparseControl::GenerateChildKey() { |
509 return GenerateChildName(entry_->GetKey(), sparse_header_.signature, | 522 return GenerateChildName( |
510 offset_ >> 20); | 523 entry_->GetKey(), sparse_header_.signature, offset_ >> 20); |
511 } | 524 } |
512 | 525 |
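The offset_ >> 20 above maps a byte offset in the sparse stream to a child id, since each child covers kMaxEntrySize = 0x100000 bytes; a small illustrative sketch (the helper names are hypothetical):

  #include <cstdint>

  // Each child spans 1 MB (0x100000 bytes), so the child id is the offset with
  // the low 20 bits dropped, and the position inside the child is those bits.
  constexpr int64_t kChildSpan = 0x100000;

  int64_t ChildIdForOffset(int64_t offset) { return offset >> 20; }
  int64_t OffsetWithinChild(int64_t offset) { return offset & (kChildSpan - 1); }

  // Example: offset 0x2A12345 -> child id 0x2A, offset 0x12345 inside the child.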
513 // We are deleting the child because something went wrong. | 526 // We are deleting the child because something went wrong. |
514 bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) { | 527 bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) { |
515 SetChildBit(false); | 528 SetChildBit(false); |
516 child_->DoomImpl(); | 529 child_->DoomImpl(); |
517 child_->Release(); | 530 child_->Release(); |
518 child_ = NULL; | 531 child_ = NULL; |
519 if (fatal) { | 532 if (fatal) { |
520 result_ = net::ERR_CACHE_READ_FAILURE; | 533 result_ = net::ERR_CACHE_READ_FAILURE; |
(...skipping 39 matching lines...) |
560 children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true); | 573 children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true); |
561 | 574 |
562 children_map_.Set(child_bit, value); | 575 children_map_.Set(child_bit, value); |
563 } | 576 } |
564 | 577 |
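The Resize call above grows the children map in whole 32-bit words; a hedged sketch of that rounding, where RequiredArraySizeSketch is a local stand-in assumed to behave like Bitmap::RequiredArraySize (the number of uint32 words needed for a given bit count):

  // Stand-in for Bitmap::RequiredArraySize(): 32-bit words needed for num_bits.
  int RequiredArraySizeSketch(int num_bits) {
    return (num_bits + 31) / 32;
  }

  // Resizing to RequiredArraySizeSketch(child_bit + 1) * 32 bits rounds the
  // requested bit index up to the next multiple of 32, e.g. child_bit = 70
  // -> 3 words -> 96 bits.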
565 void SparseControl::WriteSparseData() { | 578 void SparseControl::WriteSparseData() { |
566 scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer( | 579 scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer( |
567 reinterpret_cast<const char*>(children_map_.GetMap()))); | 580 reinterpret_cast<const char*>(children_map_.GetMap()))); |
568 | 581 |
569 int len = children_map_.ArraySize() * 4; | 582 int len = children_map_.ArraySize() * 4; |
570 int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf.get(), | 583 int rv = entry_->WriteData(kSparseIndex, |
571 len, CompletionCallback(), false); | 584 sizeof(sparse_header_), |
| 585 buf.get(), |
| 586 len, |
| 587 CompletionCallback(), |
| 588 false); |
572 if (rv != len) { | 589 if (rv != len) { |
573 DLOG(ERROR) << "Unable to save sparse map"; | 590 DLOG(ERROR) << "Unable to save sparse map"; |
574 } | 591 } |
575 } | 592 } |
576 | 593 |
577 bool SparseControl::VerifyRange() { | 594 bool SparseControl::VerifyRange() { |
578 DCHECK_GE(result_, 0); | 595 DCHECK_GE(result_, 0); |
579 | 596 |
580 child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1); | 597 child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1); |
581 child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_); | 598 child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_); |
(...skipping 82 matching lines...) |
664 // We know the real type of child_. | 681 // We know the real type of child_. |
665 EntryImpl* child = static_cast<EntryImpl*>(child_); | 682 EntryImpl* child = static_cast<EntryImpl*>(child_); |
666 child->SetEntryFlags(CHILD_ENTRY); | 683 child->SetEntryFlags(CHILD_ENTRY); |
667 | 684 |
668 memset(&child_data_, 0, sizeof(child_data_)); | 685 memset(&child_data_, 0, sizeof(child_data_)); |
669 child_data_.header = sparse_header_; | 686 child_data_.header = sparse_header_; |
670 | 687 |
671 scoped_refptr<net::WrappedIOBuffer> buf( | 688 scoped_refptr<net::WrappedIOBuffer> buf( |
672 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); | 689 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_))); |
673 | 690 |
674 int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_), | 691 int rv = child_->WriteData(kSparseIndex, |
675 CompletionCallback(), false); | 692 0, |
| 693 buf.get(), |
| 694 sizeof(child_data_), |
| 695 CompletionCallback(), |
| 696 false); |
676 if (rv != sizeof(child_data_)) | 697 if (rv != sizeof(child_data_)) |
677 DLOG(ERROR) << "Failed to save child data"; | 698 DLOG(ERROR) << "Failed to save child data"; |
678 SetChildBit(true); | 699 SetChildBit(true); |
679 } | 700 } |
680 | 701 |
681 void SparseControl::DoChildrenIO() { | 702 void SparseControl::DoChildrenIO() { |
682 while (DoChildIO()) continue; | 703 while (DoChildIO()) |
| 704 continue; |
683 | 705 |
684 // Range operations are finished synchronously, often without setting | 706 // Range operations are finished synchronously, often without setting |
685 // |finished_| to true. | 707 // |finished_| to true. |
686 if (kGetRangeOperation == operation_ && | 708 if (kGetRangeOperation == operation_ && entry_->net_log().IsLogging()) { |
687 entry_->net_log().IsLogging()) { | |
688 entry_->net_log().EndEvent( | 709 entry_->net_log().EndEvent( |
689 net::NetLog::TYPE_SPARSE_GET_RANGE, | 710 net::NetLog::TYPE_SPARSE_GET_RANGE, |
690 CreateNetLogGetAvailableRangeResultCallback(offset_, result_)); | 711 CreateNetLogGetAvailableRangeResultCallback(offset_, result_)); |
691 } | 712 } |
692 if (finished_) { | 713 if (finished_) { |
693 if (kGetRangeOperation != operation_ && | 714 if (kGetRangeOperation != operation_ && entry_->net_log().IsLogging()) { |
694 entry_->net_log().IsLogging()) { | |
695 entry_->net_log().EndEvent(GetSparseEventType(operation_)); | 715 entry_->net_log().EndEvent(GetSparseEventType(operation_)); |
696 } | 716 } |
697 if (pending_) | 717 if (pending_) |
698 DoUserCallback(); // Don't touch this object after this point. | 718 DoUserCallback(); // Don't touch this object after this point. |
699 } | 719 } |
700 } | 720 } |
701 | 721 |
702 bool SparseControl::DoChildIO() { | 722 bool SparseControl::DoChildIO() { |
703 finished_ = true; | 723 finished_ = true; |
704 if (!buf_len_ || result_ < 0) | 724 if (!buf_len_ || result_ < 0) |
(...skipping 15 matching lines...) |
720 | 740 |
721 int rv = 0; | 741 int rv = 0; |
722 switch (operation_) { | 742 switch (operation_) { |
723 case kReadOperation: | 743 case kReadOperation: |
724 if (entry_->net_log().IsLogging()) { | 744 if (entry_->net_log().IsLogging()) { |
725 entry_->net_log().BeginEvent( | 745 entry_->net_log().BeginEvent( |
726 net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, | 746 net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, |
727 CreateNetLogSparseReadWriteCallback(child_->net_log().source(), | 747 CreateNetLogSparseReadWriteCallback(child_->net_log().source(), |
728 child_len_)); | 748 child_len_)); |
729 } | 749 } |
730 rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_.get(), | 750 rv = child_->ReadDataImpl( |
731 child_len_, callback); | 751 kSparseData, child_offset_, user_buf_.get(), child_len_, callback); |
732 break; | 752 break; |
733 case kWriteOperation: | 753 case kWriteOperation: |
734 if (entry_->net_log().IsLogging()) { | 754 if (entry_->net_log().IsLogging()) { |
735 entry_->net_log().BeginEvent( | 755 entry_->net_log().BeginEvent( |
736 net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, | 756 net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, |
737 CreateNetLogSparseReadWriteCallback(child_->net_log().source(), | 757 CreateNetLogSparseReadWriteCallback(child_->net_log().source(), |
738 child_len_)); | 758 child_len_)); |
739 } | 759 } |
740 rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_.get(), | 760 rv = child_->WriteDataImpl(kSparseData, |
741 child_len_, callback, false); | 761 child_offset_, |
| 762 user_buf_.get(), |
| 763 child_len_, |
| 764 callback, |
| 765 false); |
742 break; | 766 break; |
743 case kGetRangeOperation: | 767 case kGetRangeOperation: |
744 rv = DoGetAvailableRange(); | 768 rv = DoGetAvailableRange(); |
745 break; | 769 break; |
746 default: | 770 default: |
747 NOTREACHED(); | 771 NOTREACHED(); |
748 } | 772 } |
749 | 773 |
750 if (rv == net::ERR_IO_PENDING) { | 774 if (rv == net::ERR_IO_PENDING) { |
751 if (!pending_) { | 775 if (!pending_) { |
(...skipping 125 matching lines...) |
877 CompletionCallback cb = abort_callbacks_[i]; | 901 CompletionCallback cb = abort_callbacks_[i]; |
878 if (i == abort_callbacks_.size() - 1) | 902 if (i == abort_callbacks_.size() - 1) |
879 abort_callbacks_.clear(); | 903 abort_callbacks_.clear(); |
880 | 904 |
881 entry_->Release(); // Don't touch object after this line. | 905 entry_->Release(); // Don't touch object after this line. |
882 cb.Run(net::OK); | 906 cb.Run(net::OK); |
883 } | 907 } |
884 } | 908 } |
885 | 909 |
886 } // namespace disk_cache | 910 } // namespace disk_cache |