| Index: net/disk_cache/simple/simple_entry_impl.cc |
| diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc |
| index 4d8363a56d359942f1c03b547c43806c0fc404a3..3824eacde0213ea03386e293a8b169510fac6dd0 100644 |
| --- a/net/disk_cache/simple/simple_entry_impl.cc |
| +++ b/net/disk_cache/simple/simple_entry_impl.cc |
| @@ -15,6 +15,7 @@ |
| #include "base/logging.h" |
| #include "base/message_loop_proxy.h" |
| #include "base/metrics/histogram.h" |
| +#include "base/task_runner.h" |
| #include "base/threading/worker_pool.h" |
| #include "base/time.h" |
| #include "net/base/io_buffer.h" |
| @@ -25,6 +26,13 @@ |
| #include "net/disk_cache/simple/simple_util.h" |
| #include "third_party/zlib/zlib.h" |
| +using base::Closure; |
| +using base::FilePath; |
| +using base::MessageLoopProxy; |
| +using base::TaskRunner; |
| +using base::Time; |
| +using base::WorkerPool; |
| + |
| namespace { |
| // Used in histograms, please only add entries at the end. |
| @@ -71,28 +79,8 @@ void CallCompletionCallback(const net::CompletionCallback& callback, |
| namespace disk_cache { |
| -using base::Closure; |
| -using base::FilePath; |
| -using base::MessageLoopProxy; |
| -using base::Time; |
| -using base::WorkerPool; |
| - |
| -// A helper class to insure that RunNextOperationIfNeeded() is called when |
| -// exiting the current stack frame. |
| -class SimpleEntryImpl::ScopedOperationRunner { |
| - public: |
| - explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) { |
| - } |
| - |
| - ~ScopedOperationRunner() { |
| - entry_->RunNextOperationIfNeeded(); |
| - } |
| - |
| - private: |
| - SimpleEntryImpl* const entry_; |
| -}; |
| - |
| SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend, |
| + TaskRunner* task_runner, |
| const FilePath& path, |
| const std::string& key, |
| const uint64 entry_hash) |
| @@ -102,9 +90,8 @@ SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend, |
| entry_hash_(entry_hash), |
| last_used_(Time::Now()), |
| last_modified_(last_used_), |
| - open_count_(0), |
| - state_(STATE_UNINITIALIZED), |
| - synchronous_entry_(NULL) { |
| + synchronous_entry_(NULL), |
| + task_runner_(task_runner) { |
| DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key)); |
| COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), |
| arrays_should_be_same_size); |
| @@ -139,32 +126,12 @@ int SimpleEntryImpl::OpenEntry(Entry** out_entry, |
| if (open_entry_index_enum == INDEX_MISS) |
| return net::ERR_FAILED; |
| - pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal, |
| - this, callback, out_entry)); |
| - RunNextOperationIfNeeded(); |
| - return net::ERR_IO_PENDING; |
| + return OpenEntryInternal(callback, out_entry); |
|
gavinp, 2013/05/21 13:18:00:
Really this should be rolled in here, but the idea

pasko, 2013/05/21 16:00:01:
Thanks.
|
| } |
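The review thread above suggests rolling OpenEntryInternal() into OpenEntry(); in the meantime the structural change is the same one repeated throughout this file: the pending-operations queue and RunNextOperationIfNeeded() go away, and each public method calls its *Internal counterpart directly and forwards the net error code it returns. A rough stand-alone sketch of the before/after shape (plain C++ with hypothetical names, not the real class):

#include <functional>
#include <iostream>
#include <queue>

enum { OK = 0, ERR_IO_PENDING = -1 };

struct Entry {
  std::queue<std::function<void()> > pending_operations;

  // Old shape: queue a closure, pump the queue, always report IO-pending.
  int OpenOld() {
    pending_operations.push([this]() { OpenInternal(); });
    RunNextOperationIfNeeded();
    return ERR_IO_PENDING;
  }

  // New shape: call the internal step now and return whatever it says.
  int OpenNew() { return OpenInternal(); }

  int OpenInternal() {
    std::cout << "open the entry\n";
    return ERR_IO_PENDING;  // the real code posts blocking work to a TaskRunner
  }

  void RunNextOperationIfNeeded() {
    if (pending_operations.empty())
      return;
    std::function<void()> operation = pending_operations.front();
    pending_operations.pop();
    operation();
  }
};

int main() {
  Entry entry;
  entry.OpenOld();  // queued, then run by RunNextOperationIfNeeded()
  entry.OpenNew();  // runs immediately
  return 0;
}

In the new shape a caller can see net::OK or a real error immediately when no blocking work is needed, instead of always getting ERR_IO_PENDING plus a posted callback.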
| int SimpleEntryImpl::CreateEntry(Entry** out_entry, |
| const CompletionCallback& callback) { |
| DCHECK(backend_); |
| - int ret_value = net::ERR_FAILED; |
| - if (state_ == STATE_UNINITIALIZED && |
| - pending_operations_.size() == 0) { |
| - ReturnEntryToCaller(out_entry); |
| - // We can do optimistic Create. |
| - pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal, |
| - this, |
| - CompletionCallback(), |
| - static_cast<Entry**>(NULL))); |
| - ret_value = net::OK; |
| - } else { |
| - pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal, |
| - this, |
| - callback, |
| - out_entry)); |
| - ret_value = net::ERR_IO_PENDING; |
| - } |
| // We insert the entry in the index before creating the entry files in the |
| // SimpleSynchronousEntry, because this way the worst scenario is when we |
| @@ -174,8 +141,7 @@ int SimpleEntryImpl::CreateEntry(Entry** out_entry, |
| if (backend_) |
| backend_->index()->Insert(key_); |
| - RunNextOperationIfNeeded(); |
| - return ret_value; |
| + return CreateEntryInternal(callback, out_entry); |
| } |
| int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) { |
| @@ -185,7 +151,7 @@ int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) { |
| entry_hash_, result.get()); |
| Closure reply = base::Bind(&CallCompletionCallback, |
| callback, base::Passed(&result)); |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| return net::ERR_IO_PENDING; |
| } |
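This hunk shows the substitution the rest of the CL repeats: instead of WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true), blocking file work is posted through the base::TaskRunner* that the backend now passes to the SimpleEntryImpl constructor. A minimal self-contained sketch of that injection shape (plain C++ stand-ins with hypothetical names; the real PostTaskAndReply also takes the FROM_HERE location, omitted here):

#include <functional>
#include <iostream>

typedef std::function<void()> Closure;

// Stand-in for base::TaskRunner: the entry only needs PostTaskAndReply().
class TaskRunner {
 public:
  virtual ~TaskRunner() {}
  virtual void PostTaskAndReply(const Closure& task, const Closure& reply) = 0;
};

// Trivial runner that executes everything inline. A real backend would hand
// the entry a runner backed by a worker pool; tests can substitute their own.
class InlineTaskRunner : public TaskRunner {
 public:
  virtual void PostTaskAndReply(const Closure& task, const Closure& reply) {
    task();
    reply();
  }
};

// Stand-in for SimpleEntryImpl: blocking file work goes through the
// injected runner instead of a global worker pool.
class Entry {
 public:
  explicit Entry(TaskRunner* task_runner) : task_runner_(task_runner) {}

  void Doom() {
    task_runner_->PostTaskAndReply(
        []() { std::cout << "delete entry files (blocking)\n"; },
        []() { std::cout << "run completion callback on the IO thread\n"; });
  }

 private:
  TaskRunner* const task_runner_;
};

int main() {
  InlineTaskRunner runner;
  Entry entry(&runner);
  entry.Doom();
  return 0;
}

Injecting the runner from the backend presumably lets one cache keep all of its entries on the same pool and lets tests substitute a runner they control.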
| @@ -196,18 +162,9 @@ void SimpleEntryImpl::Doom() { |
| void SimpleEntryImpl::Close() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - DCHECK_LT(0, open_count_); |
| - |
| - if (--open_count_ > 0) { |
| - DCHECK(!HasOneRef()); |
| - Release(); // Balanced in ReturnEntryToCaller(). |
| - return; |
| - } |
| - pending_operations_.push(base::Bind(&SimpleEntryImpl::CloseInternal, this)); |
| - DCHECK(!HasOneRef()); |
| Release(); // Balanced in ReturnEntryToCaller(). |
| - RunNextOperationIfNeeded(); |
| + return; |
| } |
| std::string SimpleEntryImpl::GetKey() const { |
| @@ -242,24 +199,8 @@ int SimpleEntryImpl::ReadData(int stream_index, |
| RecordReadResult(READ_RESULT_INVALID_ARGUMENT); |
| return net::ERR_INVALID_ARGUMENT; |
| } |
| - if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || |
| - offset < 0 || !buf_len)) { |
| - RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN); |
| - return 0; |
| - } |
| - // TODO(felipeg): Optimization: Add support for truly parallel read |
| - // operations. |
| - pending_operations_.push( |
| - base::Bind(&SimpleEntryImpl::ReadDataInternal, |
| - this, |
| - stream_index, |
| - offset, |
| - make_scoped_refptr(buf), |
| - buf_len, |
| - callback)); |
| - RunNextOperationIfNeeded(); |
| - return net::ERR_IO_PENDING; |
| + return ReadDataInternal(stream_index, offset, buf, buf_len, callback); |
| } |
| int SimpleEntryImpl::WriteData(int stream_index, |
| @@ -279,29 +220,8 @@ int SimpleEntryImpl::WriteData(int stream_index, |
| return net::ERR_FAILED; |
| } |
| - int ret_value = net::ERR_FAILED; |
| - if (state_ == STATE_READY && pending_operations_.size() == 0) { |
| - // We can only do optimistic Write if there is no pending operations, so |
| - // that we are sure that the next call to RunNextOperationIfNeeded will |
| - // actually run the write operation that sets the stream size. It also |
| - // prevents from previous possibly-conflicting writes that could be stacked |
| - // in the |pending_operations_|. We could optimize this for when we have |
| - // only read operations enqueued. |
| - pending_operations_.push( |
| - base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index, |
| - offset, make_scoped_refptr(buf), buf_len, |
| - CompletionCallback(), truncate)); |
| - ret_value = buf_len; |
| - } else { |
| - pending_operations_.push( |
| - base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index, |
| - offset, make_scoped_refptr(buf), buf_len, callback, |
| - truncate)); |
| - ret_value = net::ERR_IO_PENDING; |
| - } |
| - |
| - RunNextOperationIfNeeded(); |
| - return ret_value; |
| + return WriteDataInternal(stream_index, offset, buf, buf_len, callback, |
| + truncate); |
| } |
| int SimpleEntryImpl::ReadSparseData(int64 offset, |
| @@ -353,16 +273,15 @@ int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
| return net::ERR_FAILED; |
| } |
| +// private: |
| + |
| SimpleEntryImpl::~SimpleEntryImpl() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - DCHECK_EQ(0U, pending_operations_.size()); |
| - DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); |
| - DCHECK(!synchronous_entry_); |
| + CloseInternal(); |
| RemoveSelfFromBackend(); |
| } |
| void SimpleEntryImpl::MakeUninitialized() { |
| - state_ = STATE_UNINITIALIZED; |
| std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_)); |
| std::memset(crc32s_, 0, sizeof(crc32s_)); |
| std::memset(have_written_, 0, sizeof(have_written_)); |
| @@ -371,7 +290,6 @@ void SimpleEntryImpl::MakeUninitialized() { |
| void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) { |
| DCHECK(out_entry); |
| - ++open_count_; |
| AddRef(); // Balanced in Close() |
| *out_entry = this; |
| } |
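With open_count_ removed (see the Close() hunk above), entry lifetime rests purely on this AddRef()/Release() pairing: every Entry* handed out by ReturnEntryToCaller() owns one reference, and the matching Close() drops it. A toy illustration of that balance (plain C++ counter; the real class is reference-counted through base/, so destruction happens automatically when the last reference goes away):

#include <iostream>

struct Entry {
  explicit Entry(int initial_refs) : refs(initial_refs) {}

  void AddRef() { ++refs; }
  void Release() {
    if (--refs == 0)
      std::cout << "last reference dropped; the destructor would run here\n";
  }

  // ReturnEntryToCaller(): each handle handed out owns one reference.
  Entry* ReturnEntryToCaller() {
    AddRef();  // Balanced in Close().
    return this;
  }

  // Close(): releases exactly the reference taken above.
  void Close() { Release(); }

  int refs;
};

int main() {
  Entry entry(1);                               // the backend's own reference
  Entry* handle = entry.ReturnEntryToCaller();  // refs == 2
  handle->Close();                              // refs == 1
  entry.Release();                              // refs == 0
  return 0;
}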
| @@ -379,7 +297,7 @@ void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) { |
| void SimpleEntryImpl::RemoveSelfFromBackend() { |
| if (!backend_) |
| return; |
| - backend_->OnDeactivated(this); |
| + backend_->DeactivateEntry(entry_hash()); |
| backend_.reset(); |
| } |
| @@ -390,62 +308,33 @@ void SimpleEntryImpl::MarkAsDoomed() { |
| RemoveSelfFromBackend(); |
| } |
| -void SimpleEntryImpl::RunNextOperationIfNeeded() { |
| - DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending", |
| - pending_operations_.size(), 0, 100, 20); |
| - if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) { |
| - base::Closure operation = pending_operations_.front(); |
| - pending_operations_.pop(); |
| - operation.Run(); |
| - // |this| may have been deleted. |
| - } |
| -} |
| - |
| -void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback, |
| +int SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback, |
| Entry** out_entry) { |
| - ScopedOperationRunner operation_runner(this); |
| - if (state_ == STATE_READY) { |
| + if (synchronous_entry_) { |
| ReturnEntryToCaller(out_entry); |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, |
| - net::OK)); |
| - return; |
| - } else if (state_ == STATE_FAILURE) { |
| - if (!callback.is_null()) { |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| - callback, net::ERR_FAILED)); |
| - } |
| - return; |
| + return net::OK; |
| } |
| - DCHECK_EQ(STATE_UNINITIALIZED, state_); |
| - state_ = STATE_IO_PENDING; |
| + |
| + synchronous_entry_ = new SimpleSynchronousEntry(path_, key_, entry_hash_); |
| + |
| const base::TimeTicks start_time = base::TimeTicks::Now(); |
| - typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry; |
| - scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry( |
| - new PointerToSimpleSynchronousEntry()); |
| scoped_ptr<int> result(new int()); |
| - Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_, key_, |
| - entry_hash_, sync_entry.get(), result.get()); |
| + Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, |
| + base::Unretained(synchronous_entry_), path_, key_, |
| + entry_hash_, result.get()); |
| Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this, |
| - callback, start_time, base::Passed(&sync_entry), |
| - base::Passed(&result), out_entry); |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + callback, start_time, base::Passed(&result), |
| + out_entry); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| + return net::ERR_IO_PENDING; |
| } |
| -void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback, |
| +int SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback, |
| Entry** out_entry) { |
| - ScopedOperationRunner operation_runner(this); |
| - if (state_ != STATE_UNINITIALIZED) { |
| - // There is already an active normal entry. |
| - if (!callback.is_null()) { |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| - callback, net::ERR_FAILED)); |
| - } |
| - return; |
| - } |
| - DCHECK_EQ(STATE_UNINITIALIZED, state_); |
| + if (synchronous_entry_) |
| + return net::ERR_FAILED; |
| - state_ = STATE_IO_PENDING; |
| + synchronous_entry_ = new SimpleSynchronousEntry(path_, key_, entry_hash_); |
| // Since we don't know the correct values for |last_used_| and |
| // |last_modified_| yet, we make this approximation. |
| @@ -456,83 +345,69 @@ void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback, |
| have_written_[i] = true; |
| const base::TimeTicks start_time = base::TimeTicks::Now(); |
| - typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry; |
| - scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry( |
| - new PointerToSimpleSynchronousEntry()); |
| scoped_ptr<int> result(new int()); |
| - Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_, |
| - entry_hash_, sync_entry.get(), result.get()); |
| + Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, |
| + base::Unretained(synchronous_entry_), path_, key_, |
| + entry_hash_, result.get()); |
| Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this, |
| - callback, start_time, base::Passed(&sync_entry), |
| - base::Passed(&result), out_entry); |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + CompletionCallback(), start_time, |
| + base::Passed(&result), static_cast<Entry**>(NULL)); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| + ReturnEntryToCaller(out_entry); |
| + |
| + return net::OK; |
| } |
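CreateEntryInternal() keeps the optimistic-create behaviour that previously lived in CreateEntry(): the caller receives the entry and net::OK synchronously, the blocking file creation is posted afterwards, and the reply is bound with an empty CompletionCallback and a NULL out-entry because there is nothing left to report on success. A compressed sketch of that contract (hypothetical names; inline execution stands in for the posted task):

#include <cstddef>
#include <functional>
#include <iostream>

typedef std::function<void()> Closure;

// Inline stand-in for posting to the injected TaskRunner.
void PostTaskAndReply(const Closure& task, const Closure& reply) {
  task();
  reply();
}

struct Entry {
  bool created;

  // Optimistic create: hand the entry back and return success right away;
  // the posted reply has no caller callback and no out-entry to fill in,
  // so it only needs to handle the failure case (doom the entry).
  int CreateEntry(Entry** out_entry) {
    *out_entry = this;  // ReturnEntryToCaller()
    PostTaskAndReply(
        [this]() { created = true; },  // blocking file creation
        [this]() {
          if (!created)
            std::cout << "creation failed: doom the entry, no callback to run\n";
        });
    return 0;  // net::OK, even though the files may not exist on disk yet
  }
};

int main() {
  Entry entry = {false};
  Entry* handle = NULL;
  std::cout << entry.CreateEntry(&handle) << "\n";  // prints 0
  return 0;
}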
| void SimpleEntryImpl::CloseInternal() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| + if (!synchronous_entry_) |
| + return; |
| + |
| typedef SimpleSynchronousEntry::CRCRecord CRCRecord; |
| scoped_ptr<std::vector<CRCRecord> > |
| crc32s_to_write(new std::vector<CRCRecord>()); |
| - if (state_ == STATE_READY) { |
| - DCHECK(synchronous_entry_); |
| - state_ = STATE_IO_PENDING; |
| - for (int i = 0; i < kSimpleEntryFileCount; ++i) { |
| - if (have_written_[i]) { |
| - if (GetDataSize(i) == crc32s_end_offset_[i]) { |
| - int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; |
| - crc32s_to_write->push_back(CRCRecord(i, true, crc)); |
| - } else { |
| - crc32s_to_write->push_back(CRCRecord(i, false, 0)); |
| - } |
| + for (int i = 0; i < kSimpleEntryFileCount; ++i) { |
| + if (have_written_[i]) { |
| + if (GetDataSize(i) == crc32s_end_offset_[i]) { |
| + int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; |
| + crc32s_to_write->push_back(CRCRecord(i, true, crc)); |
| + } else { |
| + crc32s_to_write->push_back(CRCRecord(i, false, 0)); |
| } |
| } |
| - } else { |
| - DCHECK_EQ(STATE_FAILURE, state_); |
| } |
| - if (synchronous_entry_) { |
| - Closure task = base::Bind(&SimpleSynchronousEntry::Close, |
| - base::Unretained(synchronous_entry_), |
| - base::Passed(&crc32s_to_write)); |
| - Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); |
| - synchronous_entry_ = NULL; |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + Closure task = base::Bind(&SimpleSynchronousEntry::Close, |
| + base::Unretained(synchronous_entry_), |
| + base::Passed(&crc32s_to_write)); |
| + if (!backend_) { |
| + task_runner_->PostTask(FROM_HERE, task); |
| } else { |
| - synchronous_entry_ = NULL; |
| - CloseOperationComplete(); |
| + Closure reply = base::Bind(&SimpleBackendImpl::ClosedEntry, backend_, |
| + entry_hash_); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| } |
| + synchronous_entry_ = NULL; |
| } |
| -void SimpleEntryImpl::ReadDataInternal(int stream_index, |
| +int SimpleEntryImpl::ReadDataInternal(int stream_index, |
| int offset, |
| net::IOBuffer* buf, |
| int buf_len, |
| const CompletionCallback& callback) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - ScopedOperationRunner operation_runner(this); |
| - if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) { |
| - if (!callback.is_null()) { |
| - RecordReadResult(READ_RESULT_BAD_STATE); |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| - callback, net::ERR_FAILED)); |
| - } |
| - return; |
| + if (!synchronous_entry_) { |
| + RecordReadResult(READ_RESULT_BAD_STATE); |
| + return net::ERR_FAILED; |
| } |
| - DCHECK_EQ(STATE_READY, state_); |
| if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) { |
| RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN); |
| - // If there is nothing to read, we bail out before setting state_ to |
| - // STATE_IO_PENDING. |
| - if (!callback.is_null()) |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| - callback, 0)); |
| - return; |
| + return 0; |
| } |
| buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); |
| - state_ = STATE_IO_PENDING; |
| if (backend_) |
| backend_->index()->UseIfExists(key_); |
| @@ -545,30 +420,21 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index, |
| Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this, |
| stream_index, offset, callback, |
| base::Passed(&read_crc32), base::Passed(&result)); |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| + return net::ERR_IO_PENDING; |
| } |
| -void SimpleEntryImpl::WriteDataInternal(int stream_index, |
| +int SimpleEntryImpl::WriteDataInternal(int stream_index, |
| int offset, |
| net::IOBuffer* buf, |
| int buf_len, |
| const CompletionCallback& callback, |
| bool truncate) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - ScopedOperationRunner operation_runner(this); |
| - if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) { |
| + if (!synchronous_entry_) { |
| RecordWriteResult(WRITE_RESULT_BAD_STATE); |
| - if (!callback.is_null()) { |
| - // We need to posttask so that we don't go in a loop when we call the |
| - // callback directly. |
| - MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| - callback, net::ERR_FAILED)); |
| - } |
| - // |this| may be destroyed after return here. |
| - return; |
| + return net::ERR_FAILED; |
| } |
| - DCHECK_EQ(STATE_READY, state_); |
| - state_ = STATE_IO_PENDING; |
| if (backend_) |
| backend_->index()->UseIfExists(key_); |
| // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) |
| @@ -607,23 +473,22 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index, |
| buf_len, truncate, result.get()); |
| Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, this, |
| stream_index, callback, base::Passed(&result)); |
| - WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true); |
| + task_runner_->PostTaskAndReply(FROM_HERE, task, reply); |
| + return net::ERR_IO_PENDING; |
| } |
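The comment in WriteDataInternal() about incrementally computing the CRC over [0 .. offset + buf_len) leans on zlib's crc32() being resumable: start from the empty-stream value, then fold in each write that begins exactly at the current end offset. A small self-contained check of that property (requires zlib; the buffers and names are illustrative only):

#include <cstring>
#include <iostream>
#include "zlib.h"

int main() {
  const char part1[] = "hello ";
  const char part2[] = "world";

  // Running CRC: seed with the CRC of the empty stream, then fold in each
  // consecutive write.
  uLong running = crc32(0L, Z_NULL, 0);
  running = crc32(running, reinterpret_cast<const Bytef*>(part1),
                  static_cast<uInt>(std::strlen(part1)));
  running = crc32(running, reinterpret_cast<const Bytef*>(part2),
                  static_cast<uInt>(std::strlen(part2)));

  // One-shot CRC over the concatenated data gives the same answer.
  const char whole[] = "hello world";
  uLong one_shot = crc32(crc32(0L, Z_NULL, 0),
                         reinterpret_cast<const Bytef*>(whole),
                         static_cast<uInt>(std::strlen(whole)));

  std::cout << std::hex << running << " == " << one_shot << "\n";
  return 0;
}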
| void SimpleEntryImpl::CreationOperationComplete( |
| const CompletionCallback& completion_callback, |
| const base::TimeTicks& start_time, |
| - scoped_ptr<SimpleSynchronousEntry*> in_sync_entry, |
| scoped_ptr<int> in_result, |
| Entry** out_entry) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - DCHECK_EQ(state_, STATE_IO_PENDING); |
| - DCHECK(in_sync_entry); |
| DCHECK(in_result); |
| - ScopedOperationRunner operation_runner(this); |
| UMA_HISTOGRAM_BOOLEAN( |
| "SimpleCache.EntryCreationResult", *in_result == net::OK); |
| if (*in_result != net::OK) { |
| + delete synchronous_entry_; |
| + synchronous_entry_ = NULL; |
| if (*in_result != net::ERR_FILE_EXISTS) |
| MarkAsDoomed(); |
| if (!completion_callback.is_null()) { |
| @@ -631,7 +496,6 @@ void SimpleEntryImpl::CreationOperationComplete( |
| completion_callback, net::ERR_FAILED)); |
| } |
| MakeUninitialized(); |
| - state_ = STATE_FAILURE; |
| return; |
| } |
| // If out_entry is NULL, it means we already called ReturnEntryToCaller from |
| @@ -639,8 +503,6 @@ void SimpleEntryImpl::CreationOperationComplete( |
| if (out_entry) |
| ReturnEntryToCaller(out_entry); |
| - state_ = STATE_READY; |
| - synchronous_entry_ = *in_sync_entry; |
| SetSynchronousData(); |
| UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime", |
| (base::TimeTicks::Now() - start_time)); |
| @@ -657,12 +519,9 @@ void SimpleEntryImpl::EntryOperationComplete( |
| scoped_ptr<int> result) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| DCHECK(synchronous_entry_); |
| - DCHECK_EQ(STATE_IO_PENDING, state_); |
| DCHECK(result); |
| - state_ = STATE_READY; |
| if (*result < 0) { |
| MarkAsDoomed(); |
|
pasko, 2013/05/21 16:00:01:
synchronous_entry_ = NULL?
synchronous_entry_ seem
|
| - state_ = STATE_FAILURE; |
| crc32s_end_offset_[stream_index] = 0; |
| } else { |
| SetSynchronousData(); |
| @@ -672,7 +531,6 @@ void SimpleEntryImpl::EntryOperationComplete( |
| MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( |
| completion_callback, *result)); |
| } |
| - RunNextOperationIfNeeded(); |
| } |
| void SimpleEntryImpl::ReadOperationComplete( |
| @@ -683,7 +541,6 @@ void SimpleEntryImpl::ReadOperationComplete( |
| scoped_ptr<int> result) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| DCHECK(synchronous_entry_); |
| - DCHECK_EQ(STATE_IO_PENDING, state_); |
| DCHECK(read_crc32); |
| DCHECK(result); |
| @@ -742,7 +599,6 @@ void SimpleEntryImpl::ChecksumOperationComplete( |
| scoped_ptr<int> result) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| DCHECK(synchronous_entry_); |
| - DCHECK_EQ(STATE_IO_PENDING, state_); |
| DCHECK(result); |
| if (*result == net::OK) { |
| *result = orig_result; |
| @@ -756,18 +612,9 @@ void SimpleEntryImpl::ChecksumOperationComplete( |
| EntryOperationComplete(stream_index, completion_callback, result.Pass()); |
| } |
| -void SimpleEntryImpl::CloseOperationComplete() { |
| - DCHECK(!synchronous_entry_); |
| - DCHECK_EQ(0, open_count_); |
| - DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_); |
| - MakeUninitialized(); |
| - RunNextOperationIfNeeded(); |
| -} |
| - |
| void SimpleEntryImpl::SetSynchronousData() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| DCHECK(synchronous_entry_); |
| - DCHECK_EQ(STATE_READY, state_); |
| // TODO(felipeg): These copies to avoid data races are not optimal. While |
| // adding an IO thread index (for fast misses etc...), we can store this data |
| // in that structure. This also solves problems with last_used() on ext4 |