Index: net/disk_cache/simple/simple_entry_impl.cc |
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc |
index 1917a88715091ac1a37be9dae3b8fd104b6f4edf..7d2de7a76a99a767740c95f445d202980ef289fd 100644 |
--- a/net/disk_cache/simple/simple_entry_impl.cc |
+++ b/net/disk_cache/simple/simple_entry_impl.cc |
@@ -88,6 +88,7 @@ int SimpleEntryImpl::DoomEntry(WeakPtr<SimpleIndex> index, |
void SimpleEntryImpl::Doom() { |
DCHECK(io_thread_checker_.CalledOnValidThread()); |
+ DCHECK(!operation_running_); |
#if defined(OS_POSIX) |
// This call to static SimpleEntryImpl::DoomEntry() will just erase the |
// underlying files. On POSIX, this is fine; the files are still open on the |
@@ -101,12 +102,22 @@ void SimpleEntryImpl::Doom() { |
void SimpleEntryImpl::Close() { |
DCHECK(io_thread_checker_.CalledOnValidThread()); |
- if (!synchronous_entry_in_use_by_worker_) { |
- WorkerPool::PostTask(FROM_HERE, |
- base::Bind(&SimpleSynchronousEntry::Close, |
- base::Unretained(synchronous_entry_)), |
- true); |
+ if (operation_running_) { |
gavinp
2013/04/17 11:04:36
I think if you rebase on top of https://codereview
felipeg
2013/04/17 11:51:22
If I am going to land before you, I think I should
gavinp
2013/04/17 13:13:45
Agreed. And whoever wins the race dodges the merge
|
+  // Postpone the close operation. |
+  // Push the close operation to the end of the line. This way we run all |
+  // pending operations before we are able to close. |
+ operations_.push( |
+ base::Bind(&SimpleEntryImpl::Close, |
+ weak_ptr_factory_.GetWeakPtr())); |
+ return; |
} |
+ DCHECK(operations_.size() == 0); |
+ DCHECK(!operation_running_); |
+ |
+ WorkerPool::PostTask(FROM_HERE, |
+ base::Bind(&SimpleSynchronousEntry::Close, |
+ base::Unretained(synchronous_entry_)), |
+ true); |
// Entry::Close() is expected to release this entry. See disk_cache.h for |
// details. |
delete this; |
@@ -138,15 +149,32 @@ int SimpleEntryImpl::ReadData(int index, |
int buf_len, |
const CompletionCallback& callback) { |
DCHECK(io_thread_checker_.CalledOnValidThread()); |
- // TODO(gavinp): Add support for overlapping reads. The net::HttpCache does |
- // make overlapping read requests when multiple transactions access the same |
- // entry as read only. This might make calling SimpleSynchronousEntry::Close() |
- // correctly more tricky (see SimpleEntryImpl::EntryOperationComplete). |
- if (synchronous_entry_in_use_by_worker_) { |
- NOTIMPLEMENTED(); |
- CHECK(false); |
- } |
- synchronous_entry_in_use_by_worker_ = true; |
+ if (index < 0 || index >= kSimpleEntryFileCount || buf_len < 0) |
+ return net::ERR_INVALID_ARGUMENT; |
+ if (offset >= data_size_[index] || offset < 0 || !buf_len) |
+ return 0; |
+ // TODO(felipeg): Optimization: Add support for truly parallel read |
+ // operations. |
+ operations_.push( |
+ base::Bind(&SimpleEntryImpl::ReadDataInternal, |
+ weak_ptr_factory_.GetWeakPtr(), |
+ index, |
+ offset, |
+ make_scoped_refptr(buf), |
+ buf_len, |
+ callback)); |
+ RunNextOperationIfNeeded(); |
+ return net::ERR_IO_PENDING; |
+} |
+ |
+void SimpleEntryImpl::ReadDataInternal(int index, |
gavinp
2013/04/17 11:04:36
Method ordering: this should go to the end of the
felipeg
2013/04/17 11:51:22
Done.
|
+ int offset, |
+ scoped_refptr<net::IOBuffer> buf, |
+ int buf_len, |
+ const CompletionCallback& callback) { |
+ DCHECK(io_thread_checker_.CalledOnValidThread()); |
+ DCHECK(!operation_running_); |
+ operation_running_ = true; |
if (index_) |
index_->UseIfExists(key_); |
SynchronousOperationCallback sync_operation_callback = |
@@ -156,10 +184,9 @@ int SimpleEntryImpl::ReadData(int index, |
WorkerPool::PostTask(FROM_HERE, |
base::Bind(&SimpleSynchronousEntry::ReadData, |
base::Unretained(synchronous_entry_), |
- index, offset, make_scoped_refptr(buf), |
+ index, offset, buf, |
buf_len, sync_operation_callback), |
true); |
- return net::ERR_IO_PENDING; |
} |
int SimpleEntryImpl::WriteData(int index, |
@@ -169,13 +196,41 @@ int SimpleEntryImpl::WriteData(int index, |
const CompletionCallback& callback, |
bool truncate) { |
DCHECK(io_thread_checker_.CalledOnValidThread()); |
- if (synchronous_entry_in_use_by_worker_) { |
- NOTIMPLEMENTED(); |
- CHECK(false); |
- } |
- synchronous_entry_in_use_by_worker_ = true; |
+ if (index < 0 || index >= kSimpleEntryFileCount || offset < 0 || buf_len < 0) |
+ return net::ERR_INVALID_ARGUMENT; |
+ |
+ operations_.push( |
+ base::Bind(&SimpleEntryImpl::WriteDataInternal, |
+ weak_ptr_factory_.GetWeakPtr(), |
+ index, |
+ offset, |
+ make_scoped_refptr(buf), |
+ buf_len, |
+ callback, |
+ truncate)); |
+ RunNextOperationIfNeeded(); |
+ |
+ // TODO(felipeg): Optimization: Add support for optimistic writes, quickly |
+ // returning net::OK here. |
+ return net::ERR_IO_PENDING; |
+} |
+ |
+void SimpleEntryImpl::WriteDataInternal(int index, |
+ int offset, |
+ scoped_refptr<net::IOBuffer> buf, |
+ int buf_len, |
+ const CompletionCallback& callback, |
+ bool truncate) { |
+ DCHECK(io_thread_checker_.CalledOnValidThread()); |
+ DCHECK(!operation_running_); |
+ operation_running_ = true; |
if (index_) |
index_->UseIfExists(key_); |
+ |
+ last_used_ = base::Time::Now(); |
+ last_modified_ = base::Time::Now(); |
+ data_size_[index] = buf_len; |
+ |
SynchronousOperationCallback sync_operation_callback = |
base::Bind(&SimpleEntryImpl::EntryOperationComplete, |
index_, callback, weak_ptr_factory_.GetWeakPtr(), |
@@ -183,10 +238,9 @@ int SimpleEntryImpl::WriteData(int index, |
WorkerPool::PostTask(FROM_HERE, |
base::Bind(&SimpleSynchronousEntry::WriteData, |
base::Unretained(synchronous_entry_), |
- index, offset, make_scoped_refptr(buf), |
+ index, offset, buf, |
buf_len, sync_operation_callback, truncate), |
true); |
- return net::ERR_IO_PENDING; |
} |
int SimpleEntryImpl::ReadSparseData(int64 offset, |
@@ -245,7 +299,7 @@ SimpleEntryImpl::SimpleEntryImpl( |
path_(synchronous_entry->path()), |
key_(synchronous_entry->key()), |
synchronous_entry_(synchronous_entry), |
- synchronous_entry_in_use_by_worker_(false), |
+ operation_running_(false), |
index_(index) { |
DCHECK(synchronous_entry); |
SetSynchronousData(); |
@@ -291,9 +345,10 @@ void SimpleEntryImpl::EntryOperationComplete( |
} |
if (entry) { |
- DCHECK(entry->synchronous_entry_in_use_by_worker_); |
- entry->synchronous_entry_in_use_by_worker_ = false; |
+ DCHECK(entry->operation_running_); |
+ entry->operation_running_ = false; |
entry->SetSynchronousData(); |
+ entry->RunNextOperationIfNeeded(); |
} else { |
// |entry| must have had Close() called while this operation was in flight. |
// Since the simple cache now only supports one pending entry operation in |
@@ -306,9 +361,19 @@ void SimpleEntryImpl::EntryOperationComplete( |
completion_callback.Run(result); |
} |
+bool SimpleEntryImpl::RunNextOperationIfNeeded() { |
+ DCHECK(io_thread_checker_.CalledOnValidThread()); |
+ if (operations_.size() <= 0 || operation_running_) |
+ return false; |
+ base::Closure operation = operations_.front(); |
+ operations_.pop(); |
+ operation.Run(); |
+ return true; |
+} |
+ |
void SimpleEntryImpl::SetSynchronousData() { |
DCHECK(io_thread_checker_.CalledOnValidThread()); |
- DCHECK(!synchronous_entry_in_use_by_worker_); |
+ DCHECK(!operation_running_); |
// TODO(felipeg): These copies to avoid data races are not optimal. While |
// adding an IO thread index (for fast misses etc...), we can store this data |
// in that structure. This also solves problems with last_used() on ext4 |