Chromium Code Reviews

Side by Side Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 14130015: Support overlapping operations on the SimpleEntryImpl. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: add unittests Created 7 years, 8 months ago
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/simple/simple_entry_impl.h" 5 #include "net/disk_cache/simple/simple_entry_impl.h"
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/bind_helpers.h" 8 #include "base/bind_helpers.h"
9 #include "base/callback.h" 9 #include "base/callback.h"
10 #include "base/location.h" 10 #include "base/location.h"
(...skipping 70 matching lines...)
81 index->Remove(key); 81 index->Remove(key);
82 WorkerPool::PostTask(FROM_HERE, 82 WorkerPool::PostTask(FROM_HERE,
83 base::Bind(&SimpleSynchronousEntry::DoomEntry, path, key, 83 base::Bind(&SimpleSynchronousEntry::DoomEntry, path, key,
84 MessageLoopProxy::current(), callback), 84 MessageLoopProxy::current(), callback),
85 true); 85 true);
86 return net::ERR_IO_PENDING; 86 return net::ERR_IO_PENDING;
87 } 87 }
88 88
89 void SimpleEntryImpl::Doom() { 89 void SimpleEntryImpl::Doom() {
90 DCHECK(io_thread_checker_.CalledOnValidThread()); 90 DCHECK(io_thread_checker_.CalledOnValidThread());
91 DCHECK(!operation_running_);
91 #if defined(OS_POSIX) 92 #if defined(OS_POSIX)
92 // This call to static SimpleEntryImpl::DoomEntry() will just erase the 93 // This call to static SimpleEntryImpl::DoomEntry() will just erase the
93 // underlying files. On POSIX, this is fine; the files are still open on the 94 // underlying files. On POSIX, this is fine; the files are still open on the
94 // SimpleSynchronousEntry, and operations can even happen on them. The files 95 // SimpleSynchronousEntry, and operations can even happen on them. The files
95 // will be removed from the filesystem when they are closed. 96 // will be removed from the filesystem when they are closed.
96 DoomEntry(index_, path_, key_, CompletionCallback()); 97 DoomEntry(index_, path_, key_, CompletionCallback());
97 #else 98 #else
98 NOTIMPLEMENTED(); 99 NOTIMPLEMENTED();
99 #endif 100 #endif
100 } 101 }
101 102
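
The #if defined(OS_POSIX) branch above relies on standard POSIX unlink semantics: removing a file's directory entry does not invalidate descriptors that are already open, so the SimpleSynchronousEntry can keep reading and writing its files until it closes them. A minimal standalone sketch of that behavior (not part of this patch; the path name is illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const char kPath[] = "/tmp/simple_entry_unlink_demo";
  int fd = open(kPath, O_CREAT | O_RDWR | O_TRUNC, 0600);
  if (fd < 0) {
    perror("open");
    return 1;
  }
  // "Doom" the file: the name disappears immediately, but the open
  // descriptor keeps working until it is closed.
  unlink(kPath);
  const char kData[] = "still usable";
  write(fd, kData, sizeof(kData));   // Writes still succeed.
  lseek(fd, 0, SEEK_SET);
  char buf[sizeof(kData)] = {};
  read(fd, buf, sizeof(buf));        // Reads still succeed.
  printf("read back: %s\n", buf);
  close(fd);                         // Storage is released only now.
  return 0;
}
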
102 void SimpleEntryImpl::Close() { 103 void SimpleEntryImpl::Close() {
103 DCHECK(io_thread_checker_.CalledOnValidThread()); 104 DCHECK(io_thread_checker_.CalledOnValidThread());
104 if (!synchronous_entry_in_use_by_worker_) { 105 if (operation_running_) {
gavinp 2013/04/17 11:04:36 I think if you rebase on top of https://codereview
felipeg 2013/04/17 11:51:22 If I am going to land before you, I think I should
gavinp 2013/04/17 13:13:45 Agreed. And whoever wins the race dodges the merge
105 WorkerPool::PostTask(FROM_HERE, 106 // Postpone close operation.
106 base::Bind(&SimpleSynchronousEntry::Close, 107 // Push the close operation to the end of the line. This way we run all
107 base::Unretained(synchronous_entry_), 108 // operations before we are able to close.
108 true); 109 operations_.push(
110 base::Bind(&SimpleEntryImpl::Close,
111 weak_ptr_factory_.GetWeakPtr()));
112 return;
109 } 113 }
114 DCHECK(operations_.size() == 0);
115 DCHECK(!operation_running_);
116
117 WorkerPool::PostTask(FROM_HERE,
118 base::Bind(&SimpleSynchronousEntry::Close,
119 base::Unretained(synchronous_entry_)),
120 true);
110 // Entry::Close() is expected to release this entry. See disk_cache.h for 121 // Entry::Close() is expected to release this entry. See disk_cache.h for
111 // details. 122 // details.
112 delete this; 123 delete this;
113 } 124 }
114 125
115 std::string SimpleEntryImpl::GetKey() const { 126 std::string SimpleEntryImpl::GetKey() const {
116 DCHECK(io_thread_checker_.CalledOnValidThread()); 127 DCHECK(io_thread_checker_.CalledOnValidThread());
117 return key_; 128 return key_;
118 } 129 }
119 130
(...skipping 11 matching lines...)
131 DCHECK(io_thread_checker_.CalledOnValidThread()); 142 DCHECK(io_thread_checker_.CalledOnValidThread());
132 return data_size_[index]; 143 return data_size_[index];
133 } 144 }
134 145
135 int SimpleEntryImpl::ReadData(int index, 146 int SimpleEntryImpl::ReadData(int index,
136 int offset, 147 int offset,
137 net::IOBuffer* buf, 148 net::IOBuffer* buf,
138 int buf_len, 149 int buf_len,
139 const CompletionCallback& callback) { 150 const CompletionCallback& callback) {
140 DCHECK(io_thread_checker_.CalledOnValidThread()); 151 DCHECK(io_thread_checker_.CalledOnValidThread());
141 // TODO(gavinp): Add support for overlapping reads. The net::HttpCache does 152 if (index < 0 || index >= kSimpleEntryFileCount || buf_len < 0)
142 // make overlapping read requests when multiple transactions access the same 153 return net::ERR_INVALID_ARGUMENT;
143 // entry as read only. This might make calling SimpleSynchronousEntry::Close() 154 if (offset >= data_size_[index] || offset < 0 || !buf_len)
144 // correctly more tricky (see SimpleEntryImpl::EntryOperationComplete). 155 return 0;
145 if (synchronous_entry_in_use_by_worker_) { 156 // TODO(felipeg): Optimization: Add support for truly parallel read
146 NOTIMPLEMENTED(); 157 // operations.
147 CHECK(false); 158 operations_.push(
148 } 159 base::Bind(&SimpleEntryImpl::ReadDataInternal,
149 synchronous_entry_in_use_by_worker_ = true; 160 weak_ptr_factory_.GetWeakPtr(),
161 index,
162 offset,
163 make_scoped_refptr(buf),
164 buf_len,
165 callback));
166 RunNextOperationIfNeeded();
167 return net::ERR_IO_PENDING;
168 }
169
170 void SimpleEntryImpl::ReadDataInternal(int index,
gavinp 2013/04/17 11:04:36 Method ordering: this should go to the end of the
felipeg 2013/04/17 11:51:22 Done.
171 int offset,
172 scoped_refptr<net::IOBuffer> buf,
173 int buf_len,
174 const CompletionCallback& callback) {
175 DCHECK(io_thread_checker_.CalledOnValidThread());
176 DCHECK(!operation_running_);
177 operation_running_ = true;
150 if (index_) 178 if (index_)
151 index_->UseIfExists(key_); 179 index_->UseIfExists(key_);
152 SynchronousOperationCallback sync_operation_callback = 180 SynchronousOperationCallback sync_operation_callback =
153 base::Bind(&SimpleEntryImpl::EntryOperationComplete, 181 base::Bind(&SimpleEntryImpl::EntryOperationComplete,
154 index_, callback, weak_ptr_factory_.GetWeakPtr(), 182 index_, callback, weak_ptr_factory_.GetWeakPtr(),
155 synchronous_entry_); 183 synchronous_entry_);
156 WorkerPool::PostTask(FROM_HERE, 184 WorkerPool::PostTask(FROM_HERE,
157 base::Bind(&SimpleSynchronousEntry::ReadData, 185 base::Bind(&SimpleSynchronousEntry::ReadData,
158 base::Unretained(synchronous_entry_), 186 base::Unretained(synchronous_entry_),
159 index, offset, make_scoped_refptr(buf), 187 index, offset, buf,
160 buf_len, sync_operation_callback), 188 buf_len, sync_operation_callback),
161 true); 189 true);
162 return net::ERR_IO_PENDING;
163 } 190 }
164 191
165 int SimpleEntryImpl::WriteData(int index, 192 int SimpleEntryImpl::WriteData(int index,
166 int offset, 193 int offset,
167 net::IOBuffer* buf, 194 net::IOBuffer* buf,
168 int buf_len, 195 int buf_len,
169 const CompletionCallback& callback, 196 const CompletionCallback& callback,
170 bool truncate) { 197 bool truncate) {
171 DCHECK(io_thread_checker_.CalledOnValidThread()); 198 DCHECK(io_thread_checker_.CalledOnValidThread());
172 if (synchronous_entry_in_use_by_worker_) { 199 if (index < 0 || index >= kSimpleEntryFileCount || offset < 0 || buf_len < 0)
173 NOTIMPLEMENTED(); 200 return net::ERR_INVALID_ARGUMENT;
174 CHECK(false); 201
175 } 202 operations_.push(
176 synchronous_entry_in_use_by_worker_ = true; 203 base::Bind(&SimpleEntryImpl::WriteDataInternal,
204 weak_ptr_factory_.GetWeakPtr(),
205 index,
206 offset,
207 make_scoped_refptr(buf),
208 buf_len,
209 callback,
210 truncate));
211 RunNextOperationIfNeeded();
212
213 // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
214 // returning net::OK here.
215 return net::ERR_IO_PENDING;
216 }
217
218 void SimpleEntryImpl::WriteDataInternal(int index,
219 int offset,
220 scoped_refptr<net::IOBuffer> buf,
221 int buf_len,
222 const CompletionCallback& callback,
223 bool truncate) {
224 DCHECK(io_thread_checker_.CalledOnValidThread());
225 DCHECK(!operation_running_);
226 operation_running_ = true;
177 if (index_) 227 if (index_)
178 index_->UseIfExists(key_); 228 index_->UseIfExists(key_);
229
230 last_used_ = base::Time::Now();
231 last_modified_ = base::Time::Now();
232 data_size_[index] = buf_len;
233
179 SynchronousOperationCallback sync_operation_callback = 234 SynchronousOperationCallback sync_operation_callback =
180 base::Bind(&SimpleEntryImpl::EntryOperationComplete, 235 base::Bind(&SimpleEntryImpl::EntryOperationComplete,
181 index_, callback, weak_ptr_factory_.GetWeakPtr(), 236 index_, callback, weak_ptr_factory_.GetWeakPtr(),
182 synchronous_entry_); 237 synchronous_entry_);
183 WorkerPool::PostTask(FROM_HERE, 238 WorkerPool::PostTask(FROM_HERE,
184 base::Bind(&SimpleSynchronousEntry::WriteData, 239 base::Bind(&SimpleSynchronousEntry::WriteData,
185 base::Unretained(synchronous_entry_), 240 base::Unretained(synchronous_entry_),
186 index, offset, make_scoped_refptr(buf), 241 index, offset, buf,
187 buf_len, sync_operation_callback, truncate), 242 buf_len, sync_operation_callback, truncate),
188 true); 243 true);
189 return net::ERR_IO_PENDING;
190 } 244 }
191 245
192 int SimpleEntryImpl::ReadSparseData(int64 offset, 246 int SimpleEntryImpl::ReadSparseData(int64 offset,
193 net::IOBuffer* buf, 247 net::IOBuffer* buf,
194 int buf_len, 248 int buf_len,
195 const CompletionCallback& callback) { 249 const CompletionCallback& callback) {
196 DCHECK(io_thread_checker_.CalledOnValidThread()); 250 DCHECK(io_thread_checker_.CalledOnValidThread());
197 // TODO(gavinp): Determine if the simple backend should support sparse data. 251 // TODO(gavinp): Determine if the simple backend should support sparse data.
198 NOTIMPLEMENTED(); 252 NOTIMPLEMENTED();
199 return net::ERR_FAILED; 253 return net::ERR_FAILED;
(...skipping 38 matching lines...)
238 return net::ERR_FAILED; 292 return net::ERR_FAILED;
239 } 293 }
240 294
241 SimpleEntryImpl::SimpleEntryImpl( 295 SimpleEntryImpl::SimpleEntryImpl(
242 SimpleSynchronousEntry* synchronous_entry, 296 SimpleSynchronousEntry* synchronous_entry,
243 WeakPtr<SimpleIndex> index) 297 WeakPtr<SimpleIndex> index)
244 : ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)), 298 : ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
245 path_(synchronous_entry->path()), 299 path_(synchronous_entry->path()),
246 key_(synchronous_entry->key()), 300 key_(synchronous_entry->key()),
247 synchronous_entry_(synchronous_entry), 301 synchronous_entry_(synchronous_entry),
248 synchronous_entry_in_use_by_worker_(false), 302 operation_running_(false),
249 index_(index) { 303 index_(index) {
250 DCHECK(synchronous_entry); 304 DCHECK(synchronous_entry);
251 SetSynchronousData(); 305 SetSynchronousData();
252 } 306 }
253 307
254 SimpleEntryImpl::~SimpleEntryImpl() { 308 SimpleEntryImpl::~SimpleEntryImpl() {
255 DCHECK(io_thread_checker_.CalledOnValidThread()); 309 DCHECK(io_thread_checker_.CalledOnValidThread());
256 } 310 }
257 311
258 // static 312 // static
(...skipping 25 matching lines...)
284 int result) { 338 int result) {
285 DCHECK(sync_entry); 339 DCHECK(sync_entry);
286 if (index) { 340 if (index) {
287 if (result >= 0) 341 if (result >= 0)
288 index->UpdateEntrySize(sync_entry->key(), sync_entry->GetFileSize()); 342 index->UpdateEntrySize(sync_entry->key(), sync_entry->GetFileSize());
289 else 343 else
290 index->Remove(sync_entry->key()); 344 index->Remove(sync_entry->key());
291 } 345 }
292 346
293 if (entry) { 347 if (entry) {
294 DCHECK(entry->synchronous_entry_in_use_by_worker_); 348 DCHECK(entry->operation_running_);
295 entry->synchronous_entry_in_use_by_worker_ = false; 349 entry->operation_running_ = false;
296 entry->SetSynchronousData(); 350 entry->SetSynchronousData();
351 entry->RunNextOperationIfNeeded();
297 } else { 352 } else {
298 // |entry| must have had Close() called while this operation was in flight. 353 // |entry| must have had Close() called while this operation was in flight.
299 // Since the simple cache now only supports one pending entry operation in 354 // Since the simple cache now only supports one pending entry operation in
300 // flight at a time, it's safe to now call Close() on |sync_entry|. 355 // flight at a time, it's safe to now call Close() on |sync_entry|.
301 WorkerPool::PostTask(FROM_HERE, 356 WorkerPool::PostTask(FROM_HERE,
302 base::Bind(&SimpleSynchronousEntry::Close, 357 base::Bind(&SimpleSynchronousEntry::Close,
303 base::Unretained(sync_entry)), 358 base::Unretained(sync_entry)),
304 true); 359 true);
305 } 360 }
306 completion_callback.Run(result); 361 completion_callback.Run(result);
307 } 362 }
308 363
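
EntryOperationComplete() above is a static method precisely so it can outlive the SimpleEntryImpl: the bound WeakPtr tells it whether the entry is still alive, and if Close() already ran while the operation was in flight, it closes the SimpleSynchronousEntry itself. A standalone analog of that pattern using std::weak_ptr (illustrative names only, not Chromium code):

#include <iostream>
#include <memory>

struct SyncState {
  void Close() { std::cout << "sync state closed\n"; }
};

struct Entry {
  void OnOperationDone(int result) {
    std::cout << "entry saw result " << result << "\n";
  }
};

void OperationComplete(std::weak_ptr<Entry> weak_entry,
                       SyncState* sync_state,
                       int result) {
  if (auto entry = weak_entry.lock()) {
    entry->OnOperationDone(result);  // Entry still alive: normal path.
  } else {
    sync_state->Close();             // Entry gone: release resources here.
  }
}

int main() {
  SyncState sync;
  auto entry = std::make_shared<Entry>();
  std::weak_ptr<Entry> weak = entry;

  OperationComplete(weak, &sync, 0);  // Entry alive.
  entry.reset();                      // Simulate Close() during an operation.
  OperationComplete(weak, &sync, 0);  // Entry gone: sync state closed.
}
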
364 bool SimpleEntryImpl::RunNextOperationIfNeeded() {
365 DCHECK(io_thread_checker_.CalledOnValidThread());
366 if (operations_.size() <= 0 || operation_running_)
367 return false;
368 base::Closure operation = operations_.front();
369 operations_.pop();
370 operation.Run();
371 return true;
372 }
373
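
Taken together, operations_, operation_running_, RunNextOperationIfNeeded() and EntryOperationComplete() serialize the entry's I/O: each public call enqueues a closure, at most one closure runs at a time, and finishing one operation starts the next, with Close() re-queuing itself behind anything already pending. A self-contained sketch of the same pattern in plain standard C++ (class and method names, and the explicit OperationComplete() calls, are illustrative only):

#include <functional>
#include <iostream>
#include <queue>

class SerializedEntry {
 public:
  void Read(int id) { Enqueue([this, id] { Start("read", id); }); }
  void Write(int id) { Enqueue([this, id] { Start("write", id); }); }

  void Close() {
    if (operation_running_) {
      // Mirror SimpleEntryImpl::Close(): push Close() behind the pending
      // operations so everything queued so far finishes first.
      operations_.push([this] { Close(); });
      return;
    }
    std::cout << "closing\n";
  }

  // Called when the asynchronous work for the current operation finishes;
  // corresponds to EntryOperationComplete() in the patch.
  void OperationComplete() {
    operation_running_ = false;
    RunNextOperationIfNeeded();
  }

 private:
  void Enqueue(std::function<void()> op) {
    operations_.push(std::move(op));
    RunNextOperationIfNeeded();
  }

  void Start(const char* what, int id) {
    operation_running_ = true;
    std::cout << what << " " << id << " started\n";
  }

  void RunNextOperationIfNeeded() {
    if (operation_running_ || operations_.empty())
      return;
    std::function<void()> op = std::move(operations_.front());
    operations_.pop();
    op();
  }

  bool operation_running_ = false;
  std::queue<std::function<void()>> operations_;
};

int main() {
  SerializedEntry entry;
  entry.Read(1);              // Starts immediately.
  entry.Write(2);             // Queued behind the read.
  entry.Close();              // Queued behind the write.
  entry.OperationComplete();  // Read done -> write starts.
  entry.OperationComplete();  // Write done -> Close() runs.
}

Because everything runs on a single thread (the cache's IO thread in the real code), no locking is needed; the FIFO queue alone enforces ordering between overlapping calls.
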
309 void SimpleEntryImpl::SetSynchronousData() { 374 void SimpleEntryImpl::SetSynchronousData() {
310 DCHECK(io_thread_checker_.CalledOnValidThread()); 375 DCHECK(io_thread_checker_.CalledOnValidThread());
311 DCHECK(!synchronous_entry_in_use_by_worker_); 376 DCHECK(!operation_running_);
312 // TODO(felipeg): These copies to avoid data races are not optimal. While 377 // TODO(felipeg): These copies to avoid data races are not optimal. While
313 // adding an IO thread index (for fast misses etc...), we can store this data 378 // adding an IO thread index (for fast misses etc...), we can store this data
314 // in that structure. This also solves problems with last_used() on ext4 379 // in that structure. This also solves problems with last_used() on ext4
315 // filesystems not being accurate. 380 // filesystems not being accurate.
316 last_used_ = synchronous_entry_->last_used(); 381 last_used_ = synchronous_entry_->last_used();
317 last_modified_ = synchronous_entry_->last_modified(); 382 last_modified_ = synchronous_entry_->last_modified();
318 for (int i = 0; i < kSimpleEntryFileCount; ++i) 383 for (int i = 0; i < kSimpleEntryFileCount; ++i)
319 data_size_[i] = synchronous_entry_->data_size(i); 384 data_size_[i] = synchronous_entry_->data_size(i);
320 } 385 }
321 386
322 } // namespace disk_cache 387 } // namespace disk_cache