Unified Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 13907009: Support optimistic Create and Write operations on the SimpleCache. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: fix SimpleCacheOptimistic4 Created 7 years, 7 months ago
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

(...skipping 61 matching lines...)
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size2);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size3);
-
  MakeUninitialized();
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_);
+  // This enumeration is used in histograms; add entries only at the end.
+  enum OpenEntryIndexEnum {
+    INDEX_NOEXIST = 0,
+    INDEX_MISS = 1,
+    INDEX_HIT = 2,
+    INDEX_MAX = 3,
+  };
+  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
+  if (backend_) {
+    if (backend_->index()->Has(key_))
+      open_entry_index_enum = INDEX_HIT;
+    else
+      open_entry_index_enum = INDEX_MISS;
+  }
+  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
+                            open_entry_index_enum, INDEX_MAX);
+
+  // If the entry is not known to the index, initiate fast failover to the
+  // network.
+  if (open_entry_index_enum == INDEX_MISS)
+    return net::ERR_FAILED;

  pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
-                                      this, out_entry, callback));
+                                      this, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
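
The synchronous index check above is the point of this hunk: a miss now fails the open before any disk I/O is queued. A minimal standalone sketch of that fast-fail shape, assuming a plain std::set as the in-memory index (the real SimpleIndex tracks far more state; all names below are illustrative):

#include <set>
#include <string>

const int kErrFailed = -2;     // Stands in for net::ERR_FAILED.
const int kErrIoPending = -1;  // Stands in for net::ERR_IO_PENDING.

int OpenWithFastFail(const std::set<std::string>& index,
                     const std::string& key) {
  // An index miss fails synchronously, with no disk I/O queued, so the
  // caller (e.g. the HTTP cache) can fail over to the network immediately.
  if (index.count(key) == 0)
    return kErrFailed;
  // A hit still needs real file I/O and completes asynchronously.
  return kErrIoPending;
}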

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_);
-  pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
-                                      this, out_entry, callback));
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_UNINITIALIZED &&
+      pending_operations_.size() == 0) {
+    // We can do an optimistic Create.
+    ReturnEntryToCaller(out_entry);
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        CompletionCallback(),
+                                        static_cast<Entry**>(NULL)));
+    ret_value = net::OK;
+  } else {
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        callback,
+                                        out_entry));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  // We insert the entry in the index before creating the entry files in the
+  // SimpleSynchronousEntry, because this way the worst-case scenario is an
+  // entry that is in the index but whose files were never created; that way
+  // we never leak files. CreationOperationComplete will remove the entry
+  // from the index if the creation fails.
+  if (backend_)
+    backend_->index()->Insert(key_);
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
  RunNextOperationIfNeeded();
-  return net::ERR_IO_PENDING;
+  return ret_value;
}
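
The hunk above is the heart of the change: when the entry is fresh and the queue is empty, CreateEntry hands the caller a usable entry and net::OK right away, and the file creation runs later behind the same FIFO queue. A self-contained sketch of that pattern, with std::queue standing in for |pending_operations_| and plain ints for net error codes (all names here are illustrative, not the Chromium API):

#include <functional>
#include <queue>

const int kOk = 0;         // Stands in for net::OK.
const int kErrFailed = -2; // Stands in for net::ERR_FAILED.

struct OptimisticEntrySketch {
  std::queue<std::function<void()>> pending_ops;
  bool failed = false;

  // Returns success immediately; the disk work is queued, not done.
  int Create() {
    pending_ops.push([this] {
      // ... create the files; on error, set failed = true ...
    });
    return kOk;
  }

  // Later operations queue up behind the create and observe its outcome.
  int Write() {
    if (failed)
      return kErrFailed;  // The earlier optimistic create already failed.
    pending_ops.push([] { /* ... write to disk ... */ });
    return kOk;
  }

  // The real code runs one operation at a time, resuming as each completes;
  // draining in FIFO order here keeps the sketch short.
  void RunNextOperationIfNeeded() {
    while (!pending_ops.empty()) {
      pending_ops.front()();
      pending_ops.pop();
    }
  }
};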

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  MarkAsDoomed();
-
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
  return net::ERR_IO_PENDING;
}

(...skipping 27 matching lines...)
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;
  if (offset >= data_size_[stream_index] || offset < 0 || !buf_len)
    return 0;
-  buf_len = std::min(buf_len, data_size_[stream_index] - offset);
+
  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  pending_operations_.push(
      base::Bind(&SimpleEntryImpl::ReadDataInternal,
                 this,
                 stream_index,
                 offset,
                 make_scoped_refptr(buf),
                 buf_len,
                 callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    return net::ERR_INVALID_ARGUMENT;
  }
-  pending_operations_.push(
-      base::Bind(&SimpleEntryImpl::WriteDataInternal,
-                 this,
-                 stream_index,
-                 offset,
-                 make_scoped_refptr(buf),
-                 buf_len,
-                 callback,
-                 truncate));
+
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_READY && pending_operations_.size() == 0) {
+    // We can only do an optimistic Write if there are no pending operations,
+    // so that we are sure the next call to RunNextOperationIfNeeded will
+    // actually run the write operation that sets the stream size. It also
+    // protects us from earlier, possibly conflicting writes that could be
+    // stacked in |pending_operations_|. We could optimize this for the case
+    // where only read operations are enqueued.
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal,
+                   this,

gavinp 2013/05/02 12:47:39: Minor nit: I really think these look better all bu…
felipeg 2013/05/02 13:55:58: Done.

+                   stream_index,
+                   offset,
+                   make_scoped_refptr(buf),
+                   buf_len,
+                   CompletionCallback(),
+                   truncate));
+    ret_value = buf_len;
+  } else {
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal,
+                   this,
+                   stream_index,
+                   offset,
+                   make_scoped_refptr(buf),
+                   buf_len,
+                   callback,
+                   truncate));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  if (truncate)
+    data_size_[stream_index] = offset + buf_len;
+  else
+    data_size_[stream_index] = std::max(offset + buf_len,
+                                        data_size_[stream_index]);
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
  RunNextOperationIfNeeded();
-  // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
-  // returning net::OK here.
-  return net::ERR_IO_PENDING;
+  return ret_value;
}
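
The data_size_ update above happens before the write has touched disk, so a later optimistic read already sees the new stream size. A small worked example of that bookkeeping (standalone; NewStreamSize is a made-up name, not in the patch):

#include <algorithm>
#include <cstdint>

// New logical stream size after a write of |buf_len| bytes at |offset|.
int32_t NewStreamSize(int32_t old_size, int32_t offset, int32_t buf_len,
                      bool truncate) {
  if (truncate)
    return offset + buf_len;  // Truncate: size is exactly the write end.
  return std::max(offset + buf_len, old_size);  // Otherwise grow-only.
}

// For a 100-byte stream: an 80-byte write at offset 40 gives
// NewStreamSize(100, 40, 80, false) == 120, and a truncating 10-byte write
// at offset 0 shrinks the stream to NewStreamSize(100, 0, 10, true) == 10.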

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
(...skipping 34 matching lines...)
int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_UNINITIALIZED, state_);
+  DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::MakeUninitialized() {
+

gavinp 2013/05/02 12:47:39: Lose this blank line.
felipeg 2013/05/02 13:55:58: Done.

  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
+  std::memset(data_size_, 0, sizeof(data_size_));
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
+  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_)
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_)
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
-
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
-
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    base::Closure operation = pending_operations_.front();
    pending_operations_.pop();
    operation.Run();
    // |this| may have been deleted.
  }
}

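Every *Internal() method below opens with a ScopedOperationRunner, which this diff uses but never shows. Judging from its use, it is presumably a small RAII helper that pumps the queue on every exit path, so early returns cannot stall |pending_operations_|. A plausible sketch, offered as an assumption rather than the actual Chromium class (the real one likely also holds a reference to keep the entry alive):

// Assumed shape of the helper, inferred from its call sites in this diff.
template <typename EntryType>
class ScopedOperationRunnerSketch {
 public:
  explicit ScopedOperationRunnerSketch(EntryType* entry) : entry_(entry) {}
  // Runs on every exit path of the enclosing function, including early
  // returns, so the next queued operation is never forgotten.
  ~ScopedOperationRunnerSketch() { entry_->RunNextOperationIfNeeded(); }
 private:
  EntryType* const entry_;  // Must outlive this scope.
};
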
-void SimpleEntryImpl::OpenEntryInternal(Entry** out_entry,
-                                        const CompletionCallback& callback) {
+void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
+                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
-
  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
-    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
-                                                                net::OK));
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
+                                                                  net::OK));
+    return;
+  } else if (state_ == STATE_FAILURE) {
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
-
-  // This enumeration is used in histograms, add entries only at end.
-  enum OpenEntryIndexEnum {
-    INDEX_NOEXIST = 0,
-    INDEX_MISS = 1,
-    INDEX_HIT = 2,
-    INDEX_MAX = 3,
-  };
-  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
-  if (backend_) {
-    if (backend_->index()->Has(key_))
-      open_entry_index_enum = INDEX_HIT;
-    else
-      open_entry_index_enum = INDEX_MISS;
-  }
-  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
-                            open_entry_index_enum, INDEX_MAX);
-  // If entry is not known to the index, initiate fast failover to the network.
-  if (open_entry_index_enum == INDEX_MISS) {
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
-    return;
-  }
  state_ = STATE_IO_PENDING;
-
  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

-void SimpleEntryImpl::CreateEntryInternal(Entry** out_entry,
-                                          const CompletionCallback& callback) {
+void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
+                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
-
-  if (state_ == STATE_READY) {
+  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);

  state_ = STATE_IO_PENDING;

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

-  // We insert the entry in the index before creating the entry files in the
-  // SimpleSynchronousEntry, because this way the worst scenario is when we
-  // have the entry in the index but we don't have the created files yet, this
-  // way we never leak files. CreationOperationComplete will remove the entry
-  // from the index if the creation fails.
-  if (backend_)
-    backend_->index()->Insert(key_);
  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
-  DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_READY, state_);
-  DCHECK(synchronous_entry_);
-
-  state_ = STATE_IO_PENDING;
-
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
-
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());
-  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
-    if (have_written_[i]) {
-      if (data_size_[i] == crc32s_end_offset_[i]) {
-        int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
-        crc32s_to_write->push_back(CRCRecord(i, true, crc));
-      } else {
-        crc32s_to_write->push_back(CRCRecord(i, false, 0));
+
+  if (state_ == STATE_READY) {
+    DCHECK(synchronous_entry_);
+    state_ = STATE_IO_PENDING;
+    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+      if (have_written_[i]) {
+        if (data_size_[i] == crc32s_end_offset_[i]) {
+          int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
+          crc32s_to_write->push_back(CRCRecord(i, true, crc));
+        } else {
+          crc32s_to_write->push_back(CRCRecord(i, false, 0));
+        }
      }
    }
+  } else {
+    DCHECK_EQ(STATE_FAILURE, state_);
  }
-  Closure task = base::Bind(&SimpleSynchronousEntry::Close,
-                            base::Unretained(synchronous_entry_),
-                            base::Passed(&crc32s_to_write));
-  Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
-  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
-  synchronous_entry_ = NULL;
+
+  if (synchronous_entry_) {
+    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
+                              base::Unretained(synchronous_entry_),
+                              base::Passed(&crc32s_to_write));
+    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
+    synchronous_entry_ = NULL;
+    WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
+  } else {
+    synchronous_entry_ = NULL;

gavinp 2013/05/02 12:47:39: We probably don't need this line.
felipeg 2013/05/02 13:55:58: Yes we need it: we have a DCHECK in CloseOperationComplete.

+    CloseOperationComplete();
+  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null())

gavinp 2013/05/02 12:47:39: Multi-line if statements should have braces. There…
felipeg 2013/05/02 13:55:58: Done.

+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    return;
+  }
  DCHECK_EQ(STATE_READY, state_);
+  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
+  if (offset < 0 || buf_len <= 0) {
+    // If there is nothing to read, we bail out before setting state_ to
+    // STATE_IO_PENDING.
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, 0));
+    return;
+  }
+
  state_ = STATE_IO_PENDING;
  if (backend_)
    backend_->index()->UseIfExists(key_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, read_crc32.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                             stream_index, offset, callback,
                             base::Passed(&read_crc32), base::Passed(&result));
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null()) {
+      // We need to post a task so that we don't go into a loop when we call
+      // the callback directly.
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
+    // |this| may be destroyed after return here.

pasko-google - do not use 2013/05/02 14:01:43: actually no, |this| is still alive
felipeg 2013/05/02 14:10:05: Nope, it can be destroyed after we return of the r…
pasko-google - do not use 2013/05/02 14:39:57: That's fine to be, the word 'here' in the end made…

+    return;
+  }
  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_)
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
(...skipping 22 matching lines...)
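
The CRC comment above (and the crc32(0, Z_NULL, 0) calls in CloseInternal) rely on zlib's ability to extend a checksum incrementally: the CRC over [0, offset) plus the next sequential chunk yields the CRC over the longer prefix. A self-contained illustration of that scheme, separate from the patch itself (the data and lengths are made up):

#include <cstdio>
#include <zlib.h>

int main() {
  const unsigned char part1[] = "hello ";
  const unsigned char part2[] = "world";
  uLong crc = crc32(0L, Z_NULL, 0);  // CRC of the empty prefix.
  crc = crc32(crc, part1, 6);        // Now covers bytes [0, 6).
  crc = crc32(crc, part2, 5);        // Now covers bytes [0, 11).

  // Must match a one-shot CRC over the whole buffer.
  const unsigned char whole[] = "hello world";
  uLong one_shot = crc32(crc32(0L, Z_NULL, 0), whole, 11);
  std::printf("incremental=%lx one_shot=%lx\n", crc, one_shot);
  return 0;
}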
void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
    scoped_ptr<int> in_result,
    Entry** out_entry) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_sync_entry);
  DCHECK(in_result);
-
  ScopedOperationRunner operation_runner(this);
-
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", *in_result == net::OK);
  if (*in_result != net::OK) {
    if (*in_result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();
+    if (!completion_callback.is_null())

gavinp 2013/05/02 12:47:39: I'm not convinced we need these PostTasks if we ar…
felipeg 2013/05/02 13:55:58: We do need. In case of failure of an operation, wh…
pasko-google - do not use 2013/05/02 14:39:57: I think if you move completion_callback.Run(...) …
felipeg 2013/05/02 14:48:02: It doesn't have anything to do with MakeUninitialized…
pasko-google - do not use 2013/05/02 15:08:45: I just ran DiskCacheBackendTest.* and DiskCacheEntryTest…

+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          completion_callback, net::ERR_FAILED));
    MakeUninitialized();
-    completion_callback.Run(net::ERR_FAILED);
+    state_ = STATE_FAILURE;
    return;
  }
+  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
+  // the optimistic Create case.
+  if (out_entry)
+    ReturnEntryToCaller(out_entry);
+
  state_ = STATE_READY;
  synchronous_entry_ = *in_sync_entry;
  SetSynchronousData();
-  ReturnEntryToCaller(out_entry);
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));
-  completion_callback.Run(net::OK);
+
+  if (!completion_callback.is_null())
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, net::OK));
}

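The thread above is about exactly this ordering: because the completion is posted rather than run inline, every state change in this method settles before the caller's callback can re-enter the entry. A toy, standalone illustration of that deferral, with a std::queue standing in for the message loop (none of these names are from the patch):

#include <functional>
#include <iostream>
#include <queue>

std::queue<std::function<void()>> loop;  // Stand-in for MessageLoopProxy.

void CompleteOperation() {
  // "PostTask": the callback is queued, not run.
  loop.push([] { std::cout << "callback runs second\n"; });
  // Any bookkeeping after the post (state_, MakeUninitialized, ...) still
  // happens before the callback can observe the entry.
  std::cout << "state settles first\n";
}

int main() {
  CompleteOperation();
  while (!loop.empty()) {
    loop.front()();
    loop.pop();
  }
  return 0;
}
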
void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
-
  state_ = STATE_READY;
-
  if (*result < 0) {
    MarkAsDoomed();
+    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
+  } else {
+    SetSynchronousData();
  }
-  SetSynchronousData();
-  completion_callback.Run(*result);
+
+  if (!completion_callback.is_null())
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, *result));
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
(...skipping 45 matching lines...)
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result == net::OK)
    *result = orig_result;
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
-  DCHECK_EQ(STATE_IO_PENDING, state_);
+  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_);
-
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::SetSynchronousData() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);
  // TODO(felipeg): These copies to avoid data races are not optimal. While
  // adding an IO thread index (for fast misses etc...), we can store this data
  // in that structure. This also solves problems with last_used() on ext4
  // filesystems not being accurate.
  last_used_ = synchronous_entry_->last_used();
  last_modified_ = synchronous_entry_->last_modified();
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    data_size_[i] = synchronous_entry_->data_size(i);
  if (backend_)
    backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}

}  // namespace disk_cache