// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// the cache.
const int64 kMaxSparseDataSizeDivisor = 10;

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_FAST_EMPTY_RETURN = 5,
  WRITE_RESULT_MAX = 6,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

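// Runs |completion_callback| with |result| unless the backend has been
// destroyed, in which case the callback is dropped.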
void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    const net::CompletionCallback& completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  completion_callback.Run(result);
}

}  // namespace

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      sparse_data_size_(0),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      stream_0_data_(new net::GrowableIOBuffer()) {
  static_assert(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(crc32s_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(have_written_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(crc_check_state_),
                "arrays should be the same size");
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}

void SimpleEntryImpl::SetActiveEntryProxy(
    scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
  DCHECK(!active_entry_proxy_);
  active_entry_proxy_.reset(active_entry_proxy.release());
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms; add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry into the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst-case scenario is having
  // the entry in the index without the created files; that way we never leak
  // files. CreationOperationComplete will remove the entry from the index if
  // the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  if (doomed_)
    return net::OK;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  if (backend_.get())
    backend_->OnDoomStart(entry_hash_);
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
                    net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(clamy): return immediately when reading from stream 0.

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if
  // there are no IO operations pending.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do an optimistic write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded will actually
  // run the write operation that sets the stream size. It also protects
  // against previous possibly-conflicting writes that could be stacked in
  // |pending_operations_|. We could optimize this for when we have only read
  // operations enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, start, callback));
  return net::ERR_IO_PENDING;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(ttuttle): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
}

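// Resets all per-stream bookkeeping (sizes, CRCs and write flags) and returns
// the entry to STATE_UNINITIALIZED, as if it had never been opened.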
void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  if (!backend_.get()) {
    // This method can be called when an asynchronous operation has completed.
    // If the backend no longer exists, the callback won't be invoked, and so
    // we must close ourselves to avoid leaking. As well, there's no guarantee
    // the client-provided pointer (|out_entry|) hasn't been freed, and so
    // there's no point dereferencing it, either.
    Close();
    return;
  }
  *out_entry = this;
}

void SimpleEntryImpl::MarkAsDoomed() {
  doomed_ = true;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  active_entry_proxy_.reset();
}

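// Pops and dispatches the next queued operation unless an I/O operation is
// already in flight. Note that executing an operation may release the last
// reference and delete |this|.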
void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation->sparse_offset(),
                               operation->buf(),
                               operation->length(),
                               operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation->sparse_offset(),
                                operation->buf(),
                                operation->length(),
                                operation->callback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation->sparse_offset(),
                                  operation->length(),
                                  operation->out_start(),
                                  operation->callback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    PostClientCallback(callback, net::OK);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(callback, net::ERR_FAILED);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    PostClientCallback(callback, net::ERR_FAILED);
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryStreamCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

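// Processes a queued Close operation: if a synchronous entry exists, posts a
// task that persists the in-memory stream 0 data and any fully computed
// per-stream CRCs before releasing it.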
void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_,
                                   sparse_data_size_),
                   base::Passed(&crc32s_to_write),
                   stream_0_data_);
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      // Note that the API states that client-provided callbacks for
      // entry-level (i.e. non-backend) operations (e.g. read, write) are
      // invoked even if the backend was already destroyed.
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                    base::Bind(callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int ret_value = ReadStream0Data(buf, offset, buf_len);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      entry_stat.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it will be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
  if (buf_len == 0) {
    int32 data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
      if (!callback.is_null()) {
        base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                      base::Bind(callback, 0));
      }
      return;
    }
  }
  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  AdvanceCrc(buf, offset, buf_len, stream_index);

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing to stream 1 affects the placement of stream 0 in the file, so the
  // EOF record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate,
                                doomed_),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            last_used.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  uint64 max_sparse_data_size = kint64max;
  if (backend_.get()) {
    uint64 max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));

  last_used_ = last_modified_ = base::Time::Now();

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            max_sparse_data_size,
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::GetAvailableRangeInternal(
    int64 sparse_offset,
    int len,
    int64* out_start,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, len),
                            out_start,
                            result.get());
  Closure reply = base::Bind(
      &SimpleEntryImpl::GetAvailableRangeOperationComplete,
      this,
      callback,
      base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

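// Dooms the entry on the worker pool, remembering the current state so that
// DoomOperationComplete can restore it once the files are gone.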
void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
  PostTaskAndReplyWithResult(
      worker_pool_.get(),
      FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
      base::Bind(
          &SimpleEntryImpl::DoomOperationComplete, this, callback, state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(completion_callback, net::ERR_FAILED);
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (in_results->stream_0_data.get()) {
    stream_0_data_ = in_results->stream_0_data;
    // The crc was read in SimpleSynchronousEntry.
    crc_check_state_[0] = CRC_CHECK_DONE;
    crc32s_[0] = in_results->stream_0_crc32;
    crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
  }
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  PostClientCallback(completion_callback, net::OK);
}

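// Common completion path for read, write and sparse operations: a negative
// result dooms the entry, a success refreshes the cached metadata; either way
// the client callback is posted and the next queued operation runs.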
void SimpleEntryImpl::EntryOperationComplete(
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed();
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::Bind(completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                *entry_stat,
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::ReadSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_END);
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

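// Completion of the end-of-file CRC check started by ReadOperationComplete:
// on success the original read result is surfaced to the caller; otherwise a
// checksum failure is recorded.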
void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLogging()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

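// Adopts the authoritative timestamps and sizes computed on the worker thread
// and pushes the resulting disk usage to the index.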
void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}

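// Estimates the entry's on-disk footprint: the per-stream file sizes implied
// by the key length and data sizes, plus the sparse data size.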
int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  file_size += sparse_data_size_;
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

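// Serves a stream 0 read from the in-memory buffer. The caller has already
// validated |offset| and clamped |buf_len| against the stream size.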
int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
                                     int offset,
                                     int buf_len) {
  if (buf_len < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
    return 0;
  }
  memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
  UpdateDataFromEntryStat(
      SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
                      sparse_data_size_));
  RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
  return buf_len;
}

int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and they are always
  // written with a single, truncating write. Detect these writes and record
  // the size changes of the headers. Also, support writes to stream 0 that
  // have different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    RecordHeaderSizeChange(cache_type_, data_size, buf_len);
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    RecordUnexpectedStream0Write(cache_type_);
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension up to |offset| needs to
    // be zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  base::Time modification_time = base::Time::Now();
  AdvanceCrc(buf, offset, buf_len, 0);
  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  return buf_len;
}

void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
                                 int offset,
                                 int length,
                                 int stream_index) {
  // It is easy to incrementally compute the CRC for [0 .. |offset + length|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc =
        (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
    if (length > 0) {
      crc32s_[stream_index] = crc32(
          initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
    }
    crc32s_end_offset_[stream_index] = offset + length;
  } else if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }
}

}  // namespace disk_cache