Chromium Code Reviews

Side by Side Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 266243004: Clang format slam. Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 7 months ago
OLD | NEW
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/simple/simple_entry_impl.h" 5 #include "net/disk_cache/simple/simple_entry_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <cstring> 8 #include <cstring>
9 #include <vector> 9 #include <vector>
10 10
(...skipping 51 matching lines...)
62 enum HeaderSizeChange { 62 enum HeaderSizeChange {
63 HEADER_SIZE_CHANGE_INITIAL, 63 HEADER_SIZE_CHANGE_INITIAL,
64 HEADER_SIZE_CHANGE_SAME, 64 HEADER_SIZE_CHANGE_SAME,
65 HEADER_SIZE_CHANGE_INCREASE, 65 HEADER_SIZE_CHANGE_INCREASE,
66 HEADER_SIZE_CHANGE_DECREASE, 66 HEADER_SIZE_CHANGE_DECREASE,
67 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, 67 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
68 HEADER_SIZE_CHANGE_MAX 68 HEADER_SIZE_CHANGE_MAX
69 }; 69 };
70 70
71 void RecordReadResult(net::CacheType cache_type, ReadResult result) { 71 void RecordReadResult(net::CacheType cache_type, ReadResult result) {
72 SIMPLE_CACHE_UMA(ENUMERATION, 72 SIMPLE_CACHE_UMA(
73 "ReadResult", cache_type, result, READ_RESULT_MAX); 73 ENUMERATION, "ReadResult", cache_type, result, READ_RESULT_MAX);
74 } 74 }
75 75
76 void RecordWriteResult(net::CacheType cache_type, WriteResult result) { 76 void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
77 SIMPLE_CACHE_UMA(ENUMERATION, 77 SIMPLE_CACHE_UMA(
78 "WriteResult2", cache_type, result, WRITE_RESULT_MAX); 78 ENUMERATION, "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
79 } 79 }
80 80
81 // TODO(ttuttle): Consider removing this once we have a good handle on header 81 // TODO(ttuttle): Consider removing this once we have a good handle on header
82 // size changes. 82 // size changes.
83 void RecordHeaderSizeChange(net::CacheType cache_type, 83 void RecordHeaderSizeChange(net::CacheType cache_type,
84 int old_size, int new_size) { 84 int old_size,
85 int new_size) {
85 HeaderSizeChange size_change; 86 HeaderSizeChange size_change;
86 87
87 SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size); 88 SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);
88 89
89 if (old_size == 0) { 90 if (old_size == 0) {
90 size_change = HEADER_SIZE_CHANGE_INITIAL; 91 size_change = HEADER_SIZE_CHANGE_INITIAL;
91 } else if (new_size == old_size) { 92 } else if (new_size == old_size) {
92 size_change = HEADER_SIZE_CHANGE_SAME; 93 size_change = HEADER_SIZE_CHANGE_SAME;
93 } else if (new_size > old_size) { 94 } else if (new_size > old_size) {
94 int delta = new_size - old_size; 95 int delta = new_size - old_size;
95 SIMPLE_CACHE_UMA(COUNTS_10000, 96 SIMPLE_CACHE_UMA(
96 "HeaderSizeIncreaseAbsolute", cache_type, delta); 97 COUNTS_10000, "HeaderSizeIncreaseAbsolute", cache_type, delta);
97 SIMPLE_CACHE_UMA(PERCENTAGE, 98 SIMPLE_CACHE_UMA(PERCENTAGE,
98 "HeaderSizeIncreasePercentage", cache_type, 99 "HeaderSizeIncreasePercentage",
100 cache_type,
99 delta * 100 / old_size); 101 delta * 100 / old_size);
100 size_change = HEADER_SIZE_CHANGE_INCREASE; 102 size_change = HEADER_SIZE_CHANGE_INCREASE;
101 } else { // new_size < old_size 103 } else { // new_size < old_size
102 int delta = old_size - new_size; 104 int delta = old_size - new_size;
103 SIMPLE_CACHE_UMA(COUNTS_10000, 105 SIMPLE_CACHE_UMA(
104 "HeaderSizeDecreaseAbsolute", cache_type, delta); 106 COUNTS_10000, "HeaderSizeDecreaseAbsolute", cache_type, delta);
105 SIMPLE_CACHE_UMA(PERCENTAGE, 107 SIMPLE_CACHE_UMA(PERCENTAGE,
106 "HeaderSizeDecreasePercentage", cache_type, 108 "HeaderSizeDecreasePercentage",
109 cache_type,
107 delta * 100 / old_size); 110 delta * 100 / old_size);
108 size_change = HEADER_SIZE_CHANGE_DECREASE; 111 size_change = HEADER_SIZE_CHANGE_DECREASE;
109 } 112 }
110 113
111 SIMPLE_CACHE_UMA(ENUMERATION, 114 SIMPLE_CACHE_UMA(ENUMERATION,
112 "HeaderSizeChange", cache_type, 115 "HeaderSizeChange",
113 size_change, HEADER_SIZE_CHANGE_MAX); 116 cache_type,
117 size_change,
118 HEADER_SIZE_CHANGE_MAX);
114 } 119 }
115 120
116 void RecordUnexpectedStream0Write(net::CacheType cache_type) { 121 void RecordUnexpectedStream0Write(net::CacheType cache_type) {
117 SIMPLE_CACHE_UMA(ENUMERATION, 122 SIMPLE_CACHE_UMA(ENUMERATION,
118 "HeaderSizeChange", cache_type, 123 "HeaderSizeChange",
119 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX); 124 cache_type,
125 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
126 HEADER_SIZE_CHANGE_MAX);
120 } 127 }
121 128
122 int g_open_entry_count = 0; 129 int g_open_entry_count = 0;
123 130
124 void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) { 131 void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
125 g_open_entry_count += offset; 132 g_open_entry_count += offset;
126 SIMPLE_CACHE_UMA(COUNTS_10000, 133 SIMPLE_CACHE_UMA(
127 "GlobalOpenEntryCount", cache_type, g_open_entry_count); 134 COUNTS_10000, "GlobalOpenEntryCount", cache_type, g_open_entry_count);
128 } 135 }
129 136
130 void InvokeCallbackIfBackendIsAlive( 137 void InvokeCallbackIfBackendIsAlive(
131 const base::WeakPtr<SimpleBackendImpl>& backend, 138 const base::WeakPtr<SimpleBackendImpl>& backend,
132 const net::CompletionCallback& completion_callback, 139 const net::CompletionCallback& completion_callback,
133 int result) { 140 int result) {
134 DCHECK(!completion_callback.is_null()); 141 DCHECK(!completion_callback.is_null());
135 if (!backend.get()) 142 if (!backend.get())
136 return; 143 return;
137 completion_callback.Run(result); 144 completion_callback.Run(result);
138 } 145 }
139 146
140 } // namespace 147 } // namespace
141 148
142 using base::Closure; 149 using base::Closure;
143 using base::FilePath; 150 using base::FilePath;
144 using base::MessageLoopProxy; 151 using base::MessageLoopProxy;
145 using base::Time; 152 using base::Time;
146 using base::TaskRunner; 153 using base::TaskRunner;
147 154
148 // A helper class to ensure that RunNextOperationIfNeeded() is called when 155 // A helper class to ensure that RunNextOperationIfNeeded() is called when
149 // exiting the current stack frame. 156 // exiting the current stack frame.
150 class SimpleEntryImpl::ScopedOperationRunner { 157 class SimpleEntryImpl::ScopedOperationRunner {
151 public: 158 public:
152 explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) { 159 explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {}
153 }
154 160
155 ~ScopedOperationRunner() { 161 ~ScopedOperationRunner() { entry_->RunNextOperationIfNeeded(); }
156 entry_->RunNextOperationIfNeeded();
157 }
158 162
159 private: 163 private:
160 SimpleEntryImpl* const entry_; 164 SimpleEntryImpl* const entry_;
161 }; 165 };
162 166
163 SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type, 167 SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
164 const FilePath& path, 168 const FilePath& path,
165 const uint64 entry_hash, 169 const uint64 entry_hash,
166 OperationsMode operations_mode, 170 OperationsMode operations_mode,
167 SimpleBackendImpl* backend, 171 SimpleBackendImpl* backend,
168 net::NetLog* net_log) 172 net::NetLog* net_log)
169 : backend_(backend->AsWeakPtr()), 173 : backend_(backend->AsWeakPtr()),
170 cache_type_(cache_type), 174 cache_type_(cache_type),
171 worker_pool_(backend->worker_pool()), 175 worker_pool_(backend->worker_pool()),
172 path_(path), 176 path_(path),
173 entry_hash_(entry_hash), 177 entry_hash_(entry_hash),
174 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS), 178 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
175 last_used_(Time::Now()), 179 last_used_(Time::Now()),
176 last_modified_(last_used_), 180 last_modified_(last_used_),
177 sparse_data_size_(0), 181 sparse_data_size_(0),
178 open_count_(0), 182 open_count_(0),
179 doomed_(false), 183 doomed_(false),
180 state_(STATE_UNINITIALIZED), 184 state_(STATE_UNINITIALIZED),
181 synchronous_entry_(NULL), 185 synchronous_entry_(NULL),
182 net_log_(net::BoundNetLog::Make( 186 net_log_(net::BoundNetLog::Make(net_log,
183 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)), 187 net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
184 stream_0_data_(new net::GrowableIOBuffer()) { 188 stream_0_data_(new net::GrowableIOBuffer()) {
185 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), 189 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
186 arrays_should_be_same_size); 190 arrays_should_be_same_size);
187 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), 191 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
188 arrays_should_be_same_size); 192 arrays_should_be_same_size);
189 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), 193 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
190 arrays_should_be_same_size); 194 arrays_should_be_same_size);
191 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), 195 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
192 arrays_should_be_same_size); 196 arrays_should_be_same_size);
193 MakeUninitialized(); 197 MakeUninitialized();
194 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, 198 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
195 CreateNetLogSimpleEntryConstructionCallback(this)); 199 CreateNetLogSimpleEntryConstructionCallback(this));
196 } 200 }
197 201
198 int SimpleEntryImpl::OpenEntry(Entry** out_entry, 202 int SimpleEntryImpl::OpenEntry(Entry** out_entry,
199 const CompletionCallback& callback) { 203 const CompletionCallback& callback) {
200 DCHECK(backend_.get()); 204 DCHECK(backend_.get());
201 205
202 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL); 206 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);
203 207
204 bool have_index = backend_->index()->initialized(); 208 bool have_index = backend_->index()->initialized();
205 // This enumeration is used in histograms, add entries only at end. 209 // This enumeration is used in histograms, add entries only at end.
206 enum OpenEntryIndexEnum { 210 enum OpenEntryIndexEnum {
207 INDEX_NOEXIST = 0, 211 INDEX_NOEXIST = 0,
208 INDEX_MISS = 1, 212 INDEX_MISS = 1,
209 INDEX_HIT = 2, 213 INDEX_HIT = 2,
210 INDEX_MAX = 3, 214 INDEX_MAX = 3,
211 }; 215 };
212 OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST; 216 OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
213 if (have_index) { 217 if (have_index) {
214 if (backend_->index()->Has(entry_hash_)) 218 if (backend_->index()->Has(entry_hash_))
215 open_entry_index_enum = INDEX_HIT; 219 open_entry_index_enum = INDEX_HIT;
216 else 220 else
217 open_entry_index_enum = INDEX_MISS; 221 open_entry_index_enum = INDEX_MISS;
218 } 222 }
219 SIMPLE_CACHE_UMA(ENUMERATION, 223 SIMPLE_CACHE_UMA(ENUMERATION,
220 "OpenEntryIndexState", cache_type_, 224 "OpenEntryIndexState",
221 open_entry_index_enum, INDEX_MAX); 225 cache_type_,
226 open_entry_index_enum,
227 INDEX_MAX);
222 228
223 // If entry is not known to the index, initiate fast failover to the network. 229 // If entry is not known to the index, initiate fast failover to the network.
224 if (open_entry_index_enum == INDEX_MISS) { 230 if (open_entry_index_enum == INDEX_MISS) {
225 net_log_.AddEventWithNetErrorCode( 231 net_log_.AddEventWithNetErrorCode(
226 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END, 232 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
227 net::ERR_FAILED);
228 return net::ERR_FAILED; 233 return net::ERR_FAILED;
229 } 234 }
230 235
231 pending_operations_.push(SimpleEntryOperation::OpenOperation( 236 pending_operations_.push(SimpleEntryOperation::OpenOperation(
232 this, have_index, callback, out_entry)); 237 this, have_index, callback, out_entry));
233 RunNextOperationIfNeeded(); 238 RunNextOperationIfNeeded();
234 return net::ERR_IO_PENDING; 239 return net::ERR_IO_PENDING;
235 } 240 }
236 241
237 int SimpleEntryImpl::CreateEntry(Entry** out_entry, 242 int SimpleEntryImpl::CreateEntry(Entry** out_entry,
238 const CompletionCallback& callback) { 243 const CompletionCallback& callback) {
239 DCHECK(backend_.get()); 244 DCHECK(backend_.get());
240 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_)); 245 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
241 246
242 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL); 247 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);
243 248
244 bool have_index = backend_->index()->initialized(); 249 bool have_index = backend_->index()->initialized();
245 int ret_value = net::ERR_FAILED; 250 int ret_value = net::ERR_FAILED;
246 if (use_optimistic_operations_ && 251 if (use_optimistic_operations_ && state_ == STATE_UNINITIALIZED &&
247 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) { 252 pending_operations_.size() == 0) {
248 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC); 253 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
249 254
250 ReturnEntryToCaller(out_entry); 255 ReturnEntryToCaller(out_entry);
251 pending_operations_.push(SimpleEntryOperation::CreateOperation( 256 pending_operations_.push(SimpleEntryOperation::CreateOperation(
252 this, have_index, CompletionCallback(), static_cast<Entry**>(NULL))); 257 this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
253 ret_value = net::OK; 258 ret_value = net::OK;
254 } else { 259 } else {
255 pending_operations_.push(SimpleEntryOperation::CreateOperation( 260 pending_operations_.push(SimpleEntryOperation::CreateOperation(
256 this, have_index, callback, out_entry)); 261 this, have_index, callback, out_entry));
257 ret_value = net::ERR_IO_PENDING; 262 ret_value = net::ERR_IO_PENDING;
(...skipping 20 matching lines...)
278 if (backend_.get()) 283 if (backend_.get())
279 backend_->OnDoomStart(entry_hash_); 284 backend_->OnDoomStart(entry_hash_);
280 pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback)); 285 pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
281 RunNextOperationIfNeeded(); 286 RunNextOperationIfNeeded();
282 return net::ERR_IO_PENDING; 287 return net::ERR_IO_PENDING;
283 } 288 }
284 289
285 void SimpleEntryImpl::SetKey(const std::string& key) { 290 void SimpleEntryImpl::SetKey(const std::string& key) {
286 key_ = key; 291 key_ = key;
287 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY, 292 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
288 net::NetLog::StringCallback("key", &key)); 293 net::NetLog::StringCallback("key", &key));
289 } 294 }
290 295
291 void SimpleEntryImpl::Doom() { 296 void SimpleEntryImpl::Doom() {
292 DoomEntry(CompletionCallback()); 297 DoomEntry(CompletionCallback());
293 } 298 }
294 299
295 void SimpleEntryImpl::Close() { 300 void SimpleEntryImpl::Close() {
296 DCHECK(io_thread_checker_.CalledOnValidThread()); 301 DCHECK(io_thread_checker_.CalledOnValidThread());
297 DCHECK_LT(0, open_count_); 302 DCHECK_LT(0, open_count_);
298 303
(...skipping 34 matching lines...)
333 338
334 int SimpleEntryImpl::ReadData(int stream_index, 339 int SimpleEntryImpl::ReadData(int stream_index,
335 int offset, 340 int offset,
336 net::IOBuffer* buf, 341 net::IOBuffer* buf,
337 int buf_len, 342 int buf_len,
338 const CompletionCallback& callback) { 343 const CompletionCallback& callback) {
339 DCHECK(io_thread_checker_.CalledOnValidThread()); 344 DCHECK(io_thread_checker_.CalledOnValidThread());
340 345
341 if (net_log_.IsLogging()) { 346 if (net_log_.IsLogging()) {
342 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, 347 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
343 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 348 CreateNetLogReadWriteDataCallback(
344 false)); 349 stream_index, offset, buf_len, false));
345 } 350 }
346 351
347 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || 352 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
348 buf_len < 0) { 353 buf_len < 0) {
349 if (net_log_.IsLogging()) { 354 if (net_log_.IsLogging()) {
350 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 355 net_log_.AddEvent(
356 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
351 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); 357 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
352 } 358 }
353 359
354 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); 360 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
355 return net::ERR_INVALID_ARGUMENT; 361 return net::ERR_INVALID_ARGUMENT;
356 } 362 }
357 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || 363 if (pending_operations_.empty() &&
358 offset < 0 || !buf_len)) { 364 (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len)) {
359 if (net_log_.IsLogging()) { 365 if (net_log_.IsLogging()) {
360 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 366 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
361 CreateNetLogReadWriteCompleteCallback(0)); 367 CreateNetLogReadWriteCompleteCallback(0));
362 } 368 }
363 369
364 RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN); 370 RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
365 return 0; 371 return 0;
366 } 372 }
367 373
 368 // TODO(clamy): return immediately when reading from stream 0. 374 // TODO(clamy): return immediately when reading from stream 0.
369 375
370 // TODO(felipeg): Optimization: Add support for truly parallel read 376 // TODO(felipeg): Optimization: Add support for truly parallel read
371 // operations. 377 // operations.
372 bool alone_in_queue = 378 bool alone_in_queue =
373 pending_operations_.size() == 0 && state_ == STATE_READY; 379 pending_operations_.size() == 0 && state_ == STATE_READY;
374 pending_operations_.push(SimpleEntryOperation::ReadOperation( 380 pending_operations_.push(SimpleEntryOperation::ReadOperation(
375 this, stream_index, offset, buf_len, buf, callback, alone_in_queue)); 381 this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
376 RunNextOperationIfNeeded(); 382 RunNextOperationIfNeeded();
377 return net::ERR_IO_PENDING; 383 return net::ERR_IO_PENDING;
378 } 384 }
379 385
380 int SimpleEntryImpl::WriteData(int stream_index, 386 int SimpleEntryImpl::WriteData(int stream_index,
381 int offset, 387 int offset,
382 net::IOBuffer* buf, 388 net::IOBuffer* buf,
383 int buf_len, 389 int buf_len,
384 const CompletionCallback& callback, 390 const CompletionCallback& callback,
385 bool truncate) { 391 bool truncate) {
386 DCHECK(io_thread_checker_.CalledOnValidThread()); 392 DCHECK(io_thread_checker_.CalledOnValidThread());
387 393
388 if (net_log_.IsLogging()) { 394 if (net_log_.IsLogging()) {
389 net_log_.AddEvent( 395 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, 396 CreateNetLogReadWriteDataCallback(
391 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 397 stream_index, offset, buf_len, truncate));
392 truncate));
393 } 398 }
394 399
395 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || 400 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
396 offset < 0 || buf_len < 0) { 401 offset < 0 || buf_len < 0) {
397 if (net_log_.IsLogging()) { 402 if (net_log_.IsLogging()) {
398 net_log_.AddEvent( 403 net_log_.AddEvent(
399 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 404 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
400 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); 405 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
401 } 406 }
402 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); 407 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
403 return net::ERR_INVALID_ARGUMENT; 408 return net::ERR_INVALID_ARGUMENT;
404 } 409 }
405 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { 410 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
406 if (net_log_.IsLogging()) { 411 if (net_log_.IsLogging()) {
407 net_log_.AddEvent( 412 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
408 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 413 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
409 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
410 } 414 }
411 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); 415 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
412 return net::ERR_FAILED; 416 return net::ERR_FAILED;
413 } 417 }
414 ScopedOperationRunner operation_runner(this); 418 ScopedOperationRunner operation_runner(this);
415 419
 416 // Stream 0 data is kept in memory, so can be written immediately if there are 420 // Stream 0 data is kept in memory, so can be written immediately if there are
417 // no IO operations pending. 421 // no IO operations pending.
418 if (stream_index == 0 && state_ == STATE_READY && 422 if (stream_index == 0 && state_ == STATE_READY &&
419 pending_operations_.size() == 0) 423 pending_operations_.size() == 0)
(...skipping 19 matching lines...)
439 // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer 443 // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
440 // here to avoid paying the price of the RefCountedThreadSafe atomic 444 // here to avoid paying the price of the RefCountedThreadSafe atomic
441 // operations. 445 // operations.
442 if (buf) { 446 if (buf) {
443 op_buf = new IOBuffer(buf_len); 447 op_buf = new IOBuffer(buf_len);
444 memcpy(op_buf->data(), buf->data(), buf_len); 448 memcpy(op_buf->data(), buf->data(), buf_len);
445 } 449 }
446 op_callback = CompletionCallback(); 450 op_callback = CompletionCallback();
447 ret_value = buf_len; 451 ret_value = buf_len;
448 if (net_log_.IsLogging()) { 452 if (net_log_.IsLogging()) {
449 net_log_.AddEvent( 453 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
450 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC, 454 CreateNetLogReadWriteCompleteCallback(buf_len));
451 CreateNetLogReadWriteCompleteCallback(buf_len));
452 } 455 }
453 } 456 }
454 457
455 pending_operations_.push(SimpleEntryOperation::WriteOperation(this, 458 pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
456 stream_index, 459 stream_index,
457 offset, 460 offset,
458 buf_len, 461 buf_len,
459 op_buf.get(), 462 op_buf.get(),
460 truncate, 463 truncate,
461 optimistic, 464 optimistic,
(...skipping 115 matching lines...)
577 doomed_ = true; 580 doomed_ = true;
578 if (!backend_.get()) 581 if (!backend_.get())
579 return; 582 return;
580 backend_->index()->Remove(entry_hash_); 583 backend_->index()->Remove(entry_hash_);
581 RemoveSelfFromBackend(); 584 RemoveSelfFromBackend();
582 } 585 }
583 586
584 void SimpleEntryImpl::RunNextOperationIfNeeded() { 587 void SimpleEntryImpl::RunNextOperationIfNeeded() {
585 DCHECK(io_thread_checker_.CalledOnValidThread()); 588 DCHECK(io_thread_checker_.CalledOnValidThread());
586 SIMPLE_CACHE_UMA(CUSTOM_COUNTS, 589 SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
587 "EntryOperationsPending", cache_type_, 590 "EntryOperationsPending",
588 pending_operations_.size(), 0, 100, 20); 591 cache_type_,
592 pending_operations_.size(),
593 0,
594 100,
595 20);
589 if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) { 596 if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
590 scoped_ptr<SimpleEntryOperation> operation( 597 scoped_ptr<SimpleEntryOperation> operation(
591 new SimpleEntryOperation(pending_operations_.front())); 598 new SimpleEntryOperation(pending_operations_.front()));
592 pending_operations_.pop(); 599 pending_operations_.pop();
593 switch (operation->type()) { 600 switch (operation->type()) {
594 case SimpleEntryOperation::TYPE_OPEN: 601 case SimpleEntryOperation::TYPE_OPEN:
595 OpenEntryInternal(operation->have_index(), 602 OpenEntryInternal(operation->have_index(),
596 operation->callback(), 603 operation->callback(),
597 operation->out_entry()); 604 operation->out_entry());
598 break; 605 break;
(...skipping 57 matching lines...)
656 void SimpleEntryImpl::OpenEntryInternal(bool have_index, 663 void SimpleEntryImpl::OpenEntryInternal(bool have_index,
657 const CompletionCallback& callback, 664 const CompletionCallback& callback,
658 Entry** out_entry) { 665 Entry** out_entry) {
659 ScopedOperationRunner operation_runner(this); 666 ScopedOperationRunner operation_runner(this);
660 667
661 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN); 668 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
662 669
663 if (state_ == STATE_READY) { 670 if (state_ == STATE_READY) {
664 ReturnEntryToCaller(out_entry); 671 ReturnEntryToCaller(out_entry);
665 PostClientCallback(callback, net::OK); 672 PostClientCallback(callback, net::OK);
666 net_log_.AddEvent( 673 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
667 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END, 674 CreateNetLogSimpleEntryCreationCallback(this, net::OK));
668 CreateNetLogSimpleEntryCreationCallback(this, net::OK));
669 return; 675 return;
670 } 676 }
671 if (state_ == STATE_FAILURE) { 677 if (state_ == STATE_FAILURE) {
672 PostClientCallback(callback, net::ERR_FAILED); 678 PostClientCallback(callback, net::ERR_FAILED);
673 net_log_.AddEvent( 679 net_log_.AddEvent(
674 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END, 680 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
675 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED)); 681 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
676 return; 682 return;
677 } 683 }
678 684
679 DCHECK_EQ(STATE_UNINITIALIZED, state_); 685 DCHECK_EQ(STATE_UNINITIALIZED, state_);
680 DCHECK(!synchronous_entry_); 686 DCHECK(!synchronous_entry_);
681 state_ = STATE_IO_PENDING; 687 state_ = STATE_IO_PENDING;
682 const base::TimeTicks start_time = base::TimeTicks::Now(); 688 const base::TimeTicks start_time = base::TimeTicks::Now();
683 scoped_ptr<SimpleEntryCreationResults> results( 689 scoped_ptr<SimpleEntryCreationResults> results(
684 new SimpleEntryCreationResults( 690 new SimpleEntryCreationResults(SimpleEntryStat(
685 SimpleEntryStat(last_used_, last_modified_, data_size_, 691 last_used_, last_modified_, data_size_, sparse_data_size_)));
686 sparse_data_size_)));
687 Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, 692 Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
688 cache_type_, 693 cache_type_,
689 path_, 694 path_,
690 entry_hash_, 695 entry_hash_,
691 have_index, 696 have_index,
692 results.get()); 697 results.get());
693 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, 698 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
694 this, 699 this,
695 callback, 700 callback,
696 start_time, 701 start_time,
(...skipping 26 matching lines...)
723 // Since we don't know the correct values for |last_used_| and 728 // Since we don't know the correct values for |last_used_| and
724 // |last_modified_| yet, we make this approximation. 729 // |last_modified_| yet, we make this approximation.
725 last_used_ = last_modified_ = base::Time::Now(); 730 last_used_ = last_modified_ = base::Time::Now();
726 731
727 // If creation succeeds, we should mark all streams to be saved on close. 732 // If creation succeeds, we should mark all streams to be saved on close.
728 for (int i = 0; i < kSimpleEntryStreamCount; ++i) 733 for (int i = 0; i < kSimpleEntryStreamCount; ++i)
729 have_written_[i] = true; 734 have_written_[i] = true;
730 735
731 const base::TimeTicks start_time = base::TimeTicks::Now(); 736 const base::TimeTicks start_time = base::TimeTicks::Now();
732 scoped_ptr<SimpleEntryCreationResults> results( 737 scoped_ptr<SimpleEntryCreationResults> results(
733 new SimpleEntryCreationResults( 738 new SimpleEntryCreationResults(SimpleEntryStat(
734 SimpleEntryStat(last_used_, last_modified_, data_size_, 739 last_used_, last_modified_, data_size_, sparse_data_size_)));
735 sparse_data_size_)));
736 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, 740 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
737 cache_type_, 741 cache_type_,
738 path_, 742 path_,
739 key_, 743 key_,
740 entry_hash_, 744 entry_hash_,
741 have_index, 745 have_index,
742 results.get()); 746 results.get());
743 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, 747 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
744 this, 748 this,
745 callback, 749 callback,
746 start_time, 750 start_time,
747 base::Passed(&results), 751 base::Passed(&results),
748 out_entry, 752 out_entry,
749 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END); 753 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
750 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 754 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
751 } 755 }
752 756
753 void SimpleEntryImpl::CloseInternal() { 757 void SimpleEntryImpl::CloseInternal() {
754 DCHECK(io_thread_checker_.CalledOnValidThread()); 758 DCHECK(io_thread_checker_.CalledOnValidThread());
755 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; 759 typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
756 scoped_ptr<std::vector<CRCRecord> > 760 scoped_ptr<std::vector<CRCRecord> > crc32s_to_write(
757 crc32s_to_write(new std::vector<CRCRecord>()); 761 new std::vector<CRCRecord>());
758 762
759 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); 763 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
760 764
761 if (state_ == STATE_READY) { 765 if (state_ == STATE_READY) {
762 DCHECK(synchronous_entry_); 766 DCHECK(synchronous_entry_);
763 state_ = STATE_IO_PENDING; 767 state_ = STATE_IO_PENDING;
764 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { 768 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
765 if (have_written_[i]) { 769 if (have_written_[i]) {
766 if (GetDataSize(i) == crc32s_end_offset_[i]) { 770 if (GetDataSize(i) == crc32s_end_offset_[i]) {
767 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; 771 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
768 crc32s_to_write->push_back(CRCRecord(i, true, crc)); 772 crc32s_to_write->push_back(CRCRecord(i, true, crc));
769 } else { 773 } else {
770 crc32s_to_write->push_back(CRCRecord(i, false, 0)); 774 crc32s_to_write->push_back(CRCRecord(i, false, 0));
771 } 775 }
772 } 776 }
773 } 777 }
774 } else { 778 } else {
775 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); 779 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
776 } 780 }
777 781
778 if (synchronous_entry_) { 782 if (synchronous_entry_) {
779 Closure task = 783 Closure task = base::Bind(
780 base::Bind(&SimpleSynchronousEntry::Close, 784 &SimpleSynchronousEntry::Close,
781 base::Unretained(synchronous_entry_), 785 base::Unretained(synchronous_entry_),
782 SimpleEntryStat(last_used_, last_modified_, data_size_, 786 SimpleEntryStat(
783 sparse_data_size_), 787 last_used_, last_modified_, data_size_, sparse_data_size_),
784 base::Passed(&crc32s_to_write), 788 base::Passed(&crc32s_to_write),
785 stream_0_data_); 789 stream_0_data_);
786 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); 790 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
787 synchronous_entry_ = NULL; 791 synchronous_entry_ = NULL;
788 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 792 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
789 793
790 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { 794 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
791 if (!have_written_[i]) { 795 if (!have_written_[i]) {
792 SIMPLE_CACHE_UMA(ENUMERATION, 796 SIMPLE_CACHE_UMA(ENUMERATION,
793 "CheckCRCResult", cache_type_, 797 "CheckCRCResult",
794 crc_check_state_[i], CRC_CHECK_MAX); 798 cache_type_,
799 crc_check_state_[i],
800 CRC_CHECK_MAX);
795 } 801 }
796 } 802 }
797 } else { 803 } else {
798 CloseOperationComplete(); 804 CloseOperationComplete();
799 } 805 }
800 } 806 }
801 807
802 void SimpleEntryImpl::ReadDataInternal(int stream_index, 808 void SimpleEntryImpl::ReadDataInternal(int stream_index,
803 int offset, 809 int offset,
804 net::IOBuffer* buf, 810 net::IOBuffer* buf,
805 int buf_len, 811 int buf_len,
806 const CompletionCallback& callback) { 812 const CompletionCallback& callback) {
807 DCHECK(io_thread_checker_.CalledOnValidThread()); 813 DCHECK(io_thread_checker_.CalledOnValidThread());
808 ScopedOperationRunner operation_runner(this); 814 ScopedOperationRunner operation_runner(this);
809 815
810 if (net_log_.IsLogging()) { 816 if (net_log_.IsLogging()) {
811 net_log_.AddEvent( 817 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
812 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN, 818 CreateNetLogReadWriteDataCallback(
813 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 819 stream_index, offset, buf_len, false));
814 false));
815 } 820 }
816 821
817 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) { 822 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
818 if (!callback.is_null()) { 823 if (!callback.is_null()) {
819 RecordReadResult(cache_type_, READ_RESULT_BAD_STATE); 824 RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
820 // Note that the API states that client-provided callbacks for entry-level 825 // Note that the API states that client-provided callbacks for entry-level
821 // (i.e. non-backend) operations (e.g. read, write) are invoked even if 826 // (i.e. non-backend) operations (e.g. read, write) are invoked even if
822 // the backend was already destroyed. 827 // the backend was already destroyed.
823 MessageLoopProxy::current()->PostTask( 828 MessageLoopProxy::current()->PostTask(
824 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); 829 FROM_HERE, base::Bind(callback, net::ERR_FAILED));
825 } 830 }
826 if (net_log_.IsLogging()) { 831 if (net_log_.IsLogging()) {
827 net_log_.AddEvent( 832 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
828 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 833 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
829 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
830 } 834 }
831 return; 835 return;
832 } 836 }
833 DCHECK_EQ(STATE_READY, state_); 837 DCHECK_EQ(STATE_READY, state_);
834 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) { 838 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
835 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); 839 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
836 // If there is nothing to read, we bail out before setting state_ to 840 // If there is nothing to read, we bail out before setting state_ to
837 // STATE_IO_PENDING. 841 // STATE_IO_PENDING.
838 if (!callback.is_null()) 842 if (!callback.is_null())
839 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); 843 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
(...skipping 11 matching lines...)
851 } 855 }
852 return; 856 return;
853 } 857 }
854 858
855 state_ = STATE_IO_PENDING; 859 state_ = STATE_IO_PENDING;
856 if (!doomed_ && backend_.get()) 860 if (!doomed_ && backend_.get())
857 backend_->index()->UseIfExists(entry_hash_); 861 backend_->index()->UseIfExists(entry_hash_);
858 862
859 scoped_ptr<uint32> read_crc32(new uint32()); 863 scoped_ptr<uint32> read_crc32(new uint32());
860 scoped_ptr<int> result(new int()); 864 scoped_ptr<int> result(new int());
861 scoped_ptr<SimpleEntryStat> entry_stat( 865 scoped_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
862 new SimpleEntryStat(last_used_, last_modified_, data_size_, 866 last_used_, last_modified_, data_size_, sparse_data_size_));
863 sparse_data_size_));
864 Closure task = base::Bind( 867 Closure task = base::Bind(
865 &SimpleSynchronousEntry::ReadData, 868 &SimpleSynchronousEntry::ReadData,
866 base::Unretained(synchronous_entry_), 869 base::Unretained(synchronous_entry_),
867 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), 870 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
868 make_scoped_refptr(buf), 871 make_scoped_refptr(buf),
869 read_crc32.get(), 872 read_crc32.get(),
870 entry_stat.get(), 873 entry_stat.get(),
871 result.get()); 874 result.get());
872 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, 875 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
873 this, 876 this,
874 stream_index, 877 stream_index,
875 offset, 878 offset,
876 callback, 879 callback,
877 base::Passed(&read_crc32), 880 base::Passed(&read_crc32),
878 base::Passed(&entry_stat), 881 base::Passed(&entry_stat),
879 base::Passed(&result)); 882 base::Passed(&result));
880 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 883 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
881 } 884 }
882 885
883 void SimpleEntryImpl::WriteDataInternal(int stream_index, 886 void SimpleEntryImpl::WriteDataInternal(int stream_index,
884 int offset, 887 int offset,
885 net::IOBuffer* buf, 888 net::IOBuffer* buf,
886 int buf_len, 889 int buf_len,
887 const CompletionCallback& callback, 890 const CompletionCallback& callback,
888 bool truncate) { 891 bool truncate) {
889 DCHECK(io_thread_checker_.CalledOnValidThread()); 892 DCHECK(io_thread_checker_.CalledOnValidThread());
890 ScopedOperationRunner operation_runner(this); 893 ScopedOperationRunner operation_runner(this);
891 894
892 if (net_log_.IsLogging()) { 895 if (net_log_.IsLogging()) {
893 net_log_.AddEvent( 896 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
894 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN, 897 CreateNetLogReadWriteDataCallback(
895 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 898 stream_index, offset, buf_len, truncate));
896 truncate));
897 } 899 }
898 900
899 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) { 901 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
900 RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE); 902 RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
901 if (net_log_.IsLogging()) { 903 if (net_log_.IsLogging()) {
902 net_log_.AddEvent( 904 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
903 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 905 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
904 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
905 } 906 }
906 if (!callback.is_null()) { 907 if (!callback.is_null()) {
907 MessageLoopProxy::current()->PostTask( 908 MessageLoopProxy::current()->PostTask(
908 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); 909 FROM_HERE, base::Bind(callback, net::ERR_FAILED));
909 } 910 }
910 // |this| may be destroyed after return here. 911 // |this| may be destroyed after return here.
911 return; 912 return;
912 } 913 }
913 914
914 DCHECK_EQ(STATE_READY, state_); 915 DCHECK_EQ(STATE_READY, state_);
915 916
 916 // Since stream 0 data is kept in memory, it will be written immediately. 917 // Since stream 0 data is kept in memory, it will be written immediately.
917 if (stream_index == 0) { 918 if (stream_index == 0) {
918 int ret_value = SetStream0Data(buf, offset, buf_len, truncate); 919 int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
919 if (!callback.is_null()) { 920 if (!callback.is_null()) {
920 MessageLoopProxy::current()->PostTask(FROM_HERE, 921 MessageLoopProxy::current()->PostTask(FROM_HERE,
921 base::Bind(callback, ret_value)); 922 base::Bind(callback, ret_value));
922 } 923 }
923 return; 924 return;
924 } 925 }
925 926
926 // Ignore zero-length writes that do not change the file size. 927 // Ignore zero-length writes that do not change the file size.
927 if (buf_len == 0) { 928 if (buf_len == 0) {
928 int32 data_size = data_size_[stream_index]; 929 int32 data_size = data_size_[stream_index];
929 if (truncate ? (offset == data_size) : (offset <= data_size)) { 930 if (truncate ? (offset == data_size) : (offset <= data_size)) {
930 RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN); 931 RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
931 if (!callback.is_null()) { 932 if (!callback.is_null()) {
932 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( 933 MessageLoopProxy::current()->PostTask(FROM_HERE,
933 callback, 0)); 934 base::Bind(callback, 0));
934 } 935 }
935 return; 936 return;
936 } 937 }
937 } 938 }
938 state_ = STATE_IO_PENDING; 939 state_ = STATE_IO_PENDING;
939 if (!doomed_ && backend_.get()) 940 if (!doomed_ && backend_.get())
940 backend_->index()->UseIfExists(entry_hash_); 941 backend_->index()->UseIfExists(entry_hash_);
941 942
942 AdvanceCrc(buf, offset, buf_len, stream_index); 943 AdvanceCrc(buf, offset, buf_len, stream_index);
943 944
944 // |entry_stat| needs to be initialized before modifying |data_size_|. 945 // |entry_stat| needs to be initialized before modifying |data_size_|.
945 scoped_ptr<SimpleEntryStat> entry_stat( 946 scoped_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
946 new SimpleEntryStat(last_used_, last_modified_, data_size_, 947 last_used_, last_modified_, data_size_, sparse_data_size_));
947 sparse_data_size_));
948 if (truncate) { 948 if (truncate) {
949 data_size_[stream_index] = offset + buf_len; 949 data_size_[stream_index] = offset + buf_len;
950 } else { 950 } else {
951 data_size_[stream_index] = std::max(offset + buf_len, 951 data_size_[stream_index] =
952 GetDataSize(stream_index)); 952 std::max(offset + buf_len, GetDataSize(stream_index));
953 } 953 }
954 954
955 // Since we don't know the correct values for |last_used_| and 955 // Since we don't know the correct values for |last_used_| and
956 // |last_modified_| yet, we make this approximation. 956 // |last_modified_| yet, we make this approximation.
957 last_used_ = last_modified_ = base::Time::Now(); 957 last_used_ = last_modified_ = base::Time::Now();
958 958
959 have_written_[stream_index] = true; 959 have_written_[stream_index] = true;
960 // Writing on stream 1 affects the placement of stream 0 in the file, the EOF 960 // Writing on stream 1 affects the placement of stream 0 in the file, the EOF
961 // record will have to be rewritten. 961 // record will have to be rewritten.
962 if (stream_index == 1) 962 if (stream_index == 1)
963 have_written_[0] = true; 963 have_written_[0] = true;
964 964
965 scoped_ptr<int> result(new int()); 965 scoped_ptr<int> result(new int());
966 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, 966 Closure task =
967 base::Unretained(synchronous_entry_), 967 base::Bind(&SimpleSynchronousEntry::WriteData,
968 SimpleSynchronousEntry::EntryOperationData( 968 base::Unretained(synchronous_entry_),
969 stream_index, offset, buf_len, truncate, 969 SimpleSynchronousEntry::EntryOperationData(
970 doomed_), 970 stream_index, offset, buf_len, truncate, doomed_),
971 make_scoped_refptr(buf), 971 make_scoped_refptr(buf),
972 entry_stat.get(), 972 entry_stat.get(),
973 result.get()); 973 result.get());
974 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, 974 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
975 this, 975 this,
976 stream_index, 976 stream_index,
977 callback, 977 callback,
978 base::Passed(&entry_stat), 978 base::Passed(&entry_stat),
979 base::Passed(&result)); 979 base::Passed(&result));
980 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 980 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
981 } 981 }
982 982
983 void SimpleEntryImpl::ReadSparseDataInternal( 983 void SimpleEntryImpl::ReadSparseDataInternal(
984 int64 sparse_offset, 984 int64 sparse_offset,
985 net::IOBuffer* buf, 985 net::IOBuffer* buf,
986 int buf_len, 986 int buf_len,
987 const CompletionCallback& callback) { 987 const CompletionCallback& callback) {
988 DCHECK(io_thread_checker_.CalledOnValidThread()); 988 DCHECK(io_thread_checker_.CalledOnValidThread());
989 ScopedOperationRunner operation_runner(this); 989 ScopedOperationRunner operation_runner(this);
990 990
991 DCHECK_EQ(STATE_READY, state_); 991 DCHECK_EQ(STATE_READY, state_);
992 state_ = STATE_IO_PENDING; 992 state_ = STATE_IO_PENDING;
993 993
994 scoped_ptr<int> result(new int()); 994 scoped_ptr<int> result(new int());
995 scoped_ptr<base::Time> last_used(new base::Time()); 995 scoped_ptr<base::Time> last_used(new base::Time());
996 Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData, 996 Closure task = base::Bind(
997 base::Unretained(synchronous_entry_), 997 &SimpleSynchronousEntry::ReadSparseData,
998 SimpleSynchronousEntry::EntryOperationData( 998 base::Unretained(synchronous_entry_),
999 sparse_offset, buf_len), 999 SimpleSynchronousEntry::EntryOperationData(sparse_offset, buf_len),
1000 make_scoped_refptr(buf), 1000 make_scoped_refptr(buf),
1001 last_used.get(), 1001 last_used.get(),
1002 result.get()); 1002 result.get());
1003 Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete, 1003 Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
1004 this, 1004 this,
1005 callback, 1005 callback,
1006 base::Passed(&last_used), 1006 base::Passed(&last_used),
1007 base::Passed(&result)); 1007 base::Passed(&result));
1008 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 1008 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1009 } 1009 }
1010 1010
1011 void SimpleEntryImpl::WriteSparseDataInternal( 1011 void SimpleEntryImpl::WriteSparseDataInternal(
1012 int64 sparse_offset, 1012 int64 sparse_offset,
1013 net::IOBuffer* buf, 1013 net::IOBuffer* buf,
1014 int buf_len, 1014 int buf_len,
1015 const CompletionCallback& callback) { 1015 const CompletionCallback& callback) {
1016 DCHECK(io_thread_checker_.CalledOnValidThread()); 1016 DCHECK(io_thread_checker_.CalledOnValidThread());
1017 ScopedOperationRunner operation_runner(this); 1017 ScopedOperationRunner operation_runner(this);
1018 1018
1019 DCHECK_EQ(STATE_READY, state_); 1019 DCHECK_EQ(STATE_READY, state_);
1020 state_ = STATE_IO_PENDING; 1020 state_ = STATE_IO_PENDING;
1021 1021
1022 int64 max_sparse_data_size = kint64max; 1022 int64 max_sparse_data_size = kint64max;
1023 if (backend_.get()) { 1023 if (backend_.get()) {
1024 int64 max_cache_size = backend_->index()->max_size(); 1024 int64 max_cache_size = backend_->index()->max_size();
1025 max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor; 1025 max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
1026 } 1026 }
1027 1027
1028 scoped_ptr<SimpleEntryStat> entry_stat( 1028 scoped_ptr<SimpleEntryStat> entry_stat(new SimpleEntryStat(
1029 new SimpleEntryStat(last_used_, last_modified_, data_size_, 1029 last_used_, last_modified_, data_size_, sparse_data_size_));
1030 sparse_data_size_));
1031 1030
1032 last_used_ = last_modified_ = base::Time::Now(); 1031 last_used_ = last_modified_ = base::Time::Now();
1033 1032
1034 scoped_ptr<int> result(new int()); 1033 scoped_ptr<int> result(new int());
1035 Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData, 1034 Closure task = base::Bind(
1036 base::Unretained(synchronous_entry_), 1035 &SimpleSynchronousEntry::WriteSparseData,
1037 SimpleSynchronousEntry::EntryOperationData( 1036 base::Unretained(synchronous_entry_),
1038 sparse_offset, buf_len), 1037 SimpleSynchronousEntry::EntryOperationData(sparse_offset, buf_len),
1039 make_scoped_refptr(buf), 1038 make_scoped_refptr(buf),
1040 max_sparse_data_size, 1039 max_sparse_data_size,
1041 entry_stat.get(), 1040 entry_stat.get(),
1042 result.get()); 1041 result.get());
1043 Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete, 1042 Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
1044 this, 1043 this,
1045 callback, 1044 callback,
1046 base::Passed(&entry_stat), 1045 base::Passed(&entry_stat),
1047 base::Passed(&result)); 1046 base::Passed(&result));
1048 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 1047 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1049 } 1048 }
1050 1049
1051 void SimpleEntryImpl::GetAvailableRangeInternal( 1050 void SimpleEntryImpl::GetAvailableRangeInternal(
1052 int64 sparse_offset, 1051 int64 sparse_offset,
1053 int len, 1052 int len,
1054 int64* out_start, 1053 int64* out_start,
1055 const CompletionCallback& callback) { 1054 const CompletionCallback& callback) {
1056 DCHECK(io_thread_checker_.CalledOnValidThread()); 1055 DCHECK(io_thread_checker_.CalledOnValidThread());
1057 ScopedOperationRunner operation_runner(this); 1056 ScopedOperationRunner operation_runner(this);
1058 1057
1059 DCHECK_EQ(STATE_READY, state_); 1058 DCHECK_EQ(STATE_READY, state_);
1060 state_ = STATE_IO_PENDING; 1059 state_ = STATE_IO_PENDING;
1061 1060
1062 scoped_ptr<int> result(new int()); 1061 scoped_ptr<int> result(new int());
1063 Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange, 1062 Closure task =
1064 base::Unretained(synchronous_entry_), 1063 base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
1065 SimpleSynchronousEntry::EntryOperationData( 1064 base::Unretained(synchronous_entry_),
1066 sparse_offset, len), 1065 SimpleSynchronousEntry::EntryOperationData(sparse_offset, len),
1067 out_start, 1066 out_start,
1068 result.get()); 1067 result.get());
1069 Closure reply = base::Bind( 1068 Closure reply =
1070 &SimpleEntryImpl::GetAvailableRangeOperationComplete, 1069 base::Bind(&SimpleEntryImpl::GetAvailableRangeOperationComplete,
1071 this, 1070 this,
1072 callback, 1071 callback,
1073 base::Passed(&result)); 1072 base::Passed(&result));
1074 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 1073 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1075 } 1074 }
1076 1075
1077 void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) { 1076 void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
1078 PostTaskAndReplyWithResult( 1077 PostTaskAndReplyWithResult(
1079 worker_pool_, FROM_HERE, 1078 worker_pool_,
1079 FROM_HERE,
1080 base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_), 1080 base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
1081 base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback, 1081 base::Bind(
1082 state_)); 1082 &SimpleEntryImpl::DoomOperationComplete, this, callback, state_));
1083 state_ = STATE_IO_PENDING; 1083 state_ = STATE_IO_PENDING;
1084 } 1084 }
1085 1085
1086 void SimpleEntryImpl::CreationOperationComplete( 1086 void SimpleEntryImpl::CreationOperationComplete(
1087 const CompletionCallback& completion_callback, 1087 const CompletionCallback& completion_callback,
1088 const base::TimeTicks& start_time, 1088 const base::TimeTicks& start_time,
1089 scoped_ptr<SimpleEntryCreationResults> in_results, 1089 scoped_ptr<SimpleEntryCreationResults> in_results,
1090 Entry** out_entry, 1090 Entry** out_entry,
1091 net::NetLog::EventType end_event_type) { 1091 net::NetLog::EventType end_event_type) {
1092 DCHECK(io_thread_checker_.CalledOnValidThread()); 1092 DCHECK(io_thread_checker_.CalledOnValidThread());
1093 DCHECK_EQ(state_, STATE_IO_PENDING); 1093 DCHECK_EQ(state_, STATE_IO_PENDING);
1094 DCHECK(in_results); 1094 DCHECK(in_results);
1095 ScopedOperationRunner operation_runner(this); 1095 ScopedOperationRunner operation_runner(this);
1096 SIMPLE_CACHE_UMA(BOOLEAN, 1096 SIMPLE_CACHE_UMA(BOOLEAN,
1097 "EntryCreationResult", cache_type_, 1097 "EntryCreationResult",
1098 cache_type_,
1098 in_results->result == net::OK); 1099 in_results->result == net::OK);
1099 if (in_results->result != net::OK) { 1100 if (in_results->result != net::OK) {
1100 if (in_results->result != net::ERR_FILE_EXISTS) 1101 if (in_results->result != net::ERR_FILE_EXISTS)
1101 MarkAsDoomed(); 1102 MarkAsDoomed();
1102 1103
1103 net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED); 1104 net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
1104 PostClientCallback(completion_callback, net::ERR_FAILED); 1105 PostClientCallback(completion_callback, net::ERR_FAILED);
1105 MakeUninitialized(); 1106 MakeUninitialized();
1106 return; 1107 return;
1107 } 1108 }
(...skipping 13 matching lines...)
1121 } 1122 }
1122 if (key_.empty()) { 1123 if (key_.empty()) {
1123 SetKey(synchronous_entry_->key()); 1124 SetKey(synchronous_entry_->key());
1124 } else { 1125 } else {
1125 // This should only be triggered when creating an entry. The key check in 1126 // This should only be triggered when creating an entry. The key check in
1126 // the open case is handled in SimpleBackendImpl. 1127 // the open case is handled in SimpleBackendImpl.
1127 DCHECK_EQ(key_, synchronous_entry_->key()); 1128 DCHECK_EQ(key_, synchronous_entry_->key());
1128 } 1129 }
1129 UpdateDataFromEntryStat(in_results->entry_stat); 1130 UpdateDataFromEntryStat(in_results->entry_stat);
1130 SIMPLE_CACHE_UMA(TIMES, 1131 SIMPLE_CACHE_UMA(TIMES,
1131 "EntryCreationTime", cache_type_, 1132 "EntryCreationTime",
1133 cache_type_,
1132 (base::TimeTicks::Now() - start_time)); 1134 (base::TimeTicks::Now() - start_time));
1133 AdjustOpenEntryCountBy(cache_type_, 1); 1135 AdjustOpenEntryCountBy(cache_type_, 1);
1134 1136
1135 net_log_.AddEvent(end_event_type); 1137 net_log_.AddEvent(end_event_type);
1136 PostClientCallback(completion_callback, net::OK); 1138 PostClientCallback(completion_callback, net::OK);
1137 } 1139 }
1138 1140
1139 void SimpleEntryImpl::EntryOperationComplete( 1141 void SimpleEntryImpl::EntryOperationComplete(
1140 const CompletionCallback& completion_callback, 1142 const CompletionCallback& completion_callback,
1141 const SimpleEntryStat& entry_stat, 1143 const SimpleEntryStat& entry_stat,
1142 scoped_ptr<int> result) { 1144 scoped_ptr<int> result) {
1143 DCHECK(io_thread_checker_.CalledOnValidThread()); 1145 DCHECK(io_thread_checker_.CalledOnValidThread());
1144 DCHECK(synchronous_entry_); 1146 DCHECK(synchronous_entry_);
1145 DCHECK_EQ(STATE_IO_PENDING, state_); 1147 DCHECK_EQ(STATE_IO_PENDING, state_);
1146 DCHECK(result); 1148 DCHECK(result);
1147 if (*result < 0) { 1149 if (*result < 0) {
1148 state_ = STATE_FAILURE; 1150 state_ = STATE_FAILURE;
1149 MarkAsDoomed(); 1151 MarkAsDoomed();
1150 } else { 1152 } else {
1151 state_ = STATE_READY; 1153 state_ = STATE_READY;
1152 UpdateDataFromEntryStat(entry_stat); 1154 UpdateDataFromEntryStat(entry_stat);
1153 } 1155 }
1154 1156
1155 if (!completion_callback.is_null()) { 1157 if (!completion_callback.is_null()) {
1156 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind( 1158 MessageLoopProxy::current()->PostTask(
1157 completion_callback, *result)); 1159 FROM_HERE, base::Bind(completion_callback, *result));
1158 } 1160 }
1159 RunNextOperationIfNeeded(); 1161 RunNextOperationIfNeeded();
1160 } 1162 }
1161 1163
1162 void SimpleEntryImpl::ReadOperationComplete( 1164 void SimpleEntryImpl::ReadOperationComplete(
1163 int stream_index, 1165 int stream_index,
1164 int offset, 1166 int offset,
1165 const CompletionCallback& completion_callback, 1167 const CompletionCallback& completion_callback,
1166 scoped_ptr<uint32> read_crc32, 1168 scoped_ptr<uint32> read_crc32,
1167 scoped_ptr<SimpleEntryStat> entry_stat, 1169 scoped_ptr<SimpleEntryStat> entry_stat,
1168 scoped_ptr<int> result) { 1170 scoped_ptr<int> result) {
1169 DCHECK(io_thread_checker_.CalledOnValidThread()); 1171 DCHECK(io_thread_checker_.CalledOnValidThread());
1170 DCHECK(synchronous_entry_); 1172 DCHECK(synchronous_entry_);
1171 DCHECK_EQ(STATE_IO_PENDING, state_); 1173 DCHECK_EQ(STATE_IO_PENDING, state_);
1172 DCHECK(read_crc32); 1174 DCHECK(read_crc32);
1173 DCHECK(result); 1175 DCHECK(result);
1174 1176
1175 if (*result > 0 && 1177 if (*result > 0 &&
1176 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { 1178 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
1177 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; 1179 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
1178 } 1180 }
1179 1181
1180 if (*result > 0 && crc32s_end_offset_[stream_index] == offset) { 1182 if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
1181 uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0) 1183 uint32 current_crc =
1182 : crc32s_[stream_index]; 1184 offset == 0 ? crc32(0, Z_NULL, 0) : crc32s_[stream_index];
1183 crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result); 1185 crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
1184 crc32s_end_offset_[stream_index] += *result; 1186 crc32s_end_offset_[stream_index] += *result;
1185 if (!have_written_[stream_index] && 1187 if (!have_written_[stream_index] &&
1186 GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) { 1188 GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
1187 // We have just read a file from start to finish, and so we have 1189 // We have just read a file from start to finish, and so we have
1188 // computed a crc of the entire file. We can check it now. If a cache 1190 // computed a crc of the entire file. We can check it now. If a cache
1189 // entry has a single reader, the normal pattern is to read from start 1191 // entry has a single reader, the normal pattern is to read from start
1190 // to finish. 1192 // to finish.
1191 1193
1192 // Other cases are possible. In the case of two readers on the same 1194 // Other cases are possible. In the case of two readers on the same
1193 // entry, one reader can be behind the other. In this case we compute 1195 // entry, one reader can be behind the other. In this case we compute
1194 // the crc as the most advanced reader progresses, and check it for 1196 // the crc as the most advanced reader progresses, and check it for
1195 // both readers as they read the last byte. 1197 // both readers as they read the last byte.
1196 1198
1197 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); 1199 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
1198 1200
1199 scoped_ptr<int> new_result(new int()); 1201 scoped_ptr<int> new_result(new int());
1200 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, 1202 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
1201 base::Unretained(synchronous_entry_), 1203 base::Unretained(synchronous_entry_),
1202 stream_index, 1204 stream_index,
1203 *entry_stat, 1205 *entry_stat,
1204 crc32s_[stream_index], 1206 crc32s_[stream_index],
1205 new_result.get()); 1207 new_result.get());
1206 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, 1208 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
1207 this, *result, stream_index, 1209 this,
1210 *result,
1211 stream_index,
1208 completion_callback, 1212 completion_callback,
1209 base::Passed(&new_result)); 1213 base::Passed(&new_result));
1210 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 1214 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1211 crc_check_state_[stream_index] = CRC_CHECK_DONE; 1215 crc_check_state_[stream_index] = CRC_CHECK_DONE;
1212 return; 1216 return;
1213 } 1217 }
1214 } 1218 }
1215 1219
1216 if (*result < 0) { 1220 if (*result < 0) {
1217 crc32s_end_offset_[stream_index] = 0; 1221 crc32s_end_offset_[stream_index] = 0;
1218 } 1222 }
1219 1223
1220 if (*result < 0) { 1224 if (*result < 0) {
1221 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); 1225 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1222 } else { 1226 } else {
1223 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); 1227 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1224 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && 1228 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
1225 offset + *result == GetDataSize(stream_index)) { 1229 offset + *result == GetDataSize(stream_index)) {
1226 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; 1230 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
1227 } 1231 }
1228 } 1232 }
1229 if (net_log_.IsLogging()) { 1233 if (net_log_.IsLogging()) {
1230 net_log_.AddEvent( 1234 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1231 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 1235 CreateNetLogReadWriteCompleteCallback(*result));
1232 CreateNetLogReadWriteCompleteCallback(*result));
1233 } 1236 }
1234 1237
1235 EntryOperationComplete(completion_callback, *entry_stat, result.Pass()); 1238 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1236 } 1239 }
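[Reviewer note — illustration only, not part of this patch] The checksum path above extends a running CRC with zlib's crc32_combine() whenever a read lands exactly at the current end offset, and only verifies against the stored value once the running CRC covers the whole stream; that is why a lagging second reader is still checked when it reads the last byte. A standalone sketch of that bookkeeping, assuming plain zlib; the data, split point, and variable names are made up for the example.

  #include <stdio.h>
  #include <zlib.h>

  int main() {
    const unsigned char data[] = "stream contents read in two sequential chunks";
    const size_t total = sizeof(data) - 1;
    const size_t first = 10;  // arbitrary split point for the example

    // CRC of each chunk, computed independently as each read completes.
    uLong crc_a = crc32(crc32(0L, Z_NULL, 0), data, first);
    uLong crc_b = crc32(crc32(0L, Z_NULL, 0), data + first, total - first);

    // Fold the second chunk into the running value, as the code above does
    // with |*read_crc32| once a read lands at crc32s_end_offset_.
    uLong running = crc32_combine(crc_a, crc_b, total - first);

    // Once the running CRC covers the whole stream, compare it against the
    // CRC stored at write time (recomputed directly here).
    uLong expected = crc32(crc32(0L, Z_NULL, 0), data, total);
    printf("match: %d\n", running == expected);
    return 0;
  }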
1237 1240
1238 void SimpleEntryImpl::WriteOperationComplete( 1241 void SimpleEntryImpl::WriteOperationComplete(
1239 int stream_index, 1242 int stream_index,
1240 const CompletionCallback& completion_callback, 1243 const CompletionCallback& completion_callback,
1241 scoped_ptr<SimpleEntryStat> entry_stat, 1244 scoped_ptr<SimpleEntryStat> entry_stat,
1242 scoped_ptr<int> result) { 1245 scoped_ptr<int> result) {
1243 if (*result >= 0) 1246 if (*result >= 0)
1244 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); 1247 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1245 else 1248 else
1246 RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE); 1249 RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
1247 if (net_log_.IsLogging()) { 1250 if (net_log_.IsLogging()) {
1248 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 1251 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
1249 CreateNetLogReadWriteCompleteCallback(*result)); 1252 CreateNetLogReadWriteCompleteCallback(*result));
1250 } 1253 }
1251 1254
1252 if (*result < 0) { 1255 if (*result < 0) {
1253 crc32s_end_offset_[stream_index] = 0; 1256 crc32s_end_offset_[stream_index] = 0;
1254 } 1257 }
1255 1258
1256 EntryOperationComplete(completion_callback, *entry_stat, result.Pass()); 1259 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1257 } 1260 }
1258 1261
1259 void SimpleEntryImpl::ReadSparseOperationComplete( 1262 void SimpleEntryImpl::ReadSparseOperationComplete(
1260 const CompletionCallback& completion_callback, 1263 const CompletionCallback& completion_callback,
1261 scoped_ptr<base::Time> last_used, 1264 scoped_ptr<base::Time> last_used,
1262 scoped_ptr<int> result) { 1265 scoped_ptr<int> result) {
1263 DCHECK(io_thread_checker_.CalledOnValidThread()); 1266 DCHECK(io_thread_checker_.CalledOnValidThread());
1264 DCHECK(synchronous_entry_); 1267 DCHECK(synchronous_entry_);
1265 DCHECK(result); 1268 DCHECK(result);
1266 1269
1267 SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_, 1270 SimpleEntryStat entry_stat(
1268 sparse_data_size_); 1271 *last_used, last_modified_, data_size_, sparse_data_size_);
1269 EntryOperationComplete(completion_callback, entry_stat, result.Pass()); 1272 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1270 } 1273 }
1271 1274
1272 void SimpleEntryImpl::WriteSparseOperationComplete( 1275 void SimpleEntryImpl::WriteSparseOperationComplete(
1273 const CompletionCallback& completion_callback, 1276 const CompletionCallback& completion_callback,
1274 scoped_ptr<SimpleEntryStat> entry_stat, 1277 scoped_ptr<SimpleEntryStat> entry_stat,
1275 scoped_ptr<int> result) { 1278 scoped_ptr<int> result) {
1276 DCHECK(io_thread_checker_.CalledOnValidThread()); 1279 DCHECK(io_thread_checker_.CalledOnValidThread());
1277 DCHECK(synchronous_entry_); 1280 DCHECK(synchronous_entry_);
1278 DCHECK(result); 1281 DCHECK(result);
1279 1282
1280 EntryOperationComplete(completion_callback, *entry_stat, result.Pass()); 1283 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1281 } 1284 }
1282 1285
1283 void SimpleEntryImpl::GetAvailableRangeOperationComplete( 1286 void SimpleEntryImpl::GetAvailableRangeOperationComplete(
1284 const CompletionCallback& completion_callback, 1287 const CompletionCallback& completion_callback,
1285 scoped_ptr<int> result) { 1288 scoped_ptr<int> result) {
1286 DCHECK(io_thread_checker_.CalledOnValidThread()); 1289 DCHECK(io_thread_checker_.CalledOnValidThread());
1287 DCHECK(synchronous_entry_); 1290 DCHECK(synchronous_entry_);
1288 DCHECK(result); 1291 DCHECK(result);
1289 1292
1290 SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_, 1293 SimpleEntryStat entry_stat(
1291 sparse_data_size_); 1294 last_used_, last_modified_, data_size_, sparse_data_size_);
1292 EntryOperationComplete(completion_callback, entry_stat, result.Pass()); 1295 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1293 } 1296 }
1294 1297
1295 void SimpleEntryImpl::DoomOperationComplete( 1298 void SimpleEntryImpl::DoomOperationComplete(const CompletionCallback& callback,
1296 const CompletionCallback& callback, 1299 State state_to_restore,
1297 State state_to_restore, 1300 int result) {
1298 int result) {
1299 state_ = state_to_restore; 1301 state_ = state_to_restore;
1300 if (!callback.is_null()) 1302 if (!callback.is_null())
1301 callback.Run(result); 1303 callback.Run(result);
1302 RunNextOperationIfNeeded(); 1304 RunNextOperationIfNeeded();
1303 if (backend_) 1305 if (backend_)
1304 backend_->OnDoomComplete(entry_hash_); 1306 backend_->OnDoomComplete(entry_hash_);
1305 } 1307 }
1306 1308
1307 void SimpleEntryImpl::ChecksumOperationComplete( 1309 void SimpleEntryImpl::ChecksumOperationComplete(
1308 int orig_result, 1310 int orig_result,
1309 int stream_index, 1311 int stream_index,
1310 const CompletionCallback& completion_callback, 1312 const CompletionCallback& completion_callback,
1311 scoped_ptr<int> result) { 1313 scoped_ptr<int> result) {
1312 DCHECK(io_thread_checker_.CalledOnValidThread()); 1314 DCHECK(io_thread_checker_.CalledOnValidThread());
1313 DCHECK(synchronous_entry_); 1315 DCHECK(synchronous_entry_);
1314 DCHECK_EQ(STATE_IO_PENDING, state_); 1316 DCHECK_EQ(STATE_IO_PENDING, state_);
1315 DCHECK(result); 1317 DCHECK(result);
1316 1318
1317 if (net_log_.IsLogging()) { 1319 if (net_log_.IsLogging()) {
1318 net_log_.AddEventWithNetErrorCode( 1320 net_log_.AddEventWithNetErrorCode(
1319 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END, 1321 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END, *result);
1320 *result);
1321 } 1322 }
1322 1323
1323 if (*result == net::OK) { 1324 if (*result == net::OK) {
1324 *result = orig_result; 1325 *result = orig_result;
1325 if (orig_result >= 0) 1326 if (orig_result >= 0)
1326 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); 1327 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1327 else 1328 else
1328 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); 1329 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1329 } else { 1330 } else {
1330 RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE); 1331 RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
1331 } 1332 }
1332 if (net_log_.IsLogging()) { 1333 if (net_log_.IsLogging()) {
1333 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 1334 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1334 CreateNetLogReadWriteCompleteCallback(*result)); 1335 CreateNetLogReadWriteCompleteCallback(*result));
1335 } 1336 }
1336 1337
1337 SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_, 1338 SimpleEntryStat entry_stat(
1338 sparse_data_size_); 1339 last_used_, last_modified_, data_size_, sparse_data_size_);
1339 EntryOperationComplete(completion_callback, entry_stat, result.Pass()); 1340 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1340 } 1341 }
1341 1342
1342 void SimpleEntryImpl::CloseOperationComplete() { 1343 void SimpleEntryImpl::CloseOperationComplete() {
1343 DCHECK(!synchronous_entry_); 1344 DCHECK(!synchronous_entry_);
1344 DCHECK_EQ(0, open_count_); 1345 DCHECK_EQ(0, open_count_);
1345 DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ || 1346 DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
1346 STATE_UNINITIALIZED == state_); 1347 STATE_UNINITIALIZED == state_);
1347 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END); 1348 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
1348 AdjustOpenEntryCountBy(cache_type_, -1); 1349 AdjustOpenEntryCountBy(cache_type_, -1);
(...skipping 47 matching lines...)
1396 type = READ_ALONE_IN_QUEUE; 1397 type = READ_ALONE_IN_QUEUE;
1397 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) { 1398 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
1398 type = READ_FOLLOWS_READ; 1399 type = READ_FOLLOWS_READ;
1399 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) { 1400 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
1400 if (executing_operation_->ConflictsWith(operation)) 1401 if (executing_operation_->ConflictsWith(operation))
1401 type = READ_FOLLOWS_CONFLICTING_WRITE; 1402 type = READ_FOLLOWS_CONFLICTING_WRITE;
1402 else 1403 else
1403 type = READ_FOLLOWS_NON_CONFLICTING_WRITE; 1404 type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
1404 } 1405 }
1405 SIMPLE_CACHE_UMA(ENUMERATION, 1406 SIMPLE_CACHE_UMA(ENUMERATION,
1406 "ReadIsParallelizable", cache_type_, 1407 "ReadIsParallelizable",
1407 type, READ_DEPENDENCY_TYPE_MAX); 1408 cache_type_,
1409 type,
1410 READ_DEPENDENCY_TYPE_MAX);
1408 } 1411 }
1409 1412
1410 void SimpleEntryImpl::RecordWriteDependencyType( 1413 void SimpleEntryImpl::RecordWriteDependencyType(
1411 const SimpleEntryOperation& operation) const { 1414 const SimpleEntryOperation& operation) const {
1412 if (!executing_operation_) 1415 if (!executing_operation_)
1413 return; 1416 return;
1414 // Used in histograms, please only add entries at the end. 1417 // Used in histograms, please only add entries at the end.
1415 enum WriteDependencyType { 1418 enum WriteDependencyType {
1416 WRITE_OPTIMISTIC = 0, 1419 WRITE_OPTIMISTIC = 0,
1417 WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1, 1420 WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
(...skipping 18 matching lines...)
1436 : WRITE_FOLLOWS_NON_CONFLICTING_READ; 1439 : WRITE_FOLLOWS_NON_CONFLICTING_READ;
1437 } else if (executing_operation_->optimistic()) { 1440 } else if (executing_operation_->optimistic()) {
1438 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC 1441 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
1439 : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC; 1442 : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
1440 } else { 1443 } else {
1441 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE 1444 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
1442 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; 1445 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
1443 } 1446 }
1444 } 1447 }
1445 SIMPLE_CACHE_UMA(ENUMERATION, 1448 SIMPLE_CACHE_UMA(ENUMERATION,
1446 "WriteDependencyType", cache_type_, 1449 "WriteDependencyType",
1447 type, WRITE_DEPENDENCY_TYPE_MAX); 1450 cache_type_,
1451 type,
1452 WRITE_DEPENDENCY_TYPE_MAX);
1448 } 1453 }
1449 1454
1450 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf, 1455 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
1451 int offset, 1456 int offset,
1452 int buf_len) { 1457 int buf_len) {
1453 if (buf_len < 0) { 1458 if (buf_len < 0) {
1454 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); 1459 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1455 return 0; 1460 return 0;
1456 } 1461 }
1457 memcpy(buf->data(), stream_0_data_->data() + offset, buf_len); 1462 memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
1458 UpdateDataFromEntryStat( 1463 UpdateDataFromEntryStat(SimpleEntryStat(
1459 SimpleEntryStat(base::Time::Now(), last_modified_, data_size_, 1464 base::Time::Now(), last_modified_, data_size_, sparse_data_size_));
1460 sparse_data_size_));
1461 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); 1465 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1462 return buf_len; 1466 return buf_len;
1463 } 1467 }
1464 1468
1465 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf, 1469 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
1466 int offset, 1470 int offset,
1467 int buf_len, 1471 int buf_len,
1468 bool truncate) { 1472 bool truncate) {
1469 // Currently, stream 0 is only used for HTTP headers, and always writes them 1473 // Currently, stream 0 is only used for HTTP headers, and always writes them
1470 // with a single, truncating write. Detect these writes and record the size 1474 // with a single, truncating write. Detect these writes and record the size
(...skipping 16 matching lines...)
1487 // zero-filled. 1491 // zero-filled.
1488 const int fill_size = offset <= data_size ? 0 : offset - data_size; 1492 const int fill_size = offset <= data_size ? 0 : offset - data_size;
1489 if (fill_size > 0) 1493 if (fill_size > 0)
1490 memset(stream_0_data_->data() + data_size, 0, fill_size); 1494 memset(stream_0_data_->data() + data_size, 0, fill_size);
1491 if (buf) 1495 if (buf)
1492 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len); 1496 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
1493 data_size_[0] = buffer_size; 1497 data_size_[0] = buffer_size;
1494 } 1498 }
1495 base::Time modification_time = base::Time::Now(); 1499 base::Time modification_time = base::Time::Now();
1496 AdvanceCrc(buf, offset, buf_len, 0); 1500 AdvanceCrc(buf, offset, buf_len, 0);
1497 UpdateDataFromEntryStat( 1501 UpdateDataFromEntryStat(SimpleEntryStat(
1498 SimpleEntryStat(modification_time, modification_time, data_size_, 1502 modification_time, modification_time, data_size_, sparse_data_size_));
1499 sparse_data_size_));
1500 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); 1503 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1501 return buf_len; 1504 return buf_len;
1502 } 1505 }
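[Reviewer note — illustration only, not part of this patch] In the non-truncating branch of SetStream0Data, a write that starts past the current end of stream 0 first zero-fills the gap (|fill_size|) and then copies the new bytes, so the buffer never holds stale or uninitialized data between the old end and the new offset. A standalone sketch of that semantic on a plain std::vector rather than the entry's own in-memory buffer type; names and types are illustrative only.

  #include <algorithm>
  #include <cstring>
  #include <vector>

  // Write |len| bytes of |data| at |offset| into |stream|, zero-filling any
  // gap between the current end of |stream| and |offset| (the fill_size case).
  void WriteWithZeroFill(std::vector<char>* stream,
                         int offset,
                         const char* data,
                         int len) {
    const int old_size = static_cast<int>(stream->size());
    const int new_size = std::max(old_size, offset + len);
    stream->resize(new_size, 0);  // growth is zero-initialized, like the memset
    if (data && len > 0)
      std::memcpy(stream->data() + offset, data, len);
  }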
1503 1506
1504 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer, 1507 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
1505 int offset, 1508 int offset,
1506 int length, 1509 int length,
1507 int stream_index) { 1510 int stream_index) {
1508 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) 1511 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
1509 // if |offset == 0| or we have already computed the CRC for [0 .. offset). 1512 // if |offset == 0| or we have already computed the CRC for [0 .. offset).
1510 // We rely on most write operations being sequential, start to end to compute 1513 // We rely on most write operations being sequential, start to end to compute
1511 // the crc of the data. When we write to an entry and close without having 1514 // the crc of the data. When we write to an entry and close without having
1512 // done a sequential write, we don't check the CRC on read. 1515 // done a sequential write, we don't check the CRC on read.
1513 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { 1516 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
1514 uint32 initial_crc = 1517 uint32 initial_crc =
1515 (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0); 1518 (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
1516 if (length > 0) { 1519 if (length > 0) {
1517 crc32s_[stream_index] = crc32( 1520 crc32s_[stream_index] = crc32(
1518 initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length); 1521 initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
1519 } 1522 }
1520 crc32s_end_offset_[stream_index] = offset + length; 1523 crc32s_end_offset_[stream_index] = offset + length;
1521 } else if (offset < crc32s_end_offset_[stream_index]) { 1524 } else if (offset < crc32s_end_offset_[stream_index]) {
1522 // If a range for which the crc32 was already computed is rewritten, the 1525 // If a range for which the crc32 was already computed is rewritten, the
1523 // computation of the crc32 needs to start from 0 again. 1526 // computation of the crc32 needs to start from 0 again.
1524 crc32s_end_offset_[stream_index] = 0; 1527 crc32s_end_offset_[stream_index] = 0;
1525 } 1528 }
1526 } 1529 }
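[Reviewer note — illustration only, not part of this patch] AdvanceCrc keeps the running crc32 valid only while writes stay sequential: a sequential write extends the hash and the tracked end offset, a rewrite inside the already-hashed range resets the end offset to 0, and a write past the tracked end simply stops the hash from covering the tail, so the CRC is not checked on read. A minimal sketch of that state machine, assuming plain zlib and a single stream; the struct name is invented for the example.

  #include <zlib.h>

  // Tracks a running CRC over [0, end_offset), mirroring the crc32s_ /
  // crc32s_end_offset_ pair kept per stream above.
  struct RunningCrc {
    uLong crc = crc32(0L, Z_NULL, 0);
    int end_offset = 0;  // 0 means nothing hashed yet, or invalidated

    void Advance(const unsigned char* data, int offset, int length) {
      if (offset == 0 || end_offset == offset) {
        // Sequential write: extend the running CRC and the covered range.
        uLong initial = (offset != 0) ? crc : crc32(0L, Z_NULL, 0);
        if (length > 0)
          crc = crc32(initial, data, length);
        end_offset = offset + length;
      } else if (offset < end_offset) {
        // Part of the hashed range was rewritten; the running CRC is stale.
        end_offset = 0;
      }
      // A write beyond end_offset leaves the CRC untouched; it just no longer
      // covers the whole stream, so the check on read will be skipped.
    }
  };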
1527 1530
1528 } // namespace disk_cache 1531 } // namespace disk_cache