OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "net/http/mock_http_cache.h" | |
6 | |
7 #include "base/bind.h" | |
8 #include "base/message_loop/message_loop.h" | |
9 #include "net/base/completion_callback.h" | |
10 #include "net/base/net_errors.h" | |
11 #include "testing/gtest/include/gtest/gtest.h" | |
12 | |
13 namespace { | |
14 | |
// We can override the test mode for a given operation by setting this global
// variable. A value of 0 means "no override"; see MockHttpCache::GetTestMode.
int g_test_mode = 0;
18 | |
19 int GetTestModeForEntry(const std::string& key) { | |
20 // 'key' is prefixed with an identifier if it corresponds to a cached POST. | |
21 // Skip past that to locate the actual URL. | |
22 // | |
23 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an | |
24 // URL corresponding to a registered MockTransaction. It would be good to | |
25 // have another way to access the test_mode. | |
26 GURL url; | |
27 if (isdigit(key[0])) { | |
28 size_t slash = key.find('/'); | |
29 DCHECK(slash != std::string::npos); | |
30 url = GURL(key.substr(slash + 1)); | |
31 } else { | |
32 url = GURL(key); | |
33 } | |
34 const MockTransaction* t = FindMockTransaction(url); | |
35 DCHECK(t); | |
36 return t->test_mode; | |
37 } | |
38 | |
// Trampoline used by MockDiskCache::CallbackLater to run a completion
// callback from a posted task. (Note: "Forwader" is an existing typo; the
// name is referenced elsewhere in this file, so it is kept as-is.)
void CallbackForwader(const net::CompletionCallback& callback, int result) {
  callback.Run(result);
}
42 | |
43 } // namespace | |
44 | |
45 //----------------------------------------------------------------------------- | |
46 | |
// Bundles everything needed to replay a deferred completion callback; see
// StoreAndDeliverCallbacks(). The scoped_refptr keeps the entry alive until
// the stored callback is finally delivered.
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionCallback callback;
  int result;
};

// Creates an entry whose test mode is looked up from the MockTransaction
// registered for |key|.
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}
59 | |
// Marks the entry as doomed; the cache map actually drops it on the next
// OpenEntry()/CreateEntry() for this key.
void MockDiskEntry::Doom() {
  doomed_ = true;
}

// Entries are reference counted; Close() just drops the caller's reference.
void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

// Timestamps are not tracked by the mock; both accessors return the zero
// (epoch) time.
base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

// Returns the number of bytes currently stored in stream |index|.
int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}
84 | |
85 int MockDiskEntry::ReadData( | |
86 int index, int offset, net::IOBuffer* buf, int buf_len, | |
87 const net::CompletionCallback& callback) { | |
88 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices); | |
89 DCHECK(!callback.is_null()); | |
90 | |
91 if (fail_requests_) | |
92 return net::ERR_CACHE_READ_FAILURE; | |
93 | |
94 if (offset < 0 || offset > static_cast<int>(data_[index].size())) | |
95 return net::ERR_FAILED; | |
96 if (static_cast<size_t>(offset) == data_[index].size()) | |
97 return 0; | |
98 | |
99 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset); | |
100 memcpy(buf->data(), &data_[index][offset], num); | |
101 | |
102 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) | |
103 return num; | |
104 | |
105 CallbackLater(callback, num); | |
106 return net::ERR_IO_PENDING; | |
107 } | |
108 | |
// Writes |buf_len| bytes to stream |index| at |offset|, resizing the stream.
// Synchronous when TEST_MODE_SYNC_CACHE_WRITE is set; otherwise the result is
// delivered through |callback| and ERR_IO_PENDING is returned.
int MockDiskEntry::WriteData(
    int index, int offset, net::IOBuffer* buf, int buf_len,
    const net::CompletionCallback& callback, bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  // The mock only supports truncating writes.
  DCHECK(truncate);

  if (fail_requests_) {
    // NOTE(review): unlike ReadData, the failure here is delivered
    // asynchronously, and it reports ERR_CACHE_READ_FAILURE for a write —
    // presumably deliberate so tests observe an async failing write; confirm
    // before changing.
    CallbackLater(callback, net::ERR_CACHE_READ_FAILURE);
    return net::ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}
134 | |
135 int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | |
136 const net::CompletionCallback& callback) { | |
137 DCHECK(!callback.is_null()); | |
138 if (fail_sparse_requests_) | |
139 return net::ERR_NOT_IMPLEMENTED; | |
140 if (!sparse_ || busy_) | |
141 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | |
142 if (offset < 0) | |
143 return net::ERR_FAILED; | |
144 | |
145 if (fail_requests_) | |
146 return net::ERR_CACHE_READ_FAILURE; | |
147 | |
148 DCHECK(offset < kint32max); | |
149 int real_offset = static_cast<int>(offset); | |
150 if (!buf_len) | |
151 return 0; | |
152 | |
153 int num = std::min(static_cast<int>(data_[1].size()) - real_offset, | |
154 buf_len); | |
155 memcpy(buf->data(), &data_[1][real_offset], num); | |
156 | |
157 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) | |
158 return num; | |
159 | |
160 CallbackLater(callback, num); | |
161 busy_ = true; | |
162 delayed_ = false; | |
163 return net::ERR_IO_PENDING; | |
164 } | |
165 | |
166 int MockDiskEntry::WriteSparseData(int64 offset, net::IOBuffer* buf, | |
167 int buf_len, | |
168 const net::CompletionCallback& callback) { | |
169 DCHECK(!callback.is_null()); | |
170 if (fail_sparse_requests_) | |
171 return net::ERR_NOT_IMPLEMENTED; | |
172 if (busy_) | |
173 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | |
174 if (!sparse_) { | |
175 if (data_[1].size()) | |
176 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | |
177 sparse_ = true; | |
178 } | |
179 if (offset < 0) | |
180 return net::ERR_FAILED; | |
181 if (!buf_len) | |
182 return 0; | |
183 | |
184 if (fail_requests_) | |
185 return net::ERR_CACHE_READ_FAILURE; | |
186 | |
187 DCHECK(offset < kint32max); | |
188 int real_offset = static_cast<int>(offset); | |
189 | |
190 if (static_cast<int>(data_[1].size()) < real_offset + buf_len) | |
191 data_[1].resize(real_offset + buf_len); | |
192 | |
193 memcpy(&data_[1][real_offset], buf->data(), buf_len); | |
194 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) | |
195 return buf_len; | |
196 | |
197 CallbackLater(callback, buf_len); | |
198 return net::ERR_IO_PENDING; | |
199 } | |
200 | |
// Scans stream 1 starting at |offset| for the first contiguous run of
// nonzero bytes (zero bytes represent holes), storing the run's start in
// |*start| and returning its length, capped at |len|.
int MockDiskEntry::GetAvailableRange(int64 offset, int len, int64* start,
                                     const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      // Still searching for the start of a data run.
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      // Inside a run; the first zero byte ends it.
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  // NOTE(review): this query gates on TEST_MODE_SYNC_CACHE_WRITE rather than
  // _READ — confirm the asymmetry is intentional before changing it.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return net::ERR_IO_PENDING;
}
238 | |
239 bool MockDiskEntry::CouldBeSparse() const { | |
240 if (fail_sparse_requests_) | |
241 return false; | |
242 return sparse_; | |
243 } | |
244 | |
// Records that in-flight sparse IO should be treated as cancelled; the flag
// is consumed by the next ReadyForSparseIO() call.
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

// Returns OK if sparse operations may proceed. After CancelSparseIO(), the
// first call completes (synchronously or via |callback|) and clears the flag.
int MockDiskEntry::ReadyForSparseIO(const net::CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return net::OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return net::OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return net::ERR_IO_PENDING;
}
265 | |
// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  // Re-enabling delivery flushes every callback stored while disabled.
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, net::CompletionCallback(), 0);
}

MockDiskEntry::~MockDiskEntry() {
}
280 | |
// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  // Binding |this| (refcounted) keeps the entry alive until the task runs.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

// Delivers |result| to |callback|, re-posting itself exactly once when the
// entry is busy with a sparse operation (see comment below), then clears the
// busy flag.
void MockDiskEntry::RunCallback(
    const net::CompletionCallback& callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}
312 | |
// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(
    bool store, MockDiskEntry* entry, const net::CompletionCallback& callback,
    int result) {
  // Function-local static: the queue persists across entries and calls.
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    // CallbackInfo's scoped_refptr keeps |entry| alive while queued.
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;
335 | |
336 //----------------------------------------------------------------------------- | |
337 | |
MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  // Drop the cache's reference on any entries still in the map.
  ReleaseAll();
}

net::CacheType MockDiskCache::GetCacheType() const {
  return net::DISK_CACHE;
}

// Returns the number of entries currently tracked by the mock.
int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
355 | |
356 int MockDiskCache::OpenEntry(const std::string& key, disk_cache::Entry** entry, | |
357 const net::CompletionCallback& callback) { | |
358 DCHECK(!callback.is_null()); | |
359 if (fail_requests_) | |
360 return net::ERR_CACHE_OPEN_FAILURE; | |
361 | |
362 EntryMap::iterator it = entries_.find(key); | |
363 if (it == entries_.end()) | |
364 return net::ERR_CACHE_OPEN_FAILURE; | |
365 | |
366 if (it->second->is_doomed()) { | |
367 it->second->Release(); | |
368 entries_.erase(it); | |
369 return net::ERR_CACHE_OPEN_FAILURE; | |
370 } | |
371 | |
372 open_count_++; | |
373 | |
374 it->second->AddRef(); | |
375 *entry = it->second; | |
376 | |
377 if (soft_failures_) | |
378 it->second->set_fail_requests(); | |
379 | |
380 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) | |
381 return net::OK; | |
382 | |
383 CallbackLater(callback, net::OK); | |
384 return net::ERR_IO_PENDING; | |
385 } | |
386 | |
// Creates a new entry for |key|, replacing a doomed entry if one is present.
// Creating over a live entry is a test bug (NOTREACHED) unless
// |double_create_check_| has been disabled.
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return net::ERR_CACHE_CREATE_FAILURE;
    }
    // Replace the doomed entry: release the map's reference and forget it.
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // One reference is held by the map...
  new_entry->AddRef();
  entries_[key] = new_entry;

  // ...and one by the caller (released via Entry::Close()).
  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}
428 | |
429 int MockDiskCache::DoomEntry(const std::string& key, | |
430 const net::CompletionCallback& callback) { | |
431 DCHECK(!callback.is_null()); | |
432 EntryMap::iterator it = entries_.find(key); | |
433 if (it != entries_.end()) { | |
434 it->second->Release(); | |
435 entries_.erase(it); | |
436 } | |
437 | |
438 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) | |
439 return net::OK; | |
440 | |
441 CallbackLater(callback, net::OK); | |
442 return net::ERR_IO_PENDING; | |
443 } | |
444 | |
// Bulk-doom operations are not supported by the mock backend.
int MockDiskCache::DoomAllEntries(const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
459 | |
// Iterator stub: entry enumeration is not supported by the mock.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const net::CompletionCallback& callback) override {
    return net::ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

// Stats collection is a no-op for the mock.
void MockDiskCache::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
}

// External cache hints are ignored by the mock.
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}
478 | |
479 void MockDiskCache::ReleaseAll() { | |
480 EntryMap::iterator it = entries_.begin(); | |
481 for (; it != entries_.end(); ++it) | |
482 it->second->Release(); | |
483 entries_.clear(); | |
484 } | |
485 | |
// Posts |callback| with |result| to the current message loop.
void MockDiskCache::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwader, callback, result));
}

//-----------------------------------------------------------------------------

// Synchronously creates a MockDiskCache backend; never fails.
int MockBackendFactory::CreateBackend(net::NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return net::OK;
}
500 | |
501 //----------------------------------------------------------------------------- | |
502 | |
MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}

// Returns the backend as a MockDiskCache, or NULL if it could not be created.
// May pump the message loop while waiting for backend creation to complete.
MockDiskCache* MockHttpCache::disk_cache() {
  net::TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL;
}

// Creates a cache transaction at default priority.
int MockHttpCache::CreateTransaction(scoped_ptr<net::HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(net::DEFAULT_PRIORITY, trans);
}

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
530 | |
// Reads the serialized response info (stream 0) from |disk_entry| and parses
// it into |response_info|, setting |*response_truncated| from the stored
// flag. Returns false if the data cannot be parsed.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     net::HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  net::TestCompletionCallback cb;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return net::HttpCache::ParseResponseInfo(buffer->data(), size,
                                           response_info,
                                           response_truncated);
}
546 | |
// Serializes |response_info| and writes it as stream 0 of |disk_entry|.
// Returns true if the entire pickle was written.
bool MockHttpCache::WriteResponseInfo(
    disk_cache::Entry* disk_entry, const net::HttpResponseInfo* response_info,
    bool skip_transient_headers, bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  net::TestCompletionCallback cb;
  // WrappedIOBuffer does not own the bytes; |pickle| must outlive the write.
  scoped_refptr<net::WrappedIOBuffer> data(new net::WrappedIOBuffer(
      reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
563 | |
// Opens the backend entry for |key|, blocking until the operation completes.
bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  net::TestCompletionCallback cb;
  int rv = disk_cache()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}

// Creates a backend entry for |key|, blocking until the operation completes.
// |net_log| is accepted for interface parity but not used by the mock.
bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       net::NetLog* net_log) {
  net::TestCompletionCallback cb;
  int rv = disk_cache()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}
578 | |
579 // Static. | |
580 int MockHttpCache::GetTestMode(int test_mode) { | |
581 if (!g_test_mode) | |
582 return test_mode; | |
583 | |
584 return g_test_mode; | |
585 } | |
586 | |
// Static. Sets the global test-mode override; pass 0 to restore the
// per-entry modes.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
591 | |
592 //----------------------------------------------------------------------------- | |
593 | |
// Never invokes |callback|: simulates a backend whose entry creation hangs
// forever.
int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const net::CompletionCallback& callback) {
  return net::ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

// Synchronously creates a MockDiskCacheNoCB backend.
int MockBackendNoCbFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return net::OK;
}
608 | |
609 //----------------------------------------------------------------------------- | |
610 | |
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

// While blocking, stashes |backend| and |callback| until FinishCreation() is
// called; otherwise completes synchronously (failing when |fail_| is set).
int MockBlockingBackendFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return net::ERR_IO_PENDING;
}

// Unblocks a pending CreateBackend() and runs its stored callback.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    // Move the callback to a local and clear the member first: running the
    // callback may delete this factory.
    net::CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}
OLD | NEW |