Chromium Code Reviews

Side by Side Diff: net/http/http_cache_unittest.cc

Issue 8588011: Split DiskCacheBasedSSLHostInfo unit tests into their own file (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 1 month ago
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/http/http_cache.h" 5 #include "net/http/http_cache.h"
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/hash_tables.h"
9 #include "base/memory/scoped_vector.h" 8 #include "base/memory/scoped_vector.h"
10 #include "base/message_loop.h" 9 #include "base/message_loop.h"
11 #include "base/string_util.h" 10 #include "base/string_util.h"
12 #include "base/stringprintf.h" 11 #include "base/stringprintf.h"
13 #include "net/base/cache_type.h" 12 #include "net/base/cache_type.h"
14 #include "net/base/cert_status_flags.h" 13 #include "net/base/cert_status_flags.h"
15 #include "net/base/host_port_pair.h" 14 #include "net/base/host_port_pair.h"
16 #include "net/base/load_flags.h" 15 #include "net/base/load_flags.h"
17 #include "net/base/net_errors.h" 16 #include "net/base/net_errors.h"
18 #include "net/base/net_log_unittest.h" 17 #include "net/base/net_log_unittest.h"
19 #include "net/base/ssl_cert_request_info.h" 18 #include "net/base/ssl_cert_request_info.h"
20 #include "net/base/ssl_config_service.h"
21 #include "net/disk_cache/disk_cache.h" 19 #include "net/disk_cache/disk_cache.h"
22 #include "net/http/disk_cache_based_ssl_host_info.h"
23 #include "net/http/http_byte_range.h" 20 #include "net/http/http_byte_range.h"
24 #include "net/http/http_request_headers.h" 21 #include "net/http/http_request_headers.h"
25 #include "net/http/http_request_info.h" 22 #include "net/http/http_request_info.h"
26 #include "net/http/http_response_headers.h" 23 #include "net/http/http_response_headers.h"
27 #include "net/http/http_response_info.h" 24 #include "net/http/http_response_info.h"
28 #include "net/http/http_transaction.h" 25 #include "net/http/http_transaction.h"
29 #include "net/http/http_transaction_unittest.h" 26 #include "net/http/http_transaction_unittest.h"
30 #include "net/http/http_util.h" 27 #include "net/http/http_util.h"
28 #include "net/http/mock_http_cache.h"
31 #include "testing/gtest/include/gtest/gtest.h" 29 #include "testing/gtest/include/gtest/gtest.h"
32 30
33 using base::Time; 31 using base::Time;
34 32
35 namespace { 33 namespace {
36 34
37 int GetTestModeForEntry(const std::string& key) {
38 // 'key' is prefixed with an identifier if it corresponds to a cached POST.
39 // Skip past that to locate the actual URL.
40 //
41 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is a
42 // URL corresponding to a registered MockTransaction. It would be good to
43 // have another way to access the test_mode.
44 GURL url;
45 if (isdigit(key[0])) {
46 size_t slash = key.find('/');
47 DCHECK(slash != std::string::npos);
48 url = GURL(key.substr(slash + 1));
49 } else {
50 url = GURL(key);
51 }
52 const MockTransaction* t = FindMockTransaction(url);
53 DCHECK(t);
54 return t->test_mode;
55 }
56
57 // We can override the test mode for a given operation by setting this global
58 // variable. Just remember to reset it after the test!
59 int g_test_mode = 0;
60
61 // Returns the test mode after considering the global override.
62 int GetEffectiveTestMode(int test_mode) {
63 if (!g_test_mode)
64 return test_mode;
65
66 return g_test_mode;
67 }
68
69 //-----------------------------------------------------------------------------
70 // mock disk cache (a very basic memory cache implementation)
71
72 static const int kNumCacheEntryDataIndices = 3;
73
74 class MockDiskEntry : public disk_cache::Entry,
75 public base::RefCounted<MockDiskEntry> {
76 public:
77 MockDiskEntry()
78 : test_mode_(0), doomed_(false), sparse_(false),
79 fail_requests_(false), busy_(false), delayed_(false) {
80 }
81
82 explicit MockDiskEntry(const std::string& key)
83 : key_(key), doomed_(false), sparse_(false),
84 fail_requests_(false), busy_(false), delayed_(false) {
85 test_mode_ = GetTestModeForEntry(key);
86 }
87
88 bool is_doomed() const { return doomed_; }
89
90 virtual void Doom() {
91 doomed_ = true;
92 }
93
94 virtual void Close() {
95 Release();
96 }
97
98 virtual std::string GetKey() const {
99 if (fail_requests_)
100 return std::string();
101 return key_;
102 }
103
104 virtual Time GetLastUsed() const {
105 return Time::FromInternalValue(0);
106 }
107
108 virtual Time GetLastModified() const {
109 return Time::FromInternalValue(0);
110 }
111
112 virtual int32 GetDataSize(int index) const {
113 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
114 return static_cast<int32>(data_[index].size());
115 }
116
117 virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
118 net::OldCompletionCallback* callback) {
119 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
120 DCHECK(callback);
121
122 if (fail_requests_)
123 return net::ERR_CACHE_READ_FAILURE;
124
125 if (offset < 0 || offset > static_cast<int>(data_[index].size()))
126 return net::ERR_FAILED;
127 if (static_cast<size_t>(offset) == data_[index].size())
128 return 0;
129
130 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
131 memcpy(buf->data(), &data_[index][offset], num);
132
133 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
134 return num;
135
136 CallbackLater(callback, num);
137 return net::ERR_IO_PENDING;
138 }
139
140 virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
141 net::OldCompletionCallback* callback, bool truncate) {
142 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
143 DCHECK(callback);
144 DCHECK(truncate);
145
146 if (fail_requests_) {
147 CallbackLater(callback, net::ERR_CACHE_READ_FAILURE);
148 return net::ERR_IO_PENDING;
149 }
150
151 if (offset < 0 || offset > static_cast<int>(data_[index].size()))
152 return net::ERR_FAILED;
153
154 data_[index].resize(offset + buf_len);
155 if (buf_len)
156 memcpy(&data_[index][offset], buf->data(), buf_len);
157
158 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
159 return buf_len;
160
161 CallbackLater(callback, buf_len);
162 return net::ERR_IO_PENDING;
163 }
164
165 virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
166 net::OldCompletionCallback* callback) {
167 DCHECK(callback);
168 if (!sparse_ || busy_)
169 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
170 if (offset < 0)
171 return net::ERR_FAILED;
172
173 if (fail_requests_)
174 return net::ERR_CACHE_READ_FAILURE;
175
176 DCHECK(offset < kint32max);
177 int real_offset = static_cast<int>(offset);
178 if (!buf_len)
179 return 0;
180
181 int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
182 buf_len);
183 memcpy(buf->data(), &data_[1][real_offset], num);
184
185 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
186 return num;
187
188 CallbackLater(callback, num);
189 busy_ = true;
190 delayed_ = false;
191 return net::ERR_IO_PENDING;
192 }
193
194 virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
195 net::OldCompletionCallback* callback) {
196 DCHECK(callback);
197 if (busy_)
198 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
199 if (!sparse_) {
200 if (data_[1].size())
201 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
202 sparse_ = true;
203 }
204 if (offset < 0)
205 return net::ERR_FAILED;
206 if (!buf_len)
207 return 0;
208
209 if (fail_requests_)
210 return net::ERR_CACHE_READ_FAILURE;
211
212 DCHECK(offset < kint32max);
213 int real_offset = static_cast<int>(offset);
214
215 if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
216 data_[1].resize(real_offset + buf_len);
217
218 memcpy(&data_[1][real_offset], buf->data(), buf_len);
219 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
220 return buf_len;
221
222 CallbackLater(callback, buf_len);
223 return net::ERR_IO_PENDING;
224 }
225
226 virtual int GetAvailableRange(int64 offset, int len, int64* start,
227 net::OldCompletionCallback* callback) {
228 DCHECK(callback);
229 if (!sparse_ || busy_)
230 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
231 if (offset < 0)
232 return net::ERR_FAILED;
233
234 if (fail_requests_)
235 return net::ERR_CACHE_READ_FAILURE;
236
237 *start = offset;
238 DCHECK(offset < kint32max);
239 int real_offset = static_cast<int>(offset);
240 if (static_cast<int>(data_[1].size()) < real_offset)
241 return 0;
242
243 int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
244 int count = 0;
245 for (; num > 0; num--, real_offset++) {
246 if (!count) {
247 if (data_[1][real_offset]) {
248 count++;
249 *start = real_offset;
250 }
251 } else {
252 if (!data_[1][real_offset])
253 break;
254 count++;
255 }
256 }
257 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
258 return count;
259
260 CallbackLater(callback, count);
261 return net::ERR_IO_PENDING;
262 }
263
264 virtual bool CouldBeSparse() const {
265 return sparse_;
266 }
267
268 virtual void CancelSparseIO() { cancel_ = true; }
269
270 virtual int ReadyForSparseIO(net::OldCompletionCallback* completion_callback) {
271 if (!cancel_)
272 return net::OK;
273
274 cancel_ = false;
275 DCHECK(completion_callback);
276 if (GetEffectiveTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
277 return net::OK;
278
279 // The pending operation is already in the message loop (and hopefully
280 // already in the second pass). Just notify the caller that it finished.
281 CallbackLater(completion_callback, 0);
282 return net::ERR_IO_PENDING;
283 }
284
285 // Fail most subsequent requests.
286 void set_fail_requests() { fail_requests_ = true; }
287
288 // If |value| is true, don't deliver any completion callbacks until called
289 // again with |value| set to false. Caution: remember to enable callbacks
290 // again or all subsequent tests will fail.
291 static void IgnoreCallbacks(bool value) {
292 if (ignore_callbacks_ == value)
293 return;
294 ignore_callbacks_ = value;
295 if (!value)
296 StoreAndDeliverCallbacks(false, NULL, NULL, 0);
297 }
298
299 private:
300 friend class base::RefCounted<MockDiskEntry>;
301
302 struct CallbackInfo {
303 scoped_refptr<MockDiskEntry> entry;
304 net::OldCompletionCallback* callback;
305 int result;
306 };
307
308 ~MockDiskEntry() {}
309
310 // Unlike the callbacks for MockHttpTransaction, we want this one to run even
311 // if the consumer called Close on the MockDiskEntry. We achieve that by
312 // leveraging the fact that this class is reference counted.
313 void CallbackLater(net::OldCompletionCallback* callback, int result) {
314 if (ignore_callbacks_)
315 return StoreAndDeliverCallbacks(true, this, callback, result);
316 MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
317 &MockDiskEntry::RunCallback, this, callback, result));
318 }
319 void RunCallback(net::OldCompletionCallback* callback, int result) {
320 if (busy_) {
321 // This is kind of hacky, but controlling the behavior of just this entry
322 // from a test is sort of complicated. What we really want to do is
323 // delay the delivery of a sparse IO operation a little more so that the
324 // request start operation (async) will finish without seeing the end of
325 // this operation (already posted to the message loop)... and without
326 // just delaying for n ms (which may cause trouble with slow bots). So
327 // we re-post this operation (all async sparse IO operations will take two
328 // trips through the message loop instead of one).
329 if (!delayed_) {
330 delayed_ = true;
331 return CallbackLater(callback, result);
332 }
333 }
334 busy_ = false;
335 callback->Run(result);
336 }
337
338 // When |store| is true, stores the callback to be delivered later; otherwise
339 // delivers any callback previously stored.
340 static void StoreAndDeliverCallbacks(bool store, MockDiskEntry* entry,
341 net::OldCompletionCallback* callback,
342 int result) {
343 static std::vector<CallbackInfo> callback_list;
344 if (store) {
345 CallbackInfo c = {entry, callback, result};
346 callback_list.push_back(c);
347 } else {
348 for (size_t i = 0; i < callback_list.size(); i++) {
349 CallbackInfo& c = callback_list[i];
350 c.entry->CallbackLater(c.callback, c.result);
351 }
352 callback_list.clear();
353 }
354 }
355
356 std::string key_;
357 std::vector<char> data_[kNumCacheEntryDataIndices];
358 int test_mode_;
359 bool doomed_;
360 bool sparse_;
361 bool fail_requests_;
362 bool busy_;
363 bool delayed_;
364 static bool cancel_;
365 static bool ignore_callbacks_;
366 };
367
368 // Statics.
369 bool MockDiskEntry::cancel_ = false;
370 bool MockDiskEntry::ignore_callbacks_ = false;
371
372 class MockDiskCache : public disk_cache::Backend {
373 public:
374 MockDiskCache()
375 : open_count_(0), create_count_(0), fail_requests_(false),
376 soft_failures_(false) {
377 }
378
379 ~MockDiskCache() {
380 ReleaseAll();
381 }
382
383 virtual int32 GetEntryCount() const {
384 return static_cast<int32>(entries_.size());
385 }
386
387 virtual int OpenEntry(const std::string& key, disk_cache::Entry** entry,
388 net::OldCompletionCallback* callback) {
389 DCHECK(callback);
390 if (fail_requests_)
391 return net::ERR_CACHE_OPEN_FAILURE;
392
393 EntryMap::iterator it = entries_.find(key);
394 if (it == entries_.end())
395 return net::ERR_CACHE_OPEN_FAILURE;
396
397 if (it->second->is_doomed()) {
398 it->second->Release();
399 entries_.erase(it);
400 return net::ERR_CACHE_OPEN_FAILURE;
401 }
402
403 open_count_++;
404
405 it->second->AddRef();
406 *entry = it->second;
407
408 if (soft_failures_)
409 it->second->set_fail_requests();
410
411 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
412 return net::OK;
413
414 CallbackLater(callback, net::OK);
415 return net::ERR_IO_PENDING;
416 }
417
418 virtual int CreateEntry(const std::string& key, disk_cache::Entry** entry,
419 net::OldCompletionCallback* callback) {
420 DCHECK(callback);
421 if (fail_requests_)
422 return net::ERR_CACHE_CREATE_FAILURE;
423
424 EntryMap::iterator it = entries_.find(key);
425 if (it != entries_.end()) {
426 DCHECK(it->second->is_doomed());
427 it->second->Release();
428 entries_.erase(it);
429 }
430
431 create_count_++;
432
433 MockDiskEntry* new_entry = new MockDiskEntry(key);
434
435 new_entry->AddRef();
436 entries_[key] = new_entry;
437
438 new_entry->AddRef();
439 *entry = new_entry;
440
441 if (soft_failures_)
442 new_entry->set_fail_requests();
443
444 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
445 return net::OK;
446
447 CallbackLater(callback, net::OK);
448 return net::ERR_IO_PENDING;
449 }
450
451 virtual int DoomEntry(const std::string& key,
452 net::OldCompletionCallback* callback) {
453 DCHECK(callback);
454 EntryMap::iterator it = entries_.find(key);
455 if (it != entries_.end()) {
456 it->second->Release();
457 entries_.erase(it);
458 }
459
460 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
461 return net::OK;
462
463 CallbackLater(callback, net::OK);
464 return net::ERR_IO_PENDING;
465 }
466
467 virtual int DoomAllEntries(net::OldCompletionCallback* callback) {
468 return net::ERR_NOT_IMPLEMENTED;
469 }
470
471 virtual int DoomEntriesBetween(const base::Time initial_time,
472 const base::Time end_time,
473 net::OldCompletionCallback* callback) {
474 return net::ERR_NOT_IMPLEMENTED;
475 }
476
477 virtual int DoomEntriesSince(const base::Time initial_time,
478 net::OldCompletionCallback* callback) {
479 return net::ERR_NOT_IMPLEMENTED;
480 }
481
482 virtual int OpenNextEntry(void** iter, disk_cache::Entry** next_entry,
483 net::OldCompletionCallback* callback) {
484 return net::ERR_NOT_IMPLEMENTED;
485 }
486
487 virtual void EndEnumeration(void** iter) {}
488
489 virtual void GetStats(
490 std::vector<std::pair<std::string, std::string> >* stats) {
491 }
492
493 virtual void OnExternalCacheHit(const std::string& key) {}
494
495 // Returns the number of times a cache entry was successfully opened.
496 int open_count() const { return open_count_; }
497
498 // Returns the number of times a cache entry was successfully created.
499 int create_count() const { return create_count_; }
500
501 // Fail any subsequent CreateEntry and OpenEntry.
502 void set_fail_requests() { fail_requests_ = true; }
503
504 // Make subsequently returned entries fail some of their requests.
505 void set_soft_failures(bool value) { soft_failures_ = value; }
506
507 void ReleaseAll() {
508 EntryMap::iterator it = entries_.begin();
509 for (; it != entries_.end(); ++it)
510 it->second->Release();
511 entries_.clear();
512 }
513
514 private:
515 typedef base::hash_map<std::string, MockDiskEntry*> EntryMap;
516
517 class CallbackRunner : public Task {
518 public:
519 CallbackRunner(net::OldCompletionCallback* callback, int result)
520 : callback_(callback), result_(result) {}
521 virtual void Run() {
522 callback_->Run(result_);
523 }
524
525 private:
526 net::OldCompletionCallback* callback_;
527 int result_;
528 DISALLOW_COPY_AND_ASSIGN(CallbackRunner);
529 };
530
531 void CallbackLater(net::OldCompletionCallback* callback, int result) {
532 MessageLoop::current()->PostTask(FROM_HERE,
533 new CallbackRunner(callback, result));
534 }
535
536 EntryMap entries_;
537 int open_count_;
538 int create_count_;
539 bool fail_requests_;
540 bool soft_failures_;
541 };
542
543 class MockBackendFactory : public net::HttpCache::BackendFactory {
544 public:
545 virtual int CreateBackend(net::NetLog* /* net_log */,
546 disk_cache::Backend** backend,
547 net::OldCompletionCallback* callback) {
548 *backend = new MockDiskCache();
549 return net::OK;
550 }
551 };
552
553 class MockHttpCache {
554 public:
555 MockHttpCache()
556 : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
557 }
558
559 explicit MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory)
560 : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
561 }
562
563 net::HttpCache* http_cache() { return &http_cache_; }
564
565 MockNetworkLayer* network_layer() {
566 return static_cast<MockNetworkLayer*>(http_cache_.network_layer());
567 }
568 MockDiskCache* disk_cache() {
569 TestOldCompletionCallback cb;
570 disk_cache::Backend* backend;
571 int rv = http_cache_.GetBackend(&backend, &cb);
572 rv = cb.GetResult(rv);
573 return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL;
574 }
575
576 // Helper function for reading response info from the disk cache.
577 static bool ReadResponseInfo(disk_cache::Entry* disk_entry,
578 net::HttpResponseInfo* response_info,
579 bool* response_truncated) {
580 int size = disk_entry->GetDataSize(0);
581
582 TestOldCompletionCallback cb;
583 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
584 int rv = disk_entry->ReadData(0, 0, buffer, size, &cb);
585 rv = cb.GetResult(rv);
586 EXPECT_EQ(size, rv);
587
588 return net::HttpCache::ParseResponseInfo(buffer->data(), size,
589 response_info,
590 response_truncated);
591 }
592
593 // Helper function for writing response info into the disk cache.
594 static bool WriteResponseInfo(disk_cache::Entry* disk_entry,
595 const net::HttpResponseInfo* response_info,
596 bool skip_transient_headers,
597 bool response_truncated) {
598 Pickle pickle;
599 response_info->Persist(
600 &pickle, skip_transient_headers, response_truncated);
601
602 TestOldCompletionCallback cb;
603 scoped_refptr<net::WrappedIOBuffer> data(new net::WrappedIOBuffer(
604 reinterpret_cast<const char*>(pickle.data())));
605 int len = static_cast<int>(pickle.size());
606
607 int rv = disk_entry->WriteData(0, 0, data, len, &cb, true);
608 rv = cb.GetResult(rv);
609 return (rv == len);
610 }
611
612 // Helper function to synchronously open a backend entry.
613 bool OpenBackendEntry(const std::string& key, disk_cache::Entry** entry) {
614 TestOldCompletionCallback cb;
615 int rv = disk_cache()->OpenEntry(key, entry, &cb);
616 return (cb.GetResult(rv) == net::OK);
617 }
618
619 // Helper function to synchronously create a backend entry.
620 bool CreateBackendEntry(const std::string& key, disk_cache::Entry** entry,
621 net::NetLog* /* net_log */) {
622 TestOldCompletionCallback cb;
623 int rv = disk_cache()->CreateEntry(key, entry, &cb);
624 return (cb.GetResult(rv) == net::OK);
625 }
626
627 private:
628 net::HttpCache http_cache_;
629 };
630
631 // This version of the disk cache doesn't invoke CreateEntry callbacks.
632 class MockDiskCacheNoCB : public MockDiskCache {
633 virtual int CreateEntry(const std::string& key, disk_cache::Entry** entry,
634 net::OldCompletionCallback* callback) {
635 return net::ERR_IO_PENDING;
636 }
637 };
638
639 class MockBackendNoCbFactory : public net::HttpCache::BackendFactory {
640 public:
641 virtual int CreateBackend(net::NetLog* /* net_log */,
642 disk_cache::Backend** backend,
643 net::OldCompletionCallback* callback) {
644 *backend = new MockDiskCacheNoCB();
645 return net::OK;
646 }
647 };
648
649 // This backend factory allows us to control the backend instantiation.
650 class MockBlockingBackendFactory : public net::HttpCache::BackendFactory {
651 public:
652 MockBlockingBackendFactory()
653 : backend_(NULL), callback_(NULL), block_(true), fail_(false) {}
654
655 virtual int CreateBackend(net::NetLog* /* net_log */,
656 disk_cache::Backend** backend,
657 net::OldCompletionCallback* callback) {
658 if (!block_) {
659 if (!fail_)
660 *backend = new MockDiskCache();
661 return Result();
662 }
663
664 backend_ = backend;
665 callback_ = callback;
666 return net::ERR_IO_PENDING;
667 }
668
669 // Completes the backend creation. Any blocked call will be notified via the
670 // provided callback.
671 void FinishCreation() {
672 block_ = false;
673 if (callback_) {
674 if (!fail_)
675 *backend_ = new MockDiskCache();
676 net::OldCompletionCallback* cb = callback_;
677 callback_ = NULL;
678 cb->Run(Result()); // This object can be deleted here.
679 }
680 }
681
682 disk_cache::Backend** backend() { return backend_; }
683 void set_fail(bool fail) { fail_ = fail; }
684
685 net::OldCompletionCallback* callback() { return callback_; }
686
687 private:
688 int Result() { return fail_ ? net::ERR_FAILED : net::OK; }
689
690 disk_cache::Backend** backend_;
691 net::OldCompletionCallback* callback_;
692 bool block_;
693 bool fail_;
694 };
695
696 class DeleteCacheOldCompletionCallback : public TestOldCompletionCallback { 35 class DeleteCacheOldCompletionCallback : public TestOldCompletionCallback {
697 public: 36 public:
698 explicit DeleteCacheOldCompletionCallback(MockHttpCache* cache) 37 explicit DeleteCacheOldCompletionCallback(MockHttpCache* cache)
699 : cache_(cache) {} 38 : cache_(cache) {}
700 39
701 virtual void RunWithParams(const Tuple1<int>& params) { 40 virtual void RunWithParams(const Tuple1<int>& params) {
702 delete cache_; 41 delete cache_;
703 TestOldCompletionCallback::RunWithParams(params); 42 TestOldCompletionCallback::RunWithParams(params);
704 } 43 }
705 44
(...skipping 328 matching lines...)
1034 373
1035 int result; 374 int result;
1036 TestOldCompletionCallback callback; 375 TestOldCompletionCallback callback;
1037 scoped_ptr<net::HttpTransaction> trans; 376 scoped_ptr<net::HttpTransaction> trans;
1038 }; 377 };
1039 378
1040 } // namespace 379 } // namespace
1041 380
1042 381
1043 //----------------------------------------------------------------------------- 382 //-----------------------------------------------------------------------------
1044 // HttpCache tests 383 // Tests.
1045 384
1046 TEST(HttpCache, CreateThenDestroy) { 385 TEST(HttpCache, CreateThenDestroy) {
1047 MockHttpCache cache; 386 MockHttpCache cache;
1048 387
1049 scoped_ptr<net::HttpTransaction> trans; 388 scoped_ptr<net::HttpTransaction> trans;
1050 int rv = cache.http_cache()->CreateTransaction(&trans); 389 int rv = cache.http_cache()->CreateTransaction(&trans);
1051 EXPECT_EQ(net::OK, rv); 390 EXPECT_EQ(net::OK, rv);
1052 ASSERT_TRUE(trans.get()); 391 ASSERT_TRUE(trans.get());
1053 } 392 }
1054 393
(...skipping 3279 matching lines...)
4334 rv = c->trans->Read(buf, buf->size(), &c->callback); 3673 rv = c->trans->Read(buf, buf->size(), &c->callback);
4335 if (rv == net::ERR_IO_PENDING) 3674 if (rv == net::ERR_IO_PENDING)
4336 rv = c->callback.WaitForResult(); 3675 rv = c->callback.WaitForResult();
4337 EXPECT_EQ(buf->size(), rv); 3676 EXPECT_EQ(buf->size(), rv);
4338 3677
4339 // We want to cancel the request when the transaction is busy. 3678 // We want to cancel the request when the transaction is busy.
4340 rv = c->trans->Read(buf, buf->size(), &c->callback); 3679 rv = c->trans->Read(buf, buf->size(), &c->callback);
4341 EXPECT_EQ(net::ERR_IO_PENDING, rv); 3680 EXPECT_EQ(net::ERR_IO_PENDING, rv);
4342 EXPECT_FALSE(c->callback.have_result()); 3681 EXPECT_FALSE(c->callback.have_result());
4343 3682
4344 g_test_mode = TEST_MODE_SYNC_ALL; 3683 MockHttpCache::SetTestMode(TEST_MODE_SYNC_ALL);
4345 3684
4346 // Destroy the transaction. 3685 // Destroy the transaction.
4347 c->trans.reset(); 3686 c->trans.reset();
4348 g_test_mode = 0; 3687 MockHttpCache::SetTestMode(0);
4349 3688
4350 // Make sure that we don't invoke the callback. We may have an issue if the 3689 // Make sure that we don't invoke the callback. We may have an issue if the
4351 // UrlRequestJob is killed directly (without cancelling the UrlRequest) so we 3690 // UrlRequestJob is killed directly (without cancelling the UrlRequest) so we
4352 // could end up with the transaction being deleted twice if we send any 3691 // could end up with the transaction being deleted twice if we send any
4353 // notification from the transaction destructor (see http://crbug.com/31723). 3692 // notification from the transaction destructor (see http://crbug.com/31723).
4354 EXPECT_FALSE(c->callback.have_result()); 3693 EXPECT_FALSE(c->callback.have_result());
4355 3694
4356 // Verify that the entry is marked as incomplete. 3695 // Verify that the entry is marked as incomplete.
4357 disk_cache::Entry* entry; 3696 disk_cache::Entry* entry;
4358 ASSERT_TRUE(cache.OpenBackendEntry(kSimpleGET_Transaction.url, &entry)); 3697 ASSERT_TRUE(cache.OpenBackendEntry(kSimpleGET_Transaction.url, &entry));
(...skipping 817 matching lines...)
5176 4515
5177 // Verify that the entry is marked as incomplete. 4516 // Verify that the entry is marked as incomplete.
5178 disk_cache::Entry* entry; 4517 disk_cache::Entry* entry;
5179 ASSERT_TRUE(cache.OpenBackendEntry(kSimpleGET_Transaction.url, &entry)); 4518 ASSERT_TRUE(cache.OpenBackendEntry(kSimpleGET_Transaction.url, &entry));
5180 net::HttpResponseInfo response; 4519 net::HttpResponseInfo response;
5181 bool truncated = false; 4520 bool truncated = false;
5182 EXPECT_TRUE(MockHttpCache::ReadResponseInfo(entry, &response, &truncated)); 4521 EXPECT_TRUE(MockHttpCache::ReadResponseInfo(entry, &response, &truncated));
5183 EXPECT_TRUE(truncated); 4522 EXPECT_TRUE(truncated);
5184 entry->Close(); 4523 entry->Close();
5185 } 4524 }
5186
5187 //-----------------------------------------------------------------------------
5188 // DiskCacheBasedSSLHostInfo tests
5189
5190 class DeleteSSLHostInfoOldCompletionCallback : public TestOldCompletionCallback {
5191 public:
5192 explicit DeleteSSLHostInfoOldCompletionCallback(net::SSLHostInfo* ssl_host_info)
5193 : ssl_host_info_(ssl_host_info) {}
5194
5195 virtual void RunWithParams(const Tuple1<int>& params) {
5196 delete ssl_host_info_;
5197 TestOldCompletionCallback::RunWithParams(params);
5198 }
5199
5200 private:
5201 net::SSLHostInfo* ssl_host_info_;
5202 };
5203
5204 // Tests that we can delete a DiskCacheBasedSSLHostInfo object in a
5205 // completion callback for DiskCacheBasedSSLHostInfo::WaitForDataReady.
5206 TEST(DiskCacheBasedSSLHostInfo, DeleteInCallback) {
5207 net::CertVerifier cert_verifier;
5208 // Use the blocking mock backend factory to force asynchronous completion
5209 // of ssl_host_info->WaitForDataReady(), so that the callback will run.
5210 MockBlockingBackendFactory* factory = new MockBlockingBackendFactory();
5211 MockHttpCache cache(factory);
5212 net::SSLConfig ssl_config;
5213 net::SSLHostInfo* ssl_host_info =
5214 new net::DiskCacheBasedSSLHostInfo("https://www.verisign.com", ssl_config,
5215 &cert_verifier, cache.http_cache());
5216 ssl_host_info->Start();
5217 DeleteSSLHostInfoOldCompletionCallback callback(ssl_host_info);
5218 int rv = ssl_host_info->WaitForDataReady(&callback);
5219 EXPECT_EQ(net::ERR_IO_PENDING, rv);
5220 // Now complete the backend creation and let the callback run.
5221 factory->FinishCreation();
5222 EXPECT_EQ(net::OK, callback.GetResult(rv));
5223 }