OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/http/mock_http_cache.h" | 5 #include "net/http/mock_http_cache.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
9 #include "net/base/net_errors.h" | 9 #include "net/base/net_errors.h" |
10 #include "testing/gtest/include/gtest/gtest.h" | 10 #include "testing/gtest/include/gtest/gtest.h" |
11 | 11 |
12 namespace { | 12 namespace { |
13 | 13 |
| 14 class OldCallbackRunner : public Task { |
| 15 public: |
| 16 OldCallbackRunner(net::OldCompletionCallback* callback, int result) |
| 17 : callback_(callback), result_(result) {} |
| 18 virtual void Run() { |
| 19 callback_->Run(result_); |
| 20 } |
| 21 |
| 22 private: |
| 23 net::OldCompletionCallback* callback_; |
| 24 int result_; |
| 25 |
| 26 DISALLOW_COPY_AND_ASSIGN(OldCallbackRunner); |
| 27 }; |
| 28 |
| 29 void CompletionCallbackRunner( |
| 30 const net::CompletionCallback& callback, int result) { |
| 31 callback.Run(result); |
| 32 } |
| 33 |
14 int GetTestModeForEntry(const std::string& key) { | 34 int GetTestModeForEntry(const std::string& key) { |
15 // 'key' is prefixed with an identifier if it corresponds to a cached POST. | 35 // 'key' is prefixed with an identifier if it corresponds to a cached POST. |
16 // Skip past that to locate the actual URL. | 36 // Skip past that to locate the actual URL. |
17 // | 37 // |
18 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an | 38 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an |
19 // URL corresponding to a registered MockTransaction. It would be good to | 39 // URL corresponding to a registered MockTransaction. It would be good to |
20 // have another way to access the test_mode. | 40 // have another way to access the test_mode. |
21 GURL url; | 41 GURL url; |
22 if (isdigit(key[0])) { | 42 if (isdigit(key[0])) { |
23 size_t slash = key.find('/'); | 43 size_t slash = key.find('/'); |
(...skipping 10 matching lines...) |
34 // We can override the test mode for a given operation by setting this global | 54 // We can override the test mode for a given operation by setting this global |
35 // variable. | 55 // variable. |
36 int g_test_mode = 0; | 56 int g_test_mode = 0; |
37 | 57 |
38 } // namespace | 58 } // namespace |
39 | 59 |
40 //----------------------------------------------------------------------------- | 60 //----------------------------------------------------------------------------- |
41 | 61 |
42 struct MockDiskEntry::CallbackInfo { | 62 struct MockDiskEntry::CallbackInfo { |
43 scoped_refptr<MockDiskEntry> entry; | 63 scoped_refptr<MockDiskEntry> entry; |
44 net::OldCompletionCallback* callback; | 64 net::OldCompletionCallback* old_callback; |
| 65 net::CompletionCallback callback; |
45 int result; | 66 int result; |
46 }; | 67 }; |
47 | 68 |
48 MockDiskEntry::MockDiskEntry() | 69 MockDiskEntry::MockDiskEntry() |
49 : test_mode_(0), doomed_(false), sparse_(false), | 70 : test_mode_(0), doomed_(false), sparse_(false), |
50 fail_requests_(false), busy_(false), delayed_(false) { | 71 fail_requests_(false), busy_(false), delayed_(false) { |
51 } | 72 } |
52 | 73 |
53 MockDiskEntry::MockDiskEntry(const std::string& key) | 74 MockDiskEntry::MockDiskEntry(const std::string& key) |
54 : key_(key), doomed_(false), sparse_(false), | 75 : key_(key), doomed_(false), sparse_(false), |
(...skipping 44 matching lines...) |
99 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset); | 120 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset); |
100 memcpy(buf->data(), &data_[index][offset], num); | 121 memcpy(buf->data(), &data_[index][offset], num); |
101 | 122 |
102 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) | 123 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) |
103 return num; | 124 return num; |
104 | 125 |
105 CallbackLater(callback, num); | 126 CallbackLater(callback, num); |
106 return net::ERR_IO_PENDING; | 127 return net::ERR_IO_PENDING; |
107 } | 128 } |
108 | 129 |
| 130 int MockDiskEntry::ReadData( |
| 131 int index, int offset, net::IOBuffer* buf, int buf_len, |
| 132 const net::CompletionCallback& callback) { |
| 133 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices); |
| 134 DCHECK(!callback.is_null()); |
| 135 |
| 136 if (fail_requests_) |
| 137 return net::ERR_CACHE_READ_FAILURE; |
| 138 |
| 139 if (offset < 0 || offset > static_cast<int>(data_[index].size())) |
| 140 return net::ERR_FAILED; |
| 141 if (static_cast<size_t>(offset) == data_[index].size()) |
| 142 return 0; |
| 143 |
| 144 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset); |
| 145 memcpy(buf->data(), &data_[index][offset], num); |
| 146 |
| 147 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) |
| 148 return num; |
| 149 |
| 150 CallbackLater(callback, num); |
| 151 return net::ERR_IO_PENDING; |
| 152 } |
| 153 |
109 int MockDiskEntry::WriteData(int index, int offset, net::IOBuffer* buf, | 154 int MockDiskEntry::WriteData(int index, int offset, net::IOBuffer* buf, |
110 int buf_len, net::OldCompletionCallback* callback, | 155 int buf_len, net::OldCompletionCallback* callback, |
111 bool truncate) { | 156 bool truncate) { |
112 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices); | 157 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices); |
113 DCHECK(callback); | 158 DCHECK(callback); |
114 DCHECK(truncate); | 159 DCHECK(truncate); |
115 | 160 |
116 if (fail_requests_) { | 161 if (fail_requests_) { |
117 CallbackLater(callback, net::ERR_CACHE_READ_FAILURE); | 162 CallbackLater(callback, net::ERR_CACHE_READ_FAILURE); |
118 return net::ERR_IO_PENDING; | 163 return net::ERR_IO_PENDING; |
119 } | 164 } |
120 | 165 |
121 if (offset < 0 || offset > static_cast<int>(data_[index].size())) | 166 if (offset < 0 || offset > static_cast<int>(data_[index].size())) |
122 return net::ERR_FAILED; | 167 return net::ERR_FAILED; |
123 | 168 |
124 data_[index].resize(offset + buf_len); | 169 data_[index].resize(offset + buf_len); |
125 if (buf_len) | 170 if (buf_len) |
126 memcpy(&data_[index][offset], buf->data(), buf_len); | 171 memcpy(&data_[index][offset], buf->data(), buf_len); |
127 | 172 |
128 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) | 173 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) |
129 return buf_len; | 174 return buf_len; |
130 | 175 |
131 CallbackLater(callback, buf_len); | 176 CallbackLater(callback, buf_len); |
132 return net::ERR_IO_PENDING; | 177 return net::ERR_IO_PENDING; |
133 } | 178 } |
134 | 179 |
| 180 int MockDiskEntry::WriteData( |
| 181 int index, int offset, net::IOBuffer* buf, int buf_len, |
| 182 const net::CompletionCallback& callback, bool truncate) { |
| 183 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices); |
| 184 DCHECK(truncate); |
| 185 DCHECK(!callback.is_null()); |
| 186 |
| 187 if (fail_requests_) { |
| 188 CallbackLater(callback, net::ERR_CACHE_READ_FAILURE); |
| 189 return net::ERR_IO_PENDING; |
| 190 } |
| 191 |
| 192 if (offset < 0 || offset > static_cast<int>(data_[index].size())) |
| 193 return net::ERR_FAILED; |
| 194 |
| 195 data_[index].resize(offset + buf_len); |
| 196 if (buf_len) |
| 197 memcpy(&data_[index][offset], buf->data(), buf_len); |
| 198 |
| 199 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) |
| 200 return buf_len; |
| 201 |
| 202 CallbackLater(callback, buf_len); |
| 203 return net::ERR_IO_PENDING; |
| 204 } |
| 205 |
135 int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 206 int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
136 net::OldCompletionCallback* callback) { | 207 net::OldCompletionCallback* callback) { |
137 DCHECK(callback); | 208 DCHECK(callback); |
138 if (!sparse_ || busy_) | 209 if (!sparse_ || busy_) |
139 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; | 210 return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
140 if (offset < 0) | 211 if (offset < 0) |
141 return net::ERR_FAILED; | 212 return net::ERR_FAILED; |
142 | 213 |
143 if (fail_requests_) | 214 if (fail_requests_) |
144 return net::ERR_CACHE_READ_FAILURE; | 215 return net::ERR_CACHE_READ_FAILURE; |
(...skipping 112 matching lines...) |
257 | 328 |
258 // If |value| is true, don't deliver any completion callbacks until called | 329 // If |value| is true, don't deliver any completion callbacks until called |
259 // again with |value| set to false. Caution: remember to enable callbacks | 330 // again with |value| set to false. Caution: remember to enable callbacks |
260 // again or all subsequent tests will fail. | 331 // again or all subsequent tests will fail. |
261 // Static. | 332 // Static. |
262 void MockDiskEntry::IgnoreCallbacks(bool value) { | 333 void MockDiskEntry::IgnoreCallbacks(bool value) { |
263 if (ignore_callbacks_ == value) | 334 if (ignore_callbacks_ == value) |
264 return; | 335 return; |
265 ignore_callbacks_ = value; | 336 ignore_callbacks_ = value; |
266 if (!value) | 337 if (!value) |
267 StoreAndDeliverCallbacks(false, NULL, NULL, 0); | 338 StoreAndDeliverCallbacks(false, NULL, NULL, net::CompletionCallback(), 0); |
268 } | 339 } |
269 | 340 |
270 MockDiskEntry::~MockDiskEntry() { | 341 MockDiskEntry::~MockDiskEntry() { |
271 } | 342 } |
272 | 343 |
273 // Unlike the callbacks for MockHttpTransaction, we want this one to run even | 344 // Unlike the callbacks for MockHttpTransaction, we want this one to run even |
274 // if the consumer called Close on the MockDiskEntry. We achieve that by | 345 // if the consumer called Close on the MockDiskEntry. We achieve that by |
275 // leveraging the fact that this class is reference counted. | 346 // leveraging the fact that this class is reference counted. |
276 void MockDiskEntry::CallbackLater(net::OldCompletionCallback* callback, | 347 void MockDiskEntry::CallbackLater(net::OldCompletionCallback* callback, |
277 int result) { | 348 int result) { |
278 if (ignore_callbacks_) | 349 if (ignore_callbacks_) { |
279 return StoreAndDeliverCallbacks(true, this, callback, result); | 350 StoreAndDeliverCallbacks(true, this, callback, |
| 351 net::CompletionCallback(), result); |
| 352 return; |
| 353 } |
| 354 |
| 355 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
| 356 &MockDiskEntry::RunOldCallback, this, callback, result)); |
| 357 } |
| 358 |
| 359 void MockDiskEntry::CallbackLater(const net::CompletionCallback& callback, |
| 360 int result) { |
| 361 if (ignore_callbacks_) { |
| 362 StoreAndDeliverCallbacks(true, this, NULL, callback, result); |
| 363 return; |
| 364 } |
| 365 |
280 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | 366 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
281 &MockDiskEntry::RunCallback, this, callback, result)); | 367 &MockDiskEntry::RunCallback, this, callback, result)); |
282 } | 368 } |
283 | 369 |
284 void MockDiskEntry::RunCallback(net::OldCompletionCallback* callback, int result) { | 370 void MockDiskEntry::RunOldCallback( |
| 371     net::OldCompletionCallback* callback, int result) { |
285 if (busy_) { | 372 if (busy_) { |
286 // This is kind of hacky, but controlling the behavior of just this entry | 373 // This is kind of hacky, but controlling the behavior of just this entry |
287 // from a test is sort of complicated. What we really want to do is | 374 // from a test is sort of complicated. What we really want to do is |
288 // delay the delivery of a sparse IO operation a little more so that the | 375 // delay the delivery of a sparse IO operation a little more so that the |
289 // request start operation (async) will finish without seeing the end of | 376 // request start operation (async) will finish without seeing the end of |
290 // this operation (already posted to the message loop)... and without | 377 // this operation (already posted to the message loop)... and without |
291 // just delaying for n mS (which may cause trouble with slow bots). So | 378 // just delaying for n mS (which may cause trouble with slow bots). So |
292 // we re-post this operation (all async sparse IO operations will take two | 379 // we re-post this operation (all async sparse IO operations will take two |
293 // trips trhough the message loop instead of one). | 380 // trips through the message loop instead of one). |
294 if (!delayed_) { | 381 if (!delayed_) { |
295 delayed_ = true; | 382 delayed_ = true; |
296 return CallbackLater(callback, result); | 383 return CallbackLater(callback, result); |
297 } | 384 } |
298 } | 385 } |
299 busy_ = false; | 386 busy_ = false; |
300 callback->Run(result); | 387 callback->Run(result); |
301 } | 388 } |
302 | 389 |
| 390 void MockDiskEntry::RunCallback( |
| 391 const net::CompletionCallback& callback, int result) { |
| 392 if (busy_) { |
| 393 // This is kind of hacky, but controlling the behavior of just this entry |
| 394 // from a test is sort of complicated. What we really want to do is |
| 395 // delay the delivery of a sparse IO operation a little more so that the |
| 396 // request start operation (async) will finish without seeing the end of |
| 397 // this operation (already posted to the message loop)... and without |
| 398 // just delaying for n mS (which may cause trouble with slow bots). So |
| 399 // we re-post this operation (all async sparse IO operations will take two |
| 400 // trips through the message loop instead of one). |
| 401 if (!delayed_) { |
| 402 delayed_ = true; |
| 403 return CallbackLater(callback, result); |
| 404 } |
| 405 } |
| 406 busy_ = false; |
| 407 callback.Run(result); |
| 408 } |
| 409 |
303 // When |store| is true, stores the callback to be delivered later; otherwise | 410 // When |store| is true, stores the callback to be delivered later; otherwise |
304 // delivers any callback previously stored. | 411 // delivers any callback previously stored. |
305 // Static. | 412 // Static. |
306 void MockDiskEntry::StoreAndDeliverCallbacks(bool store, MockDiskEntry* entry, | 413 void MockDiskEntry::StoreAndDeliverCallbacks( |
307                                              net::OldCompletionCallback* callback, | 414     bool store, MockDiskEntry* entry, |
308 int result) { | 415 net::OldCompletionCallback* old_callback, |
| 416 const net::CompletionCallback& callback, int result) { |
309 static std::vector<CallbackInfo> callback_list; | 417 static std::vector<CallbackInfo> callback_list; |
310 if (store) { | 418 if (store) { |
311 CallbackInfo c = {entry, callback, result}; | 419 CallbackInfo c = {entry, old_callback, callback, result}; |
312 callback_list.push_back(c); | 420 callback_list.push_back(c); |
313 } else { | 421 } else { |
314 for (size_t i = 0; i < callback_list.size(); i++) { | 422 for (size_t i = 0; i < callback_list.size(); ++i) { |
315 CallbackInfo& c = callback_list[i]; | 423 CallbackInfo& c = callback_list[i]; |
316 c.entry->CallbackLater(c.callback, c.result); | 424 if (c.old_callback) |
| 425 c.entry->CallbackLater(c.old_callback, c.result); |
| 426 else |
| 427 c.entry->CallbackLater(c.callback, c.result); |
317 } | 428 } |
318 callback_list.clear(); | 429 callback_list.clear(); |
319 } | 430 } |
320 } | 431 } |
321 | 432 |
322 // Statics. | 433 // Statics. |
323 bool MockDiskEntry::cancel_ = false; | 434 bool MockDiskEntry::cancel_ = false; |
324 bool MockDiskEntry::ignore_callbacks_ = false; | 435 bool MockDiskEntry::ignore_callbacks_ = false; |
325 | 436 |
326 //----------------------------------------------------------------------------- | 437 //----------------------------------------------------------------------------- |
327 | 438 |
328 class MockDiskCache::CallbackRunner : public Task { | |
329 public: | |
330 CallbackRunner(net::OldCompletionCallback* callback, int result) | |
331 : callback_(callback), result_(result) {} | |
332 virtual void Run() { | |
333 callback_->Run(result_); | |
334 } | |
335 | |
336 private: | |
337 net::OldCompletionCallback* callback_; | |
338 int result_; | |
339 DISALLOW_COPY_AND_ASSIGN(CallbackRunner); | |
340 }; | |
341 | |
342 MockDiskCache::MockDiskCache() | 439 MockDiskCache::MockDiskCache() |
343 : open_count_(0), create_count_(0), fail_requests_(false), | 440 : open_count_(0), create_count_(0), fail_requests_(false), |
344 soft_failures_(false), double_create_check_(true) { | 441 soft_failures_(false), double_create_check_(true) { |
345 } | 442 } |
346 | 443 |
347 MockDiskCache::~MockDiskCache() { | 444 MockDiskCache::~MockDiskCache() { |
348 ReleaseAll(); | 445 ReleaseAll(); |
349 } | 446 } |
350 | 447 |
351 int32 MockDiskCache::GetEntryCount() const { | 448 int32 MockDiskCache::GetEntryCount() const { |
(...skipping 24 matching lines...) |
376 if (soft_failures_) | 473 if (soft_failures_) |
377 it->second->set_fail_requests(); | 474 it->second->set_fail_requests(); |
378 | 475 |
379 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) | 476 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) |
380 return net::OK; | 477 return net::OK; |
381 | 478 |
382 CallbackLater(callback, net::OK); | 479 CallbackLater(callback, net::OK); |
383 return net::ERR_IO_PENDING; | 480 return net::ERR_IO_PENDING; |
384 } | 481 } |
385 | 482 |
| 483 int MockDiskCache::OpenEntry(const std::string& key, disk_cache::Entry** entry, |
| 484 const net::CompletionCallback& callback) { |
| 485 DCHECK(!callback.is_null()); |
| 486 |
| 487 if (fail_requests_) |
| 488 return net::ERR_CACHE_OPEN_FAILURE; |
| 489 |
| 490 EntryMap::iterator it = entries_.find(key); |
| 491 if (it == entries_.end()) |
| 492 return net::ERR_CACHE_OPEN_FAILURE; |
| 493 |
| 494 if (it->second->is_doomed()) { |
| 495 it->second->Release(); |
| 496 entries_.erase(it); |
| 497 return net::ERR_CACHE_OPEN_FAILURE; |
| 498 } |
| 499 |
| 500 open_count_++; |
| 501 |
| 502 it->second->AddRef(); |
| 503 *entry = it->second; |
| 504 |
| 505 if (soft_failures_) |
| 506 it->second->set_fail_requests(); |
| 507 |
| 508 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) |
| 509 return net::OK; |
| 510 |
| 511 CallbackLater(callback, net::OK); |
| 512 return net::ERR_IO_PENDING; |
| 513 } |
| 514 |
386 int MockDiskCache::CreateEntry(const std::string& key, | 515 int MockDiskCache::CreateEntry(const std::string& key, |
387 disk_cache::Entry** entry, | 516 disk_cache::Entry** entry, |
388 net::OldCompletionCallback* callback) { | 517 net::OldCompletionCallback* callback) { |
389 DCHECK(callback); | 518 DCHECK(callback); |
390 if (fail_requests_) | 519 if (fail_requests_) |
391 return net::ERR_CACHE_CREATE_FAILURE; | 520 return net::ERR_CACHE_CREATE_FAILURE; |
392 | 521 |
393 EntryMap::iterator it = entries_.find(key); | 522 EntryMap::iterator it = entries_.find(key); |
394 if (it != entries_.end()) { | 523 if (it != entries_.end()) { |
395 if (!it->second->is_doomed()) { | 524 if (!it->second->is_doomed()) { |
(...skipping 19 matching lines...) |
415 if (soft_failures_) | 544 if (soft_failures_) |
416 new_entry->set_fail_requests(); | 545 new_entry->set_fail_requests(); |
417 | 546 |
418 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) | 547 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) |
419 return net::OK; | 548 return net::OK; |
420 | 549 |
421 CallbackLater(callback, net::OK); | 550 CallbackLater(callback, net::OK); |
422 return net::ERR_IO_PENDING; | 551 return net::ERR_IO_PENDING; |
423 } | 552 } |
424 | 553 |
| 554 int MockDiskCache::CreateEntry(const std::string& key, |
| 555 disk_cache::Entry** entry, |
| 556 const net::CompletionCallback& callback) { |
| 557 DCHECK(!callback.is_null()); |
| 558 |
| 559 if (fail_requests_) |
| 560 return net::ERR_CACHE_CREATE_FAILURE; |
| 561 |
| 562 EntryMap::iterator it = entries_.find(key); |
| 563 if (it != entries_.end()) { |
| 564 if (!it->second->is_doomed()) { |
| 565 if (double_create_check_) |
| 566 NOTREACHED(); |
| 567 else |
| 568 return net::ERR_CACHE_CREATE_FAILURE; |
| 569 } |
| 570 it->second->Release(); |
| 571 entries_.erase(it); |
| 572 } |
| 573 |
| 574 create_count_++; |
| 575 |
| 576 MockDiskEntry* new_entry = new MockDiskEntry(key); |
| 577 |
| 578 new_entry->AddRef(); |
| 579 entries_[key] = new_entry; |
| 580 |
| 581 new_entry->AddRef(); |
| 582 *entry = new_entry; |
| 583 |
| 584 if (soft_failures_) |
| 585 new_entry->set_fail_requests(); |
| 586 |
| 587 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) |
| 588 return net::OK; |
| 589 |
| 590 CallbackLater(callback, net::OK); |
| 591 return net::ERR_IO_PENDING; |
| 592 } |
| 593 |
425 int MockDiskCache::DoomEntry(const std::string& key, | 594 int MockDiskCache::DoomEntry(const std::string& key, |
426 net::OldCompletionCallback* callback) { | 595 net::OldCompletionCallback* callback) { |
427 DCHECK(callback); | 596 DCHECK(callback); |
428 EntryMap::iterator it = entries_.find(key); | 597 EntryMap::iterator it = entries_.find(key); |
429 if (it != entries_.end()) { | 598 if (it != entries_.end()) { |
430 it->second->Release(); | 599 it->second->Release(); |
431 entries_.erase(it); | 600 entries_.erase(it); |
432 } | 601 } |
433 | 602 |
434 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) | 603 if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) |
(...skipping 36 matching lines...) |
471 void MockDiskCache::ReleaseAll() { | 640 void MockDiskCache::ReleaseAll() { |
472 EntryMap::iterator it = entries_.begin(); | 641 EntryMap::iterator it = entries_.begin(); |
473 for (; it != entries_.end(); ++it) | 642 for (; it != entries_.end(); ++it) |
474 it->second->Release(); | 643 it->second->Release(); |
475 entries_.clear(); | 644 entries_.clear(); |
476 } | 645 } |
477 | 646 |
478 void MockDiskCache::CallbackLater(net::OldCompletionCallback* callback, | 647 void MockDiskCache::CallbackLater(net::OldCompletionCallback* callback, |
479 int result) { | 648 int result) { |
480 MessageLoop::current()->PostTask(FROM_HERE, | 649 MessageLoop::current()->PostTask(FROM_HERE, |
481 new CallbackRunner(callback, result)); | 650 new OldCallbackRunner(callback, result)); |
| 651 } |
| 652 |
| 653 void MockDiskCache::CallbackLater(const net::CompletionCallback& callback, |
| 654 int result) { |
| 655 MessageLoop::current()->PostTask( |
| 656 FROM_HERE, base::Bind(&CompletionCallbackRunner, callback, result)); |
482 } | 657 } |
483 | 658 |
484 //----------------------------------------------------------------------------- | 659 //----------------------------------------------------------------------------- |
485 | 660 |
486 int MockBackendFactory::CreateBackend(net::NetLog* net_log, | 661 int MockBackendFactory::CreateBackend(net::NetLog* net_log, |
487 disk_cache::Backend** backend, | 662 disk_cache::Backend** backend, |
488 net::OldCompletionCallback* callback) { | 663 net::OldCompletionCallback* callback) { |
489 *backend = new MockDiskCache(); | 664 *backend = new MockDiskCache(); |
490 return net::OK; | 665 return net::OK; |
491 } | 666 } |
492 | 667 |
493 //----------------------------------------------------------------------------- | 668 //----------------------------------------------------------------------------- |
494 | 669 |
495 MockHttpCache::MockHttpCache() | 670 MockHttpCache::MockHttpCache() |
496 : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) { | 671 : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) { |
497 } | 672 } |
498 | 673 |
499 MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory) | 674 MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory) |
500 : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) { | 675 : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) { |
501 } | 676 } |
502 | 677 |
503 MockDiskCache* MockHttpCache::disk_cache() { | 678 MockDiskCache* MockHttpCache::disk_cache() { |
504 TestOldCompletionCallback cb; | 679 net::TestCompletionCallback cb; |
505 disk_cache::Backend* backend; | 680 disk_cache::Backend* backend; |
506 int rv = http_cache_.GetBackend(&backend, &cb); | 681 int rv = http_cache_.GetBackend(&backend, cb.callback()); |
507 rv = cb.GetResult(rv); | 682 rv = cb.GetResult(rv); |
508 return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL; | 683 return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL; |
509 } | 684 } |
510 | 685 |
511 bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry, | 686 bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry, |
512 net::HttpResponseInfo* response_info, | 687 net::HttpResponseInfo* response_info, |
513 bool* response_truncated) { | 688 bool* response_truncated) { |
514 int size = disk_entry->GetDataSize(0); | 689 int size = disk_entry->GetDataSize(0); |
515 | 690 |
516 TestOldCompletionCallback cb; | 691 TestOldCompletionCallback cb; |
(...skipping 92 matching lines...) |
609 void MockBlockingBackendFactory::FinishCreation() { | 784 void MockBlockingBackendFactory::FinishCreation() { |
610 block_ = false; | 785 block_ = false; |
611 if (callback_) { | 786 if (callback_) { |
612 if (!fail_) | 787 if (!fail_) |
613 *backend_ = new MockDiskCache(); | 788 *backend_ = new MockDiskCache(); |
614 net::OldCompletionCallback* cb = callback_; | 789 net::OldCompletionCallback* cb = callback_; |
615 callback_ = NULL; | 790 callback_ = NULL; |
616 cb->Run(Result()); // This object can be deleted here. | 791 cb->Run(Result()); // This object can be deleted here. |
617 } | 792 } |
618 } | 793 } |
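
For context, a minimal sketch (not part of this change) of how a test might exercise the new CompletionCallback overloads added above. It assumes the usual Chromium net test setup (mock_http_cache.h, net::TestCompletionCallback, gtest, and a MessageLoop on the current thread), and a key matching a registered mock transaction such as kSimpleGET_Transaction's URL:

#include "base/message_loop.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/http/mock_http_cache.h"
#include "testing/gtest/include/gtest/gtest.h"

// Illustrative sketch only; exercises the CompletionCallback-based overloads.
TEST(MockDiskCacheSketch, CreateWriteRead) {
  MessageLoop message_loop;  // CallbackLater() posts to the current loop.
  MockDiskCache cache;
  disk_cache::Entry* entry = NULL;
  net::TestCompletionCallback cb;

  // Each call may complete synchronously or return net::ERR_IO_PENDING;
  // TestCompletionCallback::GetResult() handles both cases.
  int rv = cache.CreateEntry("http://www.google.com/", &entry, cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  scoped_refptr<net::StringIOBuffer> write_buf(new net::StringIOBuffer("hello"));
  rv = entry->WriteData(1, 0, write_buf.get(), write_buf->size(),
                        cb.callback(), true);
  EXPECT_EQ(write_buf->size(), cb.GetResult(rv));

  scoped_refptr<net::IOBuffer> read_buf(new net::IOBuffer(5));
  rv = entry->ReadData(1, 0, read_buf.get(), 5, cb.callback());
  EXPECT_EQ(5, cb.GetResult(rv));

  entry->Close();
}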